Unverified Commit 45ccae89 authored by Harkrishn Patro, committed by GitHub

Add new cluster shards command (#10293)



Implement a new cluster shards command, which provides a flexible and extensible API for topology discovery.
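For illustration, the reply is an array with one entry per shard. Each shard is a map with two keys, "slots" and "nodes", as built by addShardReplyForClusterShards() below; this sketch uses hypothetical values:

    CLUSTER SHARDS
    1) slots -> 0, 5460          (flat array of start/end slot pairs)
       nodes -> one map per node: id, port and/or tls-port, ip, endpoint,
                hostname (when set), role, replication-offset, health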
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
parent 416c9ac2
......@@ -74,6 +74,7 @@ void clusterCloseAllSlots(void);
void clusterSetNodeAsMaster(clusterNode *n);
void clusterDelNode(clusterNode *delnode);
sds representClusterNodeFlags(sds ci, uint16_t flags);
sds representSlotInfo(sds ci, list *slot_info_pairs);
uint64_t clusterGetMaxEpoch(void);
int clusterBumpConfigEpochWithoutConsensus(void);
void moduleCallClusterReceivers(const char *sender_id, uint64_t module_id, uint8_t type, const unsigned char *payload, uint32_t len);
......@@ -942,7 +943,7 @@ clusterNode *createClusterNode(char *nodename, int flags) {
node->configEpoch = 0;
node->flags = flags;
memset(node->slots,0,sizeof(node->slots));
node->slots_info = NULL;
node->slot_info_pairs = NULL;
node->numslots = 0;
node->numslaves = 0;
node->slaves = NULL;
......@@ -4561,6 +4562,28 @@ sds representClusterNodeFlags(sds ci, uint16_t flags) {
return ci;
}
/* Concatenate the slot ownership information to the given SDS string 'ci'.
 * If the slot ownership forms a contiguous block, it is represented as a
 * start-end pair; otherwise each slot is added separately. */
sds representSlotInfo(sds ci, list *slot_info_pairs) {
listIter li;
listNode *ln;
listRewind(slot_info_pairs, &li);
while((ln = listNext(&li))) {
unsigned long start = (unsigned long)ln->value;
ln = listNext(&li);
/* The list should have an even number of elements */
serverAssert(ln != NULL);
unsigned long end = (unsigned long)ln->value;
if (start == end) {
ci = sdscatfmt(ci, " %i", start);
} else {
ci = sdscatfmt(ci, " %i-%i", start, end);
}
}
return ci;
}
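/* A minimal usage sketch of representSlotInfo() (hypothetical slot values;
 * listCreate()/listAddNodeTail()/listRelease() and sdsempty()/sdsfree() are
 * the adlist and sds helpers from this source tree):
 *
 *     list *pairs = listCreate();
 *     listAddNodeTail(pairs, (void *)(unsigned long)0);
 *     listAddNodeTail(pairs, (void *)(unsigned long)5460);
 *     listAddNodeTail(pairs, (void *)(unsigned long)7000);
 *     listAddNodeTail(pairs, (void *)(unsigned long)7000);
 *     sds ci = representSlotInfo(sdsempty(), pairs);  // ci: " 0-5460 7000"
 *     sdsfree(ci);
 *     listRelease(pairs);
 */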
/* Generate a csv-alike representation of the specified cluster node.
* See clusterGenNodesDescription() top comment for more information.
*
......@@ -4609,8 +4632,8 @@ sds clusterGenNodeDescription(clusterNode *node, int use_pport) {
/* Slots served by this instance. If we already have the slots info,
 * append it directly; otherwise, generate it only if the node serves slots. */
if (node->slots_info) {
ci = sdscatsds(ci, node->slots_info);
if (node->slot_info_pairs) {
ci = representSlotInfo(ci, node->slot_info_pairs);
} else if (node->numslots > 0) {
start = -1;
for (j = 0; j < CLUSTER_SLOTS; j++) {
......@@ -4670,12 +4693,11 @@ void clusterGenNodesSlotsInfo(int filter) {
* or end of slot. */
if (i == CLUSTER_SLOTS || n != server.cluster->slots[i]) {
if (!(n->flags & filter)) {
if (n->slots_info == NULL) n->slots_info = sdsempty();
if (start == i-1) {
n->slots_info = sdscatfmt(n->slots_info," %i",start);
} else {
n->slots_info = sdscatfmt(n->slots_info," %i-%i",start,i-1);
if (n->slot_info_pairs == NULL) {
n->slot_info_pairs = listCreate();
}
listAddNodeTail(n->slot_info_pairs, (void *)(unsigned long)start);
listAddNodeTail(n->slot_info_pairs, (void *)(unsigned long)(i-1));
}
if (i == CLUSTER_SLOTS) break;
n = server.cluster->slots[i];
......@@ -4718,9 +4740,9 @@ sds clusterGenNodesDescription(int filter, int use_pport) {
ci = sdscatlen(ci,"\n",1);
/* Release slots info. */
if (node->slots_info) {
sdsfree(node->slots_info);
node->slots_info = NULL;
if (node->slot_info_pairs != NULL) {
listRelease(node->slot_info_pairs);
node->slot_info_pairs = NULL;
}
}
dictReleaseIterator(di);
......@@ -4942,6 +4964,146 @@ void addNodeReplyForClusterSlot(client *c, clusterNode *node, int start_slot, in
setDeferredArrayLen(c, nested_replylen, nested_elements);
}
/* Add detailed information of a node to the output buffer of the given client. */
void addNodeDetailsToShardReply(client *c, clusterNode *node) {
int reply_count = 0;
void *node_replylen = addReplyDeferredLen(c);
addReplyBulkCString(c, "id");
addReplyBulkCBuffer(c, node->name, CLUSTER_NAMELEN);
reply_count++;
int port = server.cluster_announce_port ? server.cluster_announce_port : server.port;
if (port) {
addReplyBulkCString(c, "port");
addReplyLongLong(c, port);
reply_count++;
}
int tls_port = server.cluster_announce_tls_port ? server.cluster_announce_tls_port : server.tls_port;
if (tls_port) {
addReplyBulkCString(c, "tls-port");
addReplyLongLong(c, tls_port);
reply_count++;
}
addReplyBulkCString(c, "ip");
addReplyBulkCString(c, node->ip);
reply_count++;
addReplyBulkCString(c, "endpoint");
addReplyBulkCString(c, getPreferredEndpoint(node));
reply_count++;
if (node->hostname) {
addReplyBulkCString(c, "hostname");
addReplyBulkCString(c, node->hostname);
reply_count++;
}
long long node_offset;
if (node->flags & CLUSTER_NODE_MYSELF) {
node_offset = nodeIsSlave(node) ? replicationGetSlaveOffset() : server.master_repl_offset;
} else {
node_offset = node->repl_offset;
}
addReplyBulkCString(c, "role");
addReplyBulkCString(c, nodeIsSlave(node) ? "replica" : "master");
reply_count++;
addReplyBulkCString(c, "replication-offset");
addReplyLongLong(c, node_offset);
reply_count++;
addReplyBulkCString(c, "health");
const char *health_msg = NULL;
if (nodeFailed(node)) {
health_msg = "fail";
} else if (nodeIsSlave(node) && node_offset == 0) {
health_msg = "loading";
} else {
health_msg = "online";
}
addReplyBulkCString(c, health_msg);
reply_count++;
setDeferredMapLen(c, node_replylen, reply_count);
}
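/* For reference, each node entry emitted above is a map whose keys are:
 * "id", "port" and/or "tls-port", "ip", "endpoint", "hostname" (only when
 * set), "role" ("master" or "replica"), "replication-offset" and "health"
 * ("online", "loading" or "fail"). */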
/* Add the shard reply of a single shard based on the given primary node. */
void addShardReplyForClusterShards(client *c, clusterNode *node, list *slot_info_pairs) {
addReplyMapLen(c, 2);
addReplyBulkCString(c, "slots");
uint16_t slot_pair_count = 0;
if (slot_info_pairs) {
slot_pair_count = listLength(slot_info_pairs);
serverAssert((slot_pair_count % 2) == 0);
addReplyArrayLen(c, slot_pair_count);
listIter li;
listRewind(slot_info_pairs, &li);
listNode *ln;
while((ln = listNext(&li))) {
addReplyBulkLongLong(c, (unsigned long)listNodeValue(ln));
ln = listNext(&li);
addReplyBulkLongLong(c, (unsigned long)listNodeValue(ln));
}
} else {
/* If no slot info pair is provided, the node owns no slots */
addReplyArrayLen(c, 0);
}
addReplyBulkCString(c, "nodes");
list *nodes_for_slot = clusterGetNodesServingMySlots(node);
/* At least the provided node should be serving its slots */
serverAssert(nodes_for_slot);
addReplyArrayLen(c, listLength(nodes_for_slot));
if (listLength(nodes_for_slot) != 0) {
listIter li;
listNode *ln;
listRewind(nodes_for_slot, &li);
while ((ln = listNext(&li))) {
clusterNode *node = listNodeValue(ln);
addNodeDetailsToShardReply(c, node);
}
listRelease(nodes_for_slot);
}
}
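/* Note that the "slots" array above is flat: element 2k holds a range start
 * and element 2k+1 the matching range end, mirroring the layout of
 * slot_info_pairs. */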
/* Add to the output buffer of the given client an array of slot (start, end)
 * pairs owned by the shard, together with the primary and the set of
 * replica(s), along with information about each node. */
void clusterReplyShards(client *c) {
void *shard_replylen = addReplyDeferredLen(c);
int shard_count = 0;
/* This call will add slot_info_pairs to all nodes */
clusterGenNodesSlotsInfo(0);
dictIterator *di = dictGetSafeIterator(server.cluster->nodes);
dictEntry *de;
/* Iterate over all the available nodes in the cluster; for each primary
 * node, generate the cluster shards response. If the primary node doesn't
 * own any slots, the cluster shards response contains the node's
 * information and an empty slots array. */
while((de = dictNext(di)) != NULL) {
clusterNode *n = dictGetVal(de);
if (nodeIsSlave(n)) {
/* You can force a replica to own slots, even though it'll get reverted,
 * so free the slot pairs here just in case. */
if (n->slot_info_pairs) listRelease(n->slot_info_pairs);
n->slot_info_pairs = NULL;
continue;
}
shard_count++;
/* n->slot_info_pairs is set to NULL when the node owns no slots. */
addShardReplyForClusterShards(c, n, n->slot_info_pairs);
if (n->slot_info_pairs) {
listRelease(n->slot_info_pairs);
n->slot_info_pairs = NULL;
}
}
dictReleaseIterator(di);
setDeferredArrayLen(c, shard_replylen, shard_count);
}
void clusterReplyMultiBulkSlots(client * c) {
/* Format: 1) 1) start slot
* 2) end slot
......@@ -5035,6 +5197,8 @@ void clusterCommand(client *c) {
"SLOTS",
" Return information about slots range mappings. Each range is made of:",
" start, end, master and replicas IP addresses, ports and ids",
"SHARDS",
" Return information about slot range mappings and the nodes associated with them.",
"LINKS",
" Return information about all network links between this node and its peers.",
" Output format is an array where each array element is a map containing attributes of a link",
......@@ -5084,6 +5248,9 @@ NULL
} else if (!strcasecmp(c->argv[1]->ptr,"slots") && c->argc == 2) {
/* CLUSTER SLOTS */
clusterReplyMultiBulkSlots(c);
} else if (!strcasecmp(c->argv[1]->ptr,"shards") && c->argc == 2) {
/* CLUSTER SHARDS */
clusterReplyShards(c);
} else if (!strcasecmp(c->argv[1]->ptr,"flushslots") && c->argc == 2) {
/* CLUSTER FLUSHSLOTS */
if (dictSize(server.db[0].dict) != 0) {
......
......@@ -118,7 +118,7 @@ typedef struct clusterNode {
int flags; /* CLUSTER_NODE_... */
uint64_t configEpoch; /* Last configEpoch observed for this node */
unsigned char slots[CLUSTER_SLOTS/8]; /* slots handled by this node */
sds slots_info; /* Slots info represented by string. */
list *slot_info_pairs; /* Slots info represented as (start/end) pair (consecutive index). */
int numslots; /* Number of slots handled by this node */
int numslaves; /* Number of slave nodes, if this is a master */
struct clusterNode **slaves; /* pointers to slave nodes */
......
......@@ -588,9 +588,9 @@ NULL
/* CLUSTER SETSLOT subcommand argument table */
struct redisCommandArg CLUSTER_SETSLOT_subcommand_Subargs[] = {
{"node-id",ARG_TYPE_STRING,-1,"IMPORTING",NULL,NULL,CMD_ARG_NONE},
{"node-id",ARG_TYPE_STRING,-1,"MIGRATING",NULL,NULL,CMD_ARG_NONE},
{"node-id",ARG_TYPE_STRING,-1,"NODE",NULL,NULL,CMD_ARG_NONE},
{"node-id",ARG_TYPE_INTEGER,-1,"IMPORTING",NULL,NULL,CMD_ARG_NONE},
{"node-id",ARG_TYPE_INTEGER,-1,"MIGRATING",NULL,NULL,CMD_ARG_NONE},
{"node-id",ARG_TYPE_INTEGER,-1,"NODE",NULL,NULL,CMD_ARG_NONE},
{"stable",ARG_TYPE_PURE_TOKEN,-1,"STABLE",NULL,NULL,CMD_ARG_NONE},
{0}
};
......@@ -602,6 +602,17 @@ struct redisCommandArg CLUSTER_SETSLOT_Args[] = {
{0}
};
/********** CLUSTER SHARDS ********************/
/* CLUSTER SHARDS history */
#define CLUSTER_SHARDS_History NULL
/* CLUSTER SHARDS tips */
const char *CLUSTER_SHARDS_tips[] = {
"nondeterministic_output",
NULL
};
/********** CLUSTER SLAVES ********************/
/* CLUSTER SLAVES history */
......@@ -660,7 +671,8 @@ struct redisCommand CLUSTER_Subcommands[] = {
{"saveconfig","Forces the node to save cluster state on disk","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SAVECONFIG_History,CLUSTER_SAVECONFIG_tips,clusterCommand,2,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0},
{"set-config-epoch","Set the configuration epoch in a new node","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SET_CONFIG_EPOCH_History,CLUSTER_SET_CONFIG_EPOCH_tips,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,.args=CLUSTER_SET_CONFIG_EPOCH_Args},
{"setslot","Bind a hash slot to a specific node","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SETSLOT_History,CLUSTER_SETSLOT_tips,clusterCommand,-4,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,.args=CLUSTER_SETSLOT_Args},
{"slaves","List replica nodes of the specified master node","O(1)","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER REPLICAS`","5.0.0",COMMAND_GROUP_CLUSTER,CLUSTER_SLAVES_History,CLUSTER_SLAVES_tips,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,.args=CLUSTER_SLAVES_Args},
{"shards","Get array of cluster slots to node mappings","O(N) where N is the total number of cluster nodes","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SHARDS_History,CLUSTER_SHARDS_tips,clusterCommand,2,CMD_STALE,0},
{"slaves","List replica nodes of the specified master node","O(1)","3.0.0",CMD_DOC_NONE,"`CLUSTER REPLICAS`","5.0.0",COMMAND_GROUP_CLUSTER,CLUSTER_SLAVES_History,CLUSTER_SLAVES_tips,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,.args=CLUSTER_SLAVES_Args},
{"slots","Get array of Cluster slot to node mappings","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SLOTS_History,CLUSTER_SLOTS_tips,clusterCommand,2,CMD_STALE,0},
{0}
};
......@@ -3449,9 +3461,8 @@ struct redisCommandArg FUNCTION_RESTORE_Args[] = {
/* FUNCTION STATS tips */
const char *FUNCTION_STATS_tips[] = {
"nondeterministic_output",
"request_policy:all_shards",
"response_policy:special",
"response_policy:one_succeeded",
NULL
};
......@@ -4515,12 +4526,7 @@ struct redisCommandArg LATENCY_GRAPH_Args[] = {
#define LATENCY_HISTOGRAM_History NULL
/* LATENCY HISTOGRAM tips */
const char *LATENCY_HISTOGRAM_tips[] = {
"nondeterministic_output",
"request_policy:all_nodes",
"response_policy:special",
NULL
};
#define LATENCY_HISTOGRAM_tips NULL
/* LATENCY HISTOGRAM argument table */
struct redisCommandArg LATENCY_HISTOGRAM_Args[] = {
......@@ -4534,12 +4540,7 @@ struct redisCommandArg LATENCY_HISTOGRAM_Args[] = {
#define LATENCY_HISTORY_History NULL
/* LATENCY HISTORY tips */
const char *LATENCY_HISTORY_tips[] = {
"nondeterministic_output",
"request_policy:all_nodes",
"response_policy:special",
NULL
};
#define LATENCY_HISTORY_tips NULL
/* LATENCY HISTORY argument table */
struct redisCommandArg LATENCY_HISTORY_Args[] = {
......@@ -4553,12 +4554,7 @@ struct redisCommandArg LATENCY_HISTORY_Args[] = {
#define LATENCY_LATEST_History NULL
/* LATENCY LATEST tips */
const char *LATENCY_LATEST_tips[] = {
"nondeterministic_output",
"request_policy:all_nodes",
"response_policy:special",
NULL
};
#define LATENCY_LATEST_tips NULL
/********** LATENCY RESET ********************/
......@@ -4566,11 +4562,7 @@ NULL
#define LATENCY_RESET_History NULL
/* LATENCY RESET tips */
const char *LATENCY_RESET_tips[] = {
"request_policy:all_nodes",
"response_policy:all_succeeded",
NULL
};
#define LATENCY_RESET_tips NULL
/* LATENCY RESET argument table */
struct redisCommandArg LATENCY_RESET_Args[] = {
......@@ -4658,8 +4650,6 @@ NULL
/* MEMORY STATS tips */
const char *MEMORY_STATS_tips[] = {
"nondeterministic_output",
"request_policy:all_shards",
"response_policy:special",
NULL
};
......
{
"SHARDS": {
"summary": "Get array of cluster slots to node mappings",
"complexity": "O(N) where N is the total number of cluster nodes",
"group": "cluster",
"since": "7.0.0",
"arity": 2,
"container": "CLUSTER",
"function": "clusterCommand",
"history": [],
"command_flags": [
"STALE"
],
"command_tips": [
"NONDETERMINISTIC_OUTPUT"
]
}
}
......@@ -7,6 +7,8 @@
"arity": 2,
"container": "CLUSTER",
"function": "clusterCommand",
"deprecated_since": "7.0.0",
"replaced_by": "`CLUSTER SHARDS`",
"history": [
[
"4.0.0",
......
......@@ -203,6 +203,21 @@ proc wait_for_cluster_propagation {} {
}
}
# Check if cluster's view of hostnames is consistent
proc are_hostnames_propagated {match_string} {
for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
set cfg [R $j cluster slots]
foreach node $cfg {
for {set i 2} {$i < [llength $node]} {incr i} {
if {! [string match $match_string [lindex [lindex [lindex $node $i] 3] 1]] } {
return 0
}
}
}
}
return 1
}
# Returns a parsed CLUSTER LINKS output of the instance identified
# by the given `id` as a list of dictionaries, with each dictionary
# corresponds to a link.
......
source "../tests/includes/init-tests.tcl"
# Check if cluster's view of hostnames is consistent
proc are_hostnames_propagated {match_string} {
for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
set cfg [R $j cluster slots]
foreach node $cfg {
for {set i 2} {$i < [llength $node]} {incr i} {
if {! [string match $match_string [lindex [lindex [lindex $node $i] 3] 1]] } {
return 0
}
}
}
}
return 1
}
# Isolate a node from the cluster and give it a new nodeid
proc isolate_node {id} {
set node_id [R $id CLUSTER MYID]
......
source "../tests/includes/init-tests.tcl"
# Initial slot distribution.
set ::slot0 [list 0 1000 1002 5459 5461 5461 10926 10926]
set ::slot1 [list 5460 5460 5462 10922 10925 10925]
set ::slot2 [list 10923 10924 10927 16383]
set ::slot3 [list 1001 1001]
proc cluster_create_with_split_slots {masters replicas} {
for {set j 0} {$j < $masters} {incr j} {
R $j cluster ADDSLOTSRANGE {*}[set ::slot${j}]
}
if {$replicas} {
cluster_allocate_slaves $masters $replicas
}
set ::cluster_master_nodes $masters
set ::cluster_replica_nodes $replicas
}
# Get the node info with the specific node_id from the
# given reference node. Valid type options are "node" and "shard"
proc get_node_info_from_shard {id reference {type node}} {
set shards_response [R $reference CLUSTER SHARDS]
foreach shard_response $shards_response {
set nodes [dict get $shard_response nodes]
foreach node $nodes {
if {[dict get $node id] eq $id} {
if {$type eq "node"} {
return $node
} elseif {$type eq "shard"} {
return $shard_response
} else {
return {}
}
}
}
}
# No shard found, return nothing
return {}
}
test "Create a 8 nodes cluster with 4 shards" {
cluster_create_with_split_slots 4 4
}
test "Cluster should start ok" {
assert_cluster_state ok
}
test "Set cluster hostnames and verify they are propagated" {
for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
R $j config set cluster-announce-hostname "host-$j.com"
}
# Wait for everyone to agree about the state
wait_for_cluster_propagation
}
test "Verify information about the shards" {
set ids {}
for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
lappend ids [R $j CLUSTER MYID]
}
set slots [list $::slot0 $::slot1 $::slot2 $::slot3 $::slot0 $::slot1 $::slot2 $::slot3]
# Verify that on each node (primary/replica) the response of the `CLUSTER SHARDS` command is consistent.
for {set ref 0} {$ref < $::cluster_master_nodes + $::cluster_replica_nodes} {incr ref} {
for {set i 0} {$i < $::cluster_master_nodes + $::cluster_replica_nodes} {incr i} {
assert_equal [lindex $slots $i] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "shard"] slots]
assert_equal "host-$i.com" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] hostname]
assert_equal "127.0.0.1" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] ip]
# Default value of 'cluster-preferred-endpoint-type' is ip.
assert_equal "127.0.0.1" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] endpoint]
if {$::tls} {
assert_equal [get_instance_attrib redis $i plaintext-port] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] port]
assert_equal [get_instance_attrib redis $i port] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] tls-port]
} else {
assert_equal [get_instance_attrib redis $i port] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] port]
}
if {$i < 4} {
assert_equal "master" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] role]
assert_equal "online" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] health]
} else {
assert_equal "replica" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] role]
# Replica could be either online or loading
}
}
}
}
test "Verify no slot shard" {
# Node 8 has no slots assigned
set node_8_id [R 8 CLUSTER MYID]
assert_equal {} [dict get [get_node_info_from_shard $node_8_id 8 "shard"] slots]
assert_equal {} [dict get [get_node_info_from_shard $node_8_id 0 "shard"] slots]
}
set node_0_id [R 0 CLUSTER MYID]
test "Kill a node and tell the replica to immediately takeover" {
kill_instance redis 0
R 4 cluster failover force
}
# Primary node 0 should report as failed; wait until the new primary acknowledges it.
test "Verify health as fail for killed node" {
wait_for_condition 50 100 {
"fail" eq [dict get [get_node_info_from_shard $node_0_id 4 "node"] "health"]
} else {
fail "New primary never detected the node failed"
}
}
set primary_id 4
set replica_id 0
test "Restarting primary node" {
restart_instance redis $replica_id
}
test "Instance #0 gets converted into a replica" {
wait_for_condition 1000 50 {
[RI $replica_id role] eq {slave}
} else {
fail "Old primary was not converted into replica"
}
}
test "Test the replica reports a loading state while it's loading" {
# First use the command to verify everything moves to a happy state
set replica_cluster_id [R $replica_id CLUSTER MYID]
wait_for_condition 50 100 {
[dict get [get_node_info_from_shard $replica_cluster_id $primary_id "node"] health] eq "online"
} else {
fail "Replica never transitioned to online"
}
# Set 1 MB of data, so there is something to load on full sync
R $primary_id debug populate 1000 key 1000
# Kill replica client for primary and load new data to the primary
R $primary_id config set repl-backlog-size 100
# Set the key load delay so that it will take at least
# 2 seconds to fully load the data.
R $replica_id config set key-load-delay 4000
# Trigger event loop processing every 1024 bytes; this allows us to send
# and receive cluster messages, so we set it low so that cluster messages
# are exchanged more frequently.
R $replica_id config set loading-process-events-interval-bytes 1024
R $primary_id multi
R $primary_id client kill type replica
# populate the correct data
set num 100
set value [string repeat A 1024]
for {set j 0} {$j < $num} {incr j} {
# Use hashtag valid for shard #0
set key "{ch3}$j"
R $primary_id set $key $value
}
R $primary_id exec
# The replica should reconnect and start a full sync; it will gossip about its health to the primary.
wait_for_condition 50 100 {
"loading" eq [dict get [get_node_info_from_shard $replica_cluster_id $primary_id "node"] health]
} else {
fail "Replica never transitioned to loading"
}
# Speed up the key loading and verify everything resumes
R $replica_id config set key-load-delay 0
wait_for_condition 50 100 {
"online" eq [dict get [get_node_info_from_shard $replica_cluster_id $primary_id "node"] health]
} else {
fail "Replica never transitioned to online"
}
# Final sanity check: the replica agrees it is online.
assert_equal "online" [dict get [get_node_info_from_shard $replica_cluster_id $replica_id "node"] health]
}
\ No newline at end of file