Unverified Commit d092d64d authored by Moti Cohen, committed by GitHub

Add new SFLUSH command to cluster for slot-based FLUSH (#13564)

This PR introduces a new `SFLUSH` command in cluster mode that allows
partial flushing of nodes based on specified slot ranges. The current
implementation is designed to flush all slots of a shard, but future
extensions could allow for more granular flushing.

**Command Usage:**
`SFLUSH <start-slot> <end-slot> [<start-slot> <end-slot>]* [SYNC|ASYNC]`

This command removes all data from the specified slots, either
synchronously or asynchronously depending on the optional SYNC/ASYNC
argument.

**Functionality:**
The current implementation of `SFLUSH` verifies that the provided slot
ranges are valid and cover all of the node's slots before proceeding. If
the slots are partially or incorrectly specified, the command fails and
returns an error: all slots owned by the node must be fully covered for
the flush to proceed.

The function supports both synchronous (default) and asynchronous
flushing. In addition, if possible, SFLUSH SYNC will be run as blocking
ASYNC as an optimization.
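
For illustration, here is a minimal client-side sketch of the intended usage. This is a hypothetical example using the hiredis C API against a node assumed to own slots 0-8191; the host, port, and slot range are assumptions and not part of this PR:

```c
#include <stdio.h>
#include <hiredis/hiredis.h>

int main(void) {
    /* Assumption: a cluster node listening on 127.0.0.1:7000 that owns slots 0-8191. */
    redisContext *ctx = redisConnect("127.0.0.1", 7000);
    if (ctx == NULL || ctx->err) return 1;

    /* Flush the whole shard asynchronously by covering all of its slots. */
    redisReply *reply = redisCommand(ctx, "SFLUSH 0 8191 ASYNC");

    /* On success the reply is an array of [start end] slot-range pairs;
     * an empty array means the given ranges did not cover all of the node's slots. */
    if (reply && reply->type == REDIS_REPLY_ARRAY) {
        for (size_t i = 0; i < reply->elements; i++) {
            redisReply *pair = reply->element[i];
            printf("flushed slots %lld-%lld\n",
                   pair->element[0]->integer, pair->element[1]->integer);
        }
    }
    freeReplyObject(reply);
    redisFree(ctx);
    return 0;
}
```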
parent 99c40ab5
@@ -81,6 +81,7 @@ static int job_comp_pipe[2]; /* Pipe used to awake the event loop */
 typedef struct bio_comp_item {
     comp_fn *func; /* callback after completion job will be processed */
     uint64_t arg;  /* user data to be passed to the function */
+    void *ptr;     /* user pointer to be passed to the function */
 } bio_comp_item;
 
 /* This structure represents a background Job. It is only used locally to this
@@ -110,6 +111,7 @@ typedef union bio_job {
         int type;     /* header */
         comp_fn *fn;  /* callback. Handover to main thread to cb as notify for job completion */
         uint64_t arg; /* callback arguments */
+        void *ptr;    /* callback pointer */
     } comp_rq;
 } bio_job;
@@ -200,7 +202,7 @@ void bioCreateLazyFreeJob(lazy_free_fn free_fn, int arg_count, ...) {
     bioSubmitJob(BIO_LAZY_FREE, job);
 }
 
-void bioCreateCompRq(bio_worker_t assigned_worker, comp_fn *func, uint64_t user_data) {
+void bioCreateCompRq(bio_worker_t assigned_worker, comp_fn *func, uint64_t user_data, void *user_ptr) {
     int type;
     switch (assigned_worker) {
         case BIO_WORKER_CLOSE_FILE:
@@ -219,6 +221,7 @@ void bioCreateCompRq(bio_worker_t assigned_worker, comp_fn *func, uint64_t user_
     bio_job *job = zmalloc(sizeof(*job));
     job->comp_rq.fn = func;
     job->comp_rq.arg = user_data;
+    job->comp_rq.ptr = user_ptr;
     bioSubmitJob(type, job);
 }
@@ -339,6 +342,7 @@ void *bioProcessBackgroundJobs(void *arg) {
             bio_comp_item *comp_rsp = zmalloc(sizeof(bio_comp_item));
             comp_rsp->func = job->comp_rq.fn;
             comp_rsp->arg = job->comp_rq.arg;
+            comp_rsp->ptr = job->comp_rq.ptr;
 
             /* just write it to completion job responses */
             pthread_mutex_lock(&bio_mutex_comp);
@@ -432,7 +436,7 @@ void bioPipeReadJobCompList(aeEventLoop *el, int fd, void *privdata, int mask) {
         listNode *ln = listFirst(tmp_list);
         bio_comp_item *rsp = ln->value;
         listDelNode(tmp_list, ln);
-        rsp->func(rsp->arg);
+        rsp->func(rsp->arg, rsp->ptr);
         zfree(rsp);
     }
     listRelease(tmp_list);
......
@@ -10,7 +10,7 @@
 #define __BIO_H
 
 typedef void lazy_free_fn(void *args[]);
-typedef void comp_fn(uint64_t user_data);
+typedef void comp_fn(uint64_t user_data, void *user_ptr);
 
 typedef enum bio_worker_t {
     BIO_WORKER_CLOSE_FILE = 0,
@@ -40,7 +40,7 @@ void bioCreateCloseJob(int fd, int need_fsync, int need_reclaim_cache);
 void bioCreateCloseAofJob(int fd, long long offset, int need_reclaim_cache);
 void bioCreateFsyncJob(int fd, long long offset, int need_reclaim_cache);
 void bioCreateLazyFreeJob(lazy_free_fn free_fn, int arg_count, ...);
-void bioCreateCompRq(bio_worker_t assigned_worker, comp_fn *func, uint64_t user_data);
+void bioCreateCompRq(bio_worker_t assigned_worker, comp_fn *func, uint64_t user_data, void *user_ptr);
 
 #endif
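
To make the bio.c/bio.h change above concrete, below is a minimal sketch of how a caller can now hand both a 64-bit value and a heap pointer to a completion callback that runs back on the main thread. Only `bioCreateCompRq()`, the `comp_fn` typedef, `BIO_WORKER_LAZY_FREE`, and `zmalloc`/`zfree` come from the Redis sources shown here; the payload type and both function names are illustrative assumptions:

```c
/* Sketch only; assumes it is compiled inside the Redis source tree so that
 * bio.h and the zmalloc()/zfree() allocators are available. The payload type
 * and both function names below are hypothetical. */
#include "server.h"
#include "bio.h"

typedef struct myPayload { int value; } myPayload;

/* Matches the new comp_fn signature: void comp_fn(uint64_t user_data, void *user_ptr).
 * Invoked on the main thread (via bioPipeReadJobCompList) once the background
 * worker reaches the completion request. */
static void myJobDone(uint64_t client_id, void *user_ptr) {
    myPayload *p = user_ptr;
    (void) client_id; /* e.g. look up the client by id and reply using p */
    zfree(p);
}

static void queueCompletionExample(uint64_t client_id) {
    /* Ownership of the heap payload passes to the completion callback,
     * mirroring how SFLUSH hands its SlotsFlush result through the bio layer. */
    myPayload *p = zmalloc(sizeof(*p));
    p->value = 42;
    bioCreateCompRq(BIO_WORKER_LAZY_FREE, myJobDone, client_id, p);
}
```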
@@ -1558,6 +1558,126 @@ void readonlyCommand(client *c) {
    addReply(c,shared.ok);
}

void replySlotsFlushAndFree(client *c, SlotsFlush *sflush) {
    addReplyArrayLen(c, sflush->numRanges);
    for (int i = 0 ; i < sflush->numRanges ; i++) {
        addReplyArrayLen(c, 2);
        addReplyLongLong(c, sflush->ranges[i].first);
        addReplyLongLong(c, sflush->ranges[i].last);
    }
    zfree(sflush);
}

/* Partially flush destination DB in a cluster node, based on the slot range.
 *
 * Usage: SFLUSH <start-slot> <end slot> [<start-slot> <end slot>]* [SYNC|ASYNC]
 *
 * This is an initial implementation of SFLUSH (slots flush) which is limited to
 * flushing a single shard as a whole, but in the future the same command may be
 * used to partially flush a shard based on hash slots. Currently only if provided
 * slots cover entirely the slots of a node, the node will be flushed and the
 * return value will be pairs of slot ranges. Otherwise, a single empty set will
 * be returned. If possible, SFLUSH SYNC will be run as blocking ASYNC as an
 * optimization.
 */
void sflushCommand(client *c) {
    int flags = EMPTYDB_NO_FLAGS, argc = c->argc;

    if (server.cluster_enabled == 0) {
        addReplyError(c,"This instance has cluster support disabled");
        return;
    }

    /* check if last argument is SYNC or ASYNC */
    if (!strcasecmp(c->argv[c->argc-1]->ptr,"sync")) {
        flags = EMPTYDB_NO_FLAGS;
        argc--;
    } else if (!strcasecmp(c->argv[c->argc-1]->ptr,"async")) {
        flags = EMPTYDB_ASYNC;
        argc--;
    } else if (server.lazyfree_lazy_user_flush) {
        flags = EMPTYDB_ASYNC;
    }

    /* parse the slot range */
    if (argc % 2 == 0) {
        addReplyErrorArity(c);
        return;
    }

    /* Verify <first, last> slot pairs are valid and not overlapping */
    long long j, first, last;
    unsigned char slotsToFlushRq[CLUSTER_SLOTS] = {0};
    for (j = 1; j < argc; j += 2) {
        /* check if the first slot is valid */
        if (getLongLongFromObject(c->argv[j], &first) != C_OK || first < 0 || first >= CLUSTER_SLOTS) {
            addReplyError(c,"Invalid or out of range slot");
            return;
        }

        /* check if the last slot is valid */
        if (getLongLongFromObject(c->argv[j+1], &last) != C_OK || last < 0 || last >= CLUSTER_SLOTS) {
            addReplyError(c,"Invalid or out of range slot");
            return;
        }

        if (first > last) {
            addReplyErrorFormat(c,"start slot number %lld is greater than end slot number %lld", first, last);
            return;
        }

        /* Mark the slots in slotsToFlushRq[] */
        for (int i = first; i <= last; i++) {
            if (slotsToFlushRq[i]) {
                addReplyErrorFormat(c, "Slot %d specified multiple times", i);
                return;
            }
            slotsToFlushRq[i] = 1;
        }
    }

    /* Verify slotsToFlushRq[] covers ALL slots of myNode. */
    clusterNode *myNode = getMyClusterNode();

    /* During iteration trace also the slot range pairs and save in SlotsFlush.
     * It is allocated on heap since there is a chance that FLUSH SYNC will be
     * running as blocking ASYNC and only later reply with slot ranges */
    int capacity = 32; /* Initial capacity */
    SlotsFlush *sflush = zmalloc(sizeof(SlotsFlush) + sizeof(SlotRange) * capacity);
    sflush->numRanges = 0;

    int inSlotRange = 0;
    for (int i = 0; i < CLUSTER_SLOTS; i++) {
        if (myNode == getNodeBySlot(i)) {
            if (!slotsToFlushRq[i]) {
                addReplySetLen(c, 0); /* Not all slots of mynode got covered. See sflushCommand() comment. */
                zfree(sflush);
                return;
            }
            if (!inSlotRange) { /* If start another slot range */
                sflush->ranges[sflush->numRanges].first = i;
                inSlotRange = 1;
            }
        } else {
            if (inSlotRange) { /* If end another slot range */
                sflush->ranges[sflush->numRanges++].last = i - 1;
                inSlotRange = 0;

                /* If reached 'sflush' capacity, double the capacity */
                if (sflush->numRanges >= capacity) {
                    capacity *= 2;
                    sflush = zrealloc(sflush, sizeof(SlotsFlush) + sizeof(SlotRange) * capacity);
                }
            }
        }
    }

    /* Update last pair if last cluster slot is also end of last range */
    if (inSlotRange) sflush->ranges[sflush->numRanges++].last = CLUSTER_SLOTS - 1;

    /* Flush selected slots. If not flush as blocking async, then reply immediately */
    if (flushCommandCommon(c, FLUSH_TYPE_SLOTS, flags, sflush) == 0)
        replySlotsFlushAndFree(c, sflush);
}

/* The READWRITE command just clears the READONLY command state. */
void readwriteCommand(client *c) {
    if (server.cluster_enabled == 0) {
......
@@ -7735,6 +7735,41 @@ struct COMMAND_ARG RESTORE_ASKING_Args[] = {
#define SAVE_Keyspecs NULL
#endif

/********** SFLUSH ********************/
#ifndef SKIP_CMD_HISTORY_TABLE
/* SFLUSH history */
#define SFLUSH_History NULL
#endif
#ifndef SKIP_CMD_TIPS_TABLE
/* SFLUSH tips */
#define SFLUSH_Tips NULL
#endif
#ifndef SKIP_CMD_KEY_SPECS_TABLE
/* SFLUSH key specs */
#define SFLUSH_Keyspecs NULL
#endif
/* SFLUSH data argument table */
struct COMMAND_ARG SFLUSH_data_Subargs[] = {
{MAKE_ARG("slot-start",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
{MAKE_ARG("slot-last",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
};
/* SFLUSH flush_type argument table */
struct COMMAND_ARG SFLUSH_flush_type_Subargs[] = {
{MAKE_ARG("async",ARG_TYPE_PURE_TOKEN,-1,"ASYNC",NULL,NULL,CMD_ARG_NONE,0,NULL)},
{MAKE_ARG("sync",ARG_TYPE_PURE_TOKEN,-1,"SYNC",NULL,NULL,CMD_ARG_NONE,0,NULL)},
};
/* SFLUSH argument table */
struct COMMAND_ARG SFLUSH_Args[] = {
{MAKE_ARG("data",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,2,NULL),.subargs=SFLUSH_data_Subargs},
{MAKE_ARG("flush-type",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,2,NULL),.subargs=SFLUSH_flush_type_Subargs},
};
/********** SHUTDOWN ********************/

#ifndef SKIP_CMD_HISTORY_TABLE
@@ -11130,6 +11165,7 @@ struct COMMAND_STRUCT redisCommandTable[] = {
{MAKE_CMD("restore-asking","An internal command for migrating keys in a cluster.","O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).","3.0.0",CMD_DOC_SYSCMD,NULL,NULL,"server",COMMAND_GROUP_SERVER,RESTORE_ASKING_History,3,RESTORE_ASKING_Tips,0,restoreCommand,-4,CMD_WRITE|CMD_DENYOOM|CMD_ASKING,ACL_CATEGORY_KEYSPACE|ACL_CATEGORY_DANGEROUS,RESTORE_ASKING_Keyspecs,1,NULL,7),.args=RESTORE_ASKING_Args},
{MAKE_CMD("role","Returns the replication role.","O(1)","2.8.12",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,ROLE_History,0,ROLE_Tips,0,roleCommand,1,CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_FAST|CMD_SENTINEL,ACL_CATEGORY_ADMIN|ACL_CATEGORY_DANGEROUS,ROLE_Keyspecs,0,NULL,0)},
{MAKE_CMD("save","Synchronously saves the database(s) to disk.","O(N) where N is the total number of keys in all databases","1.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SAVE_History,0,SAVE_Tips,0,saveCommand,1,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_NO_MULTI,0,SAVE_Keyspecs,0,NULL,0)},
{MAKE_CMD("sflush","Remove all keys from selected range of slots.","O(N)+O(k) where N is the number of keys and k is the number of slots.","8.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SFLUSH_History,0,SFLUSH_Tips,0,sflushCommand,-3,CMD_WRITE,ACL_CATEGORY_KEYSPACE|ACL_CATEGORY_DANGEROUS,SFLUSH_Keyspecs,0,NULL,2),.args=SFLUSH_Args},
{MAKE_CMD("shutdown","Synchronously saves the database(s) to disk and shuts down the Redis server.","O(N) when saving, where N is the total number of keys in all databases when saving data, otherwise O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SHUTDOWN_History,1,SHUTDOWN_Tips,0,shutdownCommand,-1,CMD_ADMIN|CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_NO_MULTI|CMD_SENTINEL|CMD_ALLOW_BUSY,0,SHUTDOWN_Keyspecs,0,NULL,4),.args=SHUTDOWN_Args}, {MAKE_CMD("shutdown","Synchronously saves the database(s) to disk and shuts down the Redis server.","O(N) when saving, where N is the total number of keys in all databases when saving data, otherwise O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SHUTDOWN_History,1,SHUTDOWN_Tips,0,shutdownCommand,-1,CMD_ADMIN|CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_NO_MULTI|CMD_SENTINEL|CMD_ALLOW_BUSY,0,SHUTDOWN_Keyspecs,0,NULL,4),.args=SHUTDOWN_Args},
{MAKE_CMD("slaveof","Sets a Redis server as a replica of another, or promotes it to being a master.","O(1)","1.0.0",CMD_DOC_DEPRECATED,"`REPLICAOF`","5.0.0","server",COMMAND_GROUP_SERVER,SLAVEOF_History,0,SLAVEOF_Tips,0,replicaofCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_STALE,0,SLAVEOF_Keyspecs,0,NULL,1),.args=SLAVEOF_Args}, {MAKE_CMD("slaveof","Sets a Redis server as a replica of another, or promotes it to being a master.","O(1)","1.0.0",CMD_DOC_DEPRECATED,"`REPLICAOF`","5.0.0","server",COMMAND_GROUP_SERVER,SLAVEOF_History,0,SLAVEOF_Tips,0,replicaofCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_STALE,0,SLAVEOF_Keyspecs,0,NULL,1),.args=SLAVEOF_Args},
{MAKE_CMD("slowlog","A container for slow log commands.","Depends on subcommand.","2.2.12",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SLOWLOG_History,0,SLOWLOG_Tips,0,NULL,-2,0,0,SLOWLOG_Keyspecs,0,NULL,0),.subcommands=SLOWLOG_Subcommands}, {MAKE_CMD("slowlog","A container for slow log commands.","Depends on subcommand.","2.2.12",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SLOWLOG_History,0,SLOWLOG_Tips,0,NULL,-2,0,0,SLOWLOG_Keyspecs,0,NULL,0),.subcommands=SLOWLOG_Subcommands},
......
{
    "SFLUSH": {
        "summary": "Remove all keys from selected range of slots.",
        "complexity": "O(N)+O(k) where N is the number of keys and k is the number of slots.",
        "group": "server",
        "since": "8.0.0",
        "arity": -3,
        "function": "sflushCommand",
        "command_flags": [
            "WRITE"
        ],
        "acl_categories": [
            "KEYSPACE",
            "DANGEROUS"
        ],
        "command_tips": [
        ],
        "reply_schema": {
            "description": "List of slot ranges",
            "type": "array",
            "minItems": 0,
            "maxItems": 4294967295,
            "items": {
                "type": "array",
                "minItems": 2,
                "maxItems": 2,
                "items": [
                    {
                        "description": "start slot number",
                        "type": "integer"
                    },
                    {
                        "description": "end slot number",
                        "type": "integer"
                    }
                ]
            }
        },
        "arguments": [
            {
                "name": "data",
                "type": "block",
                "multiple": true,
                "arguments": [
                    {
                        "name": "slot-start",
                        "type": "integer"
                    },
                    {
                        "name": "slot-last",
                        "type": "integer"
                    }
                ]
            },
            {
                "name": "flush-type",
                "type": "oneof",
                "optional": true,
                "arguments": [
                    {
                        "name": "async",
                        "type": "pure-token",
                        "token": "ASYNC"
                    },
                    {
                        "name": "sync",
                        "type": "pure-token",
                        "token": "SYNC"
                    }
                ]
            }
        ]
    }
}
@@ -730,9 +730,12 @@ void flushAllDataAndResetRDB(int flags) {
 #endif
 }
 
-/* Optimized FLUSHALL\FLUSHDB SYNC command finished to run by lazyfree thread */
-void flushallSyncBgDone(uint64_t client_id) {
+/* CB function on blocking ASYNC FLUSH completion
+ *
+ * Utilized by commands SFLUSH, FLUSHALL and FLUSHDB.
+ */
+void flushallSyncBgDone(uint64_t client_id, void *sflush) {
+    SlotsFlush *slotsFlush = sflush;
     client *c = lookupClientByID(client_id);
 
     /* Verify that client still exists */
@@ -745,8 +748,11 @@ void flushallSyncBgDone(uint64_t client_id) {
     /* Don't update blocked_us since command was processed in bg by lazy_free thread */
     updateStatsOnUnblock(c, 0 /*blocked_us*/, elapsedUs(c->bstate.lazyfreeStartTime), 0);
 
-    /* lazyfree bg job always succeed */
-    addReply(c, shared.ok);
+    /* Only SFLUSH command pass pointer to `SlotsFlush` */
+    if (slotsFlush)
+        replySlotsFlushAndFree(c, slotsFlush);
+    else
+        addReply(c, shared.ok);
 
     /* mark client as unblocked */
     unblockClient(c, 1);
@@ -761,10 +767,17 @@ void flushallSyncBgDone(uint64_t client_id) {
     server.current_client = old_client;
 }
 
-void flushCommandCommon(client *c, int isFlushAll) {
-    int blocking_async = 0; /* FLUSHALL\FLUSHDB SYNC opt to run as blocking ASYNC */
-    int flags;
-    if (getFlushCommandFlags(c,&flags) == C_ERR) return;
+/* Common flush command implementation for FLUSHALL and FLUSHDB.
+ *
+ * Return 1 indicates that flush SYNC is actually running in bg as blocking ASYNC
+ * Return 0 otherwise
+ *
+ * sflush - provided only by SFLUSH command, otherwise NULL. Will be used on
+ *          completion to reply with the slots flush result. Ownership is passed
+ *          to the completion job in case of `blocking_async`.
+ */
+int flushCommandCommon(client *c, int type, int flags, SlotsFlush *sflush) {
+    int blocking_async = 0; /* Flush SYNC option to run as blocking ASYNC */
 
     /* in case of SYNC, check if we can optimize and run it in bg as blocking ASYNC */
     if ((!(flags & EMPTYDB_ASYNC)) && (!(c->flags & CLIENT_AVOID_BLOCKING_ASYNC_FLUSH))) {
@@ -773,7 +786,7 @@ void flushCommandCommon(client *c, int isFlushAll) {
         blocking_async = 1;
     }
 
-    if (isFlushAll)
+    if (type == FLUSH_TYPE_ALL)
         flushAllDataAndResetRDB(flags | EMPTYDB_NOFUNCTIONS);
     else
         server.dirty += emptyData(c->db->id,flags | EMPTYDB_NOFUNCTIONS,NULL);
@@ -791,10 +804,9 @@ void flushCommandCommon(client *c, int isFlushAll) {
         c->bstate.timeout = 0;
         blockClient(c,BLOCKED_LAZYFREE);
-        bioCreateCompRq(BIO_WORKER_LAZY_FREE, flushallSyncBgDone, c->id);
-    } else {
-        addReply(c, shared.ok);
+        bioCreateCompRq(BIO_WORKER_LAZY_FREE, flushallSyncBgDone, c->id, sflush);
     }
 
 #if defined(USE_JEMALLOC)
     /* jemalloc 5 doesn't release pages back to the OS when there's no traffic.
      * for large databases, flushdb blocks for long anyway, so a bit more won't
@@ -802,7 +814,7 @@ void flushCommandCommon(client *c, int isFlushAll) {
      *
      * Take care purge only FLUSHDB for sync flow. FLUSHALL sync flow already
      * applied at flushAllDataAndResetRDB. Async flow will apply only later on */
-    if ((!isFlushAll) && (!(flags & EMPTYDB_ASYNC))) {
+    if ((type != FLUSH_TYPE_ALL) && (!(flags & EMPTYDB_ASYNC))) {
         /* Only clear the current thread cache.
          * Ignore the return call since this will fail if the tcache is disabled. */
         je_mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
@@ -810,20 +822,32 @@ void flushCommandCommon(client *c, int isFlushAll) {
         jemalloc_purge();
     }
 #endif
+    return blocking_async;
 }
 
 /* FLUSHALL [SYNC|ASYNC]
  *
  * Flushes the whole server data set. */
 void flushallCommand(client *c) {
-    flushCommandCommon(c, 1);
+    int flags;
+    if (getFlushCommandFlags(c,&flags) == C_ERR) return;
+
+    /* If FLUSH SYNC isn't running as blocking async, then reply */
+    if (flushCommandCommon(c, FLUSH_TYPE_ALL, flags, NULL) == 0)
+        addReply(c, shared.ok);
 }
 
 /* FLUSHDB [SYNC|ASYNC]
  *
  * Flushes the currently SELECTed Redis DB. */
 void flushdbCommand(client *c) {
-    flushCommandCommon(c, 0);
+    int flags;
+    if (getFlushCommandFlags(c,&flags) == C_ERR) return;
+
+    /* If FLUSH SYNC isn't running as blocking async, then reply */
+    if (flushCommandCommon(c, FLUSH_TYPE_DB,flags, NULL) == 0)
+        addReply(c, shared.ok);
 }
 
 /* This command implements DEL and UNLINK. */
......
@@ -3411,7 +3411,18 @@ int dbDelete(redisDb *db, robj *key);
 robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o);
 robj *dbUnshareStringValueWithDictEntry(redisDb *db, robj *key, robj *o, dictEntry *de);
 
+#define FLUSH_TYPE_ALL 0
+#define FLUSH_TYPE_DB 1
+#define FLUSH_TYPE_SLOTS 2
+typedef struct SlotRange {
+    unsigned short first, last;
+} SlotRange;
+typedef struct SlotsFlush {
+    int numRanges;
+    SlotRange ranges[];
+} SlotsFlush;
+void replySlotsFlushAndFree(client *c, SlotsFlush *sflush);
+int flushCommandCommon(client *c, int type, int flags, SlotsFlush *sflush);
 #define EMPTYDB_NO_FLAGS 0 /* No flags. */
 #define EMPTYDB_ASYNC (1<<0) /* Reclaim memory in another thread. */
 #define EMPTYDB_NOFUNCTIONS (1<<1) /* Indicate not to flush the functions. */
@@ -3782,6 +3793,7 @@ void migrateCommand(client *c);
 void askingCommand(client *c);
 void readonlyCommand(client *c);
 void readwriteCommand(client *c);
+void sflushCommand(client *c);
 int verifyDumpPayload(unsigned char *p, size_t len, uint16_t *rdbver_ptr);
 void dumpCommand(client *c);
 void objectCommand(client *c);
......
@@ -13,6 +13,72 @@ test "Cluster should start ok" {
set master1 [Rn 0]
set master2 [Rn 1]

test "SFLUSH - Errors and output validation" {
assert_match "* 0-8191*" [$master1 CLUSTER NODES]
assert_match "* 8192-16383*" [$master2 CLUSTER NODES]
assert_match "*0 8191*" [$master1 CLUSTER SLOTS]
assert_match "*8192 16383*" [$master2 CLUSTER SLOTS]
# make master1 non-continuous slots
$master1 cluster DELSLOTSRANGE 1000 2000
# Test SFLUSH errors validation
assert_error {ERR wrong number of arguments*} {$master1 SFLUSH 4}
assert_error {ERR wrong number of arguments*} {$master1 SFLUSH 4 SYNC}
assert_error {ERR Invalid or out of range slot} {$master1 SFLUSH x 4}
assert_error {ERR Invalid or out of range slot} {$master1 SFLUSH 0 12x}
assert_error {ERR Slot 3 specified multiple times} {$master1 SFLUSH 2 4 3 5}
assert_error {ERR start slot number 8 is greater than*} {$master1 SFLUSH 8 4}
assert_error {ERR wrong number of arguments*} {$master1 SFLUSH 4 8 10}
assert_error {ERR wrong number of arguments*} {$master1 SFLUSH 0 999 2001 8191 ASYNCX}
# Test SFLUSH output validation
assert_match "" [$master1 SFLUSH 2 4]
assert_match "" [$master1 SFLUSH 0 4]
assert_match "" [$master2 SFLUSH 0 4]
assert_match "" [$master1 SFLUSH 1 8191]
assert_match "" [$master1 SFLUSH 0 8190]
assert_match "" [$master1 SFLUSH 0 998 2001 8191]
assert_match "" [$master1 SFLUSH 1 999 2001 8191]
assert_match "" [$master1 SFLUSH 0 999 2001 8190]
assert_match "" [$master1 SFLUSH 0 999 2002 8191]
assert_match "{0 999} {2001 8191}" [$master1 SFLUSH 0 999 2001 8191]
assert_match "{0 999} {2001 8191}" [$master1 SFLUSH 0 8191]
assert_match "{0 999} {2001 8191}" [$master1 SFLUSH 0 4000 4001 8191]
assert_match "" [$master2 SFLUSH 8193 16383]
assert_match "" [$master2 SFLUSH 8192 16382]
assert_match "{8192 16383}" [$master2 SFLUSH 8192 16383]
assert_match "{8192 16383}" [$master2 SFLUSH 8192 16383 SYNC]
assert_match "{8192 16383}" [$master2 SFLUSH 8192 16383 ASYNC]
assert_match "{8192 16383}" [$master2 SFLUSH 8192 9000 9001 16383]
assert_match "{8192 16383}" [$master2 SFLUSH 8192 9000 9001 16383 SYNC]
assert_match "{8192 16383}" [$master2 SFLUSH 8192 9000 9001 16383 ASYNC]
# restore master1 continuous slots
$master1 cluster ADDSLOTSRANGE 1000 2000
}
test "SFLUSH - Deletes the keys with argument <NONE>/SYNC/ASYNC" {
foreach op {"" "SYNC" "ASYNC"} {
for {set i 0} {$i < 100} {incr i} {
catch {$master1 SET key$i val$i}
catch {$master2 SET key$i val$i}
}
assert {[$master1 DBSIZE] > 0}
assert {[$master2 DBSIZE] > 0}
if {$op eq ""} {
assert_match "{0 8191}" [ $master1 SFLUSH 0 8191]
} else {
assert_match "{0 8191}" [ $master1 SFLUSH 0 8191 $op]
}
assert {[$master1 DBSIZE] == 0}
assert {[$master2 DBSIZE] > 0}
assert_match "{8192 16383}" [ $master2 SFLUSH 8192 16383]
assert {[$master2 DBSIZE] == 0}
}
}
test "Continuous slots distribution" { test "Continuous slots distribution" {
assert_match "* 0-8191*" [$master1 CLUSTER NODES] assert_match "* 0-8191*" [$master1 CLUSTER NODES]
assert_match "* 8192-16383*" [$master2 CLUSTER NODES] assert_match "* 8192-16383*" [$master2 CLUSTER NODES]
......