Commit 6a10146f authored by Vitaly Arbuzov

Refactor rdb.c to use db iterator to iterate over keys

parent 41fab4ec
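
In short, rdbSaveDb no longer walks each non-empty per-slot dict with dbIteratorNextDict and a dictGetSafeIterator; it pulls entries straight from the db iterator with dbIteratorNext and watches dbit.cur_slot to detect slot boundaries. The sketch below is a minimal, simplified illustration of that shape change, built only from the identifiers visible in the diff; it is not the actual rdb.c code, and save_entry() is a hypothetical stand-in for the per-key RDB-writing body.

/* Before: enumerate each non-empty per-slot dict, then its keys. */
static void iterate_before(redisDb *db) {
    dbIterator dbit;
    dbIteratorInit(&dbit, db);
    dict *d;
    while ((d = dbIteratorNextDict(&dbit))) {
        if (!dictSize(d)) continue;
        dictIterator *di = dictGetSafeIterator(d);
        dictEntry *de;
        while ((de = dictNext(di)) != NULL)
            save_entry(de, dbit.cur_slot);      /* hypothetical per-key work */
        dictReleaseIterator(di);
    }
}

/* After: pull keys directly from the db iterator; it exposes the slot of
 * the current entry, so a slot change is detected by comparing cur_slot. */
static void iterate_after(redisDb *db) {
    dbIterator dbit;
    dbIteratorInit(&dbit, db);
    dictEntry *de;
    int last_slot = -1;
    while ((de = dbIteratorNext(&dbit)) != NULL) {
        if (dbit.cur_slot != last_slot)
            last_slot = dbit.cur_slot;          /* a new slot's keys begin here */
        save_entry(de, dbit.cur_slot);          /* hypothetical per-key work */
    }
}

Flattening the iteration this way removes the per-dict iterator bookkeeping, and the werr cleanup path for it, from rdbSaveDb itself.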
@@ -1293,7 +1293,6 @@ werr:
 }

 ssize_t rdbSaveDb(rio *rdb, int dbid, int rdbflags, long *key_counter) {
-    dictIterator *di = NULL;
     dictEntry *de;
     ssize_t written = 0;
     ssize_t res;
@@ -1320,61 +1319,52 @@ ssize_t rdbSaveDb(rio *rdb, int dbid, int rdbflags, long *key_counter) {
     if ((res = rdbSaveLen(rdb,expires_size)) < 0) goto werr;
     written += res;

-    dict *d;
     dbIterator dbit;
     dbIteratorInit(&dbit, db);
-    while ((d = dbIteratorNextDict(&dbit))) {
-        if (!dictSize(d)) continue;
-
+    int last_slot = -1;
+    /* Iterate this DB writing every entry */
+    while ((de = dbIteratorNext(&dbit)) != NULL) {
         /* Save slot info. */
-        if (server.cluster_enabled) {
+        if (server.cluster_enabled && dbit.cur_slot != last_slot) {
+            serverAssert(dbit.cur_slot >= 0 && dbit.cur_slot < CLUSTER_SLOTS);
             if ((res = rdbSaveType(rdb, RDB_OPCODE_SLOT_INFO)) < 0) goto werr;
             written += res;
             if ((res = rdbSaveLen(rdb, dbit.cur_slot)) < 0) goto werr;
             written += res;
-            if ((res = rdbSaveLen(rdb, dictSize(d))) < 0) goto werr;
+            if ((res = rdbSaveLen(rdb, dictSize(db->dict[dbit.cur_slot]))) < 0) goto werr;
             written += res;
+            last_slot = dbit.cur_slot;
         }
-
-        di = dictGetSafeIterator(d);
-        /* Iterate this DB writing every entry */
-        while ((de = dictNext(di)) != NULL) {
-            sds keystr = dictGetKey(de);
-            robj key, *o = dictGetVal(de);
-            long long expire;
-            size_t rdb_bytes_before_key = rdb->processed_bytes;
-
-            initStaticStringObject(key, keystr);
-            expire = getExpire(db, &key);
-            if ((res = rdbSaveKeyValuePair(rdb, &key, o, expire, dbid)) < 0) goto werr;
-            written += res;
-
-            /* In fork child process, we can try to release memory back to the
-             * OS and possibly avoid or decrease COW. We give the dismiss
-             * mechanism a hint about an estimated size of the object we stored. */
-            size_t dump_size = rdb->processed_bytes - rdb_bytes_before_key;
-            if (server.in_fork_child) dismissObject(o, dump_size);
-
-            /* Update child info every 1 second (approximately).
-             * in order to avoid calling mstime() on each iteration, we will
-             * check the diff every 1024 keys */
-            if (((*key_counter)++ & 1023) == 0) {
-                long long now = mstime();
-                if (now - info_updated_time >= 1000) {
-                    sendChildInfo(CHILD_INFO_TYPE_CURRENT_INFO, *key_counter, pname);
-                    info_updated_time = now;
-                }
-            }
-        }
-        dictReleaseIterator(di);
-        di = NULL;
+        sds keystr = dictGetKey(de);
+        robj key, *o = dictGetVal(de);
+        long long expire;
+        size_t rdb_bytes_before_key = rdb->processed_bytes;
+
+        initStaticStringObject(key, keystr);
+        expire = getExpire(db, &key);
+        if ((res = rdbSaveKeyValuePair(rdb, &key, o, expire, dbid)) < 0) goto werr;
+        written += res;
+
+        /* In fork child process, we can try to release memory back to the
+         * OS and possibly avoid or decrease COW. We give the dismiss
+         * mechanism a hint about an estimated size of the object we stored. */
+        size_t dump_size = rdb->processed_bytes - rdb_bytes_before_key;
+        if (server.in_fork_child) dismissObject(o, dump_size);
+
+        /* Update child info every 1 second (approximately).
+         * in order to avoid calling mstime() on each iteration, we will
+         * check the diff every 1024 keys */
+        if (((*key_counter)++ & 1023) == 0) {
+            long long now = mstime();
+            if (now - info_updated_time >= 1000) {
+                sendChildInfo(CHILD_INFO_TYPE_CURRENT_INFO, *key_counter, pname);
+                info_updated_time = now;
+            }
+        }
     }
     return written;

 werr:
-    if (di) dictReleaseIterator(di);
     return -1;
 }
@@ -451,7 +451,7 @@ static int scriptVerifyClusterState(scriptRunCtx *run_ctx, client *c, client *or
             original_c->slot = hashslot;
         } else if (original_c->slot != hashslot) {
             *err = sdsnew("Script attempted to access keys that do not hash to "
-                              "the same slot");
+                          "the same slot");
             return C_ERR;
         }
     }
@@ -398,7 +398,7 @@ int dictExpandAllowed(size_t moreMem, double usedRatio) {

 void dictRehashingStarted(dict *d) {
     if (!server.cluster_enabled || !server.activerehashing) return;
     /* Safety check against queue overflow. */
-    if (listLength(server.db[0].rehashing) > INCREMENTAL_REHASHING_MAX_QUEUE_SIZE) return;
+    if (listLength(server.db[0].rehashing) > CLUSTER_SLOTS) return;
     listAddNodeTail(server.db[0].rehashing, d);
 }
@@ -4149,6 +4149,7 @@ int processCommand(client *c) {
         blockPostponeClient(c);
         return C_OK;
     }
+
     /* Exec the command */
     if (c->flags & CLIENT_MULTI &&
         c->cmd->proc != execCommand &&
@@ -4167,6 +4168,7 @@ int processCommand(client *c) {
         if (listLength(server.ready_keys))
             handleClientsBlockedOnKeys();
     }
+
     return C_OK;
 }
@@ -138,7 +138,6 @@ typedef struct redisObject robj;
 #define CONFIG_BINDADDR_MAX 16
 #define CONFIG_MIN_RESERVED_FDS 32
 #define CONFIG_DEFAULT_PROC_TITLE_TEMPLATE "{title} {listen-addr} {server-mode}"
-#define INCREMENTAL_REHASHING_MAX_QUEUE_SIZE (1024*16)
 #define INCREMENTAL_REHASHING_THRESHOLD_MS 1

 /* Bucket sizes for client eviction pools. Each bucket stores clients with