Commit 923c6c19 authored by Vitaly Arbuzov

Put DB iterator on stack

parent 88bc7687
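At every call site the change is mechanical: instead of obtaining a heap-allocated iterator from dbGetIterator() and freeing it with dbReleaseIterator(), callers now declare a dbIterator on the stack and initialize it with dbInitIterator() (or dbInitIteratorAt() to start from a given slot). A minimal before/after sketch of the pattern, assuming the internal declarations visible in this diff (not a standalone program):

    /* Before: iterator is zmalloc'd and must be released. */
    dict *d;
    dbIterator *dbit = dbGetIterator(db);
    while ((d = dbNextDict(dbit))) {
        /* visit each sub-dictionary */
    }
    dbReleaseIterator(dbit);

    /* After: iterator lives on the caller's stack; there is nothing to free. */
    dict *d;
    dbIterator dbit;
    dbInitIterator(&dbit, db);
    while ((d = dbNextDict(&dbit))) {
        /* visit each sub-dictionary */
    }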
@@ -2239,8 +2239,9 @@ int rewriteAppendOnlyFileRio(rio *aof) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db + j;
dict *d;
dbIterator *dbit = dbGetIterator(db);
while ((d = dbNextDict(dbit))) {
dbIterator dbit;
dbInitIterator(&dbit, db);
while ((d = dbNextDict(&dbit))) {
if (dictSize(d) == 0) continue;
di = dictGetSafeIterator(d);
@@ -2317,7 +2318,6 @@ int rewriteAppendOnlyFileRio(rio *aof) {
dictReleaseIterator(di);
di = NULL;
}
dbReleaseIterator(dbit);
}
return C_OK;
@@ -49,10 +49,10 @@ int expireIfNeeded(redisDb *db, robj *key, int flags);
int keyIsExpired(redisDb *db, robj *key);
/* Returns next dictionary from the iterator, or NULL if iteration is complete. */
dict *dbNextDict(dbIterator *iter) {
while (iter->index < iter->db->dict_count - 1) {
iter->index++;
dict *d = iter->db->dict[iter->index];
dict *dbNextDict(dbIterator *dbit) {
while (dbit->index < dbit->db->dict_count - 1) {
dbit->index++;
dict *d = dbit->db->dict[dbit->index];
/* There is a single dictionary in non-cluster mode;
 * in cluster mode, return the first non-empty sub-dictionary. */
if (!server.cluster_enabled || dictSize(d) > 0) return d;
@@ -62,10 +62,8 @@ dict *dbNextDict(dbIterator *iter) {
void dbInitIteratorAt(dbIterator *dbit, redisDb *db, int slot) {
serverAssert(slot == 0 || server.cluster_enabled);
dbIterator *iter = zmalloc(sizeof(*iter));
iter->db = db;
iter->index = slot - 1; /* Start one slot ahead, as dbNextDict increments index right away. */
return iter;
dbit->db = db;
dbit->index = slot - 1; /* Start one slot ahead, as dbNextDict increments index right away. */
}
/* Returns DB iterator that can be used to iterate through sub-dictionaries.
@@ -78,22 +76,18 @@ void dbInitIterator(dbIterator *dbit, redisDb *db) {
/* Returns next dictionary strictly after provided slot and updates slot id in the supplied reference. */
dict *dbGetNextUnvisitedSlot(redisDb *db, int *slot) {
if (*slot < db->dict_count - 1) {
dbIterator *dbit = dbGetIteratorAt(db, *slot + 1); /* Scan on the current slot has already returned 0, find next non-empty dict. */
dict *dict = dbNextDict(dbit);
dbIterator dbit;
dbInitIteratorAt(&dbit, db, *slot + 1); /* Scan on the current slot has already returned 0, find next non-empty dict. */
dict *dict = dbNextDict(&dbit);
if (dict != NULL) {
*slot = dbit->index;
*slot = dbit.index;
return dict;
}
dbReleaseIterator(dbit);
}
*slot = -1;
return NULL;
}
void dbReleaseIterator(dbIterator *iter) {
zfree(iter);
}
/* Update LFU when an object is accessed.
* Firstly, decrement the counter if the decrement time is reached.
* Then logarithmically increment the counter, and update the access time. */
@@ -832,8 +826,9 @@ void keysCommand(client *c) {
long numkeys = 0;
void *replylen = addReplyDeferredLen(c);
dict *d;
dbIterator *dbit = dbGetIterator(c->db);
while ((d = dbNextDict(dbit))) {
dbIterator dbit;
dbInitIterator(&dbit, c->db);
while ((d = dbNextDict(&dbit))) {
if (dictSize(d) == 0) continue;
di = dictGetSafeIterator(d);
allkeys = (pattern[0] == '*' && plen == 1);
@@ -853,7 +848,6 @@ void keysCommand(client *c) {
break;
}
dictReleaseIterator(di);
}
dbReleaseIterator(dbit);
setDeferredArrayLen(c,replylen,numkeys);
}
@@ -1154,22 +1148,22 @@ void dbsizeCommand(client *c) {
unsigned long long int dbSize(redisDb *db) {
unsigned long long size = 0;
dict *d;
dbIterator *dbit = dbGetIterator(db);
while ((d = dbNextDict(dbit))) {
dbIterator dbit;
dbInitIterator(&dbit, db);
while ((d = dbNextDict(&dbit))) {
size += dictSize(d);
}
dbReleaseIterator(dbit);
return size;
}
unsigned long dbSlots(redisDb *db) {
unsigned long slots = 0;
dict *d;
dbIterator *dbit = dbGetIterator(db);
while ((d = dbNextDict(dbit))) {
dbIterator dbit;
dbInitIterator(&dbit, db);
while ((d = dbNextDict(&dbit))) {
slots += dictSlots(d);
}
dbReleaseIterator(dbit);
return slots;
}
@@ -1843,12 +1837,12 @@ int expireIfNeeded(redisDb *db, robj *key, int flags) {
void expandDb(const redisDb *db, uint64_t db_size) {
if (server.cluster_enabled) {
dict *d;
dbIterator *dbit = dbGetIterator((redisDb *) db);
while ((d = dbNextDict(dbit))) {
dbIterator dbit;
dbInitIterator(&dbit, (redisDb *) db);
while ((d = dbNextDict(&dbit))) {
/* We don't know the exact number of keys that would fall into each slot, but we can approximate it, assuming an even distribution. */
dictExpand(d, (db_size / server.cluster->myself->numslots));
}
dbReleaseIterator(dbit);
} else {
dictExpand(db->dict[0], db_size);
}
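For intuition on the expandDb() sizing above: with a hypothetical resize hint of db_size = 1,000,000 keys on a node that owns numslots = 1024 slots, each per-slot dictionary is pre-expanded for roughly 1,000,000 / 1024 ≈ 976 entries, i.e. the hint is spread evenly across the slots this node serves.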
@@ -2599,8 +2593,9 @@ void dbGetStats(char *buf, size_t bufsize, redisDb *db) {
dictStats *mainHtStats = NULL;
dictStats *rehashHtStats = NULL;
dict *d;
dbIterator *dbit = dbGetIterator(db);
while ((d = dbNextDict(dbit))) {
dbIterator dbit;
dbInitIterator(&dbit, db);
while ((d = dbNextDict(&dbit))) {
dictStats *stats = dictGetStatsHt(d, 0);
if (!mainHtStats) mainHtStats = stats;
else {
@@ -2616,7 +2611,6 @@ void dbGetStats(char *buf, size_t bufsize, redisDb *db) {
}
}
}
dbReleaseIterator(dbit);
l = dictGetStatsMsg(buf, bufsize, mainHtStats);
dictFreeStats(mainHtStats);
buf += l;
@@ -288,8 +288,9 @@ void computeDatasetDigest(unsigned char *final) {
redisDb *db = server.db+j;
int hasEntries = 0;
dict *d;
dbIterator *dbit = dbGetIterator(db);
while ((d = dbNextDict(dbit))) {
dbIterator dbit;
dbInitIterator(&dbit, db);
while ((d = dbNextDict(&dbit))) {
if (dictSize(d) == 0) continue;
hasEntries = 1;
di = dictGetSafeIterator(d);
@@ -314,7 +315,6 @@ void computeDatasetDigest(unsigned char *final) {
}
dictReleaseIterator(di);
}
dbReleaseIterator(dbit);
if (hasEntries) {
/* hash the DB id, so the same dataset moved in a different DB will lead to a different digest */
aux = htonl(j);
@@ -1101,7 +1101,7 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) {
/* Reallocate the dictEntry, key and value allocations in a bucket using the
* provided allocation functions in order to defrag them. */
static void dictDefragBucket(dict *d, dictEntry **bucketref, dictDefragFunctions *defragfns) {
static void dictDefragBucket(dictEntry **bucketref, dictDefragFunctions *defragfns) {
dictDefragAllocFunction *defragalloc = defragfns->defragAlloc;
dictDefragAllocFunction *defragkey = defragfns->defragKey;
dictDefragAllocFunction *defragval = defragfns->defragVal;
@@ -1322,8 +1322,9 @@ ssize_t rdbSaveDb(rio *rdb, int dbid, int rdbflags, long *key_counter) {
written += res;
dict *d;
dbIterator *dbit = dbGetIterator(db);
while ((d = dbNextDict(dbit))) {
dbIterator dbit;
dbInitIterator(&dbit, db);
while ((d = dbNextDict(&dbit))) {
if (!dictSize(d)) continue;
di = dictGetSafeIterator(d);
/* Iterate this DB writing every entry */
@@ -1359,7 +1360,6 @@ ssize_t rdbSaveDb(rio *rdb, int dbid, int rdbflags, long *key_counter) {
dictReleaseIterator(di);
di = NULL;
}
dbReleaseIterator(dbit);
return written;
werr:
@@ -589,12 +589,12 @@ int htNeedsResize(dict *dict) {
* we resize the hash table to save memory */
void tryResizeHashTables(int dbid) {
dict *d;
dbIterator *dbit = dbGetIterator(&server.db[dbid]);
while ((d = dbNextDict(dbit))) {
dbIterator dbit;
dbInitIterator(&dbit, &server.db[dbid]);
while ((d = dbNextDict(&dbit))) {
if (htNeedsResize(d))
dictResize(d);
}
dbReleaseIterator(dbit);
if (htNeedsResize(server.db[dbid].expires))
dictResize(server.db[dbid].expires);
}
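A detail the rewrite preserves: dbInitIteratorAt() stores slot - 1 because dbNextDict() advances the index before reading it, so the first call returns the dictionary at the requested slot and NULL once dict_count is exhausted. A self-contained toy sketch of that pre-increment contract (the toyDb/toyIterator types and functions below are simplified stand-ins, not the server's own structs):

    #include <stdio.h>

    typedef struct {
        const char **dicts;   /* stand-in for redisDb->dict[] */
        int dict_count;
    } toyDb;

    typedef struct {
        toyDb *db;
        int index;
    } toyIterator;

    /* Mirrors dbInitIteratorAt(): start one slot behind the requested one. */
    static void toyInitIteratorAt(toyIterator *it, toyDb *db, int slot) {
        it->db = db;
        it->index = slot - 1;
    }

    /* Mirrors the shape of dbNextDict(): pre-increment, then read. The real
     * function loops here to skip empty sub-dictionaries in cluster mode. */
    static const char *toyNextDict(toyIterator *it) {
        if (it->index < it->db->dict_count - 1) {
            it->index++;
            return it->db->dicts[it->index];
        }
        return NULL;
    }

    int main(void) {
        const char *names[] = {"slot0", "slot1", "slot2", "slot3"};
        toyDb db = {names, 4};

        toyIterator it;                 /* on the stack, like the patched code */
        toyInitIteratorAt(&it, &db, 2);
        for (const char *d; (d = toyNextDict(&it)) != NULL; )
            printf("%s\n", d);          /* prints slot2, then slot3 */
        return 0;
    }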