Commit 8504bf18 authored by antirez

dict.c clustered buckets.

parent 560e6787
src/debug.c
@@ -434,7 +434,7 @@ void debugCommand(client *c) {
         if (getLongFromObjectOrReply(c, c->argv[2], &keys, NULL) != C_OK)
             return;
-        dictExpand(c->db->dict,keys);
+        dictExpandToOptimalSize(c->db->dict,keys);
         for (j = 0; j < keys; j++) {
             snprintf(buf,sizeof(buf),"%s:%lu",
                 (c->argc == 3) ? "key" : (char*)c->argv[3]->ptr, j);

src/dict.c: this diff is collapsed.

src/dict.h
@@ -54,10 +54,11 @@ typedef struct dictEntry {
     } v;
 } dictEntry;
 
-typedef struct dictEntrySlot {
-    unsigned long numentries;
-    dictEntry *entries;
-} dictEntrySlot;
+typedef struct dictEntryVector {
+    uint32_t used;     /* Number of used entries. */
+    uint32_t free;     /* Number of free entries (with key field = NULL). */
+    dictEntry entry[];
+} dictEntryVector;
 
 typedef struct dictType {
     unsigned int (*hashFunction)(const void *key);
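
The dict.c implementation itself is collapsed above, so what follows is only a sketch of how a lookup over one of these clustered buckets presumably works: a bucket is a single dictEntryVector allocation holding used + free entries back to back, and a probe scans that inline array, skipping free slots whose key is NULL, instead of chasing a chain of separately allocated nodes. The simplified structs and the bucketFind() helper below are illustrative stand-ins, not code from this commit.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* Simplified stand-ins for the structures in the diff above; the real
 * dictEntry also carries a union of value types. */
typedef struct entry {
    char *key;              /* NULL marks a free slot in this sketch. */
    long val;
} entry;

typedef struct entryVector {
    uint32_t used;          /* Number of used entries. */
    uint32_t free;          /* Number of free entries (key == NULL). */
    entry e[];              /* Entries stored inline, one after another. */
} entryVector;

/* Hypothetical lookup in one clustered bucket: a linear scan of the inline
 * array rather than a walk over a linked list of heap nodes. */
entry *bucketFind(entryVector *v, const char *key) {
    if (v == NULL) return NULL;
    for (uint32_t i = 0; i < v->used + v->free; i++) {
        if (v->e[i].key && strcmp(v->e[i].key, key) == 0)
            return &v->e[i];
    }
    return NULL;
}

int main(void) {
    /* One bucket with room for 4 entries, 2 of them in use. */
    entryVector *v = calloc(1, sizeof(*v) + 4 * sizeof(entry));
    v->used = 2; v->free = 2;
    v->e[0].key = "foo"; v->e[0].val = 1;
    v->e[1].key = "bar"; v->e[1].val = 2;

    entry *found = bucketFind(v, "bar");
    printf("bar -> %ld\n", found ? found->val : -1L);
    free(v);
    return 0;
}

The apparent payoff of clustering is memory locality: every key that hashes to a bucket lives in one contiguous allocation, so a collision costs a few adjacent cache lines instead of a pointer dereference per chained node.
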
@@ -71,7 +72,7 @@ typedef struct dictType {
 /* This is our hash table structure. Every dictionary has two of this as we
  * implement incremental rehashing, for the old to the new table. */
 typedef struct dictht {
-    dictEntrySlot **table;
+    dictEntryVector **table;
     unsigned long size;
     unsigned long sizemask;
     unsigned long used;
@@ -93,7 +94,7 @@ typedef struct dictIterator {
     dict *d;
     long index;
     int table, safe;
-    dictEntry *entry, *nextEntry;
+    long entry;     /* Current entry position in the cluster. */
     /* unsafe iterator fingerprint for misuse detection. */
     long long fingerprint;
 } dictIterator;
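
The old iterator remembered entry and nextEntry pointers; with entries packed into a per-bucket vector, a saved pointer could be invalidated if the vector is reallocated, while a numeric offset into the cluster stays meaningful, which is presumably why the iterator now carries a long position instead. A minimal sketch of a full-table walk under the same assumed layout as the snippet above (again an assumption, not the commit's dict.c):

#include <stddef.h>
#include <stdint.h>

typedef struct entry { char *key; long val; } entry;
typedef struct entryVector { uint32_t used, free; entry e[]; } entryVector;

/* Visit every live entry: the outer loop walks the bucket table, the inner
 * cursor is just an index into the current bucket's inline array, skipping
 * free slots (key == NULL). */
void tableScan(entryVector **table, unsigned long size, void (*fn)(entry *e)) {
    for (unsigned long b = 0; b < size; b++) {
        entryVector *v = table[b];
        if (v == NULL) continue;
        for (long pos = 0; pos < (long)(v->used + v->free); pos++) {
            if (v->e[pos].key != NULL) fn(&v->e[pos]);
        }
    }
}
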
@@ -153,6 +154,7 @@ typedef void (dictScanFunction)(void *privdata, const dictEntry *de);
 /* API */
 dict *dictCreate(dictType *type, void *privDataPtr);
 int dictExpand(dict *d, unsigned long size);
+int dictExpandToOptimalSize(dict *d, unsigned long entries);
 int dictAdd(dict *d, void *key, void *val);
 dictEntry *dictAddRaw(dict *d, void *key);
 int dictReplace(dict *d, void *key, void *val);

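Every call site below switches from dictExpand() to the new dictExpandToOptimalSize() and passes the expected number of entries. The policy itself lives in the collapsed dict.c, but the name and the clustered layout suggest it sizes the bucket table for several entries per bucket rather than roughly one bucket per entry. A hypothetical version of that sizing, with a made-up fill factor, could look like this (an assumption, not the commit's code):

#include <stdio.h>

/* Assumed average number of entries a clustered bucket is sized for; the
 * real value, if the implementation works this way at all, is in the
 * collapsed dict.c. */
#define ASSUMED_ENTRIES_PER_BUCKET 4

static unsigned long nextPowerOfTwo(unsigned long n) {
    unsigned long p = 1;
    while (p < n) p *= 2;
    return p;
}

/* Pick a bucket count for an expected number of entries: divide by the
 * assumed per-bucket capacity, then round up to a power of two so the
 * usual sizemask indexing still works. */
unsigned long optimalBuckets(unsigned long entries) {
    unsigned long buckets =
        (entries + ASSUMED_ENTRIES_PER_BUCKET - 1) / ASSUMED_ENTRIES_PER_BUCKET;
    return nextPowerOfTwo(buckets ? buckets : 1);
}

int main(void) {
    /* e.g. an RDB file announcing one million keys for the main dict. */
    printf("%lu entries -> %lu buckets\n", 1000000UL, optimalBuckets(1000000UL));
    return 0;
}

Callers such as the RDB loader and zunionInterGenericCommand already know the final cardinality up front, so presizing this way lets the bulk load skip incremental rehashing entirely, as the comments in the hunks below note.
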
src/rdb.c
@@ -1086,7 +1086,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb) {
             /* It's faster to expand the dict to the right size asap in order
              * to avoid rehashing */
             if (len > DICT_HT_INITIAL_SIZE)
-                dictExpand(o->ptr,len);
+                dictExpandToOptimalSize(o->ptr,len);
         } else {
             o = createIntsetObject();
         }
@@ -1105,7 +1105,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb) {
                     o->ptr = intsetAdd(o->ptr,llval,NULL);
                 } else {
                     setTypeConvert(o,OBJ_ENCODING_HT);
-                    dictExpand(o->ptr,len);
+                    dictExpandToOptimalSize(o->ptr,len);
                 }
             }
@@ -1452,8 +1452,8 @@ int rdbLoad(char *filename) {
                 goto eoferr;
             if ((expires_size = rdbLoadLen(&rdb,NULL)) == RDB_LENERR)
                 goto eoferr;
-            dictExpand(db->dict,db_size);
-            dictExpand(db->expires,expires_size);
+            dictExpandToOptimalSize(db->dict,db_size);
+            dictExpandToOptimalSize(db->expires,expires_size);
             continue; /* Read type again. */
         } else if (type == RDB_OPCODE_AUX) {
             /* AUX: generic string-string fields. Use to add state to RDB

src/t_set.c
@@ -243,7 +243,7 @@ void setTypeConvert(robj *setobj, int enc) {
         sds element;
 
         /* Presize the dict to avoid rehashing */
-        dictExpand(d,intsetLen(setobj->ptr));
+        dictExpandToOptimalSize(d,intsetLen(setobj->ptr));
 
         /* To add the elements we extract integers and create redis objects */
         si = setTypeInitIterator(setobj);

src/t_zset.c
@@ -2268,7 +2268,7 @@ void zunionInterGenericCommand(client *c, robj *dstkey, int op) {
         if (setnum) {
             /* Our union is at least as large as the largest set.
              * Resize the dictionary ASAP to avoid useless rehashing. */
-            dictExpand(accumulator,zuiLength(&src[setnum-1]));
+            dictExpandToOptimalSize(accumulator,zuiLength(&src[setnum-1]));
         }
 
         /* Step 1: Create a dictionary of elements -> aggregated-scores
@@ -2313,7 +2313,7 @@ void zunionInterGenericCommand(client *c, robj *dstkey, int op) {
         /* We now are aware of the final size of the resulting sorted set,
          * let's resize the dictionary embedded inside the sorted set to the
          * right size, in order to save rehashing time. */
-        dictExpand(dstzset->dict,dictSize(accumulator));
+        dictExpandToOptimalSize(dstzset->dict,dictSize(accumulator));
 
         while((de = dictNext(di)) != NULL) {
             sds ele = dictGetKey(de);