Unverified Commit 8cd62f82 authored by guybe7, committed by GitHub

Refactor the per-slot dict-array db.c into a new kvstore data structure (#12822)

# Description
Gather most of the scattered `redisDb`-related code from the per-slot
dict PR (#11695) and turn it into a new data structure, `kvstore`, i.e.
a class that represents an array of dictionaries.

# Motivation
The main motivation is code cleanliness: the idea of using an array of
dictionaries is very well-suited to becoming a self-contained data
structure.
This allowed cleaning up some ugly code, among other things: loops that
run twice (once over the main dict and once over the expires dict), and
duplicated code for allocating and releasing this data structure.

# Notes
1. This PR reverts the part of https://github.com/redis/redis/pull/12848
where the `rehashing` list was global (handling rehashing `dict`s is
the responsibility of `kvstore`, and should not be managed by the
server)
2. This PR also replaces the type of `server.pubsubshard_channels` from
`dict**` to `kvstore` (original PR:
https://github.com/redis/redis/pull/12804). After that was done,
`server.pubsub_channels` was also changed to a `kvstore` (with only one
`dict`, which seems odd) just to make the code cleaner by giving it the
same type as `server.pubsubshard_channels`; see
`pubsubtype.serverPubSubChannels`
3. The keys and expires kvstores are currently configured to allocate
the individual dicts only when the first key is added (unlike before,
when they were allocated in advance), but they won't release them when
the last key is deleted.

Worth mentioning that, due to this change, the reply of DEBUG
HTSTATS changes when no keys were ever added to the db.

before:
```
127.0.0.1:6379> DEBUG htstats 9
[Dictionary HT]
Hash table 0 stats (main hash table):
No stats available for empty dictionaries
[Expires HT]
Hash table 0 stats (main hash table):
No stats available for empty dictionaries
```

after:
```
127.0.0.1:6379> DEBUG htstats 9
[Dictionary HT]
[Expires HT]
```
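
For context, here is a minimal, self-contained sketch of the kvstore idea
described above. All names in it (`toykvstore`, `kvsCreate`, `kvsAdd`,
`kvsSize`) are hypothetical stand-ins, not the actual `kvstore.h` API; the
point is only that the container owns the dict array, allocates individual
dicts on demand, and maintains the aggregate stats that previously had to be
tracked manually in `redisDb`:

```
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for a real hash table; only tracks keys. */
typedef struct toydict {
    char **keys;
    size_t used, cap;
} toydict;

/* An array of dicts plus the aggregate bookkeeping that used to live
 * alongside redisDb, now owned by the container itself. */
typedef struct toykvstore {
    toydict **dicts;              /* one dict per slot, NULL until first use */
    int num_dicts;                /* 1 outside cluster mode, 16384 inside */
    unsigned long long key_count; /* total keys across all dicts */
} toykvstore;

toykvstore *kvsCreate(int num_dicts) {
    toykvstore *kvs = calloc(1, sizeof(*kvs));
    kvs->num_dicts = num_dicts;
    kvs->dicts = calloc(num_dicts, sizeof(toydict *));
    return kvs;
}

/* Dicts are allocated on demand, mirroring the on-demand allocation
 * behavior described in note 3 above. */
void kvsAdd(toykvstore *kvs, int slot, const char *key) {
    toydict *d = kvs->dicts[slot];
    if (!d) d = kvs->dicts[slot] = calloc(1, sizeof(toydict));
    if (d->used == d->cap) {
        d->cap = d->cap ? d->cap * 2 : 4;
        d->keys = realloc(d->keys, d->cap * sizeof(char *));
    }
    d->keys[d->used++] = strdup(key);
    kvs->key_count++; /* aggregate bookkeeping lives in the kvstore */
}

/* Aggregate size is O(1); callers no longer loop over all slots. */
unsigned long long kvsSize(toykvstore *kvs) {
    return kvs->key_count;
}

int main(void) {
    toykvstore *keys = kvsCreate(16); /* pretend a cluster with 16 slots */
    kvsAdd(keys, 3, "foo");
    kvsAdd(keys, 7, "bar");
    printf("size=%llu\n", kvsSize(keys));
    return 0;
}
```

Under this model, aggregate queries like `kvstoreSize(server.db[j].keys)` in
the diff below need no per-subdict counters in `redisDb` itself; they move
into the kvstore (see the removal of `dbDictState` further down).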
parent f20774ec
@@ -436,63 +436,6 @@ int dictResizeAllowed(size_t moreMem, double usedRatio) {
}
}
/* Adds dictionary to the rehashing list, which allows us
* to quickly find rehash targets during incremental rehashing.
*
* Updates the bucket count in cluster-mode for the given dictionary in a DB, bucket count
* incremented with the new ht size during the rehashing phase. In non-cluster mode,
* bucket count can be retrieved directly from single dict bucket. */
void dictRehashingStarted(dict *d, dbKeyType keyType) {
dbDictMetadata *metadata = (dbDictMetadata *)dictMetadata(d);
listAddNodeTail(server.rehashing, d);
metadata->rehashing_node = listLast(server.rehashing);
if (!server.cluster_enabled) return;
unsigned long long from, to;
dictRehashingInfo(d, &from, &to);
server.db[0].sub_dict[keyType].bucket_count += to; /* Started rehashing (Add the new ht size) */
}
/* Remove dictionary from the rehashing list.
*
* Updates the bucket count for the given dictionary in a DB. It removes
* the old ht size of the dictionary from the total sum of buckets for a DB. */
void dictRehashingCompleted(dict *d, dbKeyType keyType) {
dbDictMetadata *metadata = (dbDictMetadata *)dictMetadata(d);
if (metadata->rehashing_node) {
listDelNode(server.rehashing, metadata->rehashing_node);
metadata->rehashing_node = NULL;
}
if (!server.cluster_enabled) return;
unsigned long long from, to;
dictRehashingInfo(d, &from, &to);
server.db[0].sub_dict[keyType].bucket_count -= from; /* Finished rehashing (Remove the old ht size) */
}
void dbDictRehashingStarted(dict *d) {
dictRehashingStarted(d, DB_MAIN);
}
void dbDictRehashingCompleted(dict *d) {
dictRehashingCompleted(d, DB_MAIN);
}
void dbExpiresRehashingStarted(dict *d) {
dictRehashingStarted(d, DB_EXPIRES);
}
void dbExpiresRehashingCompleted(dict *d) {
dictRehashingCompleted(d, DB_EXPIRES);
}
/* Returns the size of the DB dict metadata in bytes. */
size_t dbDictMetadataSize(dict *d) {
UNUSED(d);
/* NOTICE: this also affects overhead_ht_main and overhead_ht_expires in getMemoryOverheadData. */
return sizeof(dbDictMetadata);
}
/* Generic hash table type where keys are Redis Objects, Values
* dummy pointers. */
dictType objectKeyPointerValueDictType = {
@@ -524,6 +467,8 @@ dictType setDictType = {
NULL, /* val dup */
dictSdsKeyCompare, /* key compare */
dictSdsDestructor, /* key destructor */
NULL, /* val destructor */
NULL, /* allow to expand */
.no_value = 1, /* no values in this dict */
.keys_are_odd = 1 /* an SDS string is always an odd pointer */
};
@@ -536,7 +481,7 @@ dictType zsetDictType = {
dictSdsKeyCompare, /* key compare */
NULL, /* Note: SDS string shared & freed by skiplist */
NULL, /* val destructor */
NULL /* allow to expand */
NULL, /* allow to expand */
};
/* Db->dict, keys are sds strings, vals are Redis objects. */
@@ -548,9 +493,6 @@ dictType dbDictType = {
dictSdsDestructor, /* key destructor */
dictObjectDestructor, /* val destructor */
dictResizeAllowed, /* allow to resize */
dbDictRehashingStarted,
dbDictRehashingCompleted,
dbDictMetadataSize,
};
/* Db->expires */
@@ -562,9 +504,6 @@ dictType dbExpiresDictType = {
NULL, /* key destructor */
NULL, /* val destructor */
dictResizeAllowed, /* allow to resize */
dbExpiresRehashingStarted,
dbExpiresRehashingCompleted,
dbDictMetadataSize,
};
/* Command table. sds string -> command struct pointer. */
@@ -586,7 +525,7 @@ dictType hashDictType = {
dictSdsKeyCompare, /* key compare */
dictSdsDestructor, /* key destructor */
dictSdsDestructor, /* val destructor */
NULL /* allow to expand */
NULL, /* allow to expand */
};
/* Dict type without destructor */
@@ -693,53 +632,6 @@ dictType clientDictType = {
.no_value = 1 /* no values in this dict */
};
/* In cluster-enabled setup, this method traverses through all main/expires dictionaries (CLUSTER_SLOTS)
* and triggers a resize if the percentage of used buckets in the HT reaches (100 / HASHTABLE_MIN_FILL)
* we shrink the hash table to save memory, or expand the hash when the percentage of used buckets reached
* 100.
*
* In non cluster-enabled setup, it resize main/expires dictionary based on the same condition described above. */
void tryResizeHashTables(int dbid) {
redisDb *db = &server.db[dbid];
int dicts_per_call = min(CRON_DICTS_PER_DB, db->dict_count);
for (dbKeyType subdict = DB_MAIN; subdict <= DB_EXPIRES; subdict++) {
for (int i = 0; i < dicts_per_call; i++) {
int slot = db->sub_dict[subdict].resize_cursor;
dict *d = (subdict == DB_MAIN ? db->dict[slot] : db->expires[slot]);
if (dictShrinkIfNeeded(d) == DICT_ERR) {
dictExpandIfNeeded(d);
}
db->sub_dict[subdict].resize_cursor = (slot + 1) % db->dict_count;
}
}
}
/* Our hash table implementation performs rehashing incrementally while
* we write/read from the hash table. Still if the server is idle, the hash
* table will use two tables for a long time. So we try to use 1 millisecond
* of CPU time at every call of this function to perform some rehashing.
*
* The function returns 1 if some rehashing was performed, otherwise 0
* is returned. */
int incrementallyRehash(void) {
if (listLength(server.rehashing) == 0) return 0;
serverLog(LL_DEBUG,"Rehashing list length: %lu", listLength(server.rehashing));
/* Our goal is to rehash as many dictionaries as we can before reaching predefined threshold,
* after each dictionary completes rehashing, it removes itself from the list. */
listNode *node;
monotime timer;
elapsedStart(&timer);
while ((node = listFirst(server.rehashing))) {
uint64_t elapsed_us = elapsedUs(timer);
if (elapsed_us >= INCREMENTAL_REHASHING_THRESHOLD_US) {
break; /* Reached the time limit. */
}
dictRehashMicroseconds(listNodeValue(node), INCREMENTAL_REHASHING_THRESHOLD_US - elapsed_us);
}
return 1;
}
/* This function is called once a background process of some kind terminates,
* as we want to avoid resizing the hash tables when there is a child in order
* to play well with copy-on-write (otherwise when a resize happens lots of
@@ -1179,21 +1071,33 @@ void databasesCron(void) {
* DB we'll be able to start from the successive in the next
* cron loop iteration. */
static unsigned int resize_db = 0;
static unsigned int rehash_db = 0;
int dbs_per_call = CRON_DBS_PER_CALL;
int j;
/* Don't test more DBs than we have. */
if (dbs_per_call > server.dbnum) dbs_per_call = server.dbnum;
/* Resize */
for (j = 0; j < dbs_per_call; j++) {
tryResizeHashTables(resize_db % server.dbnum);
redisDb *db = &server.db[resize_db % server.dbnum];
kvstoreTryResizeDicts(db->keys, CRON_DICTS_PER_DB);
kvstoreTryResizeDicts(db->expires, CRON_DICTS_PER_DB);
resize_db++;
}
/* Rehash */
if (server.activerehashing) {
incrementallyRehash();
uint64_t elapsed_us = 0;
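/* kvstoreIncrementallyRehash returns the microseconds it spent, so the
 * keys and expires kvstores of all DBs share one time budget per cycle. */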
for (j = 0; j < dbs_per_call; j++) {
redisDb *db = &server.db[rehash_db % server.dbnum];
elapsed_us += kvstoreIncrementallyRehash(db->keys, INCREMENTAL_REHASHING_THRESHOLD_US);
if (elapsed_us >= INCREMENTAL_REHASHING_THRESHOLD_US)
break;
elapsed_us += kvstoreIncrementallyRehash(db->expires, INCREMENTAL_REHASHING_THRESHOLD_US);
if (elapsed_us >= INCREMENTAL_REHASHING_THRESHOLD_US)
break;
rehash_db++;
}
}
}
}
@@ -1449,9 +1353,9 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) {
for (j = 0; j < server.dbnum; j++) {
long long size, used, vkeys;
size = dbBuckets(&server.db[j], DB_MAIN);
used = dbSize(&server.db[j], DB_MAIN);
vkeys = dbSize(&server.db[j], DB_EXPIRES);
size = kvstoreBuckets(server.db[j].keys);
used = kvstoreSize(server.db[j].keys);
vkeys = kvstoreSize(server.db[j].expires);
if (used || vkeys) {
serverLog(LL_VERBOSE,"DB %d: %lld keys (%lld volatile) in %lld slots HT.",j,used,vkeys,size);
}
@@ -2669,17 +2573,6 @@ void makeThreadKillable(void) {
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
}
/* When adding fields, please check the initTempDb related logic. */
void initDbState(redisDb *db){
for (dbKeyType subdict = DB_MAIN; subdict <= DB_EXPIRES; subdict++) {
db->sub_dict[subdict].non_empty_slots = 0;
db->sub_dict[subdict].key_count = 0;
db->sub_dict[subdict].resize_cursor = 0;
db->sub_dict[subdict].slot_size_index = server.cluster_enabled ? zcalloc(sizeof(unsigned long long) * (CLUSTER_SLOTS + 1)) : NULL;
db->sub_dict[subdict].bucket_count = 0;
}
}
void initServer(void) {
int j;
@@ -2755,10 +2648,10 @@ void initServer(void) {
server.db = zmalloc(sizeof(redisDb)*server.dbnum);
/* Create the Redis databases, and initialize other internal state. */
int slot_count = (server.cluster_enabled) ? CLUSTER_SLOTS : 1;
for (j = 0; j < server.dbnum; j++) {
server.db[j].dict = dictCreateMultiple(&dbDictType, slot_count);
server.db[j].expires = dictCreateMultiple(&dbExpiresDictType,slot_count);
int slot_count_bits = (server.cluster_enabled) ? CLUSTER_SLOT_MASK_BITS : 0;
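/* kvstore sizes its dict array as a power of two: 2^slot_count_bits dicts,
 * i.e. 1 dict in standalone mode and 16384 (CLUSTER_SLOTS) in cluster mode. */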
for (j = 0; j < server.dbnum; j++) {
server.db[j].keys = kvstoreCreate(&dbDictType, slot_count_bits, KVSTORE_ALLOCATE_DICTS_ON_DEMAND);
server.db[j].expires = kvstoreCreate(&dbExpiresDictType, slot_count_bits, KVSTORE_ALLOCATE_DICTS_ON_DEMAND);
server.db[j].expires_cursor = 0;
server.db[j].blocking_keys = dictCreate(&keylistDictType);
server.db[j].blocking_keys_unblock_on_nokey = dictCreate(&objectKeyPointerValueDictType);
@@ -2767,16 +2660,15 @@ void initServer(void) {
server.db[j].id = j;
server.db[j].avg_ttl = 0;
server.db[j].defrag_later = listCreate();
server.db[j].dict_count = slot_count;
initDbState(&server.db[j]);
listSetFreeMethod(server.db[j].defrag_later,(void (*)(void*))sdsfree);
}
server.rehashing = listCreate();
evictionPoolAlloc(); /* Initialize the LRU keys pool. */
server.pubsub_channels = dictCreate(&objToDictDictType);
/* Note that server.pubsub_channels was chosen to be a kvstore (with only one dict, which
* seems odd) just to make the code cleaner by making it the same type as server.pubsubshard_channels
* (which has to be kvstore), see pubsubtype.serverPubSubChannels */
server.pubsub_channels = kvstoreCreate(&objToDictDictType, 0, KVSTORE_ALLOCATE_DICTS_ON_DEMAND);
server.pubsub_patterns = dictCreate(&objToDictDictType);
server.pubsubshard_channels = zcalloc(sizeof(dict *) * slot_count);
server.shard_channel_count = 0;
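/* Unlike the keys/expires kvstores, shard channels also pass
 * KVSTORE_FREE_EMPTY_DICTS, so a slot's dict is freed once its last
 * channel is removed. */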
server.pubsubshard_channels = kvstoreCreate(&objToDictDictType, slot_count_bits, KVSTORE_ALLOCATE_DICTS_ON_DEMAND | KVSTORE_FREE_EMPTY_DICTS);
server.pubsub_clients = 0;
server.cronloops = 0;
server.in_exec = 0;
@@ -5906,9 +5798,9 @@ sds genRedisInfoString(dict *section_dict, int all_sections, int everything) {
"current_eviction_exceeded_time:%lld\r\n", current_eviction_exceeded_time / 1000,
"keyspace_hits:%lld\r\n", server.stat_keyspace_hits,
"keyspace_misses:%lld\r\n", server.stat_keyspace_misses,
"pubsub_channels:%ld\r\n", dictSize(server.pubsub_channels),
"pubsub_channels:%llu\r\n", kvstoreSize(server.pubsub_channels),
"pubsub_patterns:%lu\r\n", dictSize(server.pubsub_patterns),
"pubsubshard_channels:%llu\r\n", server.shard_channel_count,
"pubsubshard_channels:%llu\r\n", kvstoreSize(server.pubsubshard_channels),
"latest_fork_usec:%lld\r\n", server.stat_fork_time,
"total_forks:%lld\r\n", server.stat_total_forks,
"migrate_cached_sockets:%ld\r\n", dictSize(server.migrate_cached_sockets),
@@ -6135,8 +6027,8 @@ sds genRedisInfoString(dict *section_dict, int all_sections, int everything) {
for (j = 0; j < server.dbnum; j++) {
long long keys, vkeys;
keys = dbSize(&server.db[j], DB_MAIN);
vkeys = dbSize(&server.db[j], DB_EXPIRES);
keys = kvstoreSize(server.db[j].keys);
vkeys = kvstoreSize(server.db[j].expires);
if (keys || vkeys) {
info = sdscatprintf(info,
"db%d:keys=%lld,expires=%lld,avg_ttl=%lld\r\n",
@@ -67,6 +67,7 @@ typedef long long ustime_t; /* microsecond time type. */
#include "ae.h" /* Event driven programming library */
#include "sds.h" /* Dynamic safe strings */
#include "dict.h" /* Hash tables */
#include "kvstore.h" /* Slot-based hash table */
#include "adlist.h" /* Linked lists */
#include "zmalloc.h" /* total memory usage aware version of malloc/free */
#include "anet.h" /* Networking the easy way */
@@ -970,31 +971,12 @@ typedef struct replBufBlock {
char buf[];
} replBufBlock;
/* When adding fields, please check the swap db related logic. */
typedef struct dbDictState {
int resize_cursor; /* Cron job uses this cursor to gradually resize all dictionaries. */
int non_empty_slots; /* The number of non-empty slots. */
unsigned long long key_count; /* Total number of keys in this DB. */
unsigned long long bucket_count; /* Total number of buckets in this DB across dictionaries (only used for cluster-enabled). */
unsigned long long *slot_size_index; /* Binary indexed tree (BIT) that describes cumulative key frequencies up until given slot. */
} dbDictState;
typedef enum dbKeyType {
DB_MAIN,
DB_EXPIRES
} dbKeyType;
/* Dict metadata for database, used for record the position in rehashing list. */
typedef struct dbDictMetadata {
listNode *rehashing_node; /* list node in rehashing list */
} dbDictMetadata;
/* Redis database representation. There are multiple databases identified
* by integers from 0 (the default database) up to the max configured
* database. The database number is the 'id' field in the structure. */
typedef struct redisDb {
dict **dict; /* The keyspace for this DB */
dict **expires; /* Timeout of keys with a timeout set */
kvstore *keys; /* The keyspace for this DB */
kvstore *expires; /* Timeout of keys with a timeout set */
dict *blocking_keys; /* Keys with clients waiting for data (BLPOP)*/
dict *blocking_keys_unblock_on_nokey; /* Keys with clients waiting for
* data, and should be unblocked if key is deleted (XREADGROUP).
@@ -1005,8 +987,6 @@ typedef struct redisDb {
long long avg_ttl; /* Average TTL, just for stats */
unsigned long expires_cursor; /* Cursor of the active expire cycle. */
list *defrag_later; /* List of key names to attempt to defrag one by one, gradually. */
int dict_count; /* Indicates total number of dictionaries owned by this DB, 1 dict per slot in cluster mode. */
dbDictState sub_dict[2]; /* Metadata for main and expires dictionaries */
} redisDb;
/* forward declaration for functions ctx */
@@ -1574,7 +1554,6 @@ struct redisServer {
int hz; /* serverCron() calls frequency in hertz */
int in_fork_child; /* indication that this is a fork child */
redisDb *db;
list *rehashing; /* List of dictionaries in DBs that are currently rehashing. */
dict *commands; /* Command table */
dict *orig_commands; /* Command table before command renaming. */
aeEventLoop *el;
@@ -1994,12 +1973,11 @@ struct redisServer {
size_t blocking_op_nesting; /* Nesting level of blocking operation, used to reset blocked_last_cron. */
long long blocked_last_cron; /* Indicate the mstime of the last time we did cron jobs from a blocking operation */
/* Pubsub */
dict *pubsub_channels; /* Map channels to list of subscribed clients */
kvstore *pubsub_channels; /* Map channels to list of subscribed clients */
dict *pubsub_patterns; /* A dict of pubsub_patterns */
int notify_keyspace_events; /* Events to propagate via Pub/Sub. This is an
xor of NOTIFY_... flags. */
dict **pubsubshard_channels; /* Map shard channels in every slot to list of subscribed clients */
unsigned long long shard_channel_count;
kvstore *pubsubshard_channels; /* Map shard channels in every slot to list of subscribed clients */
unsigned int pubsub_clients; /* # of clients in Pub/Sub mode */
/* Cluster */
int cluster_enabled; /* Is cluster enabled? */
@@ -2445,20 +2423,6 @@ typedef struct {
unsigned char *lpi; /* listpack iterator */
} setTypeIterator;
typedef struct dbIterator dbIterator;
/* DB iterator specific functions */
dbIterator *dbIteratorInit(redisDb *db, dbKeyType keyType);
void dbReleaseIterator(dbIterator *dbit);
dict *dbIteratorNextDict(dbIterator *dbit);
dict *dbGetDictFromIterator(dbIterator *dbit);
int dbIteratorGetCurrentSlot(dbIterator *dbit);
dictEntry *dbIteratorNext(dbIterator *iter);
/* SCAN specific commands for easy cursor manipulation, shared between main code and modules. */
int getAndClearSlotIdFromCursor(unsigned long long *cursor);
void addSlotIdToCursor(int slot, unsigned long long *cursor);
/* Structure to hold hash iteration abstraction. Note that iteration over
* hashes involves both fields and values. Because it is possible that
* not both are required, store pointers in the iterator to avoid
@@ -3143,21 +3107,16 @@ void dismissMemoryInChild(void);
#define RESTART_SERVER_GRACEFULLY (1<<0) /* Do proper shutdown. */
#define RESTART_SERVER_CONFIG_REWRITE (1<<1) /* CONFIG REWRITE before restart.*/
int restartServer(int flags, mstime_t delay);
unsigned long long int dbSize(redisDb *db, dbKeyType keyType);
int dbNonEmptySlots(redisDb *db, dbKeyType keyType);
int getKeySlot(sds key);
int calculateKeySlot(sds key);
unsigned long dbBuckets(redisDb *db, dbKeyType keyType);
size_t dbMemUsage(redisDb *db, dbKeyType keyType);
dictEntry *dbFind(redisDb *db, void *key, dbKeyType keyType);
unsigned long long dbScan(redisDb *db, dbKeyType keyType, unsigned long long cursor,
int onlyslot, dictScanFunction *fn,
int (dictScanValidFunction)(dict *d), void *privdata);
int dbExpand(const redisDb *db, uint64_t db_size, dbKeyType keyType, int try_expand);
unsigned long long cumulativeKeyCountRead(redisDb *db, int idx, dbKeyType keyType);
int getFairRandomSlot(redisDb *db, dbKeyType keyType);
int dbGetNextNonEmptySlot(redisDb *db, int slot, dbKeyType keyType);
int findSlotByKeyIndex(redisDb *db, unsigned long target, dbKeyType keyType);
/* kvstore wrappers */
int dbExpand(redisDb *db, uint64_t db_size, int try_expand);
int dbExpandExpires(redisDb *db, uint64_t db_size, int try_expand);
dictEntry *dbFind(redisDb *db, void *key);
dictEntry *dbFindExpires(redisDb *db, void *key);
unsigned long long dbSize(redisDb *db);
unsigned long long dbScan(redisDb *db, unsigned long long cursor, dictScanFunction *scan_cb, void *privdata);
/* Set data type */
robj *setTypeCreate(sds value, size_t size_hint);
@@ -3214,6 +3173,7 @@ int serverPubsubSubscriptionCount(void);
int serverPubsubShardSubscriptionCount(void);
size_t pubsubMemOverhead(client *c);
void unmarkClientAsPubSub(client *c);
int pubsubTotalSubscriptions(void);
/* Keyspace events notification */
void notifyKeyspaceEvent(int type, char *event, robj *key, int dbid);
@@ -527,9 +527,9 @@ start_server {tags {"other external:skip"}} {
# Set a key to enable overhead display of db 0
r set a b
# The dict containing 128 keys must have expanded,
# its hash table itself takes a lot more than 200 bytes
# its hash table itself takes a lot more than 400 bytes
wait_for_condition 100 50 {
[get_overhead_hashtable_main] < 200
[get_overhead_hashtable_main] < 400
} else {
fail "dict did not resize in time"
}