Commit cbdedf86 authored by Vitaly Arbuzov

Collect db-wide stats for the DEBUG command

parent 6a49e97e
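For orientation, a minimal sketch of how the new db-wide helper introduced below is meant to be consumed. The wrapper name and buffer size are assumptions for illustration and are not part of this commit; dbGetStats, server.db and serverLog are existing identifiers:

/* Illustrative only: dump aggregated hash table stats for one logical db. */
static void debugLogDbHashtableStats(int dbid) {
    char buf[4096];                      /* size chosen arbitrarily for the sketch */
    /* Fills buf with one combined "Hash table 0" report covering every per-slot
     * dict of the db, followed by a combined "Hash table 1" report if any of
     * those dicts is currently rehashing. */
    dbGetStats(buf, sizeof(buf), &server.db[dbid]);
    serverLog(LL_NOTICE, "%s", buf);
}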
@@ -72,6 +72,7 @@ void bugReportStart(void);
void printCrashReport(void);
void bugReportEnd(int killViaSignal, int sig);
void logStackTrace(void *eip, int uplevel);
void dbGetStats(char *buf, size_t bufsize, redisDb *db);
/* ================================= Debugging ============================== */
@@ -898,7 +899,7 @@ NULL
}
stats = sdscatprintf(stats,"[Dictionary HT]\n");
// dictGetStats(buf,sizeof(buf),server.db[dbid].dict); FIXME (vitarb) get aggregated stats https://sim.amazon.com/issues/ELMO-63798
dbGetStats(buf,sizeof(buf),&server.db[dbid]);
stats = sdscat(stats,buf);
stats = sdscatprintf(stats,"[Expires HT]\n");
......
@@ -46,6 +46,7 @@
#include "dict.h"
#include "zmalloc.h"
#include "redisassert.h"
#include "server.h"
/* Using dictEnableResize() / dictDisableResize() we make possible to disable
* resizing and rehashing of the hash table as needed. This is very important
@@ -106,6 +107,18 @@ uint8_t *dictGetHashFunctionSeed(void) {
uint64_t siphash(const uint8_t *in, const size_t inlen, const uint8_t *k);
uint64_t siphash_nocase(const uint8_t *in, const size_t inlen, const uint8_t *k);
typedef struct dictStats {
int htidx;
unsigned long buckets;
unsigned long maxChainLen;
unsigned long totalChainLen;
unsigned long htSize;
unsigned long htUsed;
unsigned long *clvector;
} dictStats;
size_t _dictGetStatsMsg(char *buf, size_t bufsize, dictStats *stats);
uint64_t dictGenHashFunction(const void *key, size_t len) {
return siphash(key,len,dict_hash_function_seed);
}
@@ -1505,43 +1518,62 @@ dictEntry *dictFindEntryByPtrAndHash(dict *d, const void *oldptr, uint64_t hash)
}
/* ------------------------------- Debugging ---------------------------------*/
#define DICT_STATS_VECTLEN 50
size_t _dictGetStatsHt(char *buf, size_t bufsize, dict *d, int htidx) {
unsigned long i, slots = 0, chainlen, maxchainlen = 0;
unsigned long totchainlen = 0;
unsigned long clvector[DICT_STATS_VECTLEN];
size_t l = 0;
void _dictFreeStats(dictStats *stats) {
zfree(stats->clvector);
zfree(stats);
}
if (d->ht_used[htidx] == 0) {
return snprintf(buf,bufsize,
"No stats available for empty dictionaries\n");
void _dictCombineStats(dictStats *from, dictStats *into) {
into->buckets += from->buckets;
into->maxChainLen = max(from->maxChainLen, into->maxChainLen);
into->totalChainLen += from->totalChainLen;
into->htSize += from->htSize;
into->htUsed += from->htUsed;
for (int i = 0; i < DICT_STATS_VECTLEN; i++) {
into->clvector[i] += from->clvector[i];
}
}
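/* Collect raw bucket/chain statistics for one table (htidx 0 or 1) of a dict.
 * The caller owns the returned dictStats and releases it with _dictFreeStats(). */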
dictStats* _dictGetStatsHt(dict *d, int htidx) {
unsigned long *clvector = zcalloc(sizeof(unsigned long) * DICT_STATS_VECTLEN);
dictStats *stats = zcalloc(sizeof(dictStats));
stats->htidx = htidx;
stats->clvector = clvector;
stats->htSize = DICTHT_SIZE(d->ht_size_exp[htidx]);
stats->htUsed = d->ht_used[htidx];
/* Compute stats. */
for (i = 0; i < DICT_STATS_VECTLEN; i++) clvector[i] = 0;
for (i = 0; i < DICTHT_SIZE(d->ht_size_exp[htidx]); i++) {
for (unsigned long i = 0; i < DICTHT_SIZE(d->ht_size_exp[htidx]); i++) {
dictEntry *he;
if (d->ht_table[htidx][i] == NULL) {
clvector[0]++;
continue;
}
slots++;
stats->buckets++;
/* For each hash entry on this slot... */
chainlen = 0;
unsigned long chainlen = 0;
he = d->ht_table[htidx][i];
while(he) {
chainlen++;
he = dictGetNext(he);
}
clvector[(chainlen < DICT_STATS_VECTLEN) ? chainlen : (DICT_STATS_VECTLEN-1)]++;
if (chainlen > maxchainlen) maxchainlen = chainlen;
totchainlen += chainlen;
if (chainlen > stats->maxChainLen) stats->maxChainLen = chainlen;
stats->totalChainLen += chainlen;
}
/* Generate human readable stats. */
l += snprintf(buf+l,bufsize-l,
return stats;
}
/* Generate human readable stats. */
size_t _dictGetStatsMsg(char *buf, size_t bufsize, dictStats *stats) {
if (stats->htUsed == 0) {
return snprintf(buf,bufsize,
"No stats available for empty dictionaries\n");
}
size_t l = 0;
l += snprintf(buf+l, bufsize-l,
"Hash table %d stats (%s):\n"
" table size: %lu\n"
" number of elements: %lu\n"
@@ -1550,16 +1582,16 @@ size_t _dictGetStatsHt(char *buf, size_t bufsize, dict *d, int htidx) {
" avg chain length (counted): %.02f\n"
" avg chain length (computed): %.02f\n"
" Chain length distribution:\n",
htidx, (htidx == 0) ? "main hash table" : "rehashing target",
DICTHT_SIZE(d->ht_size_exp[htidx]), d->ht_used[htidx], slots, maxchainlen,
(float)totchainlen/slots, (float)d->ht_used[htidx]/slots);
stats->htidx, (stats->htidx == 0) ? "main hash table" : "rehashing target",
stats->htSize, stats->htUsed, stats->buckets, stats->maxChainLen,
(float)stats->totalChainLen / stats->buckets, (float)stats->htUsed / stats->buckets);
for (i = 0; i < DICT_STATS_VECTLEN-1; i++) {
if (clvector[i] == 0) continue;
for (unsigned long i = 0; i < DICT_STATS_VECTLEN-1; i++) {
if (stats->clvector[i] == 0) continue;
if (l >= bufsize) break;
l += snprintf(buf+l,bufsize-l,
" %ld: %ld (%.02f%%)\n",
i, clvector[i], ((float)clvector[i]/DICTHT_SIZE(d->ht_size_exp[htidx]))*100);
i, stats->clvector[i], ((float)stats->clvector[i]/stats->htSize)*100);
}
/* Unlike snprintf(), return the number of characters actually written. */
@@ -1567,16 +1599,55 @@ size_t _dictGetStatsHt(char *buf, size_t bufsize, dict *d, int htidx) {
return strlen(buf);
}
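/* Write a human readable hash table report for 'db' into buf (at most bufsize
 * bytes), aggregating the stats of all of its per-slot dictionaries. */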
void dbGetStats(char *buf, size_t bufsize, redisDb *db) {
size_t l;
char *orig_buf = buf;
size_t orig_bufsize = bufsize;
dictStats *mainHtStats = NULL;
dictStats *rehashHtStats = NULL;
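/* Fold the main table (index 0) of every per-slot dict into one aggregate,
 * and any in-progress rehashing targets (index 1) into a second one. */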
for (int i = 0; i < db->dict_count; i++) {
dictStats *stats = _dictGetStatsHt(db->dict[i], 0);
if (!mainHtStats) mainHtStats = stats;
else {
_dictCombineStats(stats, mainHtStats);
_dictFreeStats(stats);
}
if (dictIsRehashing(db->dict[i])) {
stats = _dictGetStatsHt(db->dict[i], 1);
if (!rehashHtStats) rehashHtStats = stats;
else {
_dictCombineStats(stats, rehashHtStats);
_dictFreeStats(stats);
}
}
}
l = _dictGetStatsMsg(buf, bufsize, mainHtStats);
_dictFreeStats(mainHtStats);
buf += l;
bufsize -= l;
if (rehashHtStats && bufsize > 0) {
_dictGetStatsMsg(buf, bufsize, rehashHtStats);
_dictFreeStats(rehashHtStats);
}
/* Make sure there is a NULL term at the end. */
if (orig_bufsize) orig_buf[orig_bufsize - 1] = '\0';
}
void dictGetStats(char *buf, size_t bufsize, dict *d) {
size_t l;
char *orig_buf = buf;
size_t orig_bufsize = bufsize;
l = _dictGetStatsHt(buf,bufsize,d,0);
dictStats *mainHtStats = _dictGetStatsHt(d, 0);
l = _dictGetStatsMsg(buf, bufsize, mainHtStats);
_dictFreeStats(mainHtStats);
buf += l;
bufsize -= l;
if (dictIsRehashing(d) && bufsize > 0) {
_dictGetStatsHt(buf,bufsize,d,1);
dictStats *rehashHtStats = _dictGetStatsHt(d, 1);
_dictGetStatsMsg(buf, bufsize, rehashHtStats);
_dictFreeStats(rehashHtStats);
}
/* Make sure there is a NULL term at the end. */
if (orig_bufsize) orig_buf[orig_bufsize-1] = '\0';
......
@@ -3066,11 +3066,15 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin
goto eoferr;
if ((expires_size = rdbLoadLen(rdb,NULL)) == RDB_LENERR)
goto eoferr;
if (server.cluster_enabled) {
clusterNode *myself = server.cluster->myself;
for (int i = 0; i < db->dict_count; i++) {
/* We don't know the exact number of keys that will fall into each slot, but we can approximate it, assuming an even distribution. */
if (clusterNodeGetSlotBit(myself, i)) dictExpand(db->dict[i], (db_size / myself->numslots));
}
} else {
dictExpand(db->dict[0], db_size);
}
dictExpand(db->expires,expires_size);
continue; /* Read next opcode. */
} else if (type == RDB_OPCODE_AUX) {
......
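On the RDB loading change above: pre-expanding each owned slot's dict to db_size / myself->numslots is only an estimate that assumes keys are spread roughly evenly over the node's slots. A toy illustration of that estimate, with hypothetical numbers:

/* Hypothetical figures, for illustration only. */
unsigned long long db_size = 1200000;  /* keys announced for this db in the RDB */
int owned_slots = 5461;                /* slots served by this node, about 1/3 of 16384 */
unsigned long long per_slot = db_size / owned_slots;  /* about 219 entries per slot dict */
/* Pre-expanding each owned slot's dict to per_slot entries means most inserts
 * during loading land in an already-sized table and avoid incremental rehashing. */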