Commit 658f9bf8 authored by Oran Agra

Make sure that fork child doesn't do incremental rehashing (#11692)

Turns out that a fork child calling getExpire while persisting keys (and
possibly also as a result of some module fork tasks) could cause dictFind
to do incremental rehashing in the child process, which is both a waste
of time and also causes copy-on-write (COW) harm.

(cherry picked from commit 2bec254d)
(cherry picked from commit 3e82bdf7)
parent 9c76b1ab
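
For context: while a dict is mid-rehash, plain read paths in dict.c (such as the dictFind call mentioned above) perform one lazy rehash step as a side effect, moving entries between the two internal tables; in a forked child that write dirties copy-on-write pages. Below is a minimal standalone sketch of the guard this commit adds at the top of dictRehash() (see the dict.c hunks that follow); the names toy_dict, may_take_rehash_step, ht0_size and ht1_size are illustrative stand-ins, not actual Redis identifiers.

#include <assert.h>
#include <stddef.h>

/* Illustrative stand-ins for dict.c internals. */
typedef enum { RESIZE_ENABLE, RESIZE_AVOID, RESIZE_FORBID } resize_policy;

typedef struct {
    size_t ht0_size;   /* buckets in the old table */
    size_t ht1_size;   /* buckets in the new table; 0 when not rehashing */
} toy_dict;

static const size_t force_resize_ratio = 5;

/* Mirrors the new early returns in dictRehash(): a fork child (FORBID) never
 * takes a rehash step, and a parent with an active child (AVOID) only does so
 * when rehashing was forced by a large size ratio in the first place. */
static int may_take_rehash_step(const toy_dict *d, resize_policy policy) {
    int rehashing = (d->ht1_size != 0);
    if (policy == RESIZE_FORBID || !rehashing) return 0;
    if (policy == RESIZE_AVOID &&
        d->ht1_size / d->ht0_size < force_resize_ratio) return 0;
    return 1;
}

int main(void) {
    toy_dict d = { .ht0_size = 4096, .ht1_size = 8192 }; /* mid-rehash, 2x grow */
    assert(may_take_rehash_step(&d, RESIZE_ENABLE) == 1); /* normal operation   */
    assert(may_take_rehash_step(&d, RESIZE_AVOID)  == 0); /* 8192/4096 = 2 < 5  */
    assert(may_take_rehash_step(&d, RESIZE_FORBID) == 0); /* inside fork child  */
    return 0;
}

Under this scheme the child (FORBID) does no rehash work at all, while the parent (AVOID) still finishes a rehash that was forced by severe overload, matching the exception described in the dict.c comment below.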
@@ -47,15 +47,15 @@
 #include "zmalloc.h"
 #include "redisassert.h"
 
-/* Using dictEnableResize() / dictDisableResize() we make possible to
- * enable/disable resizing of the hash table as needed. This is very important
+/* Using dictEnableResize() / dictDisableResize() we make possible to disable
+ * resizing and rehashing of the hash table as needed. This is very important
  * for Redis, as we use copy-on-write and don't want to move too much memory
  * around when there is a child performing saving operations.
  *
  * Note that even when dict_can_resize is set to 0, not all resizes are
  * prevented: a hash table is still allowed to grow if the ratio between
  * the number of elements and the buckets > dict_force_resize_ratio. */
-static int dict_can_resize = 1;
+static dictResizeEnable dict_can_resize = DICT_RESIZE_ENABLE;
 static unsigned int dict_force_resize_ratio = 5;
 
 /* -------------------------- private prototypes ---------------------------- */
@@ -132,7 +132,7 @@ int dictResize(dict *d)
 {
     unsigned long minimal;
 
-    if (!dict_can_resize || dictIsRehashing(d)) return DICT_ERR;
+    if (dict_can_resize != DICT_RESIZE_ENABLE || dictIsRehashing(d)) return DICT_ERR;
     minimal = d->ht[0].used;
     if (minimal < DICT_HT_INITIAL_SIZE)
         minimal = DICT_HT_INITIAL_SIZE;
@@ -210,7 +210,12 @@ int dictTryExpand(dict *d, unsigned long size) {
  * work it does would be unbound and the function may block for a long time. */
 int dictRehash(dict *d, int n) {
     int empty_visits = n*10; /* Max number of empty buckets to visit. */
-    if (!dictIsRehashing(d)) return 0;
+    if (dict_can_resize == DICT_RESIZE_FORBID || !dictIsRehashing(d)) return 0;
+    if (dict_can_resize == DICT_RESIZE_AVOID &&
+        (d->ht[1].size / d->ht[0].size < dict_force_resize_ratio))
+    {
+        return 0;
+    }
 
     while(n-- && d->ht[0].used != 0) {
         dictEntry *de, *nextde;
@@ -995,10 +1000,12 @@ static int _dictExpandIfNeeded(dict *d)
      * table (global setting) or we should avoid it but the ratio between
      * elements/buckets is over the "safe" threshold, we resize doubling
      * the number of buckets. */
-    if (d->ht[0].used >= d->ht[0].size &&
-        (dict_can_resize ||
-         d->ht[0].used/d->ht[0].size > dict_force_resize_ratio) &&
-        dictTypeExpandAllowed(d))
+    if (!dictTypeExpandAllowed(d))
+        return DICT_OK;
+    if ((dict_can_resize == DICT_RESIZE_ENABLE &&
+         d->ht[0].used >= d->ht[0].size) ||
+        (dict_can_resize != DICT_RESIZE_FORBID &&
+         d->ht[0].used / d->ht[0].size > dict_force_resize_ratio))
     {
         return dictExpand(d, d->ht[0].used + 1);
     }
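
A worked illustration of the restructured expand condition above, using hypothetical load figures. The helper name should_expand and the standalone framing are mine; the dictTypeExpandAllowed() check and the actual dictExpand() call are left out for brevity, and size is assumed to be non-zero (the real function handles the empty-table case earlier).

#include <assert.h>
#include <stddef.h>

typedef enum { RESIZE_ENABLE, RESIZE_AVOID, RESIZE_FORBID } resize_policy;
static const size_t force_resize_ratio = 5;

/* Same decision as the new condition in _dictExpandIfNeeded(): ENABLE grows
 * at a 1:1 fill ratio, AVOID only once the ratio exceeds
 * force_resize_ratio, FORBID never grows. */
static int should_expand(resize_policy policy, size_t used, size_t size) {
    if (policy == RESIZE_ENABLE && used >= size) return 1;
    if (policy != RESIZE_FORBID && used / size > force_resize_ratio) return 1;
    return 0;
}

int main(void) {
    assert(should_expand(RESIZE_ENABLE, 128, 128) == 1); /* 1:1, grow as usual    */
    assert(should_expand(RESIZE_AVOID,  600, 128) == 0); /* 600/128 = 4, not > 5  */
    assert(should_expand(RESIZE_AVOID,  800, 128) == 1); /* 800/128 = 6 > 5       */
    assert(should_expand(RESIZE_FORBID, 800, 128) == 0); /* child: never expand   */
    return 0;
}

Together with the new guard in dictRehash(), this keeps a forked child from growing its tables and from continuing a rehash that was already in progress at fork time.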
@@ -1057,12 +1064,8 @@ void dictEmpty(dict *d, void(callback)(void*)) {
     d->pauserehash = 0;
 }
 
-void dictEnableResize(void) {
-    dict_can_resize = 1;
-}
-
-void dictDisableResize(void) {
-    dict_can_resize = 0;
+void dictSetResizeEnabled(dictResizeEnable enable) {
+    dict_can_resize = enable;
 }
 
 uint64_t dictGetHash(dict *d, const void *key) {
@@ -160,6 +160,12 @@ typedef void (dictScanBucketFunction)(void *privdata, dictEntry **bucketref);
 #define randomULong() random()
 #endif
 
+typedef enum {
+    DICT_RESIZE_ENABLE,
+    DICT_RESIZE_AVOID,
+    DICT_RESIZE_FORBID,
+} dictResizeEnable;
+
 /* API */
 dict *dictCreate(dictType *type, void *privDataPtr);
 int dictExpand(dict *d, unsigned long size);
@@ -186,8 +192,7 @@ void dictGetStats(char *buf, size_t bufsize, dict *d);
 uint64_t dictGenHashFunction(const void *key, int len);
 uint64_t dictGenCaseHashFunction(const unsigned char *buf, int len);
 void dictEmpty(dict *d, void(callback)(void*));
-void dictEnableResize(void);
-void dictDisableResize(void);
+void dictSetResizeEnabled(dictResizeEnable enable);
 int dictRehash(dict *d, int n);
 int dictRehashMilliseconds(dict *d, int ms);
 void dictSetHashFunctionSeed(uint8_t *seed);
@@ -1587,13 +1587,15 @@ int incrementallyRehash(int dbid) {
  * as we want to avoid resizing the hash tables when there is a child in order
  * to play well with copy-on-write (otherwise when a resize happens lots of
  * memory pages are copied). The goal of this function is to update the ability
- * for dict.c to resize the hash tables accordingly to the fact we have an
+ * for dict.c to resize or rehash the tables accordingly to the fact we have an
  * active fork child running. */
 void updateDictResizePolicy(void) {
-    if (!hasActiveChildProcess())
-        dictEnableResize();
+    if (server.in_fork_child != CHILD_TYPE_NONE)
+        dictSetResizeEnabled(DICT_RESIZE_FORBID);
+    else if (hasActiveChildProcess())
+        dictSetResizeEnabled(DICT_RESIZE_AVOID);
     else
-        dictDisableResize();
+        dictSetResizeEnabled(DICT_RESIZE_ENABLE);
 }
 
 const char *strChildType(int type) {
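
The ordering above matters: the in_fork_child check comes first, so the strictest policy applies inside the child itself regardless of the inherited child-tracking state. A tiny sketch of that precedence follows; pick_policy and the boolean flags are illustrative names, not Redis APIs.

#include <assert.h>

typedef enum { RESIZE_ENABLE, RESIZE_AVOID, RESIZE_FORBID } resize_policy;

/* Illustrative reduction of updateDictResizePolicy(): strictest state wins. */
static resize_policy pick_policy(int in_fork_child, int has_active_child) {
    if (in_fork_child) return RESIZE_FORBID;    /* this process IS the child */
    if (has_active_child) return RESIZE_AVOID;  /* parent, child is running  */
    return RESIZE_ENABLE;                       /* no child: normal behavior */
}

int main(void) {
    assert(pick_policy(1, 1) == RESIZE_FORBID);
    assert(pick_policy(0, 1) == RESIZE_AVOID);
    assert(pick_policy(0, 0) == RESIZE_ENABLE);
    return 0;
}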
@@ -5923,6 +5925,7 @@ int redisFork(int purpose) {
         server.in_fork_child = purpose;
         setOOMScoreAdj(CONFIG_OOM_BGCHILD);
         setupChildSignalHandlers();
+        updateDictResizePolicy();
         closeChildUnusedResourceAfterFork();
         /* Close the reading part, so that if the parent crashes, the child will
          * get a write error and exit. */