mirror of https://mirror.osredm.com/root/redis.git
Make sure that fork child doesn't do incremental rehashing (#11692)
Turns out that a fork child calling getExpire while persisting keys (and possibly also a result of some module fork tasks) could cause dictFind to do incremental rehashing in the child process, which is both a waste of time, and also causes COW harm.

(cherry picked from commit 2bec254d89)
(cherry picked from commit 3e82bdf738)
(cherry picked from commit 4803334cf6cb1eccdd33674a72a215ed6cd10069)
parent d49ffeea1c
commit 789f6a95db
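For context on why the child ends up rehashing at all: in dict.c, a lookup on a dict that is in the middle of an incremental rehash performs one rehash step as a side effect (dictFind() calls _dictRehashStep()), and in a fork child, e.g. getExpire() looking up the expires dict while keys are being persisted, that step writes memory and so dirties copy-on-write pages. The program below is a minimal, self-contained sketch of that behaviour and of the guard this patch adds; it is not Redis code, and toydict/toyFind/toyRehash are invented names for illustration.

#include <stdio.h>

/* The three modes this patch introduces in dict.h (copied for the sketch). */
typedef enum {
    DICT_RESIZE_ENABLE,
    DICT_RESIZE_AVOID,
    DICT_RESIZE_FORBID,
} dictResizeEnable;

static dictResizeEnable dict_can_resize = DICT_RESIZE_ENABLE;

/* Toy stand-in for a dict that is mid-rehash: it only tracks how many
 * buckets still have to be migrated and how many migrations happened
 * (each migration writes memory, i.e. dirties a COW page in a child). */
typedef struct {
    int pending_buckets;
    int migrations;
} toydict;

static int toyIsRehashing(toydict *d) { return d->pending_buckets > 0; }

/* Mirrors the shape of the early return this patch adds to dictRehash(). */
static int toyRehash(toydict *d, int n) {
    if (dict_can_resize == DICT_RESIZE_FORBID || !toyIsRehashing(d)) return 0;
    while (n-- && d->pending_buckets > 0) {
        d->pending_buckets--;
        d->migrations++;       /* real dict.c: move entries from ht[0] to ht[1] */
    }
    return toyIsRehashing(d);
}

/* Mirrors dictFind()'s opportunistic rehash step (_dictRehashStep). */
static void toyFind(toydict *d) {
    if (toyIsRehashing(d)) toyRehash(d, 1);
    /* ... a real lookup would now hash the key and search the tables ... */
}

int main(void) {
    toydict d = { 4, 0 };

    toyFind(&d);                          /* parent: migrates one bucket */

    dict_can_resize = DICT_RESIZE_FORBID; /* what updateDictResizePolicy()
                                             now selects in the fork child */
    toyFind(&d);                          /* child: no writes, no COW harm */

    printf("migrations: %d, pending: %d\n", d.migrations, d.pending_buckets);
    return 0;                             /* prints: migrations: 1, pending: 3 */
}

In the real patch the guard sits at the top of dictRehash() and the fork child selects DICT_RESIZE_FORBID via updateDictResizePolicy(), as the diff below shows.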
src/dict.c | 30
@@ -51,15 +51,15 @@
 #include <assert.h>
 #endif

-/* Using dictEnableResize() / dictDisableResize() we make possible to
- * enable/disable resizing of the hash table as needed. This is very important
+/* Using dictEnableResize() / dictDisableResize() we make possible to disable
+ * resizing and rehashing of the hash table as needed. This is very important
  * for Redis, as we use copy-on-write and don't want to move too much memory
  * around when there is a child performing saving operations.
  *
  * Note that even when dict_can_resize is set to 0, not all resizes are
  * prevented: a hash table is still allowed to grow if the ratio between
  * the number of elements and the buckets > dict_force_resize_ratio. */
-static int dict_can_resize = 1;
+static dictResizeEnable dict_can_resize = DICT_RESIZE_ENABLE;
 static unsigned int dict_force_resize_ratio = 5;

 /* -------------------------- private prototypes ---------------------------- */

@@ -136,7 +136,7 @@ int dictResize(dict *d)
 {
     unsigned long minimal;

-    if (!dict_can_resize || dictIsRehashing(d)) return DICT_ERR;
+    if (dict_can_resize != DICT_RESIZE_ENABLE || dictIsRehashing(d)) return DICT_ERR;
     minimal = d->ht[0].used;
     if (minimal < DICT_HT_INITIAL_SIZE)
         minimal = DICT_HT_INITIAL_SIZE;

@@ -187,7 +187,12 @@ int dictExpand(dict *d, unsigned long size)
  * work it does would be unbound and the function may block for a long time. */
 int dictRehash(dict *d, int n) {
     int empty_visits = n*10; /* Max number of empty buckets to visit. */
-    if (!dictIsRehashing(d)) return 0;
+    if (dict_can_resize == DICT_RESIZE_FORBID || !dictIsRehashing(d)) return 0;
+    if (dict_can_resize == DICT_RESIZE_AVOID &&
+        (d->ht[1].size / d->ht[0].size < dict_force_resize_ratio))
+    {
+        return 0;
+    }

     while(n-- && d->ht[0].used != 0) {
         dictEntry *de, *nextde;

@@ -962,9 +967,10 @@ static int _dictExpandIfNeeded(dict *d)
      * table (global setting) or we should avoid it but the ratio between
      * elements/buckets is over the "safe" threshold, we resize doubling
      * the number of buckets. */
-    if (d->ht[0].used >= d->ht[0].size &&
-        (dict_can_resize ||
-         d->ht[0].used/d->ht[0].size > dict_force_resize_ratio))
+    if ((dict_can_resize == DICT_RESIZE_ENABLE &&
+         d->ht[0].used >= d->ht[0].size) ||
+        (dict_can_resize != DICT_RESIZE_FORBID &&
+         d->ht[0].used / d->ht[0].size > dict_force_resize_ratio))
     {
         return dictExpand(d, d->ht[0].used*2);
     }

@@ -1023,12 +1029,8 @@ void dictEmpty(dict *d, void(callback)(void*)) {
     d->iterators = 0;
 }

-void dictEnableResize(void) {
-    dict_can_resize = 1;
-}
-
-void dictDisableResize(void) {
-    dict_can_resize = 0;
+void dictSetResizeEnabled(dictResizeEnable enable) {
+    dict_can_resize = enable;
 }

 uint64_t dictGetHash(dict *d, const void *key) {
src/dict.h

@@ -157,6 +157,12 @@ typedef void (dictScanBucketFunction)(void *privdata, dictEntry **bucketref);
 #define randomULong() random()
 #endif

+typedef enum {
+    DICT_RESIZE_ENABLE,
+    DICT_RESIZE_AVOID,
+    DICT_RESIZE_FORBID,
+} dictResizeEnable;
+
 /* API */
 dict *dictCreate(dictType *type, void *privDataPtr);
 int dictExpand(dict *d, unsigned long size);

@@ -182,8 +188,7 @@ void dictGetStats(char *buf, size_t bufsize, dict *d);
 uint64_t dictGenHashFunction(const void *key, int len);
 uint64_t dictGenCaseHashFunction(const unsigned char *buf, int len);
 void dictEmpty(dict *d, void(callback)(void*));
-void dictEnableResize(void);
-void dictDisableResize(void);
+void dictSetResizeEnabled(dictResizeEnable enable);
 int dictRehash(dict *d, int n);
 int dictRehashMilliseconds(dict *d, int ms);
 void dictSetHashFunctionSeed(uint8_t *seed);
src/server.c | 11
@@ -1465,13 +1465,15 @@ int incrementallyRehash(int dbid) {
  * as we want to avoid resizing the hash tables when there is a child in order
  * to play well with copy-on-write (otherwise when a resize happens lots of
  * memory pages are copied). The goal of this function is to update the ability
- * for dict.c to resize the hash tables accordingly to the fact we have an
+ * for dict.c to resize or rehash the tables accordingly to the fact we have an
  * active fork child running. */
 void updateDictResizePolicy(void) {
-    if (!hasActiveChildProcess())
-        dictEnableResize();
+    if (server.in_fork_child != CHILD_TYPE_NONE)
+        dictSetResizeEnabled(DICT_RESIZE_FORBID);
+    else if (hasActiveChildProcess())
+        dictSetResizeEnabled(DICT_RESIZE_AVOID);
     else
-        dictDisableResize();
+        dictSetResizeEnabled(DICT_RESIZE_ENABLE);
 }

 /* Return true if there are no active children processes doing RDB saving,

@@ -5122,6 +5124,7 @@ int redisFork(int purpose) {
         server.in_fork_child = purpose;
         setOOMScoreAdj(CONFIG_OOM_BGCHILD);
         setupChildSignalHandlers();
+        updateDictResizePolicy();
         closeClildUnusedResourceAfterFork();
     } else {
         /* Parent */
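Read together, the three files give the following policy: the fork child (server.in_fork_child != CHILD_TYPE_NONE) runs with DICT_RESIZE_FORBID, so dictRehash() returns immediately and lookups such as the getExpire()/dictFind() path never migrate buckets. The parent, while any child is alive, runs with DICT_RESIZE_AVOID, which still allows growth and rehashing once the relevant ratio exceeds dict_force_resize_ratio, preserving the pre-existing escape hatch. With no child at all, DICT_RESIZE_ENABLE restores the old behaviour.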