mirror of https://mirror.osredm.com/root/redis.git
Correctly update kvstore overhead after emptying or releasing dict (#13984)
Close #13973

This PR fixes two bugs.

1) `overhead_hashtable_lut` is not updated correctly

This bug was introduced by https://github.com/redis/redis/pull/12913. We only updated `overhead_hashtable_lut` at the beginning and end of rehashing, and forgot to update it when a dict is emptied or released.

This PR introduces a new `bucketChanged` callback to track changes in the bucket count. The `rehashingStarted` and `rehashingCompleted` callbacks are no longer responsible for bucket changes; those are handled entirely by `bucketChanged`. This also means only one callback, rather than three, needs to be registered to track the bucket count. In most cases `bucketChanged` fires together with `rehashingStarted` or `rehashingCompleted`; the exception is when a dict is emptied or released, where we still need to subtract its current size even though the dict is not rehashing.

In addition, `overhead_hashtable_lut` duplicated `bucket_count`, so `overhead_hashtable_lut` is removed and `bucket_count` is used instead.

Note that this bug only occurs in cluster mode, because KVSTORE_FREE_EMPTY_DICTS is not used without cluster mode.

2) The size of `dict_size_index` is counted twice in memory usage

`dict_size_index` is created at startup, so its memory is already counted in `used_memory_startup`. However, when computing the overhead we counted it again, which could cause the reported overhead to exceed the total memory usage.

---------

Co-authored-by: Yuan Wang <yuan.wang@redis.com>
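For readers skimming the diff below, the new accounting model reduces to a single signed-delta callback: the dict reports every bucket-count change (new table allocated, old table freed, dict emptied or released) through `bucketChanged`, and the owner folds the delta into one running counter. What follows is a minimal, self-contained sketch of that contract; the `owner` struct and function names are illustrative only and not the actual Redis/kvstore code (see the diff for that).

/* Minimal sketch of the bucketChanged contract (illustrative names, not the
 * actual Redis code): the dict owner keeps one running bucket counter and the
 * dict reports every size change as a signed delta. */
#include <stdio.h>

typedef struct owner {
    long long bucket_count; /* running total of allocated buckets */
} owner;

/* Called for every bucket-count change:
 *   +new_table_size  when a resize/rehash allocates a new table,
 *   -old_table_size  when the old table is freed after rehashing,
 *   -dictBuckets(d)  when the dict is emptied or released. */
static void owner_bucket_changed(owner *o, long long delta) {
    o->bucket_count += delta;
}

int main(void) {
    owner o = {0};
    owner_bucket_changed(&o, 4);   /* initial table: 4 buckets */
    owner_bucket_changed(&o, 8);   /* rehash started: new table of 8 buckets */
    owner_bucket_changed(&o, -4);  /* rehash completed: old 4-bucket table freed */
    owner_bucket_changed(&o, -8);  /* dict emptied/released: drop remaining buckets */
    printf("buckets: %lld\n", o.bucket_count); /* prints 0 */
    return 0;
}

With this invariant in place, the LUT overhead can simply be reported as `bucket_count * sizeof(dictEntry *)`, which is exactly the change made to `kvstoreOverheadHashtableLut()` near the bottom of the kvstore.c diff.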
This commit is contained in:
parent
97d7d2f865
commit
ac0bef15b5
src/dict.c

@@ -263,12 +263,16 @@ int _dictResize(dict *d, unsigned long size, int* malloc_failed)
     d->ht_table[1] = new_ht_table;
     d->rehashidx = 0;
     if (d->type->rehashingStarted) d->type->rehashingStarted(d);
+    if (d->type->bucketChanged)
+        d->type->bucketChanged(d, DICTHT_SIZE(d->ht_size_exp[1]));
 
     /* Is this the first initialization or is the first hash table empty? If so
      * it's not really a rehashing, we can just set the first hash table so that
      * it can accept keys. */
     if (d->ht_table[0] == NULL || d->ht_used[0] == 0) {
         if (d->type->rehashingCompleted) d->type->rehashingCompleted(d);
+        if (d->type->bucketChanged)
+            d->type->bucketChanged(d, -(long long)DICTHT_SIZE(d->ht_size_exp[0]));
         if (d->ht_table[0]) zfree(d->ht_table[0]);
         d->ht_size_exp[0] = new_ht_size_exp;
         d->ht_used[0] = new_ht_used;

@@ -371,6 +375,8 @@ static int dictCheckRehashingCompleted(dict *d) {
     if (d->ht_used[0] != 0) return 0;
 
     if (d->type->rehashingCompleted) d->type->rehashingCompleted(d);
+    if (d->type->bucketChanged)
+        d->type->bucketChanged(d, -(long long)DICTHT_SIZE(d->ht_size_exp[0]));
     zfree(d->ht_table[0]);
     /* Copy the new ht onto the old one */
     d->ht_table[0] = d->ht_table[1];

@@ -759,6 +765,10 @@ void dictRelease(dict *d)
     if (dictIsRehashing(d) && d->type->rehashingCompleted)
         d->type->rehashingCompleted(d);
 
+    /* Subtract the size of all buckets. */
+    if (d->type->bucketChanged)
+        d->type->bucketChanged(d, -(long long)dictBuckets(d));
+
     if (d->type->onDictRelease)
         d->type->onDictRelease(d);

@@ -1676,6 +1686,11 @@ void dictEmpty(dict *d, void(callback)(dict*)) {
      * destroying the dict fake completion. */
     if (dictIsRehashing(d) && d->type->rehashingCompleted)
         d->type->rehashingCompleted(d);
+
+    /* Subtract the size of all buckets. */
+    if (d->type->bucketChanged)
+        d->type->bucketChanged(d, -(long long)dictBuckets(d));
+
     _dictClear(d,0,callback);
     _dictClear(d,1,callback);
     d->rehashidx = -1;

src/dict.h

@@ -46,6 +46,9 @@ typedef struct dictType {
     /* Invoked at the end of dict initialization/rehashing of all the entries from old to new ht. Both ht still exists
      * and are cleaned up after this callback. */
     void (*rehashingCompleted)(dict *d);
+    /* Invoked when the size of the dictionary changes.
+     * The `delta` parameter can be positive (size increase) or negative (size decrease). */
+    void (*bucketChanged)(dict *d, long long delta);
     /* Allow a dict to carry extra caller-defined metadata. The
      * extra memory is initialized to 0 when a dict is allocated. */
     size_t (*dictMetadataBytes)(dict *d);

src/kvstore.c

@@ -47,7 +47,6 @@ struct _kvstore {
     unsigned long long key_count; /* Total number of keys in this kvstore. */
     unsigned long long bucket_count; /* Total number of buckets in this kvstore across dictionaries. */
     unsigned long long *dict_size_index; /* Binary indexed tree (BIT) that describes cumulative key frequencies up until given dict-index. */
-    size_t overhead_hashtable_lut; /* The overhead of all dictionaries. */
     size_t overhead_hashtable_rehashing; /* The overhead of dictionaries rehashing. */
     void *metadata[]; /* conditionally allocated based on "flags" */
 };

@@ -205,8 +204,6 @@ static void kvstoreDictRehashingStarted(dict *d) {
 
     unsigned long long from, to;
     dictRehashingInfo(d, &from, &to);
-    kvs->bucket_count += to; /* Started rehashing (Add the new ht size) */
-    kvs->overhead_hashtable_lut += to;
     kvs->overhead_hashtable_rehashing += from;
 }

@@ -224,11 +221,17 @@ static void kvstoreDictRehashingCompleted(dict *d) {
 
     unsigned long long from, to;
     dictRehashingInfo(d, &from, &to);
-    kvs->bucket_count -= from; /* Finished rehashing (Remove the old ht size) */
-    kvs->overhead_hashtable_lut -= from;
     kvs->overhead_hashtable_rehashing -= from;
 }
 
+/* Updates the bucket count for the given dictionary in a DB. It adds the new ht size
+ * of the dictionary or removes the old ht size of the dictionary from the total
+ * sum of buckets for a DB. */
+static void kvstoreDictBucketChanged(dict *d, long long delta) {
+    kvstore *kvs = d->type->userdata;
+    kvs->bucket_count += delta;
+}
+
 /* Returns the size of the DB dict base metadata in bytes. */
 static size_t kvstoreDictMetaBaseSize(dict *d) {
     UNUSED(d);

@@ -275,6 +278,7 @@ kvstore *kvstoreCreate(dictType *type, int num_dicts_bits, int flags) {
     kvs->dtype.dictMetadataBytes = kvstoreDictMetaBaseSize;
     kvs->dtype.rehashingStarted = kvstoreDictRehashingStarted;
     kvs->dtype.rehashingCompleted = kvstoreDictRehashingCompleted;
+    kvs->dtype.bucketChanged = kvstoreDictBucketChanged;
 
     kvs->num_dicts_bits = num_dicts_bits;
     kvs->num_dicts = 1 << kvs->num_dicts_bits;

@@ -290,7 +294,6 @@ kvstore *kvstoreCreate(dictType *type, int num_dicts_bits, int flags) {
     kvs->resize_cursor = 0;
     kvs->dict_size_index = kvs->num_dicts > 1? zcalloc(sizeof(unsigned long long) * (kvs->num_dicts + 1)) : NULL;
     kvs->bucket_count = 0;
-    kvs->overhead_hashtable_lut = 0;
     kvs->overhead_hashtable_rehashing = 0;
     return kvs;
 }

@@ -322,7 +325,6 @@ void kvstoreEmpty(kvstore *kvs, void(callback)(dict*)) {
     kvs->bucket_count = 0;
     if (kvs->dict_size_index)
         memset(kvs->dict_size_index, 0, sizeof(unsigned long long) * (kvs->num_dicts + 1));
-    kvs->overhead_hashtable_lut = 0;
     kvs->overhead_hashtable_rehashing = 0;
 }

@@ -378,9 +380,6 @@ size_t kvstoreMemUsage(kvstore *kvs) {
     /* Values are dict* shared with kvs->dicts */
     mem += listLength(kvs->rehashing) * sizeof(listNode);
 
-    if (kvs->dict_size_index)
-        mem += sizeof(unsigned long long) * (kvs->num_dicts + 1);
-
     return mem;
 }

@@ -700,7 +699,7 @@ uint64_t kvstoreIncrementallyRehash(kvstore *kvs, uint64_t threshold_us) {
 }
 
 size_t kvstoreOverheadHashtableLut(kvstore *kvs) {
-    return kvs->overhead_hashtable_lut * sizeof(dictEntry *);
+    return kvs->bucket_count * sizeof(dictEntry *);
 }
 
 size_t kvstoreOverheadHashtableRehashing(kvstore *kvs) {

tests (info test suite)

@@ -536,3 +536,27 @@ start_server {tags {"info" "external:skip"}} {
         assert_equal [dict get $mem_stats db.dict.rehashing.count] {1}
     }
 }
+
+start_cluster 1 0 {tags {external:skip cluster}} {
+    test "Verify that LUT overhead is properly updated when dicts are emptied or reused (issue #13973)" {
+        R 0 set k v ;# Make dbs overhead displayed
+        set info_mem [r memory stats]
+        set overhead_main [dict get $info_mem db.0 overhead.hashtable.main]
+        set overhead_expires [dict get $info_mem db.0 overhead.hashtable.expires]
+        assert_range $overhead_main 1 5000
+        assert_range $overhead_expires 1 1000
+
+        # In cluster mode, we use KVSTORE_FREE_EMPTY_DICTS to ensure that dicts
+        # are freed when they are emptied. This test verifies that after a dict
+        # is cleared, the lut overhead is properly updated, preventing it from
+        # growing indefinitely.
+        for {set j 1} {$j <= 500} {incr j} {
+            R 0 set k v
+            R 0 del k
+        }
+        R 0 set k v ;# Make dbs overhead displayed
+        set info_mem [r memory stats]
+        assert_equal [dict get $info_mem db.0 overhead.hashtable.main] $overhead_main
+        assert_equal [dict get $info_mem db.0 overhead.hashtable.expires] $overhead_expires
+    }
+}