Obuf limit, exit during loop in *RAND* commands and KEYS

Related to the hang reported in #11671
Currently, redis can disconnect a client for reaching the output buffer limit.
When that happens it also stops feeding the output buffer with more data, but
it keeps running the loop inside the command (despite the client already being
marked for disconnection).

This PR is an attempt to mitigate the problem for commands that are easy to
abuse: KEYS, HRANDFIELD, SRANDMEMBER and ZRANDMEMBER.
The *RAND* family of commands can take a negative COUNT argument (which is not
bounded by the number of elements in the key), so it is enough to create a key
with a single field and then use one of these commands to hang redis.
For KEYS the caller can rely on the existing keyspace in redis (if it is big enough).
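
All four command loops patched below use the same mitigation: once the client has been
marked with CLIENT_CLOSE_ASAP (which happens when it is scheduled for asynchronous close,
e.g. after exceeding its output buffer limit), stop producing the reply early. A minimal
sketch of that pattern, for illustration only and not taken from the Redis source;
emit_one_reply() is a hypothetical stand-in for the command's addReply*() calls, while
the client struct and CLIENT_CLOSE_ASAP are the usual definitions from server.h:

    /* Illustrative sketch (assumes server.h); emit_one_reply() is a
     * hypothetical placeholder for the addReply*() work a real command
     * loop performs for each returned element. */
    void replyLoopSketch(client *c, long count) {
        while (count--) {
            emit_one_reply(c);   /* may push the client past its output buffer
                                  * limit, which schedules an async close and
                                  * sets CLIENT_CLOSE_ASAP in c->flags */
            if (c->flags & CLIENT_CLOSE_ASAP)
                break;           /* the reply can't be delivered anyway, so stop
                                  * burning CPU and memory to build it */
        }
    }

Checking the flag once per emitted element keeps the cost of the check negligible next
to the addReply*() work itself.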
Oran Agra 2023-01-12 08:29:20 +02:00
parent 3ebace932d
commit 3148f3e8a5
6 changed files with 33 additions and 2 deletions


@@ -768,6 +768,8 @@ void keysCommand(client *c) {
             }
             decrRefCount(keyobj);
         }
+        if (c->flags & CLIENT_CLOSE_ASAP)
+            break;
     }
     dictReleaseIterator(di);
     setDeferredArrayLen(c,replylen,numkeys);


@@ -1029,6 +1029,8 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
                 addReplyBulkCBuffer(c, key, sdslen(key));
                 if (withvalues)
                     addReplyBulkCBuffer(c, value, sdslen(value));
+                if (c->flags & CLIENT_CLOSE_ASAP)
+                    break;
             }
         } else if (hash->encoding == OBJ_ENCODING_ZIPLIST) {
             ziplistEntry *keys, *vals = NULL;
@@ -1042,6 +1044,8 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
                 count -= sample_count;
                 ziplistRandomPairs(hash->ptr, sample_count, keys, vals);
                 harndfieldReplyWithZiplist(c, sample_count, keys, vals);
+                if (c->flags & CLIENT_CLOSE_ASAP)
+                    break;
             }
             zfree(keys);
             zfree(vals);


@@ -701,6 +701,8 @@ void srandmemberWithCountCommand(client *c) {
             } else {
                 addReplyBulkCBuffer(c,ele,sdslen(ele));
             }
+            if (c->flags & CLIENT_CLOSE_ASAP)
+                break;
         }
         return;
     }


@@ -4078,6 +4078,8 @@ void zrandmemberWithCountCommand(client *c, long l, int withscores) {
                 addReplyBulkCBuffer(c, key, sdslen(key));
                 if (withscores)
                     addReplyDouble(c, *(double*)dictGetVal(de));
+                if (c->flags & CLIENT_CLOSE_ASAP)
+                    break;
             }
         } else if (zsetobj->encoding == OBJ_ENCODING_ZIPLIST) {
             ziplistEntry *keys, *vals = NULL;
@@ -4091,6 +4093,8 @@ void zrandmemberWithCountCommand(client *c, long l, int withscores) {
                 count -= sample_count;
                 ziplistRandomPairs(zsetobj->ptr, sample_count, keys, vals);
                 zarndmemberReplyWithZiplist(c, sample_count, keys, vals);
+                if (c->flags & CLIENT_CLOSE_ASAP)
+                    break;
             }
             zfree(keys);
             zfree(vals);


@@ -540,8 +540,11 @@ proc stop_bg_complex_data {handle} {
     catch {exec /bin/kill -9 $handle}
 }
 
-proc populate {num prefix size} {
-    set rd [redis_deferring_client]
+# Write num keys with the given key prefix and value size (in bytes). If idx is
+# given, it's the index (AKA level) used with the srv procedure and it specifies
+# to which Redis instance to write the keys.
+proc populate {num {prefix key:} {size 3} {idx 0}} {
+    set rd [redis_deferring_client $idx]
     for {set j 0} {$j < $num} {incr j} {
         $rd set $prefix$j [string repeat A $size]
     }


@@ -182,4 +182,20 @@ start_server {tags {"obuf-limits"}} {
         assert_equal "v2" [r get k2]
         assert_equal "v3" [r get k3]
     }
+
+    test "Obuf limit, HRANDFIELD with huge count stopped mid-run" {
+        r config set client-output-buffer-limit {normal 1000000 0 0}
+        r hset myhash a b
+        catch {r hrandfield myhash -999999999} e
+        assert_match "*I/O error*" $e
+        reconnect
+    }
+
+    test "Obuf limit, KEYS stopped mid-run" {
+        r config set client-output-buffer-limit {normal 100000 0 0}
+        populate 1000 "long-key-name-prefix-of-100-chars-------------------------------------------------------------------"
+        catch {r keys *} e
+        assert_match "*I/O error*" $e
+        reconnect
+    }
 }