mirror of https://mirror.osredm.com/root/redis.git
Merge remote-tracking branch 'upstream/unstable' into HEAD
commit 1d5e13e121
@@ -0,0 +1,24 @@
+name: "Codecov"
+
+# Enabling on each push is to display the coverage changes in every PR,
+# where each PR needs to be compared against the coverage of the head commit
+on: [push, pull_request]
+
+jobs:
+  code-coverage:
+    runs-on: ubuntu-22.04
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Install lcov and run test
+        run: |
+          sudo apt-get install lcov
+          make lcov
+
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v4
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          file: ./src/redis.info
@@ -27,7 +27,7 @@ jobs:
           --tags -slow
       - name: Archive redis log
         if: ${{ failure() }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: test-external-redis-log
           path: external-redis.log
@@ -55,7 +55,7 @@ jobs:
           --tags -slow
       - name: Archive redis log
         if: ${{ failure() }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
          name: test-external-cluster-log
          path: external-redis-cluster.log
@@ -79,7 +79,7 @@ jobs:
           --tags "-slow -needs:debug"
       - name: Archive redis log
         if: ${{ failure() }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: test-external-redis-nodebug-log
           path: external-redis-nodebug.log
@@ -1,3 +1,5 @@
+[](https://codecov.io/github/redis/redis)
+
 This README is just a fast *quick start* document. You can find more detailed documentation at [redis.io](https://redis.io).
 
 What is Redis?
@@ -0,0 +1,19 @@
+coverage:
+  status:
+    patch:
+      default:
+        informational: true
+    project:
+      default:
+        informational: true
+
+comment:
+  require_changes: false
+  require_head: false
+  require_base: false
+  layout: "condensed_header, diff, files"
+  hide_project_coverage: false
+  behavior: default
+
+github_checks:
+  annotations: false
@@ -668,7 +668,7 @@ repl-diskless-sync-max-replicas 0
 repl-diskless-load disabled
 
 # Master send PINGs to its replicas in a predefined interval. It's possible to
-# change this interval with the repl_ping_replica_period option. The default
+# change this interval with the repl-ping-replica-period option. The default
 # value is 10 seconds.
 #
 # repl-ping-replica-period 10
@@ -56,4 +56,5 @@ $TCLSH tests/test_helper.tcl \
     --single unit/moduleapi/moduleauth \
     --single unit/moduleapi/rdbloadsave \
     --single unit/moduleapi/crash \
+    --single unit/moduleapi/internalsecret \
     "${@}"
@@ -487,8 +487,9 @@ test-cluster: $(REDIS_SERVER_NAME) $(REDIS_CLI_NAME)
 check: test
 
 lcov:
+	@lcov --version
 	$(MAKE) gcov
-	@(set -e; cd ..; ./runtest --clients 1)
+	@(set -e; cd ..; ./runtest)
 	@geninfo -o redis.info .
 	@genhtml --legend -o lcov-html redis.info
 
src/acl.c (41 changed lines)
@@ -7,6 +7,7 @@
  */
 
 #include "server.h"
+#include "cluster.h"
 #include "sha256.h"
 #include <fcntl.h>
 #include <ctype.h>
@@ -3194,6 +3195,38 @@ void addReplyCommandCategories(client *c, struct redisCommand *cmd) {
     setDeferredSetLen(c, flaglen, flagcount);
 }
 
+/* When successful, initiates an internal connection, that is able to execute
+ * internal commands (see CMD_INTERNAL). */
+static void internalAuth(client *c) {
+    if (server.cluster == NULL) {
+        addReplyError(c, "Cannot authenticate as an internal connection on non-cluster instances");
+        return;
+    }
+
+    sds password = c->argv[2]->ptr;
+
+    /* Get internal secret. */
+    size_t len = -1;
+    const char *internal_secret = clusterGetSecret(&len);
+    if (sdslen(password) != len) {
+        addReplyError(c, "-WRONGPASS invalid internal password");
+        return;
+    }
+    if (!time_independent_strcmp((char *)internal_secret, (char *)password, len)) {
+        c->flags |= CLIENT_INTERNAL;
+        /* No further authentication is needed. */
+        c->authenticated = 1;
+        /* Set the user to the unrestricted user, if it is not already set (default). */
+        if (c->user != NULL) {
+            c->user = NULL;
+            moduleNotifyUserChanged(c);
+        }
+        addReply(c, shared.ok);
+    } else {
+        addReplyError(c, "-WRONGPASS invalid internal password");
+    }
+}
+
 /* AUTH <password>
  * AUTH <username> <password> (Redis >= 6.0 form)
  *
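For context on the check above: the secret comparison goes through time_independent_strcmp rather than a plain memcmp, so the time taken to reject a guess does not depend on how many leading bytes were correct. A minimal sketch of such a constant-time comparison, with strcmp-style semantics (0 on match); the name and shape are illustrative stand-ins, not Redis's exact helper:

    #include <stddef.h>

    /* Examine every byte regardless of where the first mismatch occurs, so
     * response timing does not reveal how much of the guess was right.
     * Returns 0 when the two fixed-length buffers are equal. */
    static int constant_time_cmp(const char *a, const char *b, size_t len) {
        unsigned char diff = 0;
        for (size_t i = 0; i < len; i++)
            diff |= (unsigned char)a[i] ^ (unsigned char)b[i];
        return diff != 0;
    }

A peer that knows the shard secret upgrades its connection with AUTH "internal connection" <secret>, which is the path wired up in the hunk below.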
@@ -3227,6 +3260,14 @@ void authCommand(client *c) {
         username = c->argv[1];
         password = c->argv[2];
         redactClientCommandArgument(c, 2);
+
+        /* Handle internal authentication commands.
+         * Note: No user-defined ACL user can have this username (no spaces
+         * allowed), thus no conflicts with ACL possible. */
+        if (!strcmp(username->ptr, "internal connection")) {
+            internalAuth(c);
+            return;
+        }
     }
 
     robj *err = NULL;
src/aof.c (137 changed lines)
@@ -30,6 +30,13 @@ aofManifest *aofLoadManifestFromFile(sds am_filepath);
 void aofManifestFreeAndUpdate(aofManifest *am);
 void aof_background_fsync_and_close(int fd);
 
+/* When we call 'startAppendOnly', we will create a temp INCR AOF, and rename
+ * it to the real INCR AOF name when the AOFRW is done, so if want to know the
+ * accurate start offset of the INCR AOF, we need to record it when we create
+ * the temp INCR AOF. This variable is used to record the start offset, and
+ * set the start offset of the real INCR AOF when the AOFRW is done. */
+static long long tempIncAofStartReplOffset = 0;
+
 /* ----------------------------------------------------------------------------
  * AOF Manifest file implementation.
  *
@@ -73,10 +80,15 @@ void aof_background_fsync_and_close(int fd);
 #define AOF_MANIFEST_KEY_FILE_NAME "file"
 #define AOF_MANIFEST_KEY_FILE_SEQ "seq"
 #define AOF_MANIFEST_KEY_FILE_TYPE "type"
+#define AOF_MANIFEST_KEY_FILE_STARTOFFSET "startoffset"
+#define AOF_MANIFEST_KEY_FILE_ENDOFFSET "endoffset"
 
 /* Create an empty aofInfo. */
 aofInfo *aofInfoCreate(void) {
-    return zcalloc(sizeof(aofInfo));
+    aofInfo *ai = zcalloc(sizeof(aofInfo));
+    ai->start_offset = -1;
+    ai->end_offset = -1;
+    return ai;
 }
 
 /* Free the aofInfo structure (pointed to by ai) and its embedded file_name. */
@@ -93,6 +105,8 @@ aofInfo *aofInfoDup(aofInfo *orig) {
     ai->file_name = sdsdup(orig->file_name);
     ai->file_seq = orig->file_seq;
     ai->file_type = orig->file_type;
+    ai->start_offset = orig->start_offset;
+    ai->end_offset = orig->end_offset;
     return ai;
 }
 
@@ -105,10 +119,19 @@ sds aofInfoFormat(sds buf, aofInfo *ai) {
     if (sdsneedsrepr(ai->file_name))
         filename_repr = sdscatrepr(sdsempty(), ai->file_name, sdslen(ai->file_name));
 
-    sds ret = sdscatprintf(buf, "%s %s %s %lld %s %c\n",
+    sds ret = sdscatprintf(buf, "%s %s %s %lld %s %c",
                            AOF_MANIFEST_KEY_FILE_NAME, filename_repr ? filename_repr : ai->file_name,
                            AOF_MANIFEST_KEY_FILE_SEQ, ai->file_seq,
                            AOF_MANIFEST_KEY_FILE_TYPE, ai->file_type);
+
+    if (ai->start_offset != -1) {
+        ret = sdscatprintf(ret, " %s %lld", AOF_MANIFEST_KEY_FILE_STARTOFFSET, ai->start_offset);
+        if (ai->end_offset != -1) {
+            ret = sdscatprintf(ret, " %s %lld", AOF_MANIFEST_KEY_FILE_ENDOFFSET, ai->end_offset);
+        }
+    }
+
+    ret = sdscatlen(ret, "\n", 1);
     sdsfree(filename_repr);
 
     return ret;
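To make the effect of the reordering above concrete: the newline is now appended last so the two optional offset fields can be tacked onto the manifest line first. A standalone sketch of the resulting line, with hypothetical offsets (the key names come from the defines above; the file name is just an example):

    #include <stdio.h>

    /* Prints a manifest line shaped like the one aofInfoFormat() builds:
     * base fields first, then "startoffset"/"endoffset" only when they were
     * recorded (-1 means "not set"), and the terminating newline last. */
    int main(void) {
        long long start_offset = 100, end_offset = 1028; /* hypothetical */
        printf("file appendonly.aof.1.incr.aof seq 1 type i");
        if (start_offset != -1) {
            printf(" startoffset %lld", start_offset);
            if (end_offset != -1)
                printf(" endoffset %lld", end_offset);
        }
        printf("\n");
        return 0;
    }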
@@ -304,6 +327,10 @@ aofManifest *aofLoadManifestFromFile(sds am_filepath) {
             ai->file_seq = atoll(argv[i+1]);
         } else if (!strcasecmp(argv[i], AOF_MANIFEST_KEY_FILE_TYPE)) {
             ai->file_type = (argv[i+1])[0];
+        } else if (!strcasecmp(argv[i], AOF_MANIFEST_KEY_FILE_STARTOFFSET)) {
+            ai->start_offset = atoll(argv[i+1]);
+        } else if (!strcasecmp(argv[i], AOF_MANIFEST_KEY_FILE_ENDOFFSET)) {
+            ai->end_offset = atoll(argv[i+1]);
         }
         /* else if (!strcasecmp(argv[i], AOF_MANIFEST_KEY_OTHER)) {} */
     }
@@ -433,12 +460,13 @@ sds getNewBaseFileNameAndMarkPreAsHistory(aofManifest *am) {
  * for example:
  *    appendonly.aof.1.incr.aof
  */
-sds getNewIncrAofName(aofManifest *am) {
+sds getNewIncrAofName(aofManifest *am, long long start_reploff) {
     aofInfo *ai = aofInfoCreate();
     ai->file_type = AOF_FILE_TYPE_INCR;
     ai->file_name = sdscatprintf(sdsempty(), "%s.%lld%s%s", server.aof_filename,
                                  ++am->curr_incr_file_seq, INCR_FILE_SUFFIX, AOF_FORMAT_SUFFIX);
     ai->file_seq = am->curr_incr_file_seq;
+    ai->start_offset = start_reploff;
     listAddNodeTail(am->incr_aof_list, ai);
     am->dirty = 1;
     return ai->file_name;
@@ -456,7 +484,7 @@ sds getLastIncrAofName(aofManifest *am) {
 
     /* If 'incr_aof_list' is empty, just create a new one. */
     if (!listLength(am->incr_aof_list)) {
-        return getNewIncrAofName(am);
+        return getNewIncrAofName(am, server.master_repl_offset);
     }
 
     /* Or return the last one. */
@@ -781,10 +809,11 @@ int openNewIncrAofForAppend(void) {
     if (server.aof_state == AOF_WAIT_REWRITE) {
         /* Use a temporary INCR AOF file to accumulate data during AOF_WAIT_REWRITE. */
         new_aof_name = getTempIncrAofName();
+        tempIncAofStartReplOffset = server.master_repl_offset;
     } else {
         /* Dup a temp aof_manifest to modify. */
         temp_am = aofManifestDup(server.aof_manifest);
-        new_aof_name = sdsdup(getNewIncrAofName(temp_am));
+        new_aof_name = sdsdup(getNewIncrAofName(temp_am, server.master_repl_offset));
     }
     sds new_aof_filepath = makePath(server.aof_dirname, new_aof_name);
     newfd = open(new_aof_filepath, O_WRONLY|O_TRUNC|O_CREAT, 0644);
@@ -833,6 +862,50 @@ cleanup:
     return C_ERR;
 }
 
+/* When we close gracefully the AOF file, we have the chance to persist the
+ * end replication offset of current INCR AOF. */
+void updateCurIncrAofEndOffset(void) {
+    if (server.aof_state != AOF_ON) return;
+    serverAssert(server.aof_manifest != NULL);
+
+    if (listLength(server.aof_manifest->incr_aof_list) == 0) return;
+    aofInfo *ai = listNodeValue(listLast(server.aof_manifest->incr_aof_list));
+    ai->end_offset = server.master_repl_offset;
+    server.aof_manifest->dirty = 1;
+    /* It doesn't matter if the persistence fails since this information is not
+     * critical, we can get an approximate value by start offset plus file size. */
+    persistAofManifest(server.aof_manifest);
+}
+
+/* After loading AOF data, we need to update the `server.master_repl_offset`
+ * based on the information of the last INCR AOF, to avoid the rollback of
+ * the start offset of new INCR AOF. */
+void updateReplOffsetAndResetEndOffset(void) {
+    if (server.aof_state != AOF_ON) return;
+    serverAssert(server.aof_manifest != NULL);
+
+    /* If the INCR file has an end offset, we directly use it, and clear it
+     * to avoid the next time we load the manifest file, we will use the same
+     * offset, but the real offset may have advanced. */
+    if (listLength(server.aof_manifest->incr_aof_list) == 0) return;
+    aofInfo *ai = listNodeValue(listLast(server.aof_manifest->incr_aof_list));
+    if (ai->end_offset != -1) {
+        server.master_repl_offset = ai->end_offset;
+        ai->end_offset = -1;
+        server.aof_manifest->dirty = 1;
+        /* We must update the end offset of INCR file correctly, otherwise we
+         * may keep wrong information in the manifest file, since we continue
+         * to append data to the same INCR file. */
+        if (persistAofManifest(server.aof_manifest) != AOF_OK)
+            exit(1);
+    } else {
+        /* If the INCR file doesn't have an end offset, we need to calculate
+         * the replication offset by the start offset plus the file size. */
+        server.master_repl_offset = (ai->start_offset == -1 ? 0 : ai->start_offset) +
+                                    getAppendOnlyFileSize(ai->file_name, NULL);
+    }
+}
+
 /* Whether to limit the execution of Background AOF rewrite.
  *
  * At present, if AOFRW fails, redis will automatically retry. If it continues
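The recovery rule the second function implements can be restated as one small helper (a sketch with the same semantics; the function name is hypothetical, not code from the patch):

    /* Prefer the end offset persisted on clean shutdown; otherwise
     * approximate it as the recorded start offset (0 if unknown) plus the
     * INCR file's size, exactly as updateReplOffsetAndResetEndOffset does. */
    long long recovered_repl_offset(long long start_offset, long long end_offset,
                                    long long file_size) {
        if (end_offset != -1) return end_offset;
        return (start_offset == -1 ? 0 : start_offset) + file_size;
    }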
@@ -938,6 +1011,7 @@ void stopAppendOnly(void) {
         server.aof_last_fsync = server.mstime;
     }
     close(server.aof_fd);
+    updateCurIncrAofEndOffset();
 
     server.aof_fd = -1;
     server.aof_selected_db = -1;
@@ -1071,35 +1145,34 @@ void flushAppendOnlyFile(int force) {
     mstime_t latency;
 
     if (sdslen(server.aof_buf) == 0) {
-        /* Check if we need to do fsync even the aof buffer is empty,
-         * because previously in AOF_FSYNC_EVERYSEC mode, fsync is
-         * called only when aof buffer is not empty, so if users
-         * stop write commands before fsync called in one second,
-         * the data in page cache cannot be flushed in time. */
-        if (server.aof_fsync == AOF_FSYNC_EVERYSEC &&
-            server.aof_last_incr_fsync_offset != server.aof_last_incr_size &&
-            server.mstime - server.aof_last_fsync >= 1000 &&
-            !(sync_in_progress = aofFsyncInProgress())) {
-            goto try_fsync;
-
-        /* Check if we need to do fsync even the aof buffer is empty,
-         * the reason is described in the previous AOF_FSYNC_EVERYSEC block,
-         * and AOF_FSYNC_ALWAYS is also checked here to handle a case where
-         * aof_fsync is changed from everysec to always. */
-        } else if (server.aof_fsync == AOF_FSYNC_ALWAYS &&
-                   server.aof_last_incr_fsync_offset != server.aof_last_incr_size)
-        {
-            goto try_fsync;
-        } else {
+        if (server.aof_last_incr_fsync_offset == server.aof_last_incr_size) {
             /* All data is fsync'd already: Update fsynced_reploff_pending just in case.
-             * This is needed to avoid a WAITAOF hang in case a module used RM_Call with the NO_AOF flag,
-             * in which case master_repl_offset will increase but fsynced_reploff_pending won't be updated
-             * (because there's no reason, from the AOF POV, to call fsync) and then WAITAOF may wait on
-             * the higher offset (which contains data that was only propagated to replicas, and not to AOF) */
-            if (!sync_in_progress && server.aof_fsync != AOF_FSYNC_NO)
+             * This is needed to avoid a WAITAOF hang in case a module used RM_Call
+             * with the NO_AOF flag, in which case master_repl_offset will increase but
+             * fsynced_reploff_pending won't be updated (because there's no reason, from
+             * the AOF POV, to call fsync) and then WAITAOF may wait on the higher offset
+             * (which contains data that was only propagated to replicas, and not to AOF) */
+            if (!aofFsyncInProgress())
                 atomicSet(server.fsynced_reploff_pending, server.master_repl_offset);
             return;
+        } else {
+            /* Check if we need to do fsync even the aof buffer is empty,
+             * because previously in AOF_FSYNC_EVERYSEC mode, fsync is
+             * called only when aof buffer is not empty, so if users
+             * stop write commands before fsync called in one second,
+             * the data in page cache cannot be flushed in time. */
+            if (server.aof_fsync == AOF_FSYNC_EVERYSEC &&
+                server.mstime - server.aof_last_fsync >= 1000 &&
+                !(sync_in_progress = aofFsyncInProgress()))
+                goto try_fsync;
+
+            /* Check if we need to do fsync even the aof buffer is empty,
+             * the reason is described in the previous AOF_FSYNC_EVERYSEC block,
+             * and AOF_FSYNC_ALWAYS is also checked here to handle a case where
+             * aof_fsync is changed from everysec to always. */
+            if (server.aof_fsync == AOF_FSYNC_ALWAYS)
+                goto try_fsync;
         }
+        return;
     }
 
     if (server.aof_fsync == AOF_FSYNC_EVERYSEC)
@@ -2665,7 +2738,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
         sds temp_incr_aof_name = getTempIncrAofName();
         sds temp_incr_filepath = makePath(server.aof_dirname, temp_incr_aof_name);
         /* Get next new incr aof name. */
-        sds new_incr_filename = getNewIncrAofName(temp_am);
+        sds new_incr_filename = getNewIncrAofName(temp_am, tempIncAofStartReplOffset);
         new_incr_filepath = makePath(server.aof_dirname, new_incr_filename);
         latencyStartMonitor(latency);
         if (rename(temp_incr_filepath, new_incr_filepath) == -1) {
@@ -131,6 +131,7 @@ char *clusterNodeHostname(clusterNode *node);
 const char *clusterNodePreferredEndpoint(clusterNode *n);
 long long clusterNodeReplOffset(clusterNode *node);
 clusterNode *clusterLookupNode(const char *name, int length);
+const char *clusterGetSecret(size_t *len);
 
 /* functions with shared implementations */
 clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, uint64_t cmd_flags, int *error_code);
@@ -1030,6 +1030,8 @@ void clusterInit(void) {
     clusterUpdateMyselfIp();
     clusterUpdateMyselfHostname();
     clusterUpdateMyselfHumanNodename();
+
+    getRandomHexChars(server.cluster->internal_secret, CLUSTER_INTERNALSECRETLEN);
 }
 
 void clusterInitLast(void) {
@@ -1579,6 +1581,14 @@ clusterNode *clusterLookupNode(const char *name, int length) {
     return dictGetVal(de);
 }
 
+const char *clusterGetSecret(size_t *len) {
+    if (!server.cluster) {
+        return NULL;
+    }
+    *len = CLUSTER_INTERNALSECRETLEN;
+    return server.cluster->internal_secret;
+}
+
 /* Get all the nodes in my shard.
  * Note that the list returned is not computed on the fly
  * via slaveof; rather, it is maintained permanently to
@@ -2503,6 +2513,10 @@ uint32_t getShardIdPingExtSize(void) {
     return getAlignedPingExtSize(sizeof(clusterMsgPingExtShardId));
 }
 
+uint32_t getInternalSecretPingExtSize(void) {
+    return getAlignedPingExtSize(sizeof(clusterMsgPingExtInternalSecret));
+}
+
 uint32_t getForgottenNodeExtSize(void) {
     return getAlignedPingExtSize(sizeof(clusterMsgPingExtForgottenNode));
 }
@@ -2594,6 +2608,17 @@ uint32_t writePingExt(clusterMsg *hdr, int gossipcount) {
         totlen += getShardIdPingExtSize();
         extensions++;
 
+        /* Populate insternal secret */
+        if (cursor != NULL) {
+            clusterMsgPingExtInternalSecret *ext = preparePingExt(cursor, CLUSTERMSG_EXT_TYPE_INTERNALSECRET, getInternalSecretPingExtSize());
+            memcpy(ext->internal_secret, server.cluster->internal_secret, CLUSTER_INTERNALSECRETLEN);
+
+            /* Move the write cursor */
+            cursor = nextPingExt(cursor);
+        }
+        totlen += getInternalSecretPingExtSize();
+        extensions++;
+
     if (hdr != NULL) {
         hdr->extensions = htons(extensions);
     }
@@ -2634,9 +2659,14 @@ void clusterProcessPingExtensions(clusterMsg *hdr, clusterLink *link) {
         } else if (type == CLUSTERMSG_EXT_TYPE_SHARDID) {
             clusterMsgPingExtShardId *shardid_ext = (clusterMsgPingExtShardId *) &(ext->ext[0].shard_id);
             ext_shardid = shardid_ext->shard_id;
+        } else if (type == CLUSTERMSG_EXT_TYPE_INTERNALSECRET) {
+            clusterMsgPingExtInternalSecret *internal_secret_ext = (clusterMsgPingExtInternalSecret *) &(ext->ext[0].internal_secret);
+            if (memcmp(server.cluster->internal_secret, internal_secret_ext->internal_secret, CLUSTER_INTERNALSECRETLEN) > 0 ) {
+                memcpy(server.cluster->internal_secret, internal_secret_ext->internal_secret, CLUSTER_INTERNALSECRETLEN);
+            }
         } else {
             /* Unknown type, we will ignore it but log what happened. */
-            serverLog(LL_WARNING, "Received unknown extension type %d", type);
+            serverLog(LL_VERBOSE, "Received unknown extension type %d", type);
         }
 
         /* We know this will be valid since we validated it ahead of time */
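A note on how the secret converges: every node seeds its own random secret in clusterInit() (see the hunk above), and on each received ping extension a node replaces its secret whenever its own compares greater. Repeated gossip rounds therefore drive the whole cluster to the byte-wise smallest secret in circulation. A toy restatement of that rule (illustrative names, not the patch's code):

    #include <string.h>

    #define SECRET_LEN 40 /* matches CLUSTER_INTERNALSECRETLEN */

    /* Keep the byte-wise smaller of (our secret, the gossiped one); once no
     * exchange changes anything, all nodes hold the same secret, which is
     * what AUTH "internal connection" is then validated against. */
    static void merge_secret(char *mine, const char *gossiped) {
        if (memcmp(mine, gossiped, SECRET_LEN) > 0)
            memcpy(mine, gossiped, SECRET_LEN);
    }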
@@ -148,10 +148,12 @@ typedef enum {
     CLUSTERMSG_EXT_TYPE_HUMAN_NODENAME,
     CLUSTERMSG_EXT_TYPE_FORGOTTEN_NODE,
     CLUSTERMSG_EXT_TYPE_SHARDID,
+    CLUSTERMSG_EXT_TYPE_INTERNALSECRET,
 } clusterMsgPingtypes;
 
 /* Helper function for making sure extensions are eight byte aligned. */
 #define EIGHT_BYTE_ALIGN(size) ((((size) + 7) / 8) * 8)
+#define CLUSTER_INTERNALSECRETLEN 40 /* sha1 hex length */
 
 typedef struct {
     char hostname[1]; /* The announced hostname, ends with \0. */
@@ -172,6 +174,10 @@ typedef struct {
     char shard_id[CLUSTER_NAMELEN]; /* The shard_id, 40 bytes fixed. */
 } clusterMsgPingExtShardId;
 
+typedef struct {
+    char internal_secret[CLUSTER_INTERNALSECRETLEN]; /* Current shard internal secret */
+} clusterMsgPingExtInternalSecret;
+
 typedef struct {
     uint32_t length; /* Total length of this extension message (including this header) */
     uint16_t type; /* Type of this extension message (see clusterMsgPingExtTypes) */
@@ -181,6 +187,7 @@ typedef struct {
         clusterMsgPingExtHumanNodename human_nodename;
         clusterMsgPingExtForgottenNode forgotten_node;
         clusterMsgPingExtShardId shard_id;
+        clusterMsgPingExtInternalSecret internal_secret;
     } ext[]; /* Actual extension information, formatted so that the data is 8
               * byte aligned, regardless of its content. */
 } clusterMsgPingExt;
@@ -333,6 +340,7 @@ struct clusterState {
     clusterNode *migrating_slots_to[CLUSTER_SLOTS];
     clusterNode *importing_slots_from[CLUSTER_SLOTS];
     clusterNode *slots[CLUSTER_SLOTS];
+    char internal_secret[CLUSTER_INTERNALSECRETLEN];
     /* The following fields are used to take the slave state on elections. */
     mstime_t failover_auth_time; /* Time of previous or next election. */
     int failover_auth_count; /* Number of votes received so far. */
src/commands.def (143 changed lines)
@@ -533,7 +533,9 @@ struct COMMAND_ARG CLUSTER_FAILOVER_Args[] = {
 
 #ifndef SKIP_CMD_HISTORY_TABLE
 /* CLUSTER FORGET history */
-#define CLUSTER_FORGET_History NULL
+commandHistory CLUSTER_FORGET_History[] = {
+{"7.2.0","Forgotten nodes are automatically propagated across the cluster via gossip."},
+};
 #endif
 
 #ifndef SKIP_CMD_TIPS_TABLE
@@ -954,7 +956,7 @@ struct COMMAND_STRUCT CLUSTER_Subcommands[] = {
 {MAKE_CMD("delslotsrange","Sets hash slot ranges as unbound for a node.","O(N) where N is the total number of the slots between the start slot and end slot arguments.","7.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_DELSLOTSRANGE_History,0,CLUSTER_DELSLOTSRANGE_Tips,0,clusterCommand,-4,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_DELSLOTSRANGE_Keyspecs,0,NULL,1),.args=CLUSTER_DELSLOTSRANGE_Args},
 {MAKE_CMD("failover","Forces a replica to perform a manual failover of its master.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_FAILOVER_History,0,CLUSTER_FAILOVER_Tips,0,clusterCommand,-2,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_FAILOVER_Keyspecs,0,NULL,1),.args=CLUSTER_FAILOVER_Args},
 {MAKE_CMD("flushslots","Deletes all slots information from a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_FLUSHSLOTS_History,0,CLUSTER_FLUSHSLOTS_Tips,0,clusterCommand,2,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_FLUSHSLOTS_Keyspecs,0,NULL,0)},
-{MAKE_CMD("forget","Removes a node from the nodes table.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_FORGET_History,0,CLUSTER_FORGET_Tips,0,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_FORGET_Keyspecs,0,NULL,1),.args=CLUSTER_FORGET_Args},
+{MAKE_CMD("forget","Removes a node from the nodes table.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_FORGET_History,1,CLUSTER_FORGET_Tips,0,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_FORGET_Keyspecs,0,NULL,1),.args=CLUSTER_FORGET_Args},
 {MAKE_CMD("getkeysinslot","Returns the key names in a hash slot.","O(N) where N is the number of requested keys","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_GETKEYSINSLOT_History,0,CLUSTER_GETKEYSINSLOT_Tips,1,clusterCommand,4,CMD_STALE,0,CLUSTER_GETKEYSINSLOT_Keyspecs,0,NULL,2),.args=CLUSTER_GETKEYSINSLOT_Args},
 {MAKE_CMD("help","Returns helpful text about the different subcommands.","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_HELP_History,0,CLUSTER_HELP_Tips,0,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_HELP_Keyspecs,0,NULL,0)},
 {MAKE_CMD("info","Returns information about the state of a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_INFO_History,0,CLUSTER_INFO_Tips,1,clusterCommand,2,CMD_STALE,0,CLUSTER_INFO_Keyspecs,0,NULL,0)},
@@ -3470,6 +3472,78 @@ struct COMMAND_ARG HGETALL_Args[] = {
 {MAKE_ARG("key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
 };
 
+/********** HGETDEL ********************/
+
+#ifndef SKIP_CMD_HISTORY_TABLE
+/* HGETDEL history */
+#define HGETDEL_History NULL
+#endif
+
+#ifndef SKIP_CMD_TIPS_TABLE
+/* HGETDEL tips */
+#define HGETDEL_Tips NULL
+#endif
+
+#ifndef SKIP_CMD_KEY_SPECS_TABLE
+/* HGETDEL key specs */
+keySpec HGETDEL_Keyspecs[1] = {
+{NULL,CMD_KEY_RW|CMD_KEY_ACCESS|CMD_KEY_DELETE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}}
+};
+#endif
+
+/* HGETDEL fields argument table */
+struct COMMAND_ARG HGETDEL_fields_Subargs[] = {
+{MAKE_ARG("numfields",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("field",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,0,NULL)},
+};
+
+/* HGETDEL argument table */
+struct COMMAND_ARG HGETDEL_Args[] = {
+{MAKE_ARG("key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("fields",ARG_TYPE_BLOCK,-1,"FIELDS",NULL,NULL,CMD_ARG_NONE,2,NULL),.subargs=HGETDEL_fields_Subargs},
+};
+
+/********** HGETEX ********************/
+
+#ifndef SKIP_CMD_HISTORY_TABLE
+/* HGETEX history */
+#define HGETEX_History NULL
+#endif
+
+#ifndef SKIP_CMD_TIPS_TABLE
+/* HGETEX tips */
+#define HGETEX_Tips NULL
+#endif
+
+#ifndef SKIP_CMD_KEY_SPECS_TABLE
+/* HGETEX key specs */
+keySpec HGETEX_Keyspecs[1] = {
+{"RW and UPDATE because it changes the TTL",CMD_KEY_RW|CMD_KEY_ACCESS|CMD_KEY_UPDATE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}}
+};
+#endif
+
+/* HGETEX expiration argument table */
+struct COMMAND_ARG HGETEX_expiration_Subargs[] = {
+{MAKE_ARG("seconds",ARG_TYPE_INTEGER,-1,"EX",NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("milliseconds",ARG_TYPE_INTEGER,-1,"PX",NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("unix-time-seconds",ARG_TYPE_UNIX_TIME,-1,"EXAT",NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("unix-time-milliseconds",ARG_TYPE_UNIX_TIME,-1,"PXAT",NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("persist",ARG_TYPE_PURE_TOKEN,-1,"PERSIST",NULL,NULL,CMD_ARG_NONE,0,NULL)},
+};
+
+/* HGETEX fields argument table */
+struct COMMAND_ARG HGETEX_fields_Subargs[] = {
+{MAKE_ARG("numfields",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("field",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,0,NULL)},
+};
+
+/* HGETEX argument table */
+struct COMMAND_ARG HGETEX_Args[] = {
+{MAKE_ARG("key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("expiration",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,5,NULL),.subargs=HGETEX_expiration_Subargs},
+{MAKE_ARG("fields",ARG_TYPE_BLOCK,-1,"FIELDS",NULL,NULL,CMD_ARG_NONE,2,NULL),.subargs=HGETEX_fields_Subargs},
+};
+
 /********** HINCRBY ********************/
 
 #ifndef SKIP_CMD_HISTORY_TABLE
@@ -3901,6 +3975,60 @@ struct COMMAND_ARG HSET_Args[] = {
 {MAKE_ARG("data",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,2,NULL),.subargs=HSET_data_Subargs},
 };
 
+/********** HSETEX ********************/
+
+#ifndef SKIP_CMD_HISTORY_TABLE
+/* HSETEX history */
+#define HSETEX_History NULL
+#endif
+
+#ifndef SKIP_CMD_TIPS_TABLE
+/* HSETEX tips */
+#define HSETEX_Tips NULL
+#endif
+
+#ifndef SKIP_CMD_KEY_SPECS_TABLE
+/* HSETEX key specs */
+keySpec HSETEX_Keyspecs[1] = {
+{NULL,CMD_KEY_RW|CMD_KEY_UPDATE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}}
+};
+#endif
+
+/* HSETEX condition argument table */
+struct COMMAND_ARG HSETEX_condition_Subargs[] = {
+{MAKE_ARG("fnx",ARG_TYPE_PURE_TOKEN,-1,"FNX",NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("fxx",ARG_TYPE_PURE_TOKEN,-1,"FXX",NULL,NULL,CMD_ARG_NONE,0,NULL)},
+};
+
+/* HSETEX expiration argument table */
+struct COMMAND_ARG HSETEX_expiration_Subargs[] = {
+{MAKE_ARG("seconds",ARG_TYPE_INTEGER,-1,"EX",NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("milliseconds",ARG_TYPE_INTEGER,-1,"PX",NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("unix-time-seconds",ARG_TYPE_UNIX_TIME,-1,"EXAT",NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("unix-time-milliseconds",ARG_TYPE_UNIX_TIME,-1,"PXAT",NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("keepttl",ARG_TYPE_PURE_TOKEN,-1,"KEEPTTL",NULL,NULL,CMD_ARG_NONE,0,NULL)},
+};
+
+/* HSETEX fields data argument table */
+struct COMMAND_ARG HSETEX_fields_data_Subargs[] = {
+{MAKE_ARG("field",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("value",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
+};
+
+/* HSETEX fields argument table */
+struct COMMAND_ARG HSETEX_fields_Subargs[] = {
+{MAKE_ARG("numfields",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("data",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,2,NULL),.subargs=HSETEX_fields_data_Subargs},
+};
+
+/* HSETEX argument table */
+struct COMMAND_ARG HSETEX_Args[] = {
+{MAKE_ARG("key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
+{MAKE_ARG("condition",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,2,NULL),.subargs=HSETEX_condition_Subargs},
+{MAKE_ARG("expiration",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,5,NULL),.subargs=HSETEX_expiration_Subargs},
+{MAKE_ARG("fields",ARG_TYPE_BLOCK,-1,"FIELDS",NULL,NULL,CMD_ARG_NONE,2,NULL),.subargs=HSETEX_fields_Subargs},
+};
+
 /********** HSETNX ********************/
 
 #ifndef SKIP_CMD_HISTORY_TABLE
@@ -11036,11 +11164,13 @@ struct COMMAND_STRUCT redisCommandTable[] = {
 /* hash */
 {MAKE_CMD("hdel","Deletes one or more fields and their values from a hash. Deletes the hash if no fields remain.","O(N) where N is the number of fields to be removed.","2.0.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HDEL_History,1,HDEL_Tips,0,hdelCommand,-3,CMD_WRITE|CMD_FAST,ACL_CATEGORY_HASH,HDEL_Keyspecs,1,NULL,2),.args=HDEL_Args},
 {MAKE_CMD("hexists","Determines whether a field exists in a hash.","O(1)","2.0.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HEXISTS_History,0,HEXISTS_Tips,0,hexistsCommand,3,CMD_READONLY|CMD_FAST,ACL_CATEGORY_HASH,HEXISTS_Keyspecs,1,NULL,2),.args=HEXISTS_Args},
-{MAKE_CMD("hexpire","Set expiry for hash field using relative time to expire (seconds)","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HEXPIRE_History,0,HEXPIRE_Tips,0,hexpireCommand,-6,CMD_WRITE|CMD_DENYOOM|CMD_FAST,ACL_CATEGORY_HASH,HEXPIRE_Keyspecs,1,NULL,4),.args=HEXPIRE_Args},
-{MAKE_CMD("hexpireat","Set expiry for hash field using an absolute Unix timestamp (seconds)","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HEXPIREAT_History,0,HEXPIREAT_Tips,0,hexpireatCommand,-6,CMD_WRITE|CMD_DENYOOM|CMD_FAST,ACL_CATEGORY_HASH,HEXPIREAT_Keyspecs,1,NULL,4),.args=HEXPIREAT_Args},
+{MAKE_CMD("hexpire","Set expiry for hash field using relative time to expire (seconds)","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HEXPIRE_History,0,HEXPIRE_Tips,0,hexpireCommand,-6,CMD_WRITE|CMD_FAST,ACL_CATEGORY_HASH,HEXPIRE_Keyspecs,1,NULL,4),.args=HEXPIRE_Args},
+{MAKE_CMD("hexpireat","Set expiry for hash field using an absolute Unix timestamp (seconds)","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HEXPIREAT_History,0,HEXPIREAT_Tips,0,hexpireatCommand,-6,CMD_WRITE|CMD_FAST,ACL_CATEGORY_HASH,HEXPIREAT_Keyspecs,1,NULL,4),.args=HEXPIREAT_Args},
 {MAKE_CMD("hexpiretime","Returns the expiration time of a hash field as a Unix timestamp, in seconds.","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HEXPIRETIME_History,0,HEXPIRETIME_Tips,0,hexpiretimeCommand,-5,CMD_READONLY|CMD_FAST,ACL_CATEGORY_HASH,HEXPIRETIME_Keyspecs,1,NULL,2),.args=HEXPIRETIME_Args},
 {MAKE_CMD("hget","Returns the value of a field in a hash.","O(1)","2.0.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HGET_History,0,HGET_Tips,0,hgetCommand,3,CMD_READONLY|CMD_FAST,ACL_CATEGORY_HASH,HGET_Keyspecs,1,NULL,2),.args=HGET_Args},
 {MAKE_CMD("hgetall","Returns all fields and values in a hash.","O(N) where N is the size of the hash.","2.0.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HGETALL_History,0,HGETALL_Tips,1,hgetallCommand,2,CMD_READONLY,ACL_CATEGORY_HASH,HGETALL_Keyspecs,1,NULL,1),.args=HGETALL_Args},
+{MAKE_CMD("hgetdel","Returns the value of a field and deletes it from the hash.","O(N) where N is the number of specified fields","8.0.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HGETDEL_History,0,HGETDEL_Tips,0,hgetdelCommand,-5,CMD_WRITE|CMD_FAST,ACL_CATEGORY_HASH,HGETDEL_Keyspecs,1,NULL,2),.args=HGETDEL_Args},
+{MAKE_CMD("hgetex","Get the value of one or more fields of a given hash key, and optionally set their expiration.","O(N) where N is the number of specified fields","8.0.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HGETEX_History,0,HGETEX_Tips,0,hgetexCommand,-5,CMD_WRITE|CMD_FAST,ACL_CATEGORY_HASH,HGETEX_Keyspecs,1,NULL,3),.args=HGETEX_Args},
 {MAKE_CMD("hincrby","Increments the integer value of a field in a hash by a number. Uses 0 as initial value if the field doesn't exist.","O(1)","2.0.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HINCRBY_History,0,HINCRBY_Tips,0,hincrbyCommand,4,CMD_WRITE|CMD_DENYOOM|CMD_FAST,ACL_CATEGORY_HASH,HINCRBY_Keyspecs,1,NULL,3),.args=HINCRBY_Args},
 {MAKE_CMD("hincrbyfloat","Increments the floating point value of a field by a number. Uses 0 as initial value if the field doesn't exist.","O(1)","2.6.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HINCRBYFLOAT_History,0,HINCRBYFLOAT_Tips,0,hincrbyfloatCommand,4,CMD_WRITE|CMD_DENYOOM|CMD_FAST,ACL_CATEGORY_HASH,HINCRBYFLOAT_Keyspecs,1,NULL,3),.args=HINCRBYFLOAT_Args},
 {MAKE_CMD("hkeys","Returns all fields in a hash.","O(N) where N is the size of the hash.","2.0.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HKEYS_History,0,HKEYS_Tips,1,hkeysCommand,2,CMD_READONLY,ACL_CATEGORY_HASH,HKEYS_Keyspecs,1,NULL,1),.args=HKEYS_Args},
@@ -11048,13 +11178,14 @@ struct COMMAND_STRUCT redisCommandTable[] = {
 {MAKE_CMD("hmget","Returns the values of all fields in a hash.","O(N) where N is the number of fields being requested.","2.0.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HMGET_History,0,HMGET_Tips,0,hmgetCommand,-3,CMD_READONLY|CMD_FAST,ACL_CATEGORY_HASH,HMGET_Keyspecs,1,NULL,2),.args=HMGET_Args},
 {MAKE_CMD("hmset","Sets the values of multiple fields.","O(N) where N is the number of fields being set.","2.0.0",CMD_DOC_DEPRECATED,"`HSET` with multiple field-value pairs","4.0.0","hash",COMMAND_GROUP_HASH,HMSET_History,0,HMSET_Tips,0,hsetCommand,-4,CMD_WRITE|CMD_DENYOOM|CMD_FAST,ACL_CATEGORY_HASH,HMSET_Keyspecs,1,NULL,2),.args=HMSET_Args},
 {MAKE_CMD("hpersist","Removes the expiration time for each specified field","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HPERSIST_History,0,HPERSIST_Tips,0,hpersistCommand,-5,CMD_WRITE|CMD_FAST,ACL_CATEGORY_HASH,HPERSIST_Keyspecs,1,NULL,2),.args=HPERSIST_Args},
-{MAKE_CMD("hpexpire","Set expiry for hash field using relative time to expire (milliseconds)","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HPEXPIRE_History,0,HPEXPIRE_Tips,0,hpexpireCommand,-6,CMD_WRITE|CMD_DENYOOM|CMD_FAST,ACL_CATEGORY_HASH,HPEXPIRE_Keyspecs,1,NULL,4),.args=HPEXPIRE_Args},
-{MAKE_CMD("hpexpireat","Set expiry for hash field using an absolute Unix timestamp (milliseconds)","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HPEXPIREAT_History,0,HPEXPIREAT_Tips,0,hpexpireatCommand,-6,CMD_WRITE|CMD_DENYOOM|CMD_FAST,ACL_CATEGORY_HASH,HPEXPIREAT_Keyspecs,1,NULL,4),.args=HPEXPIREAT_Args},
+{MAKE_CMD("hpexpire","Set expiry for hash field using relative time to expire (milliseconds)","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HPEXPIRE_History,0,HPEXPIRE_Tips,0,hpexpireCommand,-6,CMD_WRITE|CMD_FAST,ACL_CATEGORY_HASH,HPEXPIRE_Keyspecs,1,NULL,4),.args=HPEXPIRE_Args},
+{MAKE_CMD("hpexpireat","Set expiry for hash field using an absolute Unix timestamp (milliseconds)","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HPEXPIREAT_History,0,HPEXPIREAT_Tips,0,hpexpireatCommand,-6,CMD_WRITE|CMD_FAST,ACL_CATEGORY_HASH,HPEXPIREAT_Keyspecs,1,NULL,4),.args=HPEXPIREAT_Args},
 {MAKE_CMD("hpexpiretime","Returns the expiration time of a hash field as a Unix timestamp, in msec.","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HPEXPIRETIME_History,0,HPEXPIRETIME_Tips,0,hpexpiretimeCommand,-5,CMD_READONLY|CMD_FAST,ACL_CATEGORY_HASH,HPEXPIRETIME_Keyspecs,1,NULL,2),.args=HPEXPIRETIME_Args},
 {MAKE_CMD("hpttl","Returns the TTL in milliseconds of a hash field.","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HPTTL_History,0,HPTTL_Tips,1,hpttlCommand,-5,CMD_READONLY|CMD_FAST,ACL_CATEGORY_HASH,HPTTL_Keyspecs,1,NULL,2),.args=HPTTL_Args},
 {MAKE_CMD("hrandfield","Returns one or more random fields from a hash.","O(N) where N is the number of fields returned","6.2.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HRANDFIELD_History,0,HRANDFIELD_Tips,1,hrandfieldCommand,-2,CMD_READONLY,ACL_CATEGORY_HASH,HRANDFIELD_Keyspecs,1,NULL,2),.args=HRANDFIELD_Args},
 {MAKE_CMD("hscan","Iterates over fields and values of a hash.","O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection.","2.8.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HSCAN_History,0,HSCAN_Tips,1,hscanCommand,-3,CMD_READONLY,ACL_CATEGORY_HASH,HSCAN_Keyspecs,1,NULL,5),.args=HSCAN_Args},
 {MAKE_CMD("hset","Creates or modifies the value of a field in a hash.","O(1) for each field/value pair added, so O(N) to add N field/value pairs when the command is called with multiple field/value pairs.","2.0.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HSET_History,1,HSET_Tips,0,hsetCommand,-4,CMD_WRITE|CMD_DENYOOM|CMD_FAST,ACL_CATEGORY_HASH,HSET_Keyspecs,1,NULL,2),.args=HSET_Args},
+{MAKE_CMD("hsetex","Set the value of one or more fields of a given hash key, and optionally set their expiration.","O(N) where N is the number of fields being set.","8.0.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HSETEX_History,0,HSETEX_Tips,0,hsetexCommand,-6,CMD_WRITE|CMD_DENYOOM|CMD_FAST,ACL_CATEGORY_HASH,HSETEX_Keyspecs,1,NULL,4),.args=HSETEX_Args},
 {MAKE_CMD("hsetnx","Sets the value of a field in a hash only when the field doesn't exist.","O(1)","2.0.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HSETNX_History,0,HSETNX_Tips,0,hsetnxCommand,4,CMD_WRITE|CMD_DENYOOM|CMD_FAST,ACL_CATEGORY_HASH,HSETNX_Keyspecs,1,NULL,3),.args=HSETNX_Args},
 {MAKE_CMD("hstrlen","Returns the length of the value of a field.","O(1)","3.2.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HSTRLEN_History,0,HSTRLEN_Tips,0,hstrlenCommand,3,CMD_READONLY|CMD_FAST,ACL_CATEGORY_HASH,HSTRLEN_Keyspecs,1,NULL,2),.args=HSTRLEN_Args},
 {MAKE_CMD("httl","Returns the TTL in seconds of a hash field.","O(N) where N is the number of specified fields","7.4.0",CMD_DOC_NONE,NULL,NULL,"hash",COMMAND_GROUP_HASH,HTTL_History,0,HTTL_Tips,1,httlCommand,-5,CMD_READONLY|CMD_FAST,ACL_CATEGORY_HASH,HTTL_Keyspecs,1,NULL,2),.args=HTTL_Args},
@@ -7,6 +7,12 @@
     "arity": 3,
     "container": "CLUSTER",
     "function": "clusterCommand",
+    "history": [
+        [
+            "7.2.0",
+            "Forgotten nodes are automatically propagated across the cluster via gossip."
+        ]
+    ],
     "command_flags": [
         "NO_ASYNC_LOADING",
         "ADMIN",
@@ -9,7 +9,6 @@
     "history": [],
     "command_flags": [
         "WRITE",
-        "DENYOOM",
         "FAST"
     ],
     "acl_categories": [
@@ -9,7 +9,6 @@
     "history": [],
     "command_flags": [
         "WRITE",
-        "DENYOOM",
         "FAST"
     ],
     "acl_categories": [
@@ -0,0 +1,78 @@
+{
+    "HGETDEL": {
+        "summary": "Returns the value of a field and deletes it from the hash.",
+        "complexity": "O(N) where N is the number of specified fields",
+        "group": "hash",
+        "since": "8.0.0",
+        "arity": -5,
+        "function": "hgetdelCommand",
+        "history": [],
+        "command_flags": [
+            "WRITE",
+            "FAST"
+        ],
+        "acl_categories": [
+            "HASH"
+        ],
+        "key_specs": [
+            {
+                "flags": [
+                    "RW",
+                    "ACCESS",
+                    "DELETE"
+                ],
+                "begin_search": {
+                    "index": {
+                        "pos": 1
+                    }
+                },
+                "find_keys": {
+                    "range": {
+                        "lastkey": 0,
+                        "step": 1,
+                        "limit": 0
+                    }
+                }
+            }
+        ],
+        "reply_schema": {
+            "description": "List of values associated with the given fields, in the same order as they are requested.",
+            "type": "array",
+            "minItems": 1,
+            "items": {
+                "oneOf": [
+                    {
+                        "type": "string"
+                    },
+                    {
+                        "type": "null"
+                    }
+                ]
+            }
+        },
+        "arguments": [
+            {
+                "name": "key",
+                "type": "key",
+                "key_spec_index": 0
+            },
+            {
+                "name": "fields",
+                "token": "FIELDS",
+                "type": "block",
+                "arguments": [
+                    {
+                        "name": "numfields",
+                        "type": "integer"
+                    },
+                    {
+                        "name": "field",
+                        "type": "string",
+                        "multiple": true
+                    }
+                ]
+            }
+        ]
+    }
+}
@@ -0,0 +1,111 @@
+{
+    "HGETEX": {
+        "summary": "Get the value of one or more fields of a given hash key, and optionally set their expiration.",
+        "complexity": "O(N) where N is the number of specified fields",
+        "group": "hash",
+        "since": "8.0.0",
+        "arity": -5,
+        "function": "hgetexCommand",
+        "history": [],
+        "command_flags": [
+            "WRITE",
+            "FAST"
+        ],
+        "acl_categories": [
+            "HASH"
+        ],
+        "key_specs": [
+            {
+                "notes": "RW and UPDATE because it changes the TTL",
+                "flags": [
+                    "RW",
+                    "ACCESS",
+                    "UPDATE"
+                ],
+                "begin_search": {
+                    "index": {
+                        "pos": 1
+                    }
+                },
+                "find_keys": {
+                    "range": {
+                        "lastkey": 0,
+                        "step": 1,
+                        "limit": 0
+                    }
+                }
+            }
+        ],
+        "reply_schema": {
+            "description": "List of values associated with the given fields, in the same order as they are requested.",
+            "type": "array",
+            "minItems": 1,
+            "items": {
+                "oneOf": [
+                    {
+                        "type": "string"
+                    },
+                    {
+                        "type": "null"
+                    }
+                ]
+            }
+        },
+        "arguments": [
+            {
+                "name": "key",
+                "type": "key",
+                "key_spec_index": 0
+            },
+            {
+                "name": "expiration",
+                "type": "oneof",
+                "optional": true,
+                "arguments": [
+                    {
+                        "name": "seconds",
+                        "type": "integer",
+                        "token": "EX"
+                    },
+                    {
+                        "name": "milliseconds",
+                        "type": "integer",
+                        "token": "PX"
+                    },
+                    {
+                        "name": "unix-time-seconds",
+                        "type": "unix-time",
+                        "token": "EXAT"
+                    },
+                    {
+                        "name": "unix-time-milliseconds",
+                        "type": "unix-time",
+                        "token": "PXAT"
+                    },
+                    {
+                        "name": "persist",
+                        "type": "pure-token",
+                        "token": "PERSIST"
+                    }
+                ]
+            },
+            {
+                "name": "fields",
+                "token": "FIELDS",
+                "type": "block",
+                "arguments": [
+                    {
+                        "name": "numfields",
+                        "type": "integer"
+                    },
+                    {
+                        "name": "field",
+                        "type": "string",
+                        "multiple": true
+                    }
+                ]
+            }
+        ]
+    }
+}
@@ -9,7 +9,6 @@
     "history": [],
     "command_flags": [
         "WRITE",
-        "DENYOOM",
         "FAST"
     ],
     "acl_categories": [
@@ -9,7 +9,6 @@
     "history": [],
     "command_flags": [
         "WRITE",
-        "DENYOOM",
         "FAST"
     ],
     "acl_categories": [
@@ -0,0 +1,132 @@
+{
+    "HSETEX": {
+        "summary": "Set the value of one or more fields of a given hash key, and optionally set their expiration.",
+        "complexity": "O(N) where N is the number of fields being set.",
+        "group": "hash",
+        "since": "8.0.0",
+        "arity": -6,
+        "function": "hsetexCommand",
+        "command_flags": [
+            "WRITE",
+            "DENYOOM",
+            "FAST"
+        ],
+        "acl_categories": [
+            "HASH"
+        ],
+        "key_specs": [
+            {
+                "flags": [
+                    "RW",
+                    "UPDATE"
+                ],
+                "begin_search": {
+                    "index": {
+                        "pos": 1
+                    }
+                },
+                "find_keys": {
+                    "range": {
+                        "lastkey": 0,
+                        "step": 1,
+                        "limit": 0
+                    }
+                }
+            }
+        ],
+        "reply_schema": {
+            "oneOf": [
+                {
+                    "description": "No field was set (due to FXX or FNX flags).",
+                    "const": 0
+                },
+                {
+                    "description": "All the fields were set.",
+                    "const": 1
+                }
+            ]
+        },
+        "arguments": [
+            {
+                "name": "key",
+                "type": "key",
+                "key_spec_index": 0
+            },
+            {
+                "name": "condition",
+                "type": "oneof",
+                "optional": true,
+                "arguments": [
+                    {
+                        "name": "fnx",
+                        "type": "pure-token",
+                        "token": "FNX"
+                    },
+                    {
+                        "name": "fxx",
+                        "type": "pure-token",
+                        "token": "FXX"
+                    }
+                ]
+            },
+            {
+                "name": "expiration",
+                "type": "oneof",
+                "optional": true,
+                "arguments": [
+                    {
+                        "name": "seconds",
+                        "type": "integer",
+                        "token": "EX"
+                    },
+                    {
+                        "name": "milliseconds",
+                        "type": "integer",
+                        "token": "PX"
+                    },
+                    {
+                        "name": "unix-time-seconds",
+                        "type": "unix-time",
+                        "token": "EXAT"
+                    },
+                    {
+                        "name": "unix-time-milliseconds",
+                        "type": "unix-time",
+                        "token": "PXAT"
+                    },
+                    {
+                        "name": "keepttl",
+                        "type": "pure-token",
+                        "token": "KEEPTTL"
+                    }
+                ]
+            },
+            {
+                "name": "fields",
+                "token": "FIELDS",
+                "type": "block",
+                "arguments": [
+                    {
+                        "name": "numfields",
+                        "type": "integer"
+                    },
+                    {
+                        "name": "data",
+                        "type": "block",
+                        "multiple": true,
+                        "arguments": [
+                            {
+                                "name": "field",
+                                "type": "string"
+                            },
+                            {
+                                "name": "value",
+                                "type": "string"
+                            }
+                        ]
+                    }
+                ]
+            }
+        ]
+    }
+}
@@ -29,6 +29,9 @@
         "replication.backlog": {
             "type": "integer"
         },
+        "replica.fullsync.buffer": {
+            "type": "integer"
+        },
         "clients.slaves": {
             "type": "integer"
         },
src/db.c (22 changed lines)
@@ -794,7 +794,10 @@ void flushallSyncBgDone(uint64_t client_id, void *sflush) {
     client *c = lookupClientByID(client_id);
 
     /* Verify that client still exists */
-    if (!c) return;
+    if (!c) {
+        zfree(sflush);
+        return;
+    }
 
     /* Update current_client (Called functions might rely on it) */
     client *old_client = server.current_client;
@@ -1057,10 +1060,11 @@ void scanCallback(void *privdata, const dictEntry *de) {
     serverAssert(!((data->type != LLONG_MAX) && o));
 
     /* Filter an element if it isn't the type we want. */
+    /* TODO: uncomment in redis 8.0
     if (!o && data->type != LLONG_MAX) {
         robj *rval = dictGetVal(de);
         if (!objectTypeCompare(rval, data->type)) return;
-    }
+    }*/
 
     /* Filter element if it does not match the pattern. */
     void *keyStr = dictGetKey(de);
@@ -1207,8 +1211,9 @@ void scanGenericCommand(client *c, robj *o, unsigned long long cursor) {
             typename = c->argv[i+1]->ptr;
             type = getObjectTypeByName(typename);
             if (type == LLONG_MAX) {
+                /* TODO: uncomment in redis 8.0
                 addReplyErrorFormat(c, "unknown type name '%s'", typename);
-                return;
+                return; */
             }
             i+= 2;
         } else if (!strcasecmp(c->argv[i]->ptr, "novalues")) {
@@ -1454,7 +1459,16 @@ void scanGenericCommand(client *c, robj *o, unsigned long long cursor) {
         while ((ln = listNext(&li))) {
             sds key = listNodeValue(ln);
             initStaticStringObject(kobj, key);
-            if (expireIfNeeded(c->db, &kobj, 0)) {
+            /* Filter an element if it isn't the type we want. */
+            /* TODO: remove this in redis 8.0 */
+            if (typename) {
+                robj* typecheck = lookupKeyReadWithFlags(c->db, &kobj, LOOKUP_NOTOUCH|LOOKUP_NONOTIFY);
+                if (!typecheck || !objectTypeCompare(typecheck, type)) {
+                    listDelNode(keys, ln);
+                }
+                continue;
+            }
+            if (expireIfNeeded(c->db, &kobj, 0) != KEY_VALID) {
                 listDelNode(keys, ln);
             }
         }
src/debug.c (24 changed lines)
@@ -397,6 +397,8 @@ void debugCommand(client *c) {
 "    Hard crash and restart after a <milliseconds> delay (default 0).",
 "DIGEST",
 "    Output a hex signature representing the current DB content.",
+"INTERNAL_SECRET",
+"    Return the cluster internal secret (hashed with crc16) or error if not in cluster mode.",
 "DIGEST-VALUE <key> [<key> ...]",
 "    Output a hex signature of the values of all the specified keys.",
 "ERROR <string>",
|
|||
" Enable or disable the main dict and expire dict resizing.",
|
||||
"SCRIPT <LIST|<sha>>",
|
||||
" Output SHA and content of all scripts or of a specific script with its SHA.",
|
||||
"MARK-INTERNAL-CLIENT [UNMARK]",
|
||||
" Promote the current connection to an internal connection.",
|
||||
NULL
|
||||
};
|
||||
addExtendedReplyHelp(c, help, clusterDebugCommandExtendedHelp());
|
||||
|
@@ -759,6 +763,15 @@ NULL
         for (int i = 0; i < 20; i++) d = sdscatprintf(d, "%02x",digest[i]);
         addReplyStatus(c,d);
         sdsfree(d);
+    } else if (!strcasecmp(c->argv[1]->ptr,"internal_secret") && c->argc == 2) {
+        size_t len;
+        const char *internal_secret = clusterGetSecret(&len);
+        if (!internal_secret) {
+            addReplyError(c, "Internal secret is missing");
+        } else {
+            uint16_t hash = crc16(internal_secret, len);
+            addReplyLongLong(c, hash);
+        }
     } else if (!strcasecmp(c->argv[1]->ptr,"digest-value") && c->argc >= 2) {
         /* DEBUG DIGEST-VALUE key key key ... key. */
         addReplyArrayLen(c,c->argc-2);
@@ -1063,6 +1076,17 @@ NULL
             return;
         }
         addReply(c,shared.ok);
+    } else if(!strcasecmp(c->argv[1]->ptr,"mark-internal-client") && c->argc < 4) {
+        if (c->argc == 2) {
+            c->flags |= CLIENT_INTERNAL;
+            addReply(c, shared.ok);
+        } else if (c->argc == 3 && !strcasecmp(c->argv[2]->ptr, "unmark")) {
+            c->flags &= ~CLIENT_INTERNAL;
+            addReply(c, shared.ok);
+        } else {
+            addReplySubcommandSyntaxError(c);
+            return;
+        }
     } else if(!handleDebugClusterCommand(c)) {
         addReplySubcommandSyntaxError(c);
         return;
src/defrag.c (989 changed lines; diff suppressed because it is too large)
@@ -162,7 +162,7 @@ int evictionPoolPopulate(redisDb *db, kvstore *samplekvs, struct evictionPoolEnt
         idle = 255-LFUDecrAndReturn(o);
     } else if (server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {
         /* In this case the sooner the expire the better. */
-        idle = ULLONG_MAX - (long)dictGetVal(de);
+        idle = ULLONG_MAX - dictGetSignedIntegerVal(de);
     } else {
         serverPanic("Unknown eviction policy in evictionPoolPopulate()");
    }
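Why the one-line change above is a correctness fix: in the expires dict the TTL is stored in the entry's integer slot, so it must be read back with the matching accessor instead of casting the pointer accessor. A simplified sketch of the value union involved (modeled on dict.h, not copied from it; the type name is illustrative):

    #include <stdint.h>

    /* A dict entry's value is a union; dictGetVal() reads the pointer
     * member while dictGetSignedIntegerVal() reads s64, the member that
     * was actually written when the expire time was stored. Casting the
     * pointer member to long just reinterprets bits and can yield a bogus
     * idle score for MAXMEMORY_VOLATILE_TTL. */
    union dict_entry_value_sketch {
        void *val;    /* what dictGetVal() returns */
        uint64_t u64;
        int64_t s64;  /* what dictGetSignedIntegerVal() returns */
        double d;
    };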
@@ -12,9 +12,15 @@
  * Copyright (c) 2011-Present, Redis Ltd. and contributors.
  * All rights reserved.
  *
+ * Copyright (c) 2024-present, Valkey contributors.
+ * All rights reserved.
+ *
  * Licensed under your choice of the Redis Source Available License 2.0
  * (RSALv2) or the Server Side Public License v1 (SSPLv1).
+ *
+ * Portions of this file are available under BSD3 terms; see REDISCONTRIBUTIONS for more information.
  */
 
 #include "fmacros.h"
 
+#include <string.h>
@@ -802,10 +808,14 @@
  * within dict, it only reallocates the memory used by the dict structure itself using
  * the provided allocation function. This feature was added for the active defrag feature.
  *
- * The 'defragfn' callback is called with a reference to the dict
- * that callback can reallocate. */
-void kvstoreDictLUTDefrag(kvstore *kvs, kvstoreDictLUTDefragFunction *defragfn) {
-    for (int didx = 0; didx < kvs->num_dicts; didx++) {
+ * With 16k dictionaries for cluster mode with 1 shard, this operation may require substantial time
+ * to execute. A "cursor" is used to perform the operation iteratively. When first called, a
+ * cursor value of 0 should be provided. The return value is an updated cursor which should be
+ * provided on the next iteration. The operation is complete when 0 is returned.
+ *
+ * The 'defragfn' callback is called with a reference to the dict that callback can reallocate. */
+unsigned long kvstoreDictLUTDefrag(kvstore *kvs, unsigned long cursor, kvstoreDictLUTDefragFunction *defragfn) {
+    for (int didx = cursor; didx < kvs->num_dicts; didx++) {
         dict **d = kvstoreGetDictRef(kvs, didx), *newd;
         if (!*d)
             continue;
@@ -818,7 +828,9 @@ void kvstoreDictLUTDefrag(kvstore *kvs, kvstoreDictLUTDefragFunction *defragfn)
        if (metadata->rehashing_node)
            metadata->rehashing_node->value = *d;
        }
        return (didx + 1);
    }
    return 0;
}

uint64_t kvstoreGetHash(kvstore *kvs, const void *key)
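The cursor contract above lends itself to a simple incremental driver. A minimal sketch, assuming an existing kvstore and using the same callback convention as this diff (the callback returns the new dict pointer, or NULL when the allocation did not move); the helper names are illustrative, not part of this change:

    static dict *defragDictCallback(dict *d) {
        /* activeDefragAlloc() returns the new pointer, or NULL if the
         * allocation was not moved. */
        return activeDefragAlloc(d);
    }

    static void defragAllDictsIncrementally(kvstore *kvs) {
        unsigned long cursor = 0;
        do {
            cursor = kvstoreDictLUTDefrag(kvs, cursor, defragDictCallback);
            /* A real caller would yield to other work between iterations. */
        } while (cursor != 0);
    }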
@@ -1059,13 +1071,14 @@ int kvstoreTest(int argc, char **argv, int flags) {
    }

    TEST("Verify that a rehashing dict's node in the rehashing list is correctly updated after defragmentation") {
        unsigned long cursor = 0;
        kvstore *kvs = kvstoreCreate(&KvstoreDictTestType, 0, KVSTORE_ALLOCATE_DICTS_ON_DEMAND);
        for (i = 0; i < 256; i++) {
            de = kvstoreDictAddRaw(kvs, 0, stringFromInt(i), NULL);
            if (listLength(kvs->rehashing)) break;
        }
        assert(listLength(kvs->rehashing));
        kvstoreDictLUTDefrag(kvs, defragLUTTestCallback);
        while ((cursor = kvstoreDictLUTDefrag(kvs, cursor, defragLUTTestCallback)) != 0) {}
        while (kvstoreIncrementallyRehash(kvs, 1000)) {}
        kvstoreRelease(kvs);
    }
@@ -1,3 +1,16 @@
/*
 * Copyright (c) 2009-Present, Redis Ltd.
 * All rights reserved.
 *
 * Copyright (c) 2024-present, Valkey contributors.
 * All rights reserved.
 *
 * Licensed under your choice of the Redis Source Available License 2.0
 * (RSALv2) or the Server Side Public License v1 (SSPLv1).
 *
 * Portions of this file are available under BSD3 terms; see REDISCONTRIBUTIONS for more information.
 */

#ifndef DICTARRAY_H_
#define DICTARRAY_H_
@@ -78,7 +91,7 @@ unsigned int kvstoreDictGetSomeKeys(kvstore *kvs, int didx, dictEntry **des, uns
int kvstoreDictExpand(kvstore *kvs, int didx, unsigned long size);
unsigned long kvstoreDictScanDefrag(kvstore *kvs, int didx, unsigned long v, dictScanFunction *fn, dictDefragFunctions *defragfns, void *privdata);
typedef dict *(kvstoreDictLUTDefragFunction)(dict *d);
void kvstoreDictLUTDefrag(kvstore *kvs, kvstoreDictLUTDefragFunction *defragfn);
unsigned long kvstoreDictLUTDefrag(kvstore *kvs, unsigned long cursor, kvstoreDictLUTDefragFunction *defragfn);
void *kvstoreDictFetchValue(kvstore *kvs, int didx, const void *key);
dictEntry *kvstoreDictFind(kvstore *kvs, int didx, void *key);
dictEntry *kvstoreDictAddRaw(kvstore *kvs, int didx, void *key, dictEntry **existing);
src/module.c: 221 changed lines
@@ -2,8 +2,13 @@
 * Copyright (c) 2016-Present, Redis Ltd.
 * All rights reserved.
 *
 * Copyright (c) 2024-present, Valkey contributors.
 * All rights reserved.
 *
 * Licensed under your choice of the Redis Source Available License 2.0
 * (RSALv2) or the Server Side Public License v1 (SSPLv1).
 *
 * Portions of this file are available under BSD3 terms; see REDISCONTRIBUTIONS for more information.
 */

/* --------------------------------------------------------------------------
@@ -439,7 +444,7 @@ typedef int (*RedisModuleConfigApplyFunc)(RedisModuleCtx *ctx, void *privdata, R
struct ModuleConfig {
    sds name;  /* Fullname of the config (as it appears in the config file) */
    sds alias; /* Optional alias for the configuration. NULL if none exists */

    int unprefixedFlag; /* Indicates if the REDISMODULE_CONFIG_UNPREFIXED flag was set.
                         * If the configuration name was prefixed, during get_fn/set_fn
                         * callbacks it should be reported without the prefix */
@@ -1151,6 +1156,7 @@ int64_t commandFlagsFromString(char *s) {
        else if (!strcasecmp(t,"no-cluster")) flags |= CMD_MODULE_NO_CLUSTER;
        else if (!strcasecmp(t,"no-mandatory-keys")) flags |= CMD_NO_MANDATORY_KEYS;
        else if (!strcasecmp(t,"allow-busy")) flags |= CMD_ALLOW_BUSY;
        else if (!strcasecmp(t, "internal")) flags |= (CMD_INTERNAL|CMD_NOSCRIPT); /* We also disallow internal commands in scripts. */
        else break;
    }
    sdsfreesplitres(tokens,count);
@@ -1235,6 +1241,9 @@ RedisModuleCommand *moduleCreateCommandProxy(struct RedisModule *module, sds dec
 *   RM_Yield.
 * * **"getchannels-api"**: The command implements the interface to return
 *   the arguments that are channels.
 * * **"internal"**: Internal command, one that should not be exposed to user
 *   connections. For example, module commands that are called by modules, or
 *   commands that do not perform ACL validations (relying on earlier checks).
 *
 * The last three parameters specify which arguments of the new command are
 * Redis keys. See https://redis.io/commands/command for more information.
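As a sketch of how a module might opt into this flag (the command name and handler are hypothetical; per the flag parsing above, "internal" also implies noscript):

    static int InternalOp_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
        REDISMODULE_NOT_USED(argv);
        REDISMODULE_NOT_USED(argc);
        return RedisModule_ReplyWithSimpleString(ctx, "OK");
    }

    /* In RedisModule_OnLoad: the command is hidden from user connections. */
    if (RedisModule_CreateCommand(ctx, "mymodule.internalop",
            InternalOp_RedisCommand, "internal", 0, 0, 0) == REDISMODULE_ERR)
        return REDISMODULE_ERR;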
@@ -2300,6 +2309,7 @@ void RM_SetModuleAttribs(RedisModuleCtx *ctx, const char *name, int ver, int api
    module->options = 0;
    module->info_cb = 0;
    module->defrag_cb = 0;
    module->defrag_cb_2 = 0;
    module->defrag_start_cb = 0;
    module->defrag_end_cb = 0;
    module->loadmod = NULL;
@@ -3901,6 +3911,9 @@ int RM_GetSelectedDb(RedisModuleCtx *ctx) {
 *    context is using RESP3.
 *
 *  * REDISMODULE_CTX_FLAGS_SERVER_STARTUP: The Redis instance is starting.
 *
 *  * REDISMODULE_CTX_FLAGS_DEBUG_ENABLED: Debug commands are enabled for this
 *    context.
 */
int RM_GetContextFlags(RedisModuleCtx *ctx) {
    int flags = 0;
@@ -3923,6 +3936,9 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) {
        if (c && (c->flags & (CLIENT_DIRTY_CAS|CLIENT_DIRTY_EXEC))) {
            flags |= REDISMODULE_CTX_FLAGS_MULTI_DIRTY;
        }
        if (c && allowProtectedAction(server.enable_debug_cmd, c)) {
            flags |= REDISMODULE_CTX_FLAGS_DEBUG_ENABLED;
        }
    }

    if (scriptIsRunning())
@@ -3990,6 +4006,11 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) {
    if (listLength(server.loadmodule_queue) > 0)
        flags |= REDISMODULE_CTX_FLAGS_SERVER_STARTUP;

    /* If debug commands are completely enabled */
    if (server.enable_debug_cmd == PROTECTED_ACTION_ALLOWED_YES) {
        flags |= REDISMODULE_CTX_FLAGS_DEBUG_ENABLED;
    }

    return flags;
}
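From module code, the new flag is queried like any other context flag; a minimal sketch (the wrapper function is hypothetical):

    static int debugEnabledForContext(RedisModuleCtx *ctx) {
        int flags = RedisModule_GetContextFlags(ctx);
        return (flags & REDISMODULE_CTX_FLAGS_DEBUG_ENABLED) != 0;
    }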
@@ -6284,6 +6305,8 @@ fmterr:
 *      dependent activity, such as ACL checks within scripts will proceed as
 *      expected.
 *      Otherwise, the command will run as the Redis unrestricted user.
 *      Upon sending a command from an internal connection, this flag is
 *      ignored and the command will run as the Redis unrestricted user.
 * * `S` -- Run the command in a script mode, this means that it will raise
 *      an error if a command which is not allowed inside a script
 *      (flagged with the `deny-script` flag) is invoked (like SHUTDOWN).
@@ -6392,8 +6415,12 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
    }
    if (ctx->module) ctx->module->in_call++;

    /* Attach the user of the context or client.
     * Internal connections always run with the unrestricted user. */
    user *user = NULL;
    if (flags & REDISMODULE_ARGV_RUN_AS_USER) {
    if ((flags & REDISMODULE_ARGV_RUN_AS_USER) &&
        !(ctx->client->flags & CLIENT_INTERNAL))
    {
        user = ctx->user ? ctx->user->user : ctx->client->user;
        if (!user) {
            errno = ENOTSUP;
@@ -6424,6 +6451,17 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
     * if necessary.
     */
    c->cmd = c->lastcmd = c->realcmd = lookupCommand(c->argv,c->argc);

    /* We nullify the command if it is not supposed to be seen by the client,
     * such that it will be rejected like an unknown command. */
    if (c->cmd &&
        (c->cmd->flags & CMD_INTERNAL) &&
        (flags & REDISMODULE_ARGV_RUN_AS_USER) &&
        !((ctx->client->flags & CLIENT_INTERNAL) || mustObeyClient(ctx->client)))
    {
        c->cmd = c->lastcmd = c->realcmd = NULL;
    }

    sds err;
    if (!commandCheckExistence(c, error_as_call_replies? &err : NULL)) {
        errno = ENOENT;
@@ -6544,7 +6582,9 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
     *
     * If RM_SetContextUser has set a user, that user is used, otherwise
     * use the attached client's user. If there is no attached client user and no manually
     * set user, an error will be returned */
     * set user, an error will be returned.
     * An internal command should only succeed for an internal connection, AOF,
     * and master commands. */
    if (flags & REDISMODULE_ARGV_RUN_AS_USER) {
        int acl_errpos;
        int acl_retval;
@@ -11224,7 +11264,7 @@ static void moduleScanKeyCallback(void *privdata, const dictEntry *de) {
 * The way it should be used:
 *
 *      RedisModuleScanCursor *c = RedisModule_ScanCursorCreate();
 *      RedisModuleKey *key = RedisModule_OpenKey(...)
 *      RedisModuleKey *key = RedisModule_OpenKey(...);
 *      while(RedisModule_ScanKey(key, c, callback, privateData));
 *      RedisModule_CloseKey(key);
 *      RedisModule_ScanCursorDestroy(c);
@@ -11234,13 +11274,13 @@ static void moduleScanKeyCallback(void *privdata, const dictEntry *de) {
 *
 *      RedisModuleScanCursor *c = RedisModule_ScanCursorCreate();
 *      RedisModule_ThreadSafeContextLock(ctx);
 *      RedisModuleKey *key = RedisModule_OpenKey(...)
 *      RedisModuleKey *key = RedisModule_OpenKey(...);
 *      while(RedisModule_ScanKey(ctx, c, callback, privateData)){
 *          RedisModule_CloseKey(key);
 *          RedisModule_ThreadSafeContextUnlock(ctx);
 *          // do some background job
 *          RedisModule_ThreadSafeContextLock(ctx);
 *          RedisModuleKey *key = RedisModule_OpenKey(...)
 *          key = RedisModule_OpenKey(...);
 *      }
 *      RedisModule_CloseKey(key);
 *      RedisModule_ScanCursorDestroy(c);
@@ -12465,8 +12505,8 @@ int moduleLoad(const char *path, void **module_argv, int module_argc, int is_loa
    ctx.module->onload = 0;

    int post_load_err = 0;
    if (listLength(ctx.module->module_configs) && !ctx.module->configs_initialized) {
        serverLogRaw(LL_WARNING, "Module Configurations were not set, likely a missing LoadConfigs call. Unloading the module.");
    if (listLength(ctx.module->module_configs) && !(ctx.module->configs_initialized & MODULE_CONFIGS_USER_VALS)) {
        serverLogRaw(LL_WARNING, "Module Configurations were not set, missing LoadConfigs call. Unloading the module.");
        post_load_err = 1;
    }
@@ -12856,6 +12896,22 @@ long long getModuleNumericConfig(ModuleConfig *module_config) {
    return module_config->get_fn.get_numeric(rname, module_config->privdata);
}

int loadModuleDefaultConfigs(RedisModule *module) {
    listIter li;
    listNode *ln;
    const char *err = NULL;
    listRewind(module->module_configs, &li);
    while ((ln = listNext(&li))) {
        ModuleConfig *module_config = listNodeValue(ln);
        if (!performModuleConfigSetDefaultFromName(module_config->name, &err)) {
            serverLog(LL_WARNING, "Issue attempting to set default value of configuration %s : %s", module_config->name, err);
            return REDISMODULE_ERR;
        }
    }
    module->configs_initialized |= MODULE_CONFIGS_DEFAULTS;
    return REDISMODULE_OK;
}

/* This function takes a module and a list of configs stored as sds NAME VALUE pairs.
 * It attempts to call set on each of these configs. */
int loadModuleConfigs(RedisModule *module) {
@@ -12863,13 +12919,14 @@ int loadModuleConfigs(RedisModule *module) {
    listNode *ln;
    const char *err = NULL;
    listRewind(module->module_configs, &li);
    const int set_default_if_missing = !(module->configs_initialized & MODULE_CONFIGS_DEFAULTS);
    while ((ln = listNext(&li))) {
        ModuleConfig *module_config = listNodeValue(ln);
        dictEntry *de = dictUnlink(server.module_configs_queue, module_config->name);
        if ((!de) && (module_config->alias))
            de = dictUnlink(server.module_configs_queue, module_config->alias);

        /* If found in the queue, set the value. Otherwise, set the default value. */

        /* If found in the queue, set the value. Otherwise, set the default value. */
        if (de) {
            if (!performModuleConfigSetFromName(dictGetKey(de), dictGetVal(de), &err)) {
                serverLog(LL_WARNING, "Issue during loading of configuration %s : %s", (sds) dictGetKey(de), err);
@@ -12878,7 +12935,7 @@ int loadModuleConfigs(RedisModule *module) {
                return REDISMODULE_ERR;
            }
            dictFreeUnlinkedEntry(server.module_configs_queue, de);
        } else {
        } else if (set_default_if_missing) {
            if (!performModuleConfigSetDefaultFromName(module_config->name, &err)) {
                serverLog(LL_WARNING, "Issue attempting to set default value of configuration %s : %s", module_config->name, err);
                dictEmpty(server.module_configs_queue, NULL);
@@ -12886,7 +12943,7 @@ int loadModuleConfigs(RedisModule *module) {
            }
        }
    }
    module->configs_initialized = 1;
    module->configs_initialized = MODULE_CONFIGS_ALL_APPLIED;
    return REDISMODULE_OK;
}
@@ -13232,6 +13289,24 @@ int RM_RegisterNumericConfig(RedisModuleCtx *ctx, const char *name, long long de
    return REDISMODULE_OK;
}

/* Applies all default configurations for the parameters the module registered.
 * Only call this function if the module would like to make changes to the
 * configuration values before the actual values are applied by RedisModule_LoadConfigs.
 * Otherwise it is sufficient to call RedisModule_LoadConfigs, which already sets the default values if needed.
 * This makes it possible to distinguish between default values and user-provided values,
 * and to apply other changes between setting the defaults and the user values.
 * This will return REDISMODULE_ERR if it is called:
 * 1. outside RedisModule_OnLoad
 * 2. more than once
 * 3. after the RedisModule_LoadConfigs call */
int RM_LoadDefaultConfigs(RedisModuleCtx *ctx) {
    if (!ctx || !ctx->module || !ctx->module->onload || ctx->module->configs_initialized) {
        return REDISMODULE_ERR;
    }
    RedisModule *module = ctx->module;
    /* Load default configs of the module */
    return loadModuleDefaultConfigs(module);
}
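The intended call order inside RedisModule_OnLoad, sketched with placeholder registration calls (the module name is hypothetical):

    int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
        REDISMODULE_NOT_USED(argv);
        REDISMODULE_NOT_USED(argc);
        if (RedisModule_Init(ctx, "mymodule", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
            return REDISMODULE_ERR;
        /* ... RedisModule_Register{String,Numeric,Bool,Enum}Config() calls ... */

        /* Optional: apply registered defaults now, so the module can observe
         * or adjust them before the user values arrive. */
        if (RedisModule_LoadDefaultConfigs(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR;

        /* Then apply user-provided values (conf file or MODULE LOADEX args). */
        if (RedisModule_LoadConfigs(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR;
        return REDISMODULE_OK;
    }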
/* Applies all pending configurations on the module load. This should be called
 * after all of the configurations have been registered for the module inside of RedisModule_OnLoad.
 * This will return REDISMODULE_ERR if it is called outside RedisModule_OnLoad.
@@ -13243,8 +13318,7 @@ int RM_LoadConfigs(RedisModuleCtx *ctx) {
    }
    RedisModule *module = ctx->module;
    /* Load configs from conf file or arguments from loadex */
    if (loadModuleConfigs(module)) return REDISMODULE_ERR;
    return REDISMODULE_OK;
    return loadModuleConfigs(module);
}

/* --------------------------------------------------------------------------
@@ -13378,6 +13452,17 @@ int RM_RdbSave(RedisModuleCtx *ctx, RedisModuleRdbStream *stream, int flags) {
    return REDISMODULE_OK;
}

/* Returns the internal secret of the cluster.
 * Should be used to authenticate as an internal connection to a node in the
 * cluster, thereby gaining the permissions to execute internal commands.
 */
const char* RM_GetInternalSecret(RedisModuleCtx *ctx, size_t *len) {
    UNUSED(ctx);
    serverAssert(len != NULL);
    const char *secret = clusterGetSecret(len);
    return secret;
}
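A sketch of module-side use, with the transport and AUTH handshake left out; it assumes (not confirmed by this diff) that a NULL or empty secret means no internal secret is available:

    static int haveInternalSecret(RedisModuleCtx *ctx) {
        size_t len = 0;
        const char *secret = RedisModule_GetInternalSecret(ctx, &len);
        if (secret == NULL || len == 0) return 0; /* assumed: e.g. not in cluster mode */
        /* Use 'secret'/'len' to authenticate an outgoing connection to another
         * node as an internal connection (module-specific, not shown). */
        return 1;
    }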
/* Redis MODULE command.
 *
 * MODULE LIST
@@ -13699,16 +13784,6 @@ const char *RM_GetCurrentCommandName(RedisModuleCtx *ctx) {
 * ## Defrag API
 * -------------------------------------------------------------------------- */

/* The defrag context, used to manage state during calls to the data type
 * defrag callback.
 */
struct RedisModuleDefragCtx {
    long long int endtime;
    unsigned long *cursor;
    struct redisObject *key; /* Optional name of key processed, NULL when unknown. */
    int dbid;                /* The dbid of the key being processed, -1 when unknown. */
};

/* Register a defrag callback for global data, i.e. anything that the module
 * may allocate that is not tied to a specific data type.
 */
@@ -13717,6 +13792,17 @@ int RM_RegisterDefragFunc(RedisModuleCtx *ctx, RedisModuleDefragFunc cb) {
    return REDISMODULE_OK;
}

/* Register a defrag callback for global data, i.e. anything that the module
 * may allocate that is not tied to a specific data type.
 * This is a more advanced version of RM_RegisterDefragFunc, in that it takes
 * a callback that has a return value, and can use RM_DefragShouldStop to
 * indicate whether it should be called again later (non-zero) or is done (returned 0).
 */
int RM_RegisterDefragFunc2(RedisModuleCtx *ctx, RedisModuleDefragFunc2 cb) {
    ctx->module->defrag_cb_2 = cb;
    return REDISMODULE_OK;
}
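A sketch of a conforming callback; the batch helpers are hypothetical stand-ins for the module's own bookkeeping:

    /* Returns 1 to be invoked again later, 0 when all global data is done. */
    static int GlobalDefrag2(RedisModuleDefragCtx *ctx) {
        while (my_have_more_global_items()) {   /* hypothetical helper */
            my_defrag_one_global_item(ctx);     /* hypothetical helper */
            if (RedisModule_DefragShouldStop(ctx))
                return 1; /* time budget exhausted, resume on the next call */
        }
        return 0;
    }

    /* In RedisModule_OnLoad: */
    RedisModule_RegisterDefragFunc2(ctx, GlobalDefrag2);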
/* Register defrag callbacks that will be called when the defrag operation starts and ends.
 *
 * The callbacks are the same as `RM_RegisterDefragFunc` but the user
@@ -13742,7 +13828,7 @@ int RM_RegisterDefragCallbacks(RedisModuleCtx *ctx, RedisModuleDefragFunc start,
 * so it generally makes sense to do small batches of work in between calls.
 */
int RM_DefragShouldStop(RedisModuleDefragCtx *ctx) {
    return (ctx->endtime != 0 && ctx->endtime < ustime());
    return (ctx->endtime != 0 && ctx->endtime <= getMonotonicUs());
}

/* Store an arbitrary cursor value for future re-use.
@@ -13844,13 +13930,82 @@ RedisModuleString *RM_DefragRedisModuleString(RedisModuleDefragCtx *ctx, RedisMo
    return activeDefragStringOb(str);
}

/* Defrag callback for radix tree iterator, called for each node,
 * used in order to defrag the node allocations. */
int moduleDefragRaxNode(raxNode **noderef) {
    raxNode *newnode = activeDefragAlloc(*noderef);
    if (newnode) {
        *noderef = newnode;
        return 1;
    }
    return 0;
}

/* Defragment a Redis Module Dictionary by scanning its contents and calling a value
 * callback for each value.
 *
 * The callback gets the current value in the dict, and should return non-NULL with a new pointer,
 * if the value was re-allocated to a different address. The callback also gets the key name just as a reference.
 *
 * The API can work incrementally by accepting a seek position to continue from, and
 * returning the next position to seek to on the next call (or returning NULL when the iteration is completed).
 *
 * This API returns a new dict if it was re-allocated to a new address (will only
 * be attempted when *seekTo is NULL on entry).
 */
RedisModuleDict *RM_DefragRedisModuleDict(RedisModuleDefragCtx *ctx, RedisModuleDict *dict, RedisModuleDefragDictValueCallback valueCB, RedisModuleString **seekTo) {
    RedisModuleDict *newdict = NULL;
    raxIterator ri;

    if (*seekTo == NULL) {
        /* if last seek is NULL, we start a new iteration */
        rax* newrax = NULL;
        if ((newdict = activeDefragAlloc(dict)))
            dict = newdict;
        if ((newrax = activeDefragAlloc(dict->rax)))
            dict->rax = newrax;
    }

    raxStart(&ri,dict->rax);
    if (*seekTo == NULL) {
        /* assign the iterator node callback before the seek, so that the
         * initial nodes that are processed till the first item are covered */
        ri.node_cb = moduleDefragRaxNode;
        raxSeek(&ri,"^",NULL,0);
    } else {
        /* if cursor is non-zero, we seek to the static 'last' */
        if (!raxSeek(&ri,">", (*seekTo)->ptr, sdslen((*seekTo)->ptr))) {
            goto cleanup;
        }
        /* assign the iterator node callback after the seek, so that the
         * initial nodes that are processed till now aren't covered */
        ri.node_cb = moduleDefragRaxNode;
    }

    while (raxNext(&ri)) {
        void *newdata = valueCB(ctx, ri.data, ri.key, ri.key_len);
        if (newdata)
            raxSetData(ri.node, ri.data=newdata);
        if (RM_DefragShouldStop(ctx)) {
            if (*seekTo) RM_FreeString(NULL, *seekTo);
            *seekTo = RM_CreateString(NULL, (const char *)ri.key, ri.key_len);
            raxStop(&ri);
            return newdict;
        }
    }
cleanup:
    if (*seekTo) RM_FreeString(NULL, *seekTo);
    *seekTo = NULL;
    raxStop(&ri);
    return newdict;
}
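A sketch of how a module might drive this from a defrag callback, persisting the seek position between invocations (the dict and function names are hypothetical):

    static RedisModuleDict *mydict;           /* hypothetical module-global dict */
    static RedisModuleString *myseek = NULL;  /* persists across invocations */

    static void *DefragDictValue(RedisModuleDefragCtx *ctx, void *data,
                                 unsigned char *key, size_t keylen) {
        (void)key; (void)keylen;
        /* Return the new pointer if the value moved, or NULL otherwise. */
        return RedisModule_DefragAlloc(ctx, data);
    }

    static int DefragGlobals(RedisModuleDefragCtx *ctx) {
        mydict = RedisModule_DefragRedisModuleDict(ctx, mydict, DefragDictValue, &myseek);
        /* Non-NULL seek means the scan was interrupted; ask to be called again. */
        return myseek != NULL;
    }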
/* Perform a late defrag of a module datatype key.
 *
 * Returns a zero value (and initializes the cursor) if no more needs to be done,
 * or a non-zero value otherwise.
 */
int moduleLateDefrag(robj *key, robj *value, unsigned long *cursor, long long endtime, int dbid) {
int moduleLateDefrag(robj *key, robj *value, unsigned long *cursor, monotime endtime, int dbid) {
    moduleValue *mv = value->ptr;
    moduleType *mt = mv->type;
@@ -13908,16 +14063,6 @@ int moduleDefragValue(robj *key, robj *value, int dbid) {
    return 1;
}

/* Call registered module API defrag functions */
void moduleDefragGlobals(void) {
    dictForEach(modules, struct RedisModule, module,
        if (module->defrag_cb) {
            RedisModuleDefragCtx defrag_ctx = { 0, NULL, NULL, -1};
            module->defrag_cb(&defrag_ctx);
        }
    );
}

/* Call registered module API defrag start functions */
void moduleDefragStart(void) {
    dictForEach(modules, struct RedisModule, module,
@@ -14297,11 +14442,13 @@ void moduleRegisterCoreAPI(void) {
    REGISTER_API(GetCurrentCommandName);
    REGISTER_API(GetTypeMethodVersion);
    REGISTER_API(RegisterDefragFunc);
    REGISTER_API(RegisterDefragFunc2);
    REGISTER_API(RegisterDefragCallbacks);
    REGISTER_API(DefragAlloc);
    REGISTER_API(DefragAllocRaw);
    REGISTER_API(DefragFreeRaw);
    REGISTER_API(DefragRedisModuleString);
    REGISTER_API(DefragRedisModuleDict);
    REGISTER_API(DefragShouldStop);
    REGISTER_API(DefragCursorSet);
    REGISTER_API(DefragCursorGet);
@@ -14313,10 +14460,12 @@ void moduleRegisterCoreAPI(void) {
    REGISTER_API(RegisterNumericConfig);
    REGISTER_API(RegisterStringConfig);
    REGISTER_API(RegisterEnumConfig);
    REGISTER_API(LoadDefaultConfigs);
    REGISTER_API(LoadConfigs);
    REGISTER_API(RegisterAuthCallback);
    REGISTER_API(RdbStreamCreateFromFile);
    REGISTER_API(RdbStreamFree);
    REGISTER_API(RdbLoad);
    REGISTER_API(RdbSave);
    REGISTER_API(GetInternalSecret);
}
@@ -3099,6 +3099,7 @@ sds catClientInfoString(sds s, client *client) {
    if (client->flags & CLIENT_NO_EVICT) *p++ = 'e';
    if (client->flags & CLIENT_NO_TOUCH) *p++ = 'T';
    if (client->flags & CLIENT_REPL_RDB_CHANNEL) *p++ = 'C';
    if (client->flags & CLIENT_INTERNAL) *p++ = 'I';
    if (p == flags) *p++ = 'N';
    *p++ = '\0';
src/object.c: 13 changed lines
@@ -988,7 +988,7 @@ size_t objectComputeSize(robj *key, robj *o, size_t sample_size, int dbid) {
    dict *d;
    dictIterator *di;
    struct dictEntry *de;
    size_t asize = 0, elesize = 0, samples = 0;
    size_t asize = 0, elesize = 0, elecount = 0, samples = 0;

    if (o->type == OBJ_STRING) {
        if(o->encoding == OBJ_ENCODING_INT) {
@@ -1007,9 +1007,10 @@ size_t objectComputeSize(robj *key, robj *o, size_t sample_size, int dbid) {
        asize = sizeof(*o)+sizeof(quicklist);
        do {
            elesize += sizeof(quicklistNode)+zmalloc_size(node->entry);
            elecount += node->count;
            samples++;
        } while ((node = node->next) && samples < sample_size);
        asize += (double)elesize/samples*ql->len;
        asize += (double)elesize/elecount*ql->count;
    } else if (o->encoding == OBJ_ENCODING_LISTPACK) {
        asize = sizeof(*o)+zmalloc_size(o->ptr);
    } else {
@@ -1209,6 +1210,9 @@ struct redisMemOverhead *getMemoryOverheadData(void) {
            server.repl_backlog->blocks_index->numnodes * sizeof(raxNode) +
            raxSize(server.repl_backlog->blocks_index) * sizeof(void*);
    }

    mh->replica_fullsync_buffer = server.repl_full_sync_buffer.mem_used;
    mem_total += mh->replica_fullsync_buffer;
    mem_total += mh->repl_backlog;
    mem_total += mh->clients_slaves;
@@ -1560,7 +1564,7 @@ NULL
    } else if (!strcasecmp(c->argv[1]->ptr,"stats") && c->argc == 2) {
        struct redisMemOverhead *mh = getMemoryOverheadData();

        addReplyMapLen(c,32+mh->num_dbs);
        addReplyMapLen(c,33+mh->num_dbs);

        addReplyBulkCString(c,"peak.allocated");
        addReplyLongLong(c,mh->peak_allocated);
@@ -1574,6 +1578,9 @@ NULL
        addReplyBulkCString(c,"replication.backlog");
        addReplyLongLong(c,mh->repl_backlog);

        addReplyBulkCString(c,"replica.fullsync.buffer");
        addReplyLongLong(c,mh->replica_fullsync_buffer);

        addReplyBulkCString(c,"clients.slaves");
        addReplyLongLong(c,mh->clients_slaves);
@@ -3118,7 +3118,7 @@ static void usage(int err) {
" -i <interval> When -r is used, waits <interval> seconds per command.\n"
" It is possible to specify sub-second times like -i 0.1.\n"
" This interval is also used in --scan and --stat per cycle.\n"
" and in --bigkeys, --memkeys, and --hotkeys per 100 cycles.\n"
" and in --bigkeys, --memkeys, --keystats, and --hotkeys per 100 cycles.\n"
" -n <db> Database number.\n"
" -2 Start session in RESP2 protocol mode.\n"
" -3 Start session in RESP3 protocol mode.\n"
@@ -3181,9 +3181,10 @@ version,tls_usage);
" --hotkeys Sample Redis keys looking for hot keys.\n"
" only works when maxmemory-policy is *lfu.\n"
" --scan List all keys using the SCAN command.\n"
" --pattern <pat> Keys pattern when using the --scan, --bigkeys or --hotkeys\n"
" options (default: *).\n"
" --count <count> Count option when using the --scan, --bigkeys or --hotkeys (default: 10).\n"
" --pattern <pat> Keys pattern when using the --scan, --bigkeys, --memkeys,\n"
" --keystats or --hotkeys options (default: *).\n"
" --count <count> Count option when using the --scan, --bigkeys, --memkeys,\n"
" --keystats or --hotkeys (default: 10).\n"
" --quoted-pattern <pat> Same as --pattern, but the specified string can be\n"
" quoted, in order to pass an otherwise non binary-safe string.\n"
" --intrinsic-latency <sec> Run a test to measure intrinsic system latency.\n"
@@ -208,11 +208,13 @@ typedef struct RedisModuleStreamID {
#define REDISMODULE_CTX_FLAGS_ASYNC_LOADING (1<<23)
/* Redis is starting. */
#define REDISMODULE_CTX_FLAGS_SERVER_STARTUP (1<<24)
/* This context can execute debug commands. */
#define REDISMODULE_CTX_FLAGS_DEBUG_ENABLED (1<<25)

/* Next context flag, must be updated when adding new flags above!
 * This flag should not be used directly by the module.
 * Use RedisModule_GetContextFlagsAll instead. */
#define _REDISMODULE_CTX_FLAGS_NEXT (1<<25)
#define _REDISMODULE_CTX_FLAGS_NEXT (1<<26)

/* Keyspace changes notification classes. Every class is associated with a
 * character for configuration purposes.
@@ -838,7 +840,9 @@ typedef struct RedisModuleDefragCtx RedisModuleDefragCtx;
 * exposed since you can't cast a function pointer to (void *). */
typedef void (*RedisModuleInfoFunc)(RedisModuleInfoCtx *ctx, int for_crash_report);
typedef void (*RedisModuleDefragFunc)(RedisModuleDefragCtx *ctx);
typedef int (*RedisModuleDefragFunc2)(RedisModuleDefragCtx *ctx);
typedef void (*RedisModuleUserChangedFunc) (uint64_t client_id, void *privdata);
typedef void *(*RedisModuleDefragDictValueCallback)(RedisModuleDefragCtx *ctx, void *data, unsigned char *key, size_t keylen);

/* ------------------------- End of common defines ------------------------ */
@@ -1303,11 +1307,13 @@ REDISMODULE_API int *(*RedisModule_GetCommandKeys)(RedisModuleCtx *ctx, RedisMod
REDISMODULE_API int *(*RedisModule_GetCommandKeysWithFlags)(RedisModuleCtx *ctx, RedisModuleString **argv, int argc, int *num_keys, int **out_flags) REDISMODULE_ATTR;
REDISMODULE_API const char *(*RedisModule_GetCurrentCommandName)(RedisModuleCtx *ctx) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_RegisterDefragFunc)(RedisModuleCtx *ctx, RedisModuleDefragFunc func) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_RegisterDefragFunc2)(RedisModuleCtx *ctx, RedisModuleDefragFunc2 func) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_RegisterDefragCallbacks)(RedisModuleCtx *ctx, RedisModuleDefragFunc start, RedisModuleDefragFunc end) REDISMODULE_ATTR;
REDISMODULE_API void *(*RedisModule_DefragAlloc)(RedisModuleDefragCtx *ctx, void *ptr) REDISMODULE_ATTR;
REDISMODULE_API void *(*RedisModule_DefragAllocRaw)(RedisModuleDefragCtx *ctx, size_t size) REDISMODULE_ATTR;
REDISMODULE_API void (*RedisModule_DefragFreeRaw)(RedisModuleDefragCtx *ctx, void *ptr) REDISMODULE_ATTR;
REDISMODULE_API RedisModuleString *(*RedisModule_DefragRedisModuleString)(RedisModuleDefragCtx *ctx, RedisModuleString *str) REDISMODULE_ATTR;
REDISMODULE_API RedisModuleDict *(*RedisModule_DefragRedisModuleDict)(RedisModuleDefragCtx *ctx, RedisModuleDict *dict, RedisModuleDefragDictValueCallback valueCB, RedisModuleString **seekTo) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_DefragShouldStop)(RedisModuleDefragCtx *ctx) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_DefragCursorSet)(RedisModuleDefragCtx *ctx, unsigned long cursor) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_DefragCursorGet)(RedisModuleDefragCtx *ctx, unsigned long *cursor) REDISMODULE_ATTR;
@@ -1321,10 +1327,12 @@ REDISMODULE_API int (*RedisModule_RegisterNumericConfig)(RedisModuleCtx *ctx, co
REDISMODULE_API int (*RedisModule_RegisterStringConfig)(RedisModuleCtx *ctx, const char *name, const char *default_val, unsigned int flags, RedisModuleConfigGetStringFunc getfn, RedisModuleConfigSetStringFunc setfn, RedisModuleConfigApplyFunc applyfn, void *privdata) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_RegisterEnumConfig)(RedisModuleCtx *ctx, const char *name, int default_val, unsigned int flags, const char **enum_values, const int *int_values, int num_enum_vals, RedisModuleConfigGetEnumFunc getfn, RedisModuleConfigSetEnumFunc setfn, RedisModuleConfigApplyFunc applyfn, void *privdata) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_LoadConfigs)(RedisModuleCtx *ctx) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_LoadDefaultConfigs)(RedisModuleCtx *ctx) REDISMODULE_ATTR;
REDISMODULE_API RedisModuleRdbStream *(*RedisModule_RdbStreamCreateFromFile)(const char *filename) REDISMODULE_ATTR;
REDISMODULE_API void (*RedisModule_RdbStreamFree)(RedisModuleRdbStream *stream) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_RdbLoad)(RedisModuleCtx *ctx, RedisModuleRdbStream *stream, int flags) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_RdbSave)(RedisModuleCtx *ctx, RedisModuleRdbStream *stream, int flags) REDISMODULE_ATTR;
REDISMODULE_API const char * (*RedisModule_GetInternalSecret)(RedisModuleCtx *ctx, size_t *len) REDISMODULE_ATTR;

#define RedisModule_IsAOFClient(id) ((id) == UINT64_MAX)
@@ -1674,11 +1682,13 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int
    REDISMODULE_GET_API(GetCommandKeysWithFlags);
    REDISMODULE_GET_API(GetCurrentCommandName);
    REDISMODULE_GET_API(RegisterDefragFunc);
    REDISMODULE_GET_API(RegisterDefragFunc2);
    REDISMODULE_GET_API(RegisterDefragCallbacks);
    REDISMODULE_GET_API(DefragAlloc);
    REDISMODULE_GET_API(DefragAllocRaw);
    REDISMODULE_GET_API(DefragFreeRaw);
    REDISMODULE_GET_API(DefragRedisModuleString);
    REDISMODULE_GET_API(DefragRedisModuleDict);
    REDISMODULE_GET_API(DefragShouldStop);
    REDISMODULE_GET_API(DefragCursorSet);
    REDISMODULE_GET_API(DefragCursorGet);
@@ -1692,10 +1702,12 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int
    REDISMODULE_GET_API(RegisterStringConfig);
    REDISMODULE_GET_API(RegisterEnumConfig);
    REDISMODULE_GET_API(LoadConfigs);
    REDISMODULE_GET_API(LoadDefaultConfigs);
    REDISMODULE_GET_API(RdbStreamCreateFromFile);
    REDISMODULE_GET_API(RdbStreamFree);
    REDISMODULE_GET_API(RdbLoad);
    REDISMODULE_GET_API(RdbSave);
    REDISMODULE_GET_API(GetInternalSecret);

    if (RedisModule_IsModuleNameBusy && RedisModule_IsModuleNameBusy(name)) return REDISMODULE_ERR;
    RedisModule_SetModuleAttribs(ctx,name,ver,apiver);
@@ -519,6 +519,9 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) {
    /* We can't have slaves attached and no backlog. */
    serverAssert(!(listLength(slaves) != 0 && server.repl_backlog == NULL));

    /* Update the time of sending replication stream to replicas. */
    server.repl_stream_lastio = server.unixtime;

    /* Must install write handler for all replicas first before feeding
     * replication stream. */
    prepareReplicasToWrite();
@@ -660,6 +663,10 @@ void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv,
    listRewind(monitors,&li);
    while((ln = listNext(&li))) {
        client *monitor = ln->value;
        /* Do not show internal commands to non-internal clients. */
        if (c->realcmd && (c->realcmd->flags & CMD_INTERNAL) && !(monitor->flags & CLIENT_INTERNAL)) {
            continue;
        }
        addReply(monitor,cmdobj);
        updateClientMemUsageAndBucket(monitor);
    }
@@ -2444,6 +2451,14 @@ void readSyncBulkPayload(connection *conn) {
    /* Send the initial ACK immediately to put this replica in online state. */
    if (usemark) replicationSendAck();

    /* Restart the AOF subsystem now that we finished the sync. This
     * will trigger an AOF rewrite, and when done will start appending
     * to the new file. */
    if (server.aof_enabled) {
        serverLog(LL_NOTICE, "MASTER <-> REPLICA sync: Starting AOF after a successful sync");
        startAppendOnlyWithRetry();
    }

    if (rdbchannel) {
        int close_asap;
@@ -2465,13 +2480,6 @@ void readSyncBulkPayload(connection *conn) {
        freeClientAsync(server.master);
    }

    /* Restart the AOF subsystem now that we finished the sync. This
     * will trigger an AOF rewrite, and when done will start appending
     * to the new file. */
    if (server.aof_enabled) {
        serverLog(LL_NOTICE, "MASTER <-> REPLICA sync: Starting AOF after a successful sync");
        startAppendOnlyWithRetry();
    }
    return;

error:
@@ -3671,6 +3679,7 @@ static void rdbChannelReplDataBufInit(void) {
    serverAssert(server.repl_full_sync_buffer.blocks == NULL);
    server.repl_full_sync_buffer.size = 0;
    server.repl_full_sync_buffer.used = 0;
    server.repl_full_sync_buffer.mem_used = 0;
    server.repl_full_sync_buffer.blocks = listCreate();
    server.repl_full_sync_buffer.blocks->free = zfree;
}
@@ -3682,6 +3691,7 @@ static void rdbChannelReplDataBufFree(void) {
    server.repl_full_sync_buffer.blocks = NULL;
    server.repl_full_sync_buffer.size = 0;
    server.repl_full_sync_buffer.used = 0;
    server.repl_full_sync_buffer.mem_used = 0;
}

/* Replication: Replica side.
@@ -3752,6 +3762,7 @@ void rdbChannelBufferReplData(connection *conn) {

    listAddNodeTail(server.repl_full_sync_buffer.blocks, tail);
    server.repl_full_sync_buffer.size += tail->size;
    server.repl_full_sync_buffer.mem_used += usable_size + sizeof(listNode);

    /* Update buffer's peak */
    if (server.repl_full_sync_buffer.peak < server.repl_full_sync_buffer.size)
@@ -3791,7 +3802,8 @@ int rdbChannelStreamReplDataToDb(client *c) {

    server.repl_full_sync_buffer.used -= used;
    server.repl_full_sync_buffer.size -= size;

    server.repl_full_sync_buffer.mem_used -= (size + sizeof(listNode) +
                                              sizeof(replDataBufBlock));
    if (server.repl_debug_pause & REPL_DEBUG_ON_STREAMING_REPL_BUF)
        debugPauseProcess();
@@ -4470,8 +4482,6 @@ long long replicationGetSlaveOffset(void) {

/* Replication cron function, called 1 time per second. */
void replicationCron(void) {
    static long long replication_cron_loops = 0;

    /* Check failover status first, to see if we need to start
     * handling the failover. */
    updateFailoverStatus();
@@ -4524,9 +4534,12 @@ void replicationCron(void) {
    listNode *ln;
    robj *ping_argv[1];

    /* First, send PING according to ping_slave_period. */
    if ((replication_cron_loops % server.repl_ping_slave_period) == 0 &&
        listLength(server.slaves))
    /* First, send PING according to ping_slave_period. The reason the master
     * sends PING is to keep the connection with the replica active, so the
     * master need not send PING to replicas if it has already sent the
     * replication stream within the past repl_ping_slave_period. */
    if (server.masterhost == NULL && listLength(server.slaves) &&
        server.unixtime >= server.repl_stream_lastio + server.repl_ping_slave_period)
    {
        /* Note that we don't send the PING if the clients are paused during
         * a Redis Cluster manual failover: the PING we send will otherwise
@@ -4568,7 +4581,7 @@ void replicationCron(void) {
            (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END &&
             server.rdb_child_type != RDB_CHILD_TYPE_SOCKET));

        if (is_presync) {
        if (is_presync && !(slave->flags & CLIENT_CLOSE_ASAP)) {
            connWrite(slave->conn, "\n", 1);
        }
    }
@@ -4663,7 +4676,6 @@ void replicationCron(void) {

    /* Refresh the number of slaves with lag <= min-slaves-max-lag. */
    refreshGoodSlavesCount();
    replication_cron_loops++; /* Incremented with frequency 1 HZ. */
}

int shouldStartChildReplication(int *mincapa_out, int *req_out) {
src/server.c: 118 changed lines
@@ -1637,25 +1637,7 @@ void whileBlockedCron(void) {
    mstime_t latency;
    latencyStartMonitor(latency);

    /* In some cases we may be called with big intervals, so we may need to do
     * extra work here. This is because some of the functions in serverCron rely
     * on the fact that it is performed every 10 ms or so. For instance, if
     * activeDefragCycle needs to utilize 25% cpu, it will utilize 2.5ms, so we
     * need to call it multiple times. */
    long hz_ms = 1000/server.hz;
    while (server.blocked_last_cron < server.mstime) {

        /* Defrag keys gradually. */
        activeDefragCycle();

        server.blocked_last_cron += hz_ms;

        /* Increment cronloop so that run_with_period works. */
        server.cronloops++;
    }

    /* Other cron jobs do not need to be done in a loop. No need to check
     * server.blocked_last_cron since we have an early exit at the top. */
    defragWhileBlocked();

    /* Update memory stats during loading (excluding blocked scripts) */
    if (server.loading) cronUpdateMemoryStats();
@@ -2034,6 +2016,7 @@ void createSharedObjects(void) {
    shared.set = createStringObject("SET",3);
    shared.eval = createStringObject("EVAL",4);
    shared.hpexpireat = createStringObject("HPEXPIREAT",10);
    shared.hpersist = createStringObject("HPERSIST",8);
    shared.hdel = createStringObject("HDEL",4);

    /* Shared command argument */
@@ -2192,6 +2175,7 @@ void initServerConfig(void) {
    server.repl_down_since = 0; /* Never connected, repl is down since EVER. */
    server.master_repl_offset = 0;
    server.fsynced_reploff_pending = 0;
    server.repl_stream_lastio = server.unixtime;

    /* Replication partial resync backlog */
    server.repl_backlog = NULL;
@@ -2756,8 +2740,6 @@ void initServer(void) {
        server.db[j].watched_keys = dictCreate(&keylistDictType);
        server.db[j].id = j;
        server.db[j].avg_ttl = 0;
        server.db[j].defrag_later = listCreate();
        listSetFreeMethod(server.db[j].defrag_later, sdsfreegeneric);
    }
    evictionPoolAlloc(); /* Initialize the LRU keys pool. */
    /* Note that server.pubsub_channels was chosen to be a kvstore (with only one dict, which
@@ -3562,6 +3544,11 @@ int incrCommandStatsOnError(struct redisCommand *cmd, int flags) {
    return res;
}

/* Returns true if the command is not internal, or the connection is internal. */
static bool commandVisibleForClient(client *c, struct redisCommand *cmd) {
    return (!(cmd->flags & CMD_INTERNAL)) || (c->flags & CLIENT_INTERNAL);
}

/* Call() is the core of Redis execution of a command.
 *
 * The following flags can be passed:
@@ -3713,7 +3700,8 @@ void call(client *c, int flags) {
     * Other exceptions is a client which is unblocked and retrying to process the command
     * or we are currently in the process of loading AOF. */
    if (update_command_stats && !reprocessing_command &&
        !(c->cmd->flags & (CMD_SKIP_MONITOR|CMD_ADMIN))) {
        !(c->cmd->flags & (CMD_SKIP_MONITOR|CMD_ADMIN)))
    {
        robj **argv = c->original_argv ? c->original_argv : c->argv;
        int argc = c->original_argv ? c->original_argc : c->argc;
        replicationFeedMonitors(c,server.monitors,c->db->id,argv,argc);
@@ -3992,6 +3980,7 @@ int processCommand(client *c) {
     * we do not have to repeat the same checks */
    if (!client_reprocessing_command) {
        struct redisCommand *cmd = c->iolookedcmd ? c->iolookedcmd : lookupCommand(c->argv, c->argc);

        if (!cmd) {
            /* Handle possible security attacks. */
            if (!strcasecmp(c->argv[0]->ptr,"host:") || !strcasecmp(c->argv[0]->ptr,"post")) {
@@ -3999,6 +3988,13 @@ int processCommand(client *c) {
                return C_ERR;
            }
        }

        /* Internal commands appear nonexistent to non-internal connections.
         * Masters and AOF loads are implicitly internal. */
        if (cmd && (cmd->flags & CMD_INTERNAL) && !((c->flags & CLIENT_INTERNAL) || mustObeyClient(c))) {
            cmd = NULL;
        }

        c->cmd = c->lastcmd = c->realcmd = cmd;
        sds err;
        if (!commandCheckExistence(c, &err)) {
@@ -4585,6 +4581,9 @@ int finishShutdown(void) {
        }
    }

    /* Update the end offset of current INCR AOF if possible. */
    updateCurIncrAofEndOffset();

    /* Free the AOF manifest. */
    if (server.aof_manifest) aofManifestFree(server.aof_manifest);
@@ -5019,7 +5018,7 @@ void addReplyCommandKeySpecs(client *c, struct redisCommand *cmd) {

/* Reply with an array of sub-command using the provided reply callback. */
void addReplyCommandSubCommands(client *c, struct redisCommand *cmd, void (*reply_function)(client*, struct redisCommand*), int use_map) {
    if (!cmd->subcommands_dict) {
    if (!cmd->subcommands_dict || !commandVisibleForClient(c, cmd)) {
        addReplySetLen(c, 0);
        return;
    }
@@ -5041,7 +5040,7 @@ void addReplyCommandSubCommands(client *c, struct redisCommand *cmd, void (*repl

/* Output the representation of a Redis command. Used by the COMMAND command and COMMAND INFO. */
void addReplyCommandInfo(client *c, struct redisCommand *cmd) {
    if (!cmd) {
    if (!cmd || !commandVisibleForClient(c, cmd)) {
        addReplyNull(c);
    } else {
        int firstkey = 0, lastkey = 0, keystep = 0;
@@ -5145,7 +5144,7 @@ void getKeysSubcommandImpl(client *c, int with_flags) {
    getKeysResult result = GETKEYS_RESULT_INIT;
    int j;

    if (!cmd) {
    if (!cmd || !commandVisibleForClient(c, cmd)) {
        addReplyError(c,"Invalid command specified");
        return;
    } else if (!doesCommandHaveKeys(cmd)) {
@@ -5189,22 +5188,39 @@ void getKeysSubcommand(client *c) {
    getKeysSubcommandImpl(c, 0);
}

/* COMMAND (no args) */
void commandCommand(client *c) {
void genericCommandCommand(client *c, int count_only) {
    dictIterator *di;
    dictEntry *de;
    void *len = NULL;
    int count = 0;

    if (!count_only)
        len = addReplyDeferredLen(c);

    addReplyArrayLen(c, dictSize(server.commands));
    di = dictGetIterator(server.commands);
    while ((de = dictNext(di)) != NULL) {
        addReplyCommandInfo(c, dictGetVal(de));
        struct redisCommand *cmd = dictGetVal(de);
        if (!commandVisibleForClient(c, cmd))
            continue;
        if (!count_only)
            addReplyCommandInfo(c, dictGetVal(de));
        count++;
    }
    dictReleaseIterator(di);
    if (count_only)
        addReplyLongLong(c, count);
    else
        setDeferredArrayLen(c, len, count);
}

/* COMMAND (no args) */
void commandCommand(client *c) {
    genericCommandCommand(c, 0);
}

/* COMMAND COUNT */
void commandCountCommand(client *c) {
    addReplyLongLong(c, dictSize(server.commands));
    genericCommandCommand(c, 1);
}

typedef enum {
@@ -5258,7 +5274,7 @@ void commandListWithFilter(client *c, dict *commands, commandListFilter filter,

    while ((de = dictNext(di)) != NULL) {
        struct redisCommand *cmd = dictGetVal(de);
        if (!shouldFilterFromCommandList(cmd,&filter)) {
        if (commandVisibleForClient(c, cmd) && !shouldFilterFromCommandList(cmd,&filter)) {
            addReplyBulkCBuffer(c, cmd->fullname, sdslen(cmd->fullname));
            (*numcmds)++;
        }
@@ -5277,8 +5293,10 @@ void commandListWithoutFilter(client *c, dict *commands, int *numcmds) {

    while ((de = dictNext(di)) != NULL) {
        struct redisCommand *cmd = dictGetVal(de);
        addReplyBulkCBuffer(c, cmd->fullname, sdslen(cmd->fullname));
        (*numcmds)++;
        if (commandVisibleForClient(c, cmd)) {
            addReplyBulkCBuffer(c, cmd->fullname, sdslen(cmd->fullname));
            (*numcmds)++;
        }

        if (cmd->subcommands_dict) {
            commandListWithoutFilter(c, cmd->subcommands_dict, numcmds);
@@ -5334,14 +5352,7 @@ void commandInfoCommand(client *c) {
    int i;

    if (c->argc == 2) {
        dictIterator *di;
        dictEntry *de;
        addReplyArrayLen(c, dictSize(server.commands));
        di = dictGetIterator(server.commands);
        while ((de = dictNext(di)) != NULL) {
            addReplyCommandInfo(c, dictGetVal(de));
        }
        dictReleaseIterator(di);
        genericCommandCommand(c, 0);
    } else {
        addReplyArrayLen(c, c->argc-2);
        for (i = 2; i < c->argc; i++) {
@@ -5353,25 +5364,29 @@ void commandInfoCommand(client *c) {
/* COMMAND DOCS [command-name [command-name ...]] */
void commandDocsCommand(client *c) {
    int i;
    int numcmds = 0;
    if (c->argc == 2) {
        /* Reply with an array of all commands */
        dictIterator *di;
        dictEntry *de;
        addReplyMapLen(c, dictSize(server.commands));
        void *replylen = addReplyDeferredLen(c);
        di = dictGetIterator(server.commands);
        while ((de = dictNext(di)) != NULL) {
            struct redisCommand *cmd = dictGetVal(de);
            addReplyBulkCBuffer(c, cmd->fullname, sdslen(cmd->fullname));
            addReplyCommandDocs(c, cmd);
            if (commandVisibleForClient(c, cmd)) {
                addReplyBulkCBuffer(c, cmd->fullname, sdslen(cmd->fullname));
                addReplyCommandDocs(c, cmd);
                numcmds++;
            }
        }
        dictReleaseIterator(di);
        setDeferredMapLen(c,replylen,numcmds);
    } else {
        /* Reply with an array of the requested commands (if we find them) */
        int numcmds = 0;
        void *replylen = addReplyDeferredLen(c);
        for (i = 2; i < c->argc; i++) {
            struct redisCommand *cmd = lookupCommandBySds(c->argv[i]->ptr);
            if (!cmd)
            if (!cmd || !commandVisibleForClient(c, cmd))
                continue;
            addReplyBulkCBuffer(c, cmd->fullname, sdslen(cmd->fullname));
            addReplyCommandDocs(c, cmd);
@@ -5818,6 +5833,7 @@ sds genRedisInfoString(dict *section_dict, int all_sections, int everything) {
            "mem_not_counted_for_evict:%zu\r\n", freeMemoryGetNotCountedMemory(),
            "mem_replication_backlog:%zu\r\n", mh->repl_backlog,
            "mem_total_replication_buffers:%zu\r\n", server.repl_buffer_mem,
            "mem_replica_full_sync_buffer:%zu\r\n", server.repl_full_sync_buffer.mem_used,
            "mem_clients_slaves:%zu\r\n", mh->clients_slaves,
            "mem_clients_normal:%zu\r\n", mh->clients_normal,
            "mem_cluster_links:%zu\r\n", mh->cluster_links,
@@ -6793,6 +6809,15 @@ void dismissMemoryInChild(void) {
        dismissMemory(o, o->size);
    }

    /* Dismiss accumulated repl buffer on replica. */
    if (server.repl_full_sync_buffer.blocks) {
        listRewind(server.repl_full_sync_buffer.blocks, &li);
        while((ln = listNext(&li))) {
            replDataBufBlock *o = listNodeValue(ln);
            dismissMemory(o, o->size);
        }
    }

    /* Dismiss all clients memory. */
    listRewind(server.clients, &li);
    while((ln = listNext(&li))) {
@@ -6823,6 +6848,7 @@ void loadDataFromDisk(void) {
            exit(1);
        if (ret != AOF_NOT_EXIST)
            serverLog(LL_NOTICE, "DB loaded from append only file: %.3f seconds", (float)(ustime()-start)/1000000);
        updateReplOffsetAndResetEndOffset();
    } else {
        rdbSaveInfo rsi = RDB_SAVE_INFO_INIT;
        int rsi_is_valid = 0;
src/server.h: 46 changed lines
@@ -225,6 +225,7 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT];
#define CMD_ALLOW_BUSY ((1ULL<<26))
#define CMD_MODULE_GETCHANNELS (1ULL<<27) /* Use the modules getchannels interface. */
#define CMD_TOUCHES_ARBITRARY_KEYS (1ULL<<28)
#define CMD_INTERNAL (1ULL<<29) /* Internal command. */

/* Command flags that describe ACLs categories. */
#define ACL_CATEGORY_KEYSPACE (1ULL<<0)
@@ -396,6 +397,7 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT];
#define CLIENT_MODULE_PREVENT_REPL_PROP (1ULL<<49) /* Module client do not want to propagate to replica */
#define CLIENT_REPROCESSING_COMMAND (1ULL<<50) /* The client is re-processing the command. */
#define CLIENT_REPL_RDB_CHANNEL (1ULL<<51) /* Client which is used for rdb delivery as part of rdb channel replication */
#define CLIENT_INTERNAL (1ULL<<52) /* Internal client connection */

/* Any flag that does not let optimize FLUSH SYNC to run it in bg as blocking client ASYNC */
#define CLIENT_AVOID_BLOCKING_ASYNC_FLUSH (CLIENT_DENY_BLOCKING|CLIENT_MULTI|CLIENT_LUA_DEBUG|CLIENT_LUA_DEBUG_SYNC|CLIENT_MODULE)
@@ -864,6 +866,13 @@ typedef struct moduleValue {
    void *value;
} moduleValue;

/* Describes the state of the module during loading, indicating which configs were loaded / applied already. */
typedef enum {
    MODULE_CONFIGS_DEFAULTS = 0x1,   /* The registered defaults were applied. */
    MODULE_CONFIGS_USER_VALS = 0x2,  /* The user provided values were applied. */
    MODULE_CONFIGS_ALL_APPLIED = 0x3 /* Both of the above applied. */
} ModuleConfigsApplied;
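Since these are bit flags, states compose with bitwise OR, matching the checks in module.c above; a compressed illustration:

    ModuleConfigsApplied state = 0;
    state |= MODULE_CONFIGS_DEFAULTS;   /* after defaults are applied */
    state |= MODULE_CONFIGS_USER_VALS;  /* after user values are applied */
    /* state == MODULE_CONFIGS_ALL_APPLIED, since 0x1 | 0x2 == 0x3 */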
/* This structure represents a module inside the system. */
struct RedisModule {
    void *handle; /* Module dlopen() handle. */
@@ -875,13 +884,14 @@ struct RedisModule {
    list *using;   /* List of modules we use some APIs of. */
    list *filters; /* List of filters the module has registered. */
    list *module_configs; /* List of configurations the module has registered */
    int configs_initialized; /* Have the module configurations been initialized? */
    ModuleConfigsApplied configs_initialized; /* Have the module configurations been initialized? */
    int in_call;   /* RM_Call() nesting level */
    int in_hook;   /* Hooks callback nesting level for this module (0 or 1). */
    int options;   /* Module options and capabilities. */
    int blocked_clients; /* Count of RedisModuleBlockedClient in this module. */
    RedisModuleInfoFunc info_cb;     /* Callback for module to add INFO fields. */
    RedisModuleDefragFunc defrag_cb; /* Callback for global data defrag. */
    RedisModuleDefragFunc2 defrag_cb_2; /* Version 2 callback for global data defrag. */
    RedisModuleDefragFunc defrag_start_cb; /* Callback indicating defrag started. */
    RedisModuleDefragFunc defrag_end_cb;   /* Callback indicating defrag ended. */
    struct moduleLoadQueueEntry *loadmod; /* Module load arguments for config rewrite. */
@@ -891,6 +901,16 @@ struct RedisModule {
};
typedef struct RedisModule RedisModule;

/* The defrag context, used to manage state during calls to the data type
 * defrag callback.
 */
struct RedisModuleDefragCtx {
    monotime endtime;
    unsigned long *cursor;
    struct redisObject *key; /* Optional name of key processed, NULL when unknown. */
    int dbid;                /* The dbid of the key being processed, -1 when unknown. */
};

/* This is a wrapper for the 'rio' streams used inside rdb.c in Redis, so that
 * the user does not have to take the total count of the written bytes nor
 * to care about error conditions. */
@@ -1042,7 +1062,6 @@ typedef struct redisDb {
    int id;                       /* Database ID */
    long long avg_ttl;            /* Average TTL, just for stats */
    unsigned long expires_cursor; /* Cursor of the active expire cycle. */
    list *defrag_later;           /* List of key names to attempt to defrag one by one, gradually. */
} redisDb;

/* forward declaration for functions ctx */
@@ -1200,6 +1219,7 @@ typedef struct replDataBufBlock {
 * rdb channel replication on replica side. */
typedef struct replDataBuf {
    list *blocks;    /* List of replDataBufBlock */
    size_t mem_used; /* Total allocated memory */
    size_t size;     /* Total number of bytes available in all blocks. */
    size_t used;     /* Total number of bytes actually used in all blocks. */
    size_t peak;     /* Peak number of bytes stored in all blocks. */
@@ -1424,7 +1444,7 @@ struct sharedObjectsStruct {
    *rpop, *lpop, *lpush, *rpoplpush, *lmove, *blmove, *zpopmin, *zpopmax,
    *emptyscan, *multi, *exec, *left, *right, *hset, *srem, *xgroup, *xclaim,
    *script, *replconf, *eval, *persist, *set, *pexpireat, *pexpire,
    *hdel, *hpexpireat,
    *hdel, *hpexpireat, *hpersist,
    *time, *pxat, *absttl, *retrycount, *force, *justid, *entriesread,
    *lastid, *ping, *setid, *keepttl, *load, *createconsumer,
    *getack, *special_asterick, *special_equals, *default_username, *redacted,
@@ -1498,6 +1518,7 @@ struct redisMemOverhead {
    size_t total_allocated;
    size_t startup_allocated;
    size_t repl_backlog;
    size_t replica_fullsync_buffer;
    size_t clients_slaves;
    size_t clients_normal;
    size_t cluster_links;
@@ -1609,6 +1630,8 @@ typedef struct {
    sds file_name;            /* file name */
    long long file_seq;       /* file sequence */
    aof_file_type file_type;  /* file type */
    long long start_offset;   /* the start replication offset of the file */
    long long end_offset;     /* the end replication offset of the file */
} aofInfo;

typedef struct {
@ -2001,6 +2024,7 @@ struct redisServer {
|
|||
size_t repl_buffer_mem; /* The memory of replication buffer. */
|
||||
list *repl_buffer_blocks; /* Replication buffers blocks list
|
||||
* (serving replica clients and repl backlog) */
|
||||
time_t repl_stream_lastio; /* Unix time of the latest sending replication stream. */
|
||||
/* Replication (slave) */
|
||||
char *masteruser; /* AUTH with this user and masterauth with master */
|
||||
sds masterauth; /* AUTH with this password with master */
|
||||
|
@ -2427,6 +2451,9 @@ typedef int redisGetKeysProc(struct redisCommand *cmd, robj **argv, int argc, ge
|
|||
* CMD_TOUCHES_ARBITRARY_KEYS: The command may touch (and cause lazy-expire)
|
||||
* arbitrary key (i.e not provided in argv)
|
||||
*
|
||||
* CMD_INTERNAL: The command may perform operations without performing
|
||||
* validations such as ACL.
|
||||
*
|
||||
* The following additional flags are only used in order to put commands
|
||||
* in a specific ACL category. Commands can have multiple ACL categories.
|
||||
* See redis.conf for the exact meaning of each.
|
||||
|
@ -2658,8 +2685,7 @@ size_t moduleGetFreeEffort(robj *key, robj *val, int dbid);
|
|||
size_t moduleGetMemUsage(robj *key, robj *val, size_t sample_size, int dbid);
|
||||
robj *moduleTypeDupOrReply(client *c, robj *fromkey, robj *tokey, int todb, robj *value);
|
||||
int moduleDefragValue(robj *key, robj *obj, int dbid);
|
||||
int moduleLateDefrag(robj *key, robj *value, unsigned long *cursor, long long endtime, int dbid);
|
||||
void moduleDefragGlobals(void);
|
||||
int moduleLateDefrag(robj *key, robj *value, unsigned long *cursor, monotime endtime, int dbid);
|
||||
void moduleDefragStart(void);
|
||||
void moduleDefragEnd(void);
|
||||
void *moduleGetHandleByName(char *modulename);
|
||||
|
@ -3044,6 +3070,8 @@ void aofOpenIfNeededOnServerStart(void);
|
|||
void aofManifestFree(aofManifest *am);
|
||||
int aofDelHistoryFiles(void);
|
||||
int aofRewriteLimited(void);
|
||||
void updateCurIncrAofEndOffset(void);
|
||||
void updateReplOffsetAndResetEndOffset(void);
|
||||
|
||||
/* Child info */
|
||||
void openChildInfoPipe(void);
|
||||
|
@ -3250,6 +3278,7 @@ void enterExecutionUnit(int update_cached_time, long long us);
|
|||
void exitExecutionUnit(void);
|
||||
void resetServerStats(void);
|
||||
void activeDefragCycle(void);
|
||||
void defragWhileBlocked(void);
|
||||
unsigned int getLRUClock(void);
|
||||
unsigned int LRU_CLOCK(void);
|
||||
const char *evictPolicyToString(void);
|
||||
|
@ -3342,7 +3371,9 @@ typedef struct dictExpireMetadata {
|
|||
#define HFE_LAZY_AVOID_HASH_DEL (1<<1) /* Avoid deleting hash if the field is the last one */
|
||||
#define HFE_LAZY_NO_NOTIFICATION (1<<2) /* Do not send notification, used when multiple fields
|
||||
* may expire and only one notification is desired. */
|
||||
#define HFE_LAZY_ACCESS_EXPIRED (1<<3) /* Avoid lazy expire and allow access to expired fields */
|
||||
#define HFE_LAZY_NO_SIGNAL (1<<3) /* Do not send signal, used when multiple fields
|
||||
* may expire and only one signal is desired. */
|
||||
#define HFE_LAZY_ACCESS_EXPIRED (1<<4) /* Avoid lazy expire and allow access to expired fields */
|
||||
|
||||
void hashTypeConvert(robj *o, int enc, ebuckets *hexpires);
|
||||
void hashTypeTryConversion(redisDb *db, robj *subject, robj **argv, int start, int end);
|
||||
|
@ -3862,6 +3893,7 @@ void strlenCommand(client *c);
|
|||
void zrankCommand(client *c);
|
||||
void zrevrankCommand(client *c);
|
||||
void hsetCommand(client *c);
|
||||
void hsetexCommand(client *c);
|
||||
void hpexpireCommand(client *c);
|
||||
void hexpireCommand(client *c);
|
||||
void hpexpireatCommand(client *c);
|
||||
|
@ -3874,6 +3906,8 @@ void hpersistCommand(client *c);
|
|||
void hsetnxCommand(client *c);
|
||||
void hgetCommand(client *c);
|
||||
void hmgetCommand(client *c);
|
||||
void hgetexCommand(client *c);
|
||||
void hgetdelCommand(client *c);
|
||||
void hdelCommand(client *c);
|
||||
void hlenCommand(client *c);
|
||||
void hstrlenCommand(client *c);
|
||||
|
|
770
src/t_hash.c
|
@ -48,7 +48,7 @@ typedef listpackEntry CommonEntry; /* extend usage beyond lp */
|
|||
static ExpireAction onFieldExpire(eItem item, void *ctx);
|
||||
static ExpireMeta* hfieldGetExpireMeta(const eItem field);
|
||||
static ExpireMeta *hashGetExpireMeta(const eItem hash);
|
||||
static void hexpireGenericCommand(client *c, const char *cmd, long long basetime, int unit);
|
||||
static void hexpireGenericCommand(client *c, long long basetime, int unit);
|
||||
static ExpireAction hashTypeActiveExpire(eItem hashObj, void *ctx);
|
||||
static uint64_t hashTypeExpire(robj *o, ExpireCtx *expireCtx, int updateGlobalHFE);
|
||||
static void hfieldPersist(robj *hashObj, hfield field);
|
||||
|
@ -214,15 +214,13 @@ typedef struct HashTypeSetEx {
|
|||
* minimum expiration time. If minimum recorded
|
||||
* is above minExpire of the hash, then we don't
|
||||
* have to update global HFE DS */
|
||||
int fieldDeleted; /* Number of fields deleted */
|
||||
int fieldUpdated; /* Number of fields updated */
|
||||
|
||||
/* Optionally provide client for notification */
|
||||
client *c;
|
||||
const char *cmd;
|
||||
} HashTypeSetEx;
|
||||
|
||||
int hashTypeSetExInit(robj *key, robj *o, client *c, redisDb *db, const char *cmd,
|
||||
int hashTypeSetExInit(robj *key, robj *o, client *c, redisDb *db,
|
||||
ExpireSetCond expireSetCond, HashTypeSetEx *ex);
|
||||
|
||||
SetExRes hashTypeSetEx(robj *o, sds field, uint64_t expireAt, HashTypeSetEx *exInfo);
|
||||
|
@ -531,6 +529,15 @@ SetExRes hashTypeSetExpiryListpack(HashTypeSetEx *ex, sds field,
|
|||
prevExpire = (uint64_t) expireTime;
|
||||
}
|
||||
|
||||
/* Special value of EXPIRE_TIME_INVALID indicates field should be persisted.*/
|
||||
if (expireAt == EB_EXPIRE_TIME_INVALID) {
|
||||
/* Return error if already there is no ttl. */
|
||||
if (prevExpire == EB_EXPIRE_TIME_INVALID)
|
||||
return HSETEX_NO_CONDITION_MET;
|
||||
listpackExUpdateExpiry(ex->hashObj, field, fptr, vptr, HASH_LP_NO_TTL);
|
||||
return HSETEX_OK;
|
||||
}
|
||||
|
||||
if (prevExpire == EB_EXPIRE_TIME_INVALID) {
|
||||
/* For fields without expiry, LT condition is considered valid */
|
||||
if (ex->expireSetCond & (HFE_XX | HFE_GT))
|
||||
|
@ -551,13 +558,7 @@ SetExRes hashTypeSetExpiryListpack(HashTypeSetEx *ex, sds field,
|
|||
if (unlikely(checkAlreadyExpired(expireAt))) {
|
||||
propagateHashFieldDeletion(ex->db, ex->key->ptr, field, sdslen(field));
|
||||
hashTypeDelete(ex->hashObj, field, 1);
|
||||
|
||||
/* get listpack length */
|
||||
listpackEx *lpt = ((listpackEx *) ex->hashObj->ptr);
|
||||
unsigned long length = lpLength(lpt->lp) / 3;
|
||||
updateKeysizesHist(ex->db, getKeySlot(ex->key->ptr), OBJ_HASH, length+1, length);
|
||||
server.stat_expired_subkeys++;
|
||||
ex->fieldDeleted++;
|
||||
return HSETEX_DELETED;
|
||||
}
|
||||
|
||||
|
@ -565,7 +566,6 @@ SetExRes hashTypeSetExpiryListpack(HashTypeSetEx *ex, sds field,
|
|||
ex->minExpireFields = expireAt;
|
||||
|
||||
listpackExUpdateExpiry(ex->hashObj, field, fptr, vptr, expireAt);
|
||||
ex->fieldUpdated++;
|
||||
return HSETEX_OK;
|
||||
}
|
||||
|
||||
|
@ -788,7 +788,8 @@ GetFieldRes hashTypeGetValue(redisDb *db, robj *o, sds field, unsigned char **vs
|
|||
dbDelete(db,keyObj);
|
||||
res = GETF_EXPIRED_HASH;
|
||||
}
|
||||
signalModifiedKey(NULL, db, keyObj);
|
||||
if (!(hfeFlags & HFE_LAZY_NO_SIGNAL))
|
||||
signalModifiedKey(NULL, db, keyObj);
|
||||
decrRefCount(keyObj);
|
||||
return res;
|
||||
}
|
||||
|
@ -1010,34 +1011,33 @@ int hashTypeSet(redisDb *db, robj *o, sds field, sds value, int flags) {
|
|||
SetExRes hashTypeSetExpiryHT(HashTypeSetEx *exInfo, sds field, uint64_t expireAt) {
|
||||
dict *ht = exInfo->hashObj->ptr;
|
||||
dictEntry *existingEntry = NULL;
|
||||
hfield hfNew = NULL;
|
||||
|
||||
/* New field with expiration metadata */
|
||||
hfield hfNew = hfieldNew(field, sdslen(field), 1 /*withExpireMeta*/);
|
||||
|
||||
if ((existingEntry = dictFind(ht, field)) == NULL) {
|
||||
hfieldFree(hfNew);
|
||||
if ((existingEntry = dictFind(ht, field)) == NULL)
|
||||
return HSETEX_NO_FIELD;
|
||||
}
|
||||
|
||||
hfield hfOld = dictGetKey(existingEntry);
|
||||
/* Special value of EXPIRE_TIME_INVALID indicates field should be persisted.*/
|
||||
if (expireAt == EB_EXPIRE_TIME_INVALID) {
|
||||
/* Return error if already there is no ttl. */
|
||||
if (hfieldGetExpireTime(hfOld) == EB_EXPIRE_TIME_INVALID)
|
||||
return HSETEX_NO_CONDITION_MET;
|
||||
|
||||
hfieldPersist(exInfo->hashObj, hfOld);
|
||||
return HSETEX_OK;
|
||||
}
|
||||
|
||||
/* If field doesn't have expiry metadata attached */
|
||||
if (!hfieldIsExpireAttached(hfOld)) {
|
||||
|
||||
/* For fields without expiry, LT condition is considered valid */
|
||||
if (exInfo->expireSetCond & (HFE_XX | HFE_GT)) {
|
||||
hfieldFree(hfNew);
|
||||
if (exInfo->expireSetCond & (HFE_XX | HFE_GT))
|
||||
return HSETEX_NO_CONDITION_MET;
|
||||
}
|
||||
|
||||
/* Delete old field. Below we are going to dictSetKey(..,hfNew) */
|
||||
hfieldFree(hfOld);
|
||||
|
||||
/* New field with expiration metadata */
|
||||
hfNew = hfieldNew(field, sdslen(field), 1);
|
||||
} else { /* field has ExpireMeta struct attached */
|
||||
|
||||
/* No need for hfNew (Just modify expire-time of existing field) */
|
||||
hfieldFree(hfNew);
|
||||
|
||||
uint64_t prevExpire = hfieldGetExpireTime(hfOld);
|
||||
|
||||
/* If field has valid expiration time, then check GT|LT|NX */
|
||||
|
@ -1073,13 +1073,10 @@ SetExRes hashTypeSetExpiryHT(HashTypeSetEx *exInfo, sds field, uint64_t expireAt
|
|||
/* If expired, then delete the field and propagate the deletion.
|
||||
* If replica, continue like the field is valid */
|
||||
if (unlikely(checkAlreadyExpired(expireAt))) {
|
||||
unsigned long length = dictSize(ht);
|
||||
updateKeysizesHist(exInfo->db, getKeySlot(exInfo->key->ptr), OBJ_HASH, length, length-1);
|
||||
/* replicas should not initiate deletion of fields */
|
||||
propagateHashFieldDeletion(exInfo->db, exInfo->key->ptr, field, sdslen(field));
|
||||
hashTypeDelete(exInfo->hashObj, field, 1);
|
||||
server.stat_expired_subkeys++;
|
||||
exInfo->fieldDeleted++;
|
||||
return HSETEX_DELETED;
|
||||
}
|
||||
|
||||
|
@ -1088,7 +1085,6 @@ SetExRes hashTypeSetExpiryHT(HashTypeSetEx *exInfo, sds field, uint64_t expireAt
|
|||
|
||||
dictExpireMetadata *dm = (dictExpireMetadata *) dictMetadata(ht);
|
||||
ebAdd(&dm->hfe, &hashFieldExpireBucketsType, hfNew, expireAt);
|
||||
exInfo->fieldUpdated++;
|
||||
return HSETEX_OK;
|
||||
}
|
||||
|
||||
|
@ -1097,20 +1093,18 @@ SetExRes hashTypeSetExpiryHT(HashTypeSetEx *exInfo, sds field, uint64_t expireAt
|
|||
*
|
||||
* Take care to call first hashTypeSetExInit() and then call this function.
|
||||
* Finally, call hashTypeSetExDone() to notify and update global HFE DS.
|
||||
*
|
||||
* Special value of EB_EXPIRE_TIME_INVALID for 'expireAt' argument will persist
|
||||
* the field.
|
||||
*/
|
||||
SetExRes hashTypeSetEx(robj *o, sds field, uint64_t expireAt, HashTypeSetEx *exInfo)
|
||||
{
|
||||
if (o->encoding == OBJ_ENCODING_LISTPACK_EX)
|
||||
{
|
||||
SetExRes hashTypeSetEx(robj *o, sds field, uint64_t expireAt, HashTypeSetEx *exInfo) {
|
||||
if (o->encoding == OBJ_ENCODING_LISTPACK_EX) {
|
||||
unsigned char *fptr = NULL, *vptr = NULL, *tptr = NULL;
|
||||
|
||||
listpackEx *lpt = o->ptr;
|
||||
long long expireTime = HASH_LP_NO_TTL;
|
||||
|
||||
if ((fptr = lpFirst(lpt->lp)) == NULL)
|
||||
return HSETEX_NO_FIELD;
|
||||
|
||||
fptr = lpFind(lpt->lp, fptr, (unsigned char*)field, sdslen(field), 2);
|
||||
fptr = lpFirst(lpt->lp);
|
||||
if (fptr)
|
||||
fptr = lpFind(lpt->lp, fptr, (unsigned char*)field, sdslen(field), 2);
|
||||
|
||||
if (!fptr)
|
||||
return HSETEX_NO_FIELD;
|
||||
|
@ -1120,7 +1114,7 @@ SetExRes hashTypeSetEx(robj *o, sds field, uint64_t expireAt, HashTypeSetEx *exI
|
|||
serverAssert(vptr != NULL);
|
||||
|
||||
tptr = lpNext(lpt->lp, vptr);
|
||||
serverAssert(tptr && lpGetIntegerValue(tptr, &expireTime));
|
||||
serverAssert(tptr);
|
||||
|
||||
/* update TTL */
|
||||
return hashTypeSetExpiryListpack(exInfo, field, fptr, vptr, tptr, expireAt);
|
||||
|
@ -1144,19 +1138,16 @@ void initDictExpireMetadata(sds key, robj *o) {
|
|||
}
|
||||
|
||||
/* Init HashTypeSetEx struct before calling hashTypeSetEx() */
|
||||
int hashTypeSetExInit(robj *key, robj *o, client *c, redisDb *db, const char *cmd,
|
||||
int hashTypeSetExInit(robj *key, robj *o, client *c, redisDb *db,
|
||||
ExpireSetCond expireSetCond, HashTypeSetEx *ex)
|
||||
{
|
||||
dict *ht = o->ptr;
|
||||
ex->expireSetCond = expireSetCond;
|
||||
ex->minExpire = EB_EXPIRE_TIME_INVALID;
|
||||
ex->c = c;
|
||||
ex->cmd = cmd;
|
||||
ex->db = db;
|
||||
ex->key = key;
|
||||
ex->hashObj = o;
|
||||
ex->fieldDeleted = 0;
|
||||
ex->fieldUpdated = 0;
|
||||
ex->minExpireFields = EB_EXPIRE_TIME_INVALID;
|
||||
|
||||
/* Take care that the HASH supports expiration */
|
||||
|
@ -1220,50 +1211,38 @@ int hashTypeSetExInit(robj *key, robj *o, client *c, redisDb *db, const char *cm
|
|||
|
||||
/*
|
||||
* After calling hashTypeSetEx() for setting fields or their expiry, call this
|
||||
* function to notify and update global HFE DS.
|
||||
* function to update global HFE DS.
|
||||
*/
|
||||
void hashTypeSetExDone(HashTypeSetEx *ex) {
|
||||
/* Notify keyspace event, update dirty count and update global HFE DS */
|
||||
if (ex->fieldDeleted + ex->fieldUpdated > 0) {
|
||||
|
||||
server.dirty += ex->fieldDeleted + ex->fieldUpdated;
|
||||
if (ex->fieldDeleted && hashTypeLength(ex->hashObj, 0) == 0) {
|
||||
dbDelete(ex->db,ex->key);
|
||||
signalModifiedKey(ex->c, ex->db, ex->key);
|
||||
notifyKeyspaceEvent(NOTIFY_HASH, "hdel", ex->key, ex->db->id);
|
||||
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",ex->key, ex->db->id);
|
||||
} else {
|
||||
signalModifiedKey(ex->c, ex->db, ex->key);
|
||||
notifyKeyspaceEvent(NOTIFY_HASH, ex->fieldDeleted ? "hdel" : "hexpire",
|
||||
ex->key, ex->db->id);
|
||||
if (hashTypeLength(ex->hashObj, 0) == 0)
|
||||
return;
|
||||
|
||||
/* If minimum HFE of the hash is smaller than expiration time of the
|
||||
* specified fields in the command, and is smaller than or equal
|
||||
* to the expiration time provided in the command, then the minimum
|
||||
* HFE of the hash won't change following this command. */
|
||||
if ((ex->minExpire < ex->minExpireFields))
|
||||
return;
|
||||
/* If minimum HFE of the hash is smaller than expiration time of the
|
||||
* specified fields in the command, and is smaller than or equal
|
||||
* to the expiration time provided in the command, then the minimum
|
||||
* HFE of the hash won't change following this command. */
|
||||
if ((ex->minExpire < ex->minExpireFields))
|
||||
return;
|
||||
|
||||
/* Retrieve the new expire time. It might have changed. */
|
||||
uint64_t newMinExpire = hashTypeGetMinExpire(ex->hashObj, 1 /*accurate*/);
|
||||
/* Retrieve the new expire time. It might have changed. */
|
||||
uint64_t newMinExpire = hashTypeGetMinExpire(ex->hashObj, 1 /*accurate*/);
|
||||
|
||||
/* Calculate the diff between old minExpire and newMinExpire. If it is
|
||||
* only a few seconds, we don't have to update the global HFE DS. In the worst
|
||||
* case, fields of the hash will be active-expired up to a few seconds later.
|
||||
*
|
||||
* In any case, active-expire operation will know to update global
|
||||
* HFE DS more efficiently than here for a single item.
|
||||
*/
|
||||
uint64_t diff = (ex->minExpire > newMinExpire) ?
|
||||
(ex->minExpire - newMinExpire) : (newMinExpire - ex->minExpire);
|
||||
if (diff < HASH_NEW_EXPIRE_DIFF_THRESHOLD) return;
|
||||
/* Calculate the diff between old minExpire and newMinExpire. If it is
|
||||
* only a few seconds, we don't have to update the global HFE DS. In the worst
|
||||
* case, fields of the hash will be active-expired up to a few seconds later.
|
||||
*
|
||||
* In any case, active-expire operation will know to update global
|
||||
* HFE DS more efficiently than here for a single item.
|
||||
*/
|
||||
uint64_t diff = (ex->minExpire > newMinExpire) ?
|
||||
(ex->minExpire - newMinExpire) : (newMinExpire - ex->minExpire);
|
||||
if (diff < HASH_NEW_EXPIRE_DIFF_THRESHOLD) return;
|
||||
|
||||
if (ex->minExpire != EB_EXPIRE_TIME_INVALID)
|
||||
ebRemove(&ex->db->hexpires, &hashExpireBucketsType, ex->hashObj);
|
||||
if (newMinExpire != EB_EXPIRE_TIME_INVALID)
|
||||
ebAdd(&ex->db->hexpires, &hashExpireBucketsType, ex->hashObj, newMinExpire);
|
||||
}
|
||||
}
|
||||
if (ex->minExpire != EB_EXPIRE_TIME_INVALID)
|
||||
ebRemove(&ex->db->hexpires, &hashExpireBucketsType, ex->hashObj);
|
||||
if (newMinExpire != EB_EXPIRE_TIME_INVALID)
|
||||
ebAdd(&ex->db->hexpires, &hashExpireBucketsType, ex->hashObj, newMinExpire);
|
||||
}
|
||||
|
||||
/* Delete an element from a hash.
|
||||
|
@ -2222,6 +2201,303 @@ void hsetCommand(client *c) {
|
|||
server.dirty += (c->argc - 2)/2;
|
||||
}
|
||||
|
||||
/* Parse expire time from argument and do boundary checks. */
|
||||
static int parseExpireTime(client *c, robj *o, int unit, long long basetime,
|
||||
long long *expire)
|
||||
{
|
||||
long long val;
|
||||
|
||||
/* Read the expiry time from command */
|
||||
if (getLongLongFromObjectOrReply(c, o, &val, NULL) != C_OK)
|
||||
return C_ERR;
|
||||
|
||||
if (val < 0) {
|
||||
addReplyError(c,"invalid expire time, must be >= 0");
|
||||
return C_ERR;
|
||||
}
|
||||
|
||||
if (unit == UNIT_SECONDS) {
|
||||
if (val > (long long) HFE_MAX_ABS_TIME_MSEC / 1000) {
|
||||
addReplyErrorExpireTime(c);
|
||||
return C_ERR;
|
||||
}
|
||||
val *= 1000;
|
||||
}
|
||||
|
||||
if (val > (long long) HFE_MAX_ABS_TIME_MSEC - basetime) {
|
||||
addReplyErrorExpireTime(c);
|
||||
return C_ERR;
|
||||
}
|
||||
val += basetime;
|
||||
*expire = val;
|
||||
return C_OK;
|
||||
}
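/* Editor's note: the ordering of the guards above matters; the seconds value
 * is range-checked before the *1000 conversion, and the sum is checked before
 * adding basetime, so neither step can overflow. A standalone sketch of the
 * same pattern (the cap below is illustrative, not the real
 * HFE_MAX_ABS_TIME_MSEC): */
#include <stdio.h>

#define MAX_ABS_TIME_MSEC ((long long)1 << 48) /* hypothetical cap */

/* Returns 0 on success, -1 if the expire time would overflow the cap. */
static int to_abs_msec(long long val, int is_seconds, long long basetime,
                       long long *out) {
    if (val < 0) return -1;
    if (is_seconds) {
        if (val > MAX_ABS_TIME_MSEC / 1000) return -1; /* guard before *1000 */
        val *= 1000;
    }
    if (val > MAX_ABS_TIME_MSEC - basetime) return -1; /* guard before adding */
    *out = val + basetime;
    return 0;
}

int main(void) {
    long long out;
    printf("%d\n", to_abs_msec(10, 1, 1700000000000LL, &out));  /* 0: ok */
    printf("%d\n", to_abs_msec(MAX_ABS_TIME_MSEC, 1, 0, &out)); /* -1: overflow */
    return 0;
}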
|
||||
|
||||
/* Flags that are used as part of HGETEX and HSETEX commands. */
|
||||
#define HFE_EX (1<<0) /* Expiration time in seconds */
|
||||
#define HFE_PX (1<<1) /* Expiration time in milliseconds */
|
||||
#define HFE_EXAT (1<<2) /* Expiration time in unix seconds */
|
||||
#define HFE_PXAT (1<<3) /* Expiration time in unix milliseconds */
|
||||
#define HFE_PERSIST (1<<4) /* Persist fields */
|
||||
#define HFE_KEEPTTL (1<<5) /* Do not discard field ttl on set op */
|
||||
#define HFE_FXX (1<<6) /* Set fields if all the fields already exist */
|
||||
#define HFE_FNX (1<<7) /* Set fields if none of the fields exist */
|
||||
|
||||
/* Parse hsetex command arguments.
|
||||
* HSETEX <key>
|
||||
* [FNX|FXX]
|
||||
* [EX seconds|PX milliseconds|EXAT unix-time-seconds|PXAT unix-time-milliseconds|KEEPTTL]
|
||||
* FIELDS <numfields> field value [field value ...]
|
||||
*/
|
||||
static int hsetexParseArgs(client *c, int *flags,
|
||||
long long *expire_time, int *expire_time_pos,
|
||||
int *first_field_pos, int *field_count) {
|
||||
*flags = 0;
|
||||
*first_field_pos = -1;
|
||||
*field_count = -1;
|
||||
*expire_time_pos = -1;
|
||||
|
||||
for (int i = 2; i < c->argc; i++) {
|
||||
if (!strcasecmp(c->argv[i]->ptr, "fields")) {
|
||||
long val;
|
||||
|
||||
if (i >= c->argc - 3) {
|
||||
addReplyErrorArity(c);
|
||||
return C_ERR;
|
||||
}
|
||||
|
||||
if (getRangeLongFromObjectOrReply(c, c->argv[i + 1], 1, INT_MAX, &val,
|
||||
"invalid number of fields") != C_OK)
|
||||
return C_ERR;
|
||||
|
||||
int remaining = (c->argc - i - 2);
|
||||
if (remaining % 2 != 0 || val != remaining / 2) {
|
||||
addReplyErrorArity(c);
|
||||
return C_ERR;
|
||||
}
|
||||
|
||||
*first_field_pos = i + 2;
|
||||
*field_count = (int) val;
|
||||
return C_OK;
|
||||
} else if (!strcasecmp(c->argv[i]->ptr, "EX")) {
|
||||
if (*flags & (HFE_EX | HFE_EXAT | HFE_PX | HFE_PXAT | HFE_KEEPTTL))
|
||||
goto err_expiration;
|
||||
|
||||
if (i >= c->argc - 1)
|
||||
goto err_missing_expire;
|
||||
|
||||
*flags |= HFE_EX;
|
||||
i++;
|
||||
|
||||
if (parseExpireTime(c, c->argv[i], UNIT_SECONDS,
|
||||
commandTimeSnapshot(), expire_time) != C_OK)
|
||||
return C_ERR;
|
||||
|
||||
*expire_time_pos = i;
|
||||
} else if (!strcasecmp(c->argv[i]->ptr, "PX")) {
|
||||
if (*flags & (HFE_EX | HFE_EXAT | HFE_PX | HFE_PXAT | HFE_KEEPTTL))
|
||||
goto err_expiration;
|
||||
|
||||
if (i >= c->argc - 1)
|
||||
goto err_missing_expire;
|
||||
|
||||
*flags |= HFE_PX;
|
||||
i++;
|
||||
if (parseExpireTime(c, c->argv[i], UNIT_MILLISECONDS,
|
||||
commandTimeSnapshot(), expire_time) != C_OK)
|
||||
return C_ERR;
|
||||
|
||||
*expire_time_pos = i;
|
||||
} else if (!strcasecmp(c->argv[i]->ptr, "EXAT")) {
|
||||
if (*flags & (HFE_EX | HFE_EXAT | HFE_PX | HFE_PXAT | HFE_KEEPTTL))
|
||||
goto err_expiration;
|
||||
|
||||
if (i >= c->argc - 1)
|
||||
goto err_missing_expire;
|
||||
|
||||
*flags |= HFE_EXAT;
|
||||
i++;
|
||||
if (parseExpireTime(c, c->argv[i], UNIT_SECONDS, 0, expire_time) != C_OK)
|
||||
return C_ERR;
|
||||
|
||||
*expire_time_pos = i;
|
||||
} else if (!strcasecmp(c->argv[i]->ptr, "PXAT")) {
|
||||
if (*flags & (HFE_EX | HFE_EXAT | HFE_PX | HFE_PXAT | HFE_KEEPTTL))
|
||||
goto err_expiration;
|
||||
|
||||
if (i >= c->argc - 1)
|
||||
goto err_missing_expire;
|
||||
|
||||
*flags |= HFE_PXAT;
|
||||
i++;
|
||||
if (parseExpireTime(c, c->argv[i], UNIT_MILLISECONDS, 0,
|
||||
expire_time) != C_OK)
|
||||
return C_ERR;
|
||||
|
||||
*expire_time_pos = i;
|
||||
} else if (!strcasecmp(c->argv[i]->ptr, "KEEPTTL")) {
|
||||
if (*flags & (HFE_EX | HFE_EXAT | HFE_PX | HFE_PXAT | HFE_KEEPTTL))
|
||||
goto err_expiration;
|
||||
*flags |= HFE_KEEPTTL;
|
||||
} else if (!strcasecmp(c->argv[i]->ptr, "FXX")) {
|
||||
if (*flags & (HFE_FXX | HFE_FNX))
|
||||
goto err_condition;
|
||||
*flags |= HFE_FXX;
|
||||
} else if (!strcasecmp(c->argv[i]->ptr, "FNX")) {
|
||||
if (*flags & (HFE_FXX | HFE_FNX))
|
||||
goto err_condition;
|
||||
*flags |= HFE_FNX;
|
||||
} else {
|
||||
addReplyErrorFormat(c, "unknown argument: %s", (char*) c->argv[i]->ptr);
|
||||
return C_ERR;
|
||||
}
|
||||
}
|
||||
|
||||
serverAssert(0);
|
||||
|
||||
err_missing_expire:
|
||||
addReplyError(c, "missing expire time");
|
||||
return C_ERR;
|
||||
err_condition:
|
||||
addReplyError(c, "Only one of FXX or FNX arguments can be specified");
|
||||
return C_ERR;
|
||||
err_expiration:
|
||||
addReplyError(c, "Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments can be specified");
|
||||
return C_ERR;
|
||||
}
|
||||
|
||||
/* Set the value of one or more fields of a given hash key, and optionally set
|
||||
* their expiration.
|
||||
*
|
||||
* HSETEX key
|
||||
* [FNX | FXX]
|
||||
* [EX seconds | PX milliseconds | EXAT unix-time-seconds | PXAT unix-time-milliseconds | KEEPTTL]
|
||||
* FIELDS <numfields> field value [field value...]
|
||||
*
|
||||
* Reply:
|
||||
* Integer reply: 0 if no fields were set (due to FXX/FNX args)
|
||||
* Integer reply: 1 if all the fields were set
|
||||
*/
|
||||
void hsetexCommand(client *c) {
|
||||
int flags = 0, first_field_pos = 0, field_count = 0, expire_time_pos = -1;
|
||||
int updated = 0, deleted = 0, set_expiry;
|
||||
long long expire_time = EB_EXPIRE_TIME_INVALID;
|
||||
unsigned long oldlen, newlen;
|
||||
robj *o;
|
||||
HashTypeSetEx setex;
|
||||
|
||||
if (hsetexParseArgs(c, &flags, &expire_time, &expire_time_pos,
|
||||
&first_field_pos, &field_count) != C_OK)
|
||||
return;
|
||||
|
||||
o = lookupKeyWrite(c->db, c->argv[1]);
|
||||
if (checkType(c, o, OBJ_HASH))
|
||||
return;
|
||||
|
||||
if (!o) {
|
||||
if (flags & HFE_FXX) {
|
||||
addReplyLongLong(c, 0);
|
||||
return;
|
||||
}
|
||||
o = createHashObject();
|
||||
dbAdd(c->db, c->argv[1], o);
|
||||
}
|
||||
oldlen = hashTypeLength(o, 0);
|
||||
|
||||
if (flags & (HFE_FXX | HFE_FNX)) {
|
||||
int found = 0;
|
||||
for (int i = 0; i < field_count; i++) {
|
||||
sds field = c->argv[first_field_pos + (i * 2)]->ptr;
|
||||
const int opt = HFE_LAZY_NO_NOTIFICATION |
|
||||
HFE_LAZY_NO_SIGNAL |
|
||||
HFE_LAZY_AVOID_HASH_DEL;
|
||||
int exists = hashTypeExists(c->db, o, field, opt, NULL);
|
||||
found += (exists != 0);
|
||||
|
||||
/* Check for early exit if the condition is already invalid. */
|
||||
if (((flags & HFE_FXX) && !exists) ||
|
||||
((flags & HFE_FNX) && exists))
|
||||
break;
|
||||
}
|
||||
|
||||
int all_exists = (found == field_count);
|
||||
int non_exists = (found == 0);
|
||||
|
||||
if (((flags & HFE_FNX) && !non_exists) ||
|
||||
((flags & HFE_FXX) && !all_exists))
|
||||
{
|
||||
addReplyLongLong(c, 0);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
hashTypeTryConversion(c->db, o,c->argv, first_field_pos, c->argc - 1);
|
||||
|
||||
/* Check if we will set the expiration time. */
|
||||
set_expiry = flags & (HFE_EX | HFE_PX | HFE_EXAT | HFE_PXAT);
|
||||
if (set_expiry)
|
||||
hashTypeSetExInit(c->argv[1], o, c, c->db, 0, &setex);
|
||||
|
||||
|
||||
for (int i = 0; i < field_count; i++) {
|
||||
sds field = c->argv[first_field_pos + (i * 2)]->ptr;
|
||||
sds value = c->argv[first_field_pos + (i * 2) + 1]->ptr;
|
||||
|
||||
int opt = HASH_SET_COPY;
|
||||
/* If we are going to set the expiration time later, no need to discard
|
||||
* it as part of set operation now. */
|
||||
if (flags & (HFE_EX | HFE_PX | HFE_EXAT | HFE_PXAT | HFE_KEEPTTL))
|
||||
opt |= HASH_SET_KEEP_TTL;
|
||||
|
||||
hashTypeSet(c->db, o, field, value, opt);
|
||||
|
||||
/* Update the expiration time. */
|
||||
if (set_expiry) {
|
||||
int ret = hashTypeSetEx(o, field, expire_time, &setex);
|
||||
updated += (ret == HSETEX_OK);
|
||||
deleted += (ret == HSETEX_DELETED);
|
||||
}
|
||||
}
|
||||
|
||||
if (set_expiry)
|
||||
hashTypeSetExDone(&setex);
|
||||
|
||||
server.dirty += field_count;
|
||||
signalModifiedKey(c, c->db, c->argv[1]);
|
||||
notifyKeyspaceEvent(NOTIFY_HASH, "hset", c->argv[1], c->db->id);
|
||||
if (deleted || updated)
|
||||
notifyKeyspaceEvent(NOTIFY_HASH, deleted ? "hdel": "hexpire",
|
||||
c->argv[1], c->db->id);
|
||||
|
||||
if (deleted) {
|
||||
/* If fields are deleted because the timestamp is in the past, HDELs
|
||||
* are already propagated. No need to propagate the command itself. */
|
||||
preventCommandPropagation(c);
|
||||
} else if (set_expiry && !(flags & HFE_PXAT)) {
|
||||
/* Propagate as 'HSETEX <key> PXAT ..' if there is EX/EXAT/PX flag*/
|
||||
|
||||
/* Replace EX/EXAT/PX with PXAT */
|
||||
rewriteClientCommandArgument(c, expire_time_pos - 1, shared.pxat);
|
||||
/* Replace timestamp with unix timestamp milliseconds. */
|
||||
robj *expire = createStringObjectFromLongLong(expire_time);
|
||||
rewriteClientCommandArgument(c, expire_time_pos, expire);
|
||||
decrRefCount(expire);
|
||||
}
|
||||
|
||||
addReplyLongLong(c, 1);
|
||||
|
||||
out:
|
||||
/* Key may become empty due to lazy expiry in hashTypeExists()
|
||||
* or because the new expiration time is in the past. */
|
||||
newlen = hashTypeLength(o, 0);
|
||||
if (newlen == 0) {
|
||||
dbDelete(c->db, c->argv[1]);
|
||||
notifyKeyspaceEvent(NOTIFY_GENERIC, "del", c->argv[1], c->db->id);
|
||||
}
|
||||
if (oldlen != newlen)
|
||||
updateKeysizesHist(c->db, getKeySlot(c->argv[1]->ptr), OBJ_HASH,
|
||||
oldlen, newlen);
|
||||
}
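/* Editor's note: a minimal client-side sketch of the new HSETEX command,
 * assuming hiredis is available; the key and field names are illustrative: */
#include <stdio.h>
#include <hiredis/hiredis.h>

int main(void) {
    redisContext *ctx = redisConnect("127.0.0.1", 6379);
    if (ctx == NULL || ctx->err) return 1;

    /* Set field "f" only if it does not exist yet, with a 10 second TTL. */
    redisReply *r = redisCommand(ctx, "HSETEX myhash FNX EX 10 FIELDS 1 f v1");
    printf("HSETEX: %lld\n", r->integer); /* 1 = set, 0 = FNX/FXX failed */
    freeReplyObject(r);

    /* Overwrite the value while keeping the existing TTL. */
    r = redisCommand(ctx, "HSETEX myhash KEEPTTL FIELDS 1 f v2");
    freeReplyObject(r);
    redisFree(ctx);
    return 0;
}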
|
||||
|
||||
void hincrbyCommand(client *c) {
|
||||
long long value, incr, oldvalue;
|
||||
robj *o;
|
||||
|
@ -2393,6 +2669,254 @@ void hmgetCommand(client *c) {
|
|||
}
|
||||
}
|
||||
|
||||
/* Get and delete the value of one or more fields of a given hash key.
|
||||
* HGETDEL <key> FIELDS <numfields> field1 field2 ...
|
||||
* Reply: list of the values associated with each field, or nil if the field
|
||||
* doesn't exist.
|
||||
*/
|
||||
void hgetdelCommand(client *c) {
|
||||
int res = 0, hfe = 0, deleted = 0, expired = 0;
|
||||
unsigned long oldlen = 0, newlen= 0;
|
||||
long num_fields = 0;
|
||||
robj *o;
|
||||
|
||||
o = lookupKeyWrite(c->db, c->argv[1]);
|
||||
if (checkType(c, o, OBJ_HASH))
|
||||
return;
|
||||
|
||||
if (strcasecmp(c->argv[2]->ptr, "FIELDS") != 0) {
|
||||
addReplyError(c, "Mandatory argument FIELDS is missing or not at the right position");
|
||||
return;
|
||||
}
|
||||
|
||||
/* Read number of fields */
|
||||
if (getRangeLongFromObjectOrReply(c, c->argv[3], 1, LONG_MAX, &num_fields,
|
||||
"Number of fields must be a positive integer") != C_OK)
|
||||
return;
|
||||
|
||||
/* Verify `numFields` is consistent with number of arguments */
|
||||
if (num_fields != c->argc - 4) {
|
||||
addReplyError(c, "The `numfields` parameter must match the number of arguments");
|
||||
return;
|
||||
}
|
||||
|
||||
/* Hash field expiration is optimized to avoid updating the global HFE DS
|
||||
* for each field deletion. Eventually active-expiration will run and update
|
||||
* or remove the hash from the global HFE DS gracefully. Nevertheless, the
|
||||
* "subexpiry" statistic might report a wrong number of hashes with HFE to
|
||||
* the user if the deleted field is the last one with expiration. The
|
||||
* following logic checks for that case and removes the hash from the global HFE DS. */
|
||||
if (o) {
|
||||
hfe = hashTypeIsFieldsWithExpire(o);
|
||||
oldlen = hashTypeLength(o, 0);
|
||||
}
|
||||
|
||||
addReplyArrayLen(c, num_fields);
|
||||
for (int i = 4; i < c->argc; i++) {
|
||||
const int flags = HFE_LAZY_NO_NOTIFICATION |
|
||||
HFE_LAZY_NO_SIGNAL |
|
||||
HFE_LAZY_AVOID_HASH_DEL;
|
||||
res = addHashFieldToReply(c, o, c->argv[i]->ptr, flags);
|
||||
expired += (res == GETF_EXPIRED);
|
||||
/* Try to delete only if it's found and not expired lazily. */
|
||||
if (res == GETF_OK) {
|
||||
deleted++;
|
||||
serverAssert(hashTypeDelete(o, c->argv[i]->ptr, 1) == 1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Return if no modification has been made. */
|
||||
if (expired == 0 && deleted == 0)
|
||||
return;
|
||||
|
||||
signalModifiedKey(c, c->db, c->argv[1]);
|
||||
|
||||
if (expired)
|
||||
notifyKeyspaceEvent(NOTIFY_HASH, "hexpired", c->argv[1], c->db->id);
|
||||
if (deleted) {
|
||||
notifyKeyspaceEvent(NOTIFY_HASH, "hdel", c->argv[1], c->db->id);
|
||||
server.dirty += deleted;
|
||||
|
||||
/* Propagate as HDEL command.
|
||||
* Orig: HGETDEL <key> FIELDS <numfields> field1 field2 ...
|
||||
* Repl: HDEL <key> field1 field2 ... */
|
||||
rewriteClientCommandArgument(c, 0, shared.hdel);
|
||||
rewriteClientCommandArgument(c, 2, NULL); /* Delete FIELDS arg */
|
||||
rewriteClientCommandArgument(c, 2, NULL); /* Delete <numfields> arg */
|
||||
}
|
||||
|
||||
/* Key may have become empty because of deleting fields or lazy expire. */
|
||||
newlen = hashTypeLength(o, 0);
|
||||
if (newlen == 0) {
|
||||
dbDelete(c->db, c->argv[1]);
|
||||
notifyKeyspaceEvent(NOTIFY_GENERIC, "del", c->argv[1], c->db->id);
|
||||
} else if (hfe && (hashTypeIsFieldsWithExpire(o) == 0)) { /*is it last HFE*/
|
||||
ebRemove(&c->db->hexpires, &hashExpireBucketsType, o);
|
||||
}
|
||||
|
||||
if (oldlen != newlen)
|
||||
updateKeysizesHist(c->db, getKeySlot(c->argv[1]->ptr), OBJ_HASH,
|
||||
oldlen, newlen);
|
||||
}
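/* Editor's note: HGETDEL in action, as a hedged hiredis sketch (names are
 * illustrative). Note that on the wire it propagates as plain HDEL: */
#include <stdio.h>
#include <hiredis/hiredis.h>

int main(void) {
    redisContext *ctx = redisConnect("127.0.0.1", 6379);
    if (ctx == NULL || ctx->err) return 1;

    redisReply *r = redisCommand(ctx, "HSET myhash f1 v1");
    freeReplyObject(r);

    /* Returns the values and deletes the fields in one round trip. */
    r = redisCommand(ctx, "HGETDEL myhash FIELDS 2 f1 f2");
    for (size_t i = 0; i < r->elements; i++)
        printf("%zu: %s\n", i, r->element[i]->type == REDIS_REPLY_NIL ?
                               "(nil)" : r->element[i]->str);
    freeReplyObject(r);
    redisFree(ctx);
    return 0;
}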
|
||||
|
||||
/* Get the value of one or more fields of a given hash key, and optionally
|
||||
* set, update, or remove their expiration.
|
||||
*
|
||||
* HGETEX <key>
|
||||
* [EX seconds | PX milliseconds | EXAT unix-time-seconds | PXAT unix-time-milliseconds | PERSIST]
|
||||
* FIELDS <numfields> field1 field2 ...
|
||||
*
|
||||
* Reply: list of the values associated with each field, or nil if the field
|
||||
* doesn't exist.
|
||||
*/
|
||||
void hgetexCommand(client *c) {
|
||||
int expired = 0, deleted = 0, updated = 0;
|
||||
int num_fields_pos = 3, cond = 0;
|
||||
long num_fields;
|
||||
unsigned long oldlen = 0, newlen = 0;
|
||||
long long expire_time = 0;
|
||||
robj *o;
|
||||
HashTypeSetEx setex;
|
||||
|
||||
o = lookupKeyWrite(c->db, c->argv[1]);
|
||||
if (checkType(c, o, OBJ_HASH))
|
||||
return;
|
||||
|
||||
/* Read optional arg */
|
||||
if (!strcasecmp(c->argv[2]->ptr, "ex"))
|
||||
cond = HFE_EX;
|
||||
else if (!strcasecmp(c->argv[2]->ptr, "px"))
|
||||
cond = HFE_PX;
|
||||
else if (!strcasecmp(c->argv[2]->ptr, "exat"))
|
||||
cond = HFE_EXAT;
|
||||
else if (!strcasecmp(c->argv[2]->ptr, "pxat"))
|
||||
cond = HFE_PXAT;
|
||||
else if (!strcasecmp(c->argv[2]->ptr, "persist"))
|
||||
cond = HFE_PERSIST;
|
||||
|
||||
/* Parse expiration time */
|
||||
if (cond & (HFE_EX | HFE_PX | HFE_EXAT | HFE_PXAT)) {
|
||||
num_fields_pos += 2;
|
||||
int unit = (cond & (HFE_EX | HFE_EXAT)) ? UNIT_SECONDS : UNIT_MILLISECONDS;
|
||||
long long basetime = cond & (HFE_EX | HFE_PX) ? commandTimeSnapshot() : 0;
|
||||
if (parseExpireTime(c, c->argv[3], unit, basetime, &expire_time) != C_OK)
|
||||
return;
|
||||
} else if (cond & HFE_PERSIST) {
|
||||
num_fields_pos += 1;
|
||||
}
|
||||
|
||||
if (strcasecmp(c->argv[num_fields_pos - 1]->ptr, "FIELDS") != 0) {
|
||||
addReplyError(c, "Mandatory argument FIELDS is missing or not at the right position");
|
||||
return;
|
||||
}
|
||||
|
||||
/* Read number of fields */
|
||||
if (getRangeLongFromObjectOrReply(c, c->argv[num_fields_pos], 1, LONG_MAX, &num_fields,
|
||||
"Number of fields must be a positive integer") != C_OK)
|
||||
return;
|
||||
|
||||
/* Check number of fields is consistent with number of arguments */
|
||||
if (num_fields != c->argc - num_fields_pos - 1) {
|
||||
addReplyError(c, "The `numfields` parameter must match the number of arguments");
|
||||
return;
|
||||
}
|
||||
|
||||
/* Non-existing keys and empty hashes are the same thing. Reply null if the
|
||||
* key does not exist.*/
|
||||
if (!o) {
|
||||
addReplyArrayLen(c, num_fields);
|
||||
for (int i = 0; i < num_fields; i++)
|
||||
addReplyNull(c);
|
||||
return;
|
||||
}
|
||||
|
||||
oldlen = hashTypeLength(o, 0);
|
||||
if (cond)
|
||||
hashTypeSetExInit(c->argv[1], o, c, c->db, 0, &setex);
|
||||
|
||||
addReplyArrayLen(c, num_fields);
|
||||
for (int i = num_fields_pos + 1; i < c->argc; i++) {
|
||||
const int flags = HFE_LAZY_NO_NOTIFICATION |
|
||||
HFE_LAZY_NO_SIGNAL |
|
||||
HFE_LAZY_AVOID_HASH_DEL;
|
||||
sds field = c->argv[i]->ptr;
|
||||
int res = addHashFieldToReply(c, o, field, flags);
|
||||
expired += (res == GETF_EXPIRED);
|
||||
|
||||
/* Set expiration only if the field exists and not expired lazily. */
|
||||
if (res == GETF_OK && cond) {
|
||||
if (cond & HFE_PERSIST)
|
||||
expire_time = EB_EXPIRE_TIME_INVALID;
|
||||
|
||||
res = hashTypeSetEx(o, field, expire_time, &setex);
|
||||
deleted += (res == HSETEX_DELETED);
|
||||
updated += (res == HSETEX_OK);
|
||||
}
|
||||
}
|
||||
|
||||
if (cond)
|
||||
hashTypeSetExDone(&setex);
|
||||
|
||||
/* Exit early if no modification has been made. */
|
||||
if (expired == 0 && deleted == 0 && updated == 0)
|
||||
return;
|
||||
|
||||
server.dirty += deleted + updated;
|
||||
signalModifiedKey(c, c->db, c->argv[1]);
|
||||
|
||||
/* Key may become empty due to lazy expiry in addHashFieldToReply()
|
||||
* or because the new expiration time is in the past. */
|
||||
newlen = hashTypeLength(o, 0);
|
||||
if (newlen == 0) {
|
||||
dbDelete(c->db, c->argv[1]);
|
||||
notifyKeyspaceEvent(NOTIFY_GENERIC, "del", c->argv[1], c->db->id);
|
||||
}
|
||||
if (oldlen != newlen)
|
||||
updateKeysizesHist(c->db, getKeySlot(c->argv[1]->ptr), OBJ_HASH,
|
||||
oldlen, newlen);
|
||||
|
||||
/* This command will never be propagated as it is. It will be propagated as
|
||||
* HDELs when fields are lazily expired or deleted, if the new timestamp is
|
||||
* in the past. HDELs will be emitted as part of addHashFieldToReply()
|
||||
* or hashTypeSetEx() in this case.
|
||||
*
|
||||
* If the PERSIST flag is used, it will be propagated as the HPERSIST command.
|
||||
* If EX/EXAT/PX/PXAT flags are used, it will be replicated as HPEXPIREAT.
|
||||
*/
|
||||
if (expired)
|
||||
notifyKeyspaceEvent(NOTIFY_HASH, "hexpired", c->argv[1], c->db->id);
|
||||
if (updated) {
|
||||
if (cond & HFE_PERSIST) {
|
||||
notifyKeyspaceEvent(NOTIFY_HASH, "hpersist", c->argv[1], c->db->id);
|
||||
|
||||
/* Propagate as HPERSIST command.
|
||||
* Orig: HGETEX <key> PERSIST FIELDS <numfields> field1 field2 ...
|
||||
* Repl: HPERSIST <key> FIELDS <numfields> field1 field2 ... */
|
||||
rewriteClientCommandArgument(c, 0, shared.hpersist);
|
||||
rewriteClientCommandArgument(c, 2, NULL); /* Delete PERSIST arg */
|
||||
} else {
|
||||
notifyKeyspaceEvent(NOTIFY_HASH, "hexpire", c->argv[1], c->db->id);
|
||||
|
||||
/* Propagate as HPEXPIREAT command.
|
||||
* Orig: HGETEX <key> [EX|PX|EXAT|PXAT] ttl FIELDS <numfields> field1 field2 ...
|
||||
* Repl: HPEXPIREAT <key> ttl FIELDS <numfields> field1 field2 ... */
|
||||
rewriteClientCommandArgument(c, 0, shared.hpexpireat);
|
||||
rewriteClientCommandArgument(c, 2, NULL); /* Del [EX|PX|EXAT|PXAT]*/
|
||||
|
||||
/* Rewrite TTL if it is not unix time milliseconds already. */
|
||||
if (!(cond & HFE_PXAT)) {
|
||||
robj *expire = createStringObjectFromLongLong(expire_time);
|
||||
rewriteClientCommandArgument(c, 2, expire);
|
||||
decrRefCount(expire);
|
||||
}
|
||||
}
|
||||
} else if (deleted) {
|
||||
/* If we are here, fields are deleted because new timestamp was in the
|
||||
* past. HDELs are already propagated as part of hashTypeSetEx(). */
|
||||
notifyKeyspaceEvent(NOTIFY_HASH, "hdel", c->argv[1], c->db->id);
|
||||
preventCommandPropagation(c);
|
||||
}
|
||||
}
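/* Editor's note: HGETEX pairs a read with a TTL update; a hedged hiredis
 * sketch with illustrative names. As the comments above explain, with
 * EX/PX/EXAT/PXAT it replicates as HPEXPIREAT, with PERSIST as HPERSIST: */
#include <hiredis/hiredis.h>

int main(void) {
    redisContext *ctx = redisConnect("127.0.0.1", 6379);
    if (ctx == NULL || ctx->err) return 1;

    redisReply *r = redisCommand(ctx, "HSET myhash f1 v1");
    freeReplyObject(r);

    /* Read f1 and give it a 5 second TTL in the same command. */
    r = redisCommand(ctx, "HGETEX myhash EX 5 FIELDS 1 f1");
    freeReplyObject(r);

    /* Read f1 and drop its TTL again. */
    r = redisCommand(ctx, "HGETEX myhash PERSIST FIELDS 1 f1");
    freeReplyObject(r);
    redisFree(ctx);
    return 0;
}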
|
||||
|
||||
void hdelCommand(client *c) {
|
||||
robj *o;
|
||||
int j, deleted = 0, keyremoved = 0;
|
||||
|
@ -3174,10 +3698,11 @@ static void httlGenericCommand(client *c, const char *cmd, long long basetime, i
|
|||
* not met, then command will be rejected. Otherwise, EXPIRE command will be
|
||||
* propagated for given key.
|
||||
*/
|
||||
static void hexpireGenericCommand(client *c, const char *cmd, long long basetime, int unit) {
|
||||
static void hexpireGenericCommand(client *c, long long basetime, int unit) {
|
||||
long numFields = 0, numFieldsAt = 4;
|
||||
long long expire; /* unix time in msec */
|
||||
int fieldAt, fieldsNotSet = 0, expireSetCond = 0;
|
||||
int fieldAt, fieldsNotSet = 0, expireSetCond = 0, updated = 0, deleted = 0;
|
||||
unsigned long oldlen, newlen;
|
||||
robj *hashObj, *keyArg = c->argv[1], *expireArg = c->argv[2];
|
||||
|
||||
/* Read the hash object */
|
||||
|
@ -3186,29 +3711,9 @@ static void hexpireGenericCommand(client *c, const char *cmd, long long basetime
|
|||
return;
|
||||
|
||||
/* Read the expiry time from command */
|
||||
if (getLongLongFromObjectOrReply(c, expireArg, &expire, NULL) != C_OK)
|
||||
if (parseExpireTime(c, expireArg, unit, basetime, &expire) != C_OK)
|
||||
return;
|
||||
|
||||
if (expire < 0) {
|
||||
addReplyError(c,"invalid expire time, must be >= 0");
|
||||
return;
|
||||
}
|
||||
|
||||
if (unit == UNIT_SECONDS) {
|
||||
if (expire > (long long) HFE_MAX_ABS_TIME_MSEC / 1000) {
|
||||
addReplyErrorExpireTime(c);
|
||||
return;
|
||||
}
|
||||
expire *= 1000;
|
||||
}
|
||||
|
||||
/* Ensure that the final absolute Unix timestamp does not exceed EB_EXPIRE_TIME_MAX. */
|
||||
if (expire > (long long) HFE_MAX_ABS_TIME_MSEC - basetime) {
|
||||
addReplyErrorExpireTime(c);
|
||||
return;
|
||||
}
|
||||
expire += basetime;
|
||||
|
||||
/* Read optional expireSetCond [NX|XX|GT|LT] */
|
||||
char *optArg = c->argv[3]->ptr;
|
||||
if (!strcasecmp(optArg, "nx")) {
|
||||
|
@ -3247,14 +3752,18 @@ static void hexpireGenericCommand(client *c, const char *cmd, long long basetime
|
|||
return;
|
||||
}
|
||||
|
||||
oldlen = hashTypeLength(hashObj, 0);
|
||||
|
||||
HashTypeSetEx exCtx;
|
||||
hashTypeSetExInit(keyArg, hashObj, c, c->db, cmd, expireSetCond, &exCtx);
|
||||
hashTypeSetExInit(keyArg, hashObj, c, c->db, expireSetCond, &exCtx);
|
||||
addReplyArrayLen(c, numFields);
|
||||
|
||||
fieldAt = numFieldsAt + 1;
|
||||
while (fieldAt < c->argc) {
|
||||
sds field = c->argv[fieldAt]->ptr;
|
||||
SetExRes res = hashTypeSetEx(hashObj, field, expire, &exCtx);
|
||||
updated += (res == HSETEX_OK);
|
||||
deleted += (res == HSETEX_DELETED);
|
||||
|
||||
if (unlikely(res != HSETEX_OK)) {
|
||||
/* If the field was not set, prevent field propagation */
|
||||
|
@ -3269,17 +3778,34 @@ static void hexpireGenericCommand(client *c, const char *cmd, long long basetime
|
|||
|
||||
hashTypeSetExDone(&exCtx);
|
||||
|
||||
if (deleted + updated > 0) {
|
||||
server.dirty += deleted + updated;
|
||||
signalModifiedKey(c, c->db, keyArg);
|
||||
notifyKeyspaceEvent(NOTIFY_HASH, deleted ? "hdel" : "hexpire",
|
||||
keyArg, c->db->id);
|
||||
}
|
||||
|
||||
newlen = hashTypeLength(hashObj, 0);
|
||||
if (newlen == 0) {
|
||||
dbDelete(c->db, keyArg);
|
||||
notifyKeyspaceEvent(NOTIFY_GENERIC, "del", keyArg, c->db->id);
|
||||
}
|
||||
|
||||
if (oldlen != newlen)
|
||||
updateKeysizesHist(c->db, getKeySlot(c->argv[1]->ptr), OBJ_HASH,
|
||||
oldlen, newlen);
|
||||
|
||||
/* Avoid propagating the command if not even one field was updated (either because
|
||||
* the time is in the past and corresponding HDELs were sent, or conditions were
|
||||
* not met); it is useless and invalid to propagate a command with no fields. */
|
||||
if (exCtx.fieldUpdated == 0) {
|
||||
if (updated == 0) {
|
||||
preventCommandPropagation(c);
|
||||
return;
|
||||
}
|
||||
|
||||
/* If some fields were dropped, rewrite the number of fields */
|
||||
if (fieldsNotSet) {
|
||||
robj *numFieldsObj = createStringObjectFromLongLong(exCtx.fieldUpdated);
|
||||
robj *numFieldsObj = createStringObjectFromLongLong(updated);
|
||||
rewriteClientCommandArgument(c, numFieldsAt, numFieldsObj);
|
||||
decrRefCount(numFieldsObj);
|
||||
}
|
||||
|
@ -3297,48 +3823,48 @@ static void hexpireGenericCommand(client *c, const char *cmd, long long basetime
|
|||
}
|
||||
}
|
||||
|
||||
/* HPEXPIRE key milliseconds [ NX | XX | GT | LT] numfields <field [field ...]> */
|
||||
/* HPEXPIRE key milliseconds [ NX | XX | GT | LT] FIELDS numfields <field [field ...]> */
|
||||
void hpexpireCommand(client *c) {
|
||||
hexpireGenericCommand(c,"hpexpire", commandTimeSnapshot(),UNIT_MILLISECONDS);
|
||||
hexpireGenericCommand(c,commandTimeSnapshot(),UNIT_MILLISECONDS);
|
||||
}
|
||||
|
||||
/* HEXPIRE key seconds [NX | XX | GT | LT] numfields <field [field ...]> */
|
||||
/* HEXPIRE key seconds [NX | XX | GT | LT] FIELDS numfields <field [field ...]> */
|
||||
void hexpireCommand(client *c) {
|
||||
hexpireGenericCommand(c,"hexpire", commandTimeSnapshot(),UNIT_SECONDS);
|
||||
hexpireGenericCommand(c,commandTimeSnapshot(),UNIT_SECONDS);
|
||||
}
|
||||
|
||||
/* HEXPIREAT key unix-time-seconds [NX | XX | GT | LT] numfields <field [field ...]> */
|
||||
/* HEXPIREAT key unix-time-seconds [NX | XX | GT | LT] FIELDS numfields <field [field ...]> */
|
||||
void hexpireatCommand(client *c) {
|
||||
hexpireGenericCommand(c,"hexpireat", 0,UNIT_SECONDS);
|
||||
hexpireGenericCommand(c,0,UNIT_SECONDS);
|
||||
}
|
||||
|
||||
/* HPEXPIREAT key unix-time-milliseconds [NX | XX | GT | LT] numfields <field [field ...]> */
|
||||
/* HPEXPIREAT key unix-time-milliseconds [NX | XX | GT | LT] FIELDS numfields <field [field ...]> */
|
||||
void hpexpireatCommand(client *c) {
|
||||
hexpireGenericCommand(c,"hpexpireat", 0,UNIT_MILLISECONDS);
|
||||
hexpireGenericCommand(c,0,UNIT_MILLISECONDS);
|
||||
}
|
||||
|
||||
/* for each specified field: get the remaining time to live in seconds*/
|
||||
/* HTTL key numfields <field [field ...]> */
|
||||
/* HTTL key FIELDS numfields <field [field ...]> */
|
||||
void httlCommand(client *c) {
|
||||
httlGenericCommand(c, "httl", commandTimeSnapshot(), UNIT_SECONDS);
|
||||
}
|
||||
|
||||
/* HPTTL key numfields <field [field ...]> */
|
||||
/* HPTTL key FIELDS numfields <field [field ...]> */
|
||||
void hpttlCommand(client *c) {
|
||||
httlGenericCommand(c, "hpttl", commandTimeSnapshot(), UNIT_MILLISECONDS);
|
||||
}
|
||||
|
||||
/* HEXPIRETIME key numFields <field [field ...]> */
|
||||
/* HEXPIRETIME key FIELDS numfields <field [field ...]> */
|
||||
void hexpiretimeCommand(client *c) {
|
||||
httlGenericCommand(c, "hexpiretime", 0, UNIT_SECONDS);
|
||||
}
|
||||
|
||||
/* HPEXPIRETIME key numFields <field [field ...]> */
|
||||
/* HPEXPIRETIME key FIELDS numfields <field [field ...]> */
|
||||
void hpexpiretimeCommand(client *c) {
|
||||
httlGenericCommand(c, "hexpiretime", 0, UNIT_MILLISECONDS);
|
||||
}
|
||||
|
||||
/* HPERSIST key <FIELDS count field [field ...]> */
|
||||
/* HPERSIST key FIELDS numfields <field [field ...]> */
|
||||
void hpersistCommand(client *c) {
|
||||
robj *hashObj;
|
||||
long numFields = 0, numFieldsAt = 3;
|
||||
|
|
|
@ -504,15 +504,24 @@ void getrangeCommand(client *c) {
|
|||
strlen = sdslen(str);
|
||||
}
|
||||
|
||||
if (start < 0) start += strlen;
|
||||
if (end < 0) end += strlen;
|
||||
if (strlen == 0 || start >= (long long)strlen || end < 0 || start > end) {
|
||||
/* Convert negative indexes */
|
||||
if (start < 0 && end < 0 && start > end) {
|
||||
addReply(c,shared.emptybulk);
|
||||
return;
|
||||
}
|
||||
if (start < 0) start = strlen+start;
|
||||
if (end < 0) end = strlen+end;
|
||||
if (start < 0) start = 0;
|
||||
if (end >= (long long)strlen) end = strlen-1;
|
||||
addReplyBulkCBuffer(c,(char*)str+start,end-start+1);
|
||||
if (end < 0) end = 0;
|
||||
if ((unsigned long long)end >= strlen) end = strlen-1;
|
||||
|
||||
/* Precondition: end >= 0 && end < strlen, so the only condition where
|
||||
* nothing can be returned is: start > end. */
|
||||
if (start > end || strlen == 0) {
|
||||
addReply(c,shared.emptybulk);
|
||||
} else {
|
||||
addReplyBulkCBuffer(c,(char*)str+start,end-start+1);
|
||||
}
|
||||
}
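/* Editor's note: a standalone sketch mirroring the rewritten clamping logic,
 * handy for eyeballing the negative-index cases (values are illustrative): */
#include <stdio.h>
#include <string.h>

static void getrange_sketch(const char *str, long long start, long long end) {
    long long len = (long long)strlen(str);
    /* Both indexes negative with start > end can never match anything. */
    if (start < 0 && end < 0 && start > end) { puts("(empty)"); return; }
    if (start < 0) start = len + start;
    if (end < 0) end = len + end;
    if (start < 0) start = 0;
    if (end < 0) end = 0;
    if (end >= len) end = len - 1;
    /* Precondition now: end < len, so only start > end (or len == 0) is empty. */
    if (start > end || len == 0) { puts("(empty)"); return; }
    printf("%.*s\n", (int)(end - start + 1), str + start);
}

int main(void) {
    getrange_sketch("Hello World", 0, 4);   /* Hello */
    getrange_sketch("Hello World", -5, -1); /* World */
    getrange_sketch("Hello World", -1, -5); /* (empty): both negative, start > end */
    return 0;
}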
|
||||
|
||||
void mgetCommand(client *c) {
|
||||
|
|
|
@ -633,7 +633,7 @@ static void tlsHandleEvent(tls_connection *conn, int mask) {
|
|||
int ret, conn_error;
|
||||
|
||||
TLSCONN_DEBUG("tlsEventHandler(): fd=%d, state=%d, mask=%d, r=%d, w=%d, flags=%d",
|
||||
fd, conn->c.state, mask, conn->c.read_handler != NULL, conn->c.write_handler != NULL,
|
||||
conn->c.fd, conn->c.state, mask, conn->c.read_handler != NULL, conn->c.write_handler != NULL,
|
||||
conn->flags);
|
||||
|
||||
ERR_clear_error();
|
||||
|
|
|
@ -96,6 +96,31 @@ proc assert_cluster_state {state} {
|
|||
fail "Cluster node $id cluster_state:[CI $id cluster_state]"
|
||||
}
|
||||
}
|
||||
|
||||
wait_for_secrets_match 50 100
|
||||
}
|
||||
|
||||
proc num_unique_secrets {} {
|
||||
set secrets [list]
|
||||
foreach_redis_id id {
|
||||
if {[instance_is_killed redis $id]} continue
|
||||
lappend secrets [R $id debug internal_secret]
|
||||
}
|
||||
set num_secrets [llength [lsort -unique $secrets]]
|
||||
return $num_secrets
|
||||
}
|
||||
|
||||
# Check that all cluster nodes share the same internal secret, or raise an error.
|
||||
proc assert_secrets_match {} {
|
||||
assert_equal {1} [num_unique_secrets]
|
||||
}
|
||||
|
||||
proc wait_for_secrets_match {maxtries delay} {
|
||||
wait_for_condition $maxtries $delay {
|
||||
[num_unique_secrets] eq 1
|
||||
} else {
|
||||
fail "Failed waiting for secrets to sync"
|
||||
}
|
||||
}
|
||||
|
||||
# Search the first node starting from ID $first that is not
|
||||
|
@ -188,9 +213,11 @@ proc cluster_config_consistent {} {
|
|||
for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
|
||||
if {$j == 0} {
|
||||
set base_cfg [R $j cluster slots]
|
||||
set base_secret [R $j debug internal_secret]
|
||||
} else {
|
||||
set cfg [R $j cluster slots]
|
||||
if {$cfg != $base_cfg} {
|
||||
set secret [R $j debug internal_secret]
|
||||
if {$cfg != $base_cfg || $secret != $base_secret} {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1329,4 +1329,210 @@ tags {"external:skip"} {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Test Part 3
|
||||
#
|
||||
# Test if INCR AOF offset information is as expected
|
||||
test {Multi Part AOF writes start offset in the manifest} {
|
||||
set aof_dirpath "$server_path/$aof_dirname"
|
||||
set aof_manifest_file "$server_path/$aof_dirname/${aof_basename}$::manifest_suffix"
|
||||
|
||||
start_server_aof [list dir $server_path] {
|
||||
set client [redis [srv host] [srv port] 0 $::tls]
|
||||
wait_done_loading $client
|
||||
|
||||
# The manifest file has startoffset now
|
||||
assert_aof_manifest_content $aof_manifest_file {
|
||||
{file appendonly.aof.1.base.rdb seq 1 type b}
|
||||
{file appendonly.aof.1.incr.aof seq 1 type i startoffset 0}
|
||||
}
|
||||
}
|
||||
|
||||
clean_aof_persistence $aof_dirpath
|
||||
}
|
||||
|
||||
test {Multi Part AOF won't add the offset of incr AOF from old version} {
|
||||
create_aof $aof_dirpath $aof_base1_file {
|
||||
append_to_aof [formatCommand set k1 v1]
|
||||
}
|
||||
|
||||
create_aof $aof_dirpath $aof_incr1_file {
|
||||
append_to_aof [formatCommand set k2 v2]
|
||||
}
|
||||
|
||||
create_aof_manifest $aof_dirpath $aof_manifest_file {
|
||||
append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
|
||||
append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
|
||||
}
|
||||
|
||||
start_server_aof [list dir $server_path] {
|
||||
assert_equal 1 [is_alive [srv pid]]
|
||||
set client [redis [srv host] [srv port] 0 $::tls]
|
||||
wait_done_loading $client
|
||||
|
||||
assert_equal v1 [$client get k1]
|
||||
assert_equal v2 [$client get k2]
|
||||
|
||||
$client set k3 v3
|
||||
catch {$client shutdown}
|
||||
|
||||
# Should not add the offset to the manifest since we don't know the right
|
||||
# starting replication offset for them.
|
||||
set fp [open $aof_manifest_file r]
|
||||
set content [read $fp]
|
||||
close $fp
|
||||
assert ![regexp {startoffset} $content]
|
||||
|
||||
# The manifest file still has the information from the old version
|
||||
assert_aof_manifest_content $aof_manifest_file {
|
||||
{file appendonly.aof.1.base.aof seq 1 type b}
|
||||
{file appendonly.aof.1.incr.aof seq 1 type i}
|
||||
}
|
||||
}
|
||||
|
||||
clean_aof_persistence $aof_dirpath
|
||||
}
|
||||
|
||||
test {Multi Part AOF can update master_repl_offset with only startoffset info} {
|
||||
create_aof $aof_dirpath $aof_base1_file {
|
||||
append_to_aof [formatCommand set k1 v1]
|
||||
}
|
||||
|
||||
create_aof $aof_dirpath $aof_incr1_file {
|
||||
append_to_aof [formatCommand set k2 v2]
|
||||
}
|
||||
|
||||
create_aof_manifest $aof_dirpath $aof_manifest_file {
|
||||
append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
|
||||
append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i startoffset 100\n"
|
||||
}
|
||||
|
||||
start_server [list overrides [list dir $server_path appendonly yes ]] {
|
||||
wait_done_loading r
|
||||
r select 0
|
||||
assert_equal v1 [r get k1]
|
||||
assert_equal v2 [r get k2]
|
||||
|
||||
# After loading AOF, redis will update the replication offset based on
|
||||
# the information of the last INCR AOF, to avoid the rollback of the
|
||||
# start offset of a new INCR AOF. If the INCR file doesn't have end offset
|
||||
# info, redis will calculate the replication offset as the start offset
|
||||
# plus the file size.
|
||||
set file_size [file size $aof_incr1_file]
|
||||
set offset [expr $file_size + 100]
|
||||
assert_equal $offset [s master_repl_offset]
|
||||
}
|
||||
|
||||
clean_aof_persistence $aof_dirpath
|
||||
}
|
||||
|
||||
test {Multi Part AOF can update master_repl_offset with endoffset info} {
|
||||
create_aof $aof_dirpath $aof_base1_file {
|
||||
append_to_aof [formatCommand set k1 v1]
|
||||
}
|
||||
|
||||
create_aof $aof_dirpath $aof_incr1_file {
|
||||
append_to_aof [formatCommand set k2 v2]
|
||||
}
|
||||
|
||||
create_aof_manifest $aof_dirpath $aof_manifest_file {
|
||||
append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
|
||||
append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i startoffset 100 endoffset 200\n"
|
||||
}
|
||||
|
||||
start_server [list overrides [list dir $server_path appendonly yes ]] {
|
||||
wait_done_loading r
|
||||
r select 0
|
||||
assert_equal v1 [r get k1]
|
||||
assert_equal v2 [r get k2]
|
||||
|
||||
# If the INCR file has an end offset, redis directly uses it as replication offset
|
||||
assert_equal 200 [s master_repl_offset]
|
||||
|
||||
# We should reset endoffset in manifest file
|
||||
set fp [open $aof_manifest_file r]
|
||||
set content [read $fp]
|
||||
close $fp
|
||||
assert ![regexp {endoffset} $content]
|
||||
}
|
||||
|
||||
clean_aof_persistence $aof_dirpath
|
||||
}
|
||||
|
||||
test {Multi Part AOF will add the end offset if we close the AOF gracefully} {
|
||||
start_server_aof [list dir $server_path] {
|
||||
set client [redis [srv host] [srv port] 0 $::tls]
|
||||
wait_done_loading $client
|
||||
|
||||
assert_aof_manifest_content $aof_manifest_file {
|
||||
{file appendonly.aof.1.base.rdb seq 1 type b}
|
||||
{file appendonly.aof.1.incr.aof seq 1 type i startoffset 0}
|
||||
}
|
||||
|
||||
$client set k1 v1
|
||||
$client set k2 v2
|
||||
# Close AOF gracefully when stopping appendonly; we should add endoffset
|
||||
# to the manifest file. 'endoffset' should be 2 since we wrote 2 commands.
|
||||
r config set appendonly no
|
||||
assert_aof_manifest_content $aof_manifest_file {
|
||||
{file appendonly.aof.1.base.rdb seq 1 type b}
|
||||
{file appendonly.aof.1.incr.aof seq 1 type i startoffset 0 endoffset 2}
|
||||
}
|
||||
r config set appendonly yes
|
||||
waitForBgrewriteaof $client
|
||||
|
||||
$client set k3 v3
|
||||
# Close AOF gracefully when shutting down the server; we should add endoffset
|
||||
# to the manifest file. 'endoffset' should be 3 since we wrote 3 commands.
|
||||
catch {$client shutdown}
|
||||
assert_aof_manifest_content $aof_manifest_file {
|
||||
{file appendonly.aof.2.base.rdb seq 2 type b}
|
||||
{file appendonly.aof.2.incr.aof seq 2 type i startoffset 2 endoffset 3}
|
||||
}
|
||||
}
|
||||
|
||||
clean_aof_persistence $aof_dirpath
|
||||
}
|
||||
|
||||
test {INCR AOF has accurate start offset when AOFRW} {
|
||||
start_server [list overrides [list dir $server_path appendonly yes ]] {
|
||||
r config set auto-aof-rewrite-percentage 0
|
||||
|
||||
# Start write load to let the master_repl_offset continue increasing
|
||||
# since appendonly is enabled
|
||||
set load_handle0 [start_write_load [srv 0 host] [srv 0 port] 10]
|
||||
wait_for_condition 50 100 {
|
||||
[r dbsize] > 0
|
||||
} else {
|
||||
fail "No write load detected."
|
||||
}
|
||||
|
||||
# We obtain the master_repl_offset at the time of bgrewriteaof by pausing
|
||||
# the redis process, sending pipelined commands, and then resuming the process
|
||||
set rd [redis_deferring_client]
|
||||
pause_process [srv 0 pid]
|
||||
set buf "info replication\r\n"
|
||||
append buf "bgrewriteaof\r\n"
|
||||
$rd write $buf
|
||||
$rd flush
|
||||
resume_process [srv 0 pid]
|
||||
# Read the replication offset and the start of the bgrewriteaof
|
||||
regexp {master_repl_offset:(\d+)} [$rd read] -> offset1
|
||||
assert_match {*rewriting started*} [$rd read]
|
||||
$rd close
|
||||
|
||||
# Get the start offset from the manifest file after bgrewriteaof
|
||||
waitForBgrewriteaof r
|
||||
set fp [open $aof_manifest_file r]
|
||||
set content [read $fp]
|
||||
close $fp
|
||||
set offset2 [lindex [regexp -inline {startoffset (\d+)} $content] 1]
|
||||
|
||||
# The start offset of INCR AOF should be the same as master_repl_offset
|
||||
# when we trigger bgrewriteaof
|
||||
assert {$offset1 == $offset2}
|
||||
stop_write_load $load_handle0
|
||||
wait_load_handlers_disconnected
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -79,6 +79,7 @@ start_server {tags {"repl external:skip"}} {
$replica1 config set repl-rdb-channel yes
$replica2 config set repl-rdb-channel no

set loglines [count_log_lines 0]
set prev_forks [s 0 total_forks]
$master set x 2

@ -87,9 +88,10 @@ start_server {tags {"repl external:skip"}} {
$replica1 replicaof $master_host $master_port
$replica2 replicaof $master_host $master_port

set res [wait_for_log_messages 0 {"*Starting BGSAVE* replicas sockets (rdb-channel)*"} 0 2000 10]
set loglines [lindex $res 1]
wait_for_log_messages 0 {"*Starting BGSAVE* replicas sockets*"} $loglines 2000 10
# There will be two forks subsequently, one for rdbchannel
# replica, another for the replica without rdbchannel config.
wait_for_log_messages 0 {"*Starting BGSAVE* replicas sockets (rdb-channel)*"} $loglines 300 100
wait_for_log_messages 0 {"*Starting BGSAVE* replicas sockets"} $loglines 300 100

wait_replica_online $master 0 100 100
wait_replica_online $master 1 100 100

@ -396,10 +398,10 @@ start_server {tags {"repl external:skip"}} {
populate 20000 master 100 -1

$replica replicaof $master_host $master_port
wait_for_condition 50 200 {
wait_for_condition 100 200 {
[s 0 loading] == 1
} else {
fail "[s 0 loading] sdsdad"
fail "Replica did not start loading"
}

# Generate some traffic for backlog ~2mb

@ -465,12 +467,11 @@ start_server {tags {"repl external:skip"}} {
fail "Sync did not start"
}

# Wait for both replicas main conns to establish psync
# Verify replicas are connected
wait_for_condition 500 100 {
[s -2 connected_slaves] == 2
} else {
fail "Replicas didn't establish psync:
sync_partial_ok: [s -2 sync_partial_ok]"
fail "Replicas didn't connect: [s -2 connected_slaves]"
}

# kill one of the replicas

@ -488,6 +489,14 @@ start_server {tags {"repl external:skip"}} {
sync_full:[s -2 sync_full]
connected_slaves: [s -2 connected_slaves]"
}

# Wait until replica catches up
wait_replica_online $master 0 200 100
wait_for_condition 200 100 {
[s 0 mem_replica_full_sync_buffer] == 0
} else {
fail "Replica did not consume buffer in time"
}
}

test "Test master aborts rdb delivery if all replicas are dropped" {

@ -773,7 +782,6 @@ start_server {tags {"repl external:skip"}} {

# Speed up loading
$replica config set key-load-delay 0
stop_write_load $load_handle

# Wait until replica recovers and becomes online
wait_replica_online $master 0 100 100

@ -63,7 +63,8 @@ TEST_MODULES = \
postnotifications.so \
moduleauthtwo.so \
rdbloadsave.so \
crash.so
crash.so \
internalsecret.so

.PHONY: all

@ -794,6 +794,21 @@ int TestAssertIntegerReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply, lon
return 1;
}

/* Replies "yes", "no" otherwise if the context may execute debug commands */
int TestCanDebug(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);
int flags = RedisModule_GetContextFlags(ctx);
int allFlags = RedisModule_GetContextFlagsAll();
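/* GetContextFlagsAll() reports which flags this server version is able to
 * set, so the check below trusts DEBUG_ENABLED from the context flags only
 * when the running server actually supports that flag (assumed intent). */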
if ((allFlags & REDISMODULE_CTX_FLAGS_DEBUG_ENABLED) &&
(flags & REDISMODULE_CTX_FLAGS_DEBUG_ENABLED)) {
RedisModule_ReplyWithSimpleString(ctx, "yes");
} else {
RedisModule_ReplyWithSimpleString(ctx, "no");
}
return REDISMODULE_OK;
}

#define T(name,...) \
do { \
RedisModule_Log(ctx,"warning","Testing %s", name); \

@ -802,7 +817,7 @@ int TestAssertIntegerReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply, lon

/* TEST.BASICS -- Run all the tests.
 * Note: it is useful to run these tests from the module rather than TCL
 * since it's easier to check the reply types like that (make a distinction
 * since it's easier to check the reply types like that make a distinction
 * between 0 and "0", etc. */
int TestBasics(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);

@ -1017,6 +1032,10 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
TestGetResp,"readonly",1,1,1) == REDISMODULE_ERR)
return REDISMODULE_ERR;

if (RedisModule_CreateCommand(ctx,"test.candebug",
TestCanDebug,"readonly",1,1,1) == REDISMODULE_ERR)
return REDISMODULE_ERR;

RedisModule_SubscribeToKeyspaceEvents(ctx,
REDISMODULE_NOTIFY_HASH |
REDISMODULE_NOTIFY_SET |

@ -5,6 +5,8 @@
#include <stdlib.h>
#include <string.h>

#define UNUSED(V) ((void) V)

static RedisModuleType *FragType;

struct FragObject {

@ -21,34 +23,176 @@ unsigned long int datatype_defragged = 0;
unsigned long int datatype_raw_defragged = 0;
unsigned long int datatype_resumes = 0;
unsigned long int datatype_wrong_cursor = 0;
unsigned long int global_attempts = 0;
unsigned long int defrag_started = 0;
unsigned long int defrag_ended = 0;
unsigned long int global_defragged = 0;
unsigned long int global_strings_attempts = 0;
unsigned long int global_strings_defragged = 0;
unsigned long int global_strings_pauses = 0;
unsigned long int global_dicts_resumes = 0; /* Number of dict defragmentations resumed from a previous break */
unsigned long int global_dicts_attempts = 0; /* Number of attempts to defragment a dictionary */
unsigned long int global_dicts_defragged = 0; /* Number of dictionaries successfully defragmented */

int global_strings_len = 0;
unsigned long global_strings_len = 0;
RedisModuleString **global_strings = NULL;

static void createGlobalStrings(RedisModuleCtx *ctx, int count)
unsigned long global_dicts_len = 0;
RedisModuleDict **global_dicts = NULL;

static void createGlobalStrings(RedisModuleCtx *ctx, unsigned long count)
{
global_strings_len = count;
global_strings = RedisModule_Alloc(sizeof(RedisModuleString *) * count);

for (int i = 0; i < count; i++) {
for (unsigned long i = 0; i < count; i++) {
global_strings[i] = RedisModule_CreateStringFromLongLong(ctx, i);
}
}

static void defragGlobalStrings(RedisModuleDefragCtx *ctx)
static int defragGlobalStrings(RedisModuleDefragCtx *ctx)
{
for (int i = 0; i < global_strings_len; i++) {
RedisModuleString *new = RedisModule_DefragRedisModuleString(ctx, global_strings[i]);
global_attempts++;
unsigned long cursor = 0;
RedisModule_DefragCursorGet(ctx, &cursor);

RedisModule_Assert(cursor < global_strings_len);
for (; cursor < global_strings_len; cursor++) {
RedisModuleString *str = global_strings[cursor];
if (!str) continue;
RedisModuleString *new = RedisModule_DefragRedisModuleString(ctx, str);
global_strings_attempts++;
if (new != NULL) {
global_strings[i] = new;
global_defragged++;
global_strings[cursor] = new;
global_strings_defragged++;
}

if (RedisModule_DefragShouldStop(ctx)) {
global_strings_pauses++;
RedisModule_DefragCursorSet(ctx, cursor);
return 1;
}
}
return 0;
}
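/* Resume protocol used above: returning 1 after RedisModule_DefragCursorSet()
 * tells the server this callback still has work left; on the next defrag
 * cycle RedisModule_DefragCursorGet() restores the saved index, so the scan
 * continues where it paused instead of restarting from 0. */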

static void createFragGlobalStrings(RedisModuleCtx *ctx) {
for (unsigned long i = 0; i < global_strings_len; i++) {
if (i % 2 == 1) {
RedisModule_FreeString(ctx, global_strings[i]);
global_strings[i] = NULL;
}
}
}

static void createGlobalDicts(RedisModuleCtx *ctx, unsigned long count) {
global_dicts_len = count;
global_dicts = RedisModule_Alloc(sizeof(RedisModuleDict *) * count);

for (unsigned long i = 0; i < count; i++) {
RedisModuleDict *dict = RedisModule_CreateDict(ctx);
for (unsigned long j = 0; j < 10; j ++) {
RedisModuleString *str = RedisModule_CreateStringFromULongLong(ctx, j);
RedisModule_DictSet(dict, str, str);
}
global_dicts[i] = dict;
}
}

static void createFragGlobalDicts(RedisModuleCtx *ctx) {
char *key;
size_t keylen;
RedisModuleString *val;

for (unsigned long i = 0; i < global_dicts_len; i++) {
RedisModuleDict *dict = global_dicts[i];
if (!dict) continue;

/* Handle dictionaries differently based on their index in global_dicts array:
 * 1. For odd indices (i % 2 == 1): Remove the entire dictionary.
 * 2. For even indices: Keep the dictionary but remove half of its items. */
if (i % 2 == 1) {
RedisModuleDictIter *iter = RedisModule_DictIteratorStartC(dict, "^", NULL, 0);
while ((key = RedisModule_DictNextC(iter, &keylen, (void**)&val))) {
RedisModule_FreeString(ctx, val);
}
RedisModule_DictIteratorStop(iter); /* Stop the iterator before freeing the dict it walks. */
RedisModule_FreeDict(ctx, dict);
global_dicts[i] = NULL;
} else {
int key_index = 0;
RedisModuleDictIter *iter = RedisModule_DictIteratorStartC(dict, "^", NULL, 0);
while ((key = RedisModule_DictNextC(iter, &keylen, (void**)&val))) {
if (key_index++ % 2 == 1) {
RedisModule_FreeString(ctx, val);
RedisModule_DictReplaceC(dict, key, keylen, NULL);
}
}
RedisModule_DictIteratorStop(iter);
}
}
}

static void *defragGlobalDictValueCB(RedisModuleDefragCtx *ctx, void *data, unsigned char *key, size_t keylen) {
REDISMODULE_NOT_USED(key);
REDISMODULE_NOT_USED(keylen);
if (!data) return NULL;
return RedisModule_DefragRedisModuleString(ctx, data);
}

static int defragGlobalDicts(RedisModuleDefragCtx *ctx) {
static RedisModuleString *seekTo = NULL;
static unsigned long dict_index = 0;
unsigned long cursor = 0;

RedisModule_DefragCursorGet(ctx, &cursor);
if (cursor == 0) { /* Start a new defrag. */
if (seekTo) {
RedisModule_FreeString(NULL, seekTo);
seekTo = NULL;
}
dict_index = 0;
} else {
global_dicts_resumes++;
}

RedisModule_Assert(dict_index < global_dicts_len);
for (; dict_index < global_dicts_len; dict_index++) {
RedisModuleDict *dict = global_dicts[dict_index];
if (!dict) continue;
RedisModuleDict *new = RedisModule_DefragRedisModuleDict(ctx, dict, defragGlobalDictValueCB, &seekTo);
global_dicts_attempts++;
if (new != NULL) {
global_dicts[dict_index] = new;
global_dicts_defragged++;
}

if (seekTo != NULL) {
/* Set cursor to 1 to indicate defragmentation is not finished. */
RedisModule_DefragCursorSet(ctx, 1);
return 1;
}
}

/* Set cursor to 0 to indicate completion. */
dict_index = 0;
RedisModule_DefragCursorSet(ctx, 0);
return 0;
}
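/* Assumed semantics of the dict helper above: RedisModule_DefragRedisModuleDict()
 * defragments as many entries as the current effort budget allows; when it has
 * to stop early it records the key to resume from in seekTo. That is why a
 * non-NULL seekTo is treated as "this dict is not finished" and the cursor is
 * left non-zero for the next cycle. */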

typedef enum { DEFRAG_NOT_START, DEFRAG_STRING, DEFRAG_DICT } defrag_module_stage;
static int defragGlobal(RedisModuleDefragCtx *ctx) {
static defrag_module_stage stage = DEFRAG_NOT_START;
if (stage == DEFRAG_NOT_START) {
stage = DEFRAG_STRING; /* Start a new global defrag. */
}

if (stage == DEFRAG_STRING) {
if (defragGlobalStrings(ctx) != 0) return 1;
stage = DEFRAG_DICT;
}
if (stage == DEFRAG_DICT) {
if (defragGlobalDicts(ctx) != 0) return 1;
stage = DEFRAG_NOT_START;
}
return 0;
}
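/* The stage machine is kept in function-local static state rather than in the
 * defrag cursor: defrag callbacks run on the main thread, so at most one
 * global defrag pass is in flight and a single static variable is enough to
 * remember whether strings or dicts are being processed across pauses. */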

static void defragStart(RedisModuleDefragCtx *ctx) {

@ -70,8 +214,12 @@ static void FragInfo(RedisModuleInfoCtx *ctx, int for_crash_report) {
RedisModule_InfoAddFieldLongLong(ctx, "datatype_raw_defragged", datatype_raw_defragged);
RedisModule_InfoAddFieldLongLong(ctx, "datatype_resumes", datatype_resumes);
RedisModule_InfoAddFieldLongLong(ctx, "datatype_wrong_cursor", datatype_wrong_cursor);
RedisModule_InfoAddFieldLongLong(ctx, "global_attempts", global_attempts);
RedisModule_InfoAddFieldLongLong(ctx, "global_defragged", global_defragged);
RedisModule_InfoAddFieldLongLong(ctx, "global_strings_attempts", global_strings_attempts);
RedisModule_InfoAddFieldLongLong(ctx, "global_strings_defragged", global_strings_defragged);
RedisModule_InfoAddFieldLongLong(ctx, "global_strings_pauses", global_strings_pauses);
RedisModule_InfoAddFieldLongLong(ctx, "global_dicts_resumes", global_dicts_resumes);
RedisModule_InfoAddFieldLongLong(ctx, "global_dicts_attempts", global_dicts_attempts);
RedisModule_InfoAddFieldLongLong(ctx, "global_dicts_defragged", global_dicts_defragged);
RedisModule_InfoAddFieldLongLong(ctx, "defrag_started", defrag_started);
RedisModule_InfoAddFieldLongLong(ctx, "defrag_ended", defrag_ended);
}

@ -99,8 +247,12 @@ static int fragResetStatsCommand(RedisModuleCtx *ctx, RedisModuleString **argv,
datatype_raw_defragged = 0;
datatype_resumes = 0;
datatype_wrong_cursor = 0;
global_attempts = 0;
global_defragged = 0;
global_strings_attempts = 0;
global_strings_defragged = 0;
global_strings_pauses = 0;
global_dicts_resumes = 0;
global_dicts_attempts = 0;
global_dicts_defragged = 0;
defrag_started = 0;
defrag_ended = 0;

@ -144,6 +296,18 @@ static int fragCreateCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int
return REDISMODULE_OK;
}

/* FRAG.create_frag_global */
static int fragCreateGlobalCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
UNUSED(argv);
if (argc != 1)
return RedisModule_WrongArity(ctx);

createFragGlobalStrings(ctx);
createFragGlobalDicts(ctx);
RedisModule_ReplyWithSimpleString(ctx, "OK");
return REDISMODULE_OK;
}

void FragFree(void *value) {
struct FragObject *o = value;

@ -238,6 +402,7 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
}

createGlobalStrings(ctx, glen);
createGlobalDicts(ctx, glen);

RedisModuleTypeMethods tm = {
.version = REDISMODULE_TYPE_METHOD_VERSION,

@ -253,12 +418,16 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
fragCreateCommand, "write deny-oom", 1, 1, 1) == REDISMODULE_ERR)
return REDISMODULE_ERR;

if (RedisModule_CreateCommand(ctx, "frag.create_frag_global",
fragCreateGlobalCommand, "write deny-oom", 1, 1, 1) == REDISMODULE_ERR)
return REDISMODULE_ERR;

if (RedisModule_CreateCommand(ctx, "frag.resetstats",
fragResetStatsCommand, "write deny-oom", 1, 1, 1) == REDISMODULE_ERR)
return REDISMODULE_ERR;

RedisModule_RegisterInfoFunc(ctx, FragInfo);
RedisModule_RegisterDefragFunc(ctx, defragGlobalStrings);
RedisModule_RegisterDefragFunc2(ctx, defragGlobal);
RedisModule_RegisterDefragCallbacks(ctx, defragStart, defragEnd);

return REDISMODULE_OK;
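
For reference, the resumable pattern in this module distills to the following
minimal sketch (a hypothetical module using only the defrag APIs that appear
in this diff; the array name and size are illustrative):

    #include "redismodule.h"

    #define NSTRINGS 1000
    static RedisModuleString *strings[NSTRINGS];

    /* Restore the saved position, defrag until the server asks us to stop,
     * save the position again, and report whether work remains. */
    static int defragStringsSketch(RedisModuleDefragCtx *ctx) {
        unsigned long cursor = 0;
        RedisModule_DefragCursorGet(ctx, &cursor);        /* resume point */
        for (; cursor < NSTRINGS; cursor++) {
            if (strings[cursor]) {
                RedisModuleString *moved =
                    RedisModule_DefragRedisModuleString(ctx, strings[cursor]);
                if (moved) strings[cursor] = moved;       /* pointer may move */
            }
            if (RedisModule_DefragShouldStop(ctx)) {
                RedisModule_DefragCursorSet(ctx, cursor); /* save resume point */
                return 1;                                 /* more work left */
            }
        }
        RedisModule_DefragCursorSet(ctx, 0);
        return 0;                                         /* finished */
    }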

@ -0,0 +1,154 @@
#include "redismodule.h"
#include <errno.h>

int InternalAuth_GetInternalSecret(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);

/* NOTE: The internal secret SHOULD NOT be exposed by any module. This is
done for testing purposes only. */
size_t len;
const char *secret = RedisModule_GetInternalSecret(ctx, &len);
if(secret) {
RedisModule_ReplyWithStringBuffer(ctx, secret, len);
} else {
RedisModule_ReplyWithError(ctx, "ERR no internal secret available");
}
return REDISMODULE_OK;
}

int InternalAuth_InternalCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);

RedisModule_ReplyWithSimpleString(ctx, "OK");
return REDISMODULE_OK;
}

typedef enum {
RM_CALL_REGULAR = 0,
RM_CALL_WITHUSER = 1,
RM_CALL_WITHDETACHEDCLIENT = 2,
RM_CALL_REPLICATED = 3
} RMCallMode;
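/* Format flags passed to RedisModule_Call() below (assumed semantics):
 *   "v" - pass the remaining arguments as an argv vector plus a count,
 *   "C" - run the command on behalf of the user attached to the context, so
 *         ACL checks apply instead of the unrestricted internal user,
 *   "E" - report failures that occur before the command runs (ACL denial,
 *         unknown command) via errno with a NULL reply, which is what the
 *         error branch below inspects. */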

int call_rm_call(RedisModuleCtx *ctx, RedisModuleString **argv, int argc, RMCallMode mode) {
if(argc < 2){
return RedisModule_WrongArity(ctx);
}
RedisModuleCallReply *rep = NULL;
RedisModuleCtx *detached_ctx = NULL;
const char* cmd = RedisModule_StringPtrLen(argv[1], NULL);

switch (mode) {
case RM_CALL_REGULAR:
// Regular call, with the unrestricted user.
rep = RedisModule_Call(ctx, cmd, "vE", argv + 2, argc - 2);
break;
case RM_CALL_WITHUSER:
// Simply call the command with the current client.
rep = RedisModule_Call(ctx, cmd, "vCE", argv + 2, argc - 2);
break;
case RM_CALL_WITHDETACHEDCLIENT:
// Use a context created with the thread-safe-context API
detached_ctx = RedisModule_GetThreadSafeContext(NULL);
if(!detached_ctx){
RedisModule_ReplyWithError(ctx, "ERR failed to create detached context");
return REDISMODULE_ERR;
}
// Dispatch the command with the detached context
rep = RedisModule_Call(detached_ctx, cmd, "vCE", argv + 2, argc - 2);
break;
case RM_CALL_REPLICATED:
rep = RedisModule_Call(ctx, cmd, "vE", argv + 2, argc - 2);
}

if(!rep) {
char err[100];
switch (errno) {
case EACCES:
RedisModule_ReplyWithError(ctx, "ERR NOPERM");
break;
case ENOENT:
RedisModule_ReplyWithError(ctx, "ERR unknown command");
break;
default:
snprintf(err, sizeof(err) - 1, "ERR errno=%d", errno);
RedisModule_ReplyWithError(ctx, err);
break;
}
} else {
RedisModule_ReplyWithCallReply(ctx, rep);
RedisModule_FreeCallReply(rep);
if (mode == RM_CALL_REPLICATED)
RedisModule_ReplicateVerbatim(ctx);
}

if (mode == RM_CALL_WITHDETACHEDCLIENT) {
RedisModule_FreeThreadSafeContext(detached_ctx);
}

return REDISMODULE_OK;
}

int internal_rmcall(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
return call_rm_call(ctx, argv, argc, RM_CALL_REGULAR);
}

int noninternal_rmcall(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
return call_rm_call(ctx, argv, argc, RM_CALL_REGULAR);
}

int noninternal_rmcall_withuser(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
return call_rm_call(ctx, argv, argc, RM_CALL_WITHUSER);
}

int noninternal_rmcall_detachedcontext_withuser(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
return call_rm_call(ctx, argv, argc, RM_CALL_WITHDETACHEDCLIENT);
}

int internal_rmcall_replicated(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
return call_rm_call(ctx, argv, argc, RM_CALL_REPLICATED);
}

/* This function must be present on each Redis module. It is used in order to
 * register the commands into the Redis server. */
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);

if (RedisModule_Init(ctx,"testinternalsecret",1,REDISMODULE_APIVER_1)
== REDISMODULE_ERR) return REDISMODULE_ERR;

/* WARNING: A module should NEVER expose the internal secret - this is for
 * testing purposes only. */
if (RedisModule_CreateCommand(ctx,"internalauth.getinternalsecret",
InternalAuth_GetInternalSecret,"",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;

if (RedisModule_CreateCommand(ctx,"internalauth.internalcommand",
InternalAuth_InternalCommand,"internal",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;
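/* The "internal" command flag, as exercised by the tests that accompany this
 * module: a command registered with it is hidden from regular clients (they
 * get an unknown-command error), and only connections marked internal, such
 * as the replication link, AOF loading, or a client authenticated with the
 * internal secret, may invoke it. */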

if (RedisModule_CreateCommand(ctx,"internalauth.internal_rmcall",
internal_rmcall,"write internal",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;

if (RedisModule_CreateCommand(ctx,"internalauth.noninternal_rmcall",
noninternal_rmcall,"write",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;

if (RedisModule_CreateCommand(ctx,"internalauth.noninternal_rmcall_withuser",
noninternal_rmcall_withuser,"write",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;

if (RedisModule_CreateCommand(ctx,"internalauth.noninternal_rmcall_detachedcontext_withuser",
noninternal_rmcall_detachedcontext_withuser,"write",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;

if (RedisModule_CreateCommand(ctx,"internalauth.internal_rmcall_replicated",
internal_rmcall_replicated,"write internal",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;

return REDISMODULE_OK;
}

@ -165,83 +165,7 @@ int registerBlockCheck(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
return REDISMODULE_OK;
}

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);

if (RedisModule_Init(ctx, "moduleconfigs", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR) return REDISMODULE_ERR;

if (RedisModule_RegisterBoolConfig(ctx, "mutable_bool", 1, REDISMODULE_CONFIG_DEFAULT, getBoolConfigCommand, setBoolConfigCommand, boolApplyFunc, &mutable_bool_val) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
/* Immutable config here. */
if (RedisModule_RegisterBoolConfig(ctx, "immutable_bool", 0, REDISMODULE_CONFIG_IMMUTABLE, getBoolConfigCommand, setBoolConfigCommand, boolApplyFunc, &immutable_bool_val) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
if (RedisModule_RegisterStringConfig(ctx, "string", "secret password", REDISMODULE_CONFIG_DEFAULT, getStringConfigCommand, setStringConfigCommand, NULL, NULL) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}

/* On the stack to make sure we're copying them. */
const char *enum_vals[] = {"none", "five", "one", "two", "four"};
const int int_vals[] = {0, 5, 1, 2, 4};

if (RedisModule_RegisterEnumConfig(ctx, "enum", 1, REDISMODULE_CONFIG_DEFAULT, enum_vals, int_vals, 5, getEnumConfigCommand, setEnumConfigCommand, NULL, NULL) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
if (RedisModule_RegisterEnumConfig(ctx, "flags", 3, REDISMODULE_CONFIG_DEFAULT | REDISMODULE_CONFIG_BITFLAGS, enum_vals, int_vals, 5, getFlagsConfigCommand, setFlagsConfigCommand, NULL, NULL) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
/* Memory config here. */
if (RedisModule_RegisterNumericConfig(ctx, "memory_numeric", 1024, REDISMODULE_CONFIG_DEFAULT | REDISMODULE_CONFIG_MEMORY, 0, 3000000, getNumericConfigCommand, setNumericConfigCommand, longlongApplyFunc, &memval) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
if (RedisModule_RegisterNumericConfig(ctx, "numeric", -1, REDISMODULE_CONFIG_DEFAULT, -5, 2000, getNumericConfigCommand, setNumericConfigCommand, longlongApplyFunc, &longval) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}

/*** unprefixed and aliased configuration ***/
if (RedisModule_RegisterBoolConfig(ctx, "unprefix-bool|unprefix-bool-alias", 1, REDISMODULE_CONFIG_DEFAULT|REDISMODULE_CONFIG_UNPREFIXED,
getBoolConfigCommand, setBoolConfigCommand, NULL, &no_prefix_bool) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
if (RedisModule_RegisterBoolConfig(ctx, "unprefix-noalias-bool", 1, REDISMODULE_CONFIG_DEFAULT|REDISMODULE_CONFIG_UNPREFIXED,
getBoolConfigCommand, setBoolConfigCommand, NULL, &no_prefix_bool2) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
if (RedisModule_RegisterNumericConfig(ctx, "unprefix.numeric|unprefix.numeric-alias", -1, REDISMODULE_CONFIG_DEFAULT|REDISMODULE_CONFIG_UNPREFIXED,
-5, 2000, getNumericConfigCommand, setNumericConfigCommand, NULL, &no_prefix_longval) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
if (RedisModule_RegisterStringConfig(ctx, "unprefix-string|unprefix.string-alias", "secret unprefix", REDISMODULE_CONFIG_DEFAULT|REDISMODULE_CONFIG_UNPREFIXED,
getStringConfigUnprefix, setStringConfigUnprefix, NULL, NULL) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}
if (RedisModule_RegisterEnumConfig(ctx, "unprefix-enum|unprefix-enum-alias", 1, REDISMODULE_CONFIG_DEFAULT|REDISMODULE_CONFIG_UNPREFIXED,
enum_vals, int_vals, 5, getEnumConfigUnprefix, setEnumConfigUnprefix, NULL, NULL) == REDISMODULE_ERR) {
return REDISMODULE_ERR;
}


size_t len;
if (argc && !strcasecmp(RedisModule_StringPtrLen(argv[0], &len), "noload")) {
return REDISMODULE_OK;
} else if (RedisModule_LoadConfigs(ctx) == REDISMODULE_ERR) {
if (strval) {
RedisModule_FreeString(ctx, strval);
strval = NULL;
}
return REDISMODULE_ERR;
}
/* Creates a command which registers configs outside OnLoad() function. */
if (RedisModule_CreateCommand(ctx,"block.register.configs.outside.onload", registerBlockCheck, "write", 0, 0, 0) == REDISMODULE_ERR)
return REDISMODULE_ERR;

return REDISMODULE_OK;
}

int RedisModule_OnUnload(RedisModuleCtx *ctx) {
REDISMODULE_NOT_USED(ctx);
void cleanup(RedisModuleCtx *ctx) {
if (strval) {
RedisModule_FreeString(ctx, strval);
strval = NULL;

@ -250,5 +174,114 @@ int RedisModule_OnUnload(RedisModuleCtx *ctx) {
RedisModule_FreeString(ctx, strval2);
strval2 = NULL;
}
}

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);

if (RedisModule_Init(ctx, "moduleconfigs", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to init module");
return REDISMODULE_ERR;
}

if (RedisModule_RegisterBoolConfig(ctx, "mutable_bool", 1, REDISMODULE_CONFIG_DEFAULT, getBoolConfigCommand, setBoolConfigCommand, boolApplyFunc, &mutable_bool_val) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register mutable_bool");
return REDISMODULE_ERR;
}
/* Immutable config here. */
if (RedisModule_RegisterBoolConfig(ctx, "immutable_bool", 0, REDISMODULE_CONFIG_IMMUTABLE, getBoolConfigCommand, setBoolConfigCommand, boolApplyFunc, &immutable_bool_val) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register immutable_bool");
return REDISMODULE_ERR;
}
if (RedisModule_RegisterStringConfig(ctx, "string", "secret password", REDISMODULE_CONFIG_DEFAULT, getStringConfigCommand, setStringConfigCommand, NULL, NULL) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register string");
return REDISMODULE_ERR;
}

/* On the stack to make sure we're copying them. */
const char *enum_vals[] = {"none", "five", "one", "two", "four"};
const int int_vals[] = {0, 5, 1, 2, 4};

if (RedisModule_RegisterEnumConfig(ctx, "enum", 1, REDISMODULE_CONFIG_DEFAULT, enum_vals, int_vals, 5, getEnumConfigCommand, setEnumConfigCommand, NULL, NULL) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register enum");
return REDISMODULE_ERR;
}
if (RedisModule_RegisterEnumConfig(ctx, "flags", 3, REDISMODULE_CONFIG_DEFAULT | REDISMODULE_CONFIG_BITFLAGS, enum_vals, int_vals, 5, getFlagsConfigCommand, setFlagsConfigCommand, NULL, NULL) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register flags");
return REDISMODULE_ERR;
}
/* Memory config here. */
if (RedisModule_RegisterNumericConfig(ctx, "memory_numeric", 1024, REDISMODULE_CONFIG_DEFAULT | REDISMODULE_CONFIG_MEMORY, 0, 3000000, getNumericConfigCommand, setNumericConfigCommand, longlongApplyFunc, &memval) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register memory_numeric");
return REDISMODULE_ERR;
}
if (RedisModule_RegisterNumericConfig(ctx, "numeric", -1, REDISMODULE_CONFIG_DEFAULT, -5, 2000, getNumericConfigCommand, setNumericConfigCommand, longlongApplyFunc, &longval) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register numeric");
return REDISMODULE_ERR;
}

/*** unprefixed and aliased configuration ***/
if (RedisModule_RegisterBoolConfig(ctx, "unprefix-bool|unprefix-bool-alias", 1, REDISMODULE_CONFIG_DEFAULT|REDISMODULE_CONFIG_UNPREFIXED,
getBoolConfigCommand, setBoolConfigCommand, NULL, &no_prefix_bool) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register unprefix-bool");
return REDISMODULE_ERR;
}
if (RedisModule_RegisterBoolConfig(ctx, "unprefix-noalias-bool", 1, REDISMODULE_CONFIG_DEFAULT|REDISMODULE_CONFIG_UNPREFIXED,
getBoolConfigCommand, setBoolConfigCommand, NULL, &no_prefix_bool2) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register unprefix-noalias-bool");
return REDISMODULE_ERR;
}
if (RedisModule_RegisterNumericConfig(ctx, "unprefix.numeric|unprefix.numeric-alias", -1, REDISMODULE_CONFIG_DEFAULT|REDISMODULE_CONFIG_UNPREFIXED,
-5, 2000, getNumericConfigCommand, setNumericConfigCommand, NULL, &no_prefix_longval) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register unprefix.numeric");
return REDISMODULE_ERR;
}
if (RedisModule_RegisterStringConfig(ctx, "unprefix-string|unprefix.string-alias", "secret unprefix", REDISMODULE_CONFIG_DEFAULT|REDISMODULE_CONFIG_UNPREFIXED,
getStringConfigUnprefix, setStringConfigUnprefix, NULL, NULL) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register unprefix-string");
return REDISMODULE_ERR;
}
if (RedisModule_RegisterEnumConfig(ctx, "unprefix-enum|unprefix-enum-alias", 1, REDISMODULE_CONFIG_DEFAULT|REDISMODULE_CONFIG_UNPREFIXED,
enum_vals, int_vals, 5, getEnumConfigUnprefix, setEnumConfigUnprefix, NULL, NULL) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register unprefix-enum");
return REDISMODULE_ERR;
}

RedisModule_Log(ctx, "debug", "Registered configuration");
size_t len;
if (argc && !strcasecmp(RedisModule_StringPtrLen(argv[0], &len), "noload")) {
return REDISMODULE_OK;
} else if (argc && !strcasecmp(RedisModule_StringPtrLen(argv[0], &len), "override-default")) {
if (RedisModule_LoadDefaultConfigs(ctx) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to load default configuration");
goto err;
}
// simulate configuration values being overwritten by the command line
RedisModule_Log(ctx, "debug", "Overriding configuration values");
if (strval) RedisModule_FreeString(ctx, strval);
strval = RedisModule_CreateString(ctx, "foo", 3);
longval = memval = 123;
}
RedisModule_Log(ctx, "debug", "Loading configuration");
if (RedisModule_LoadConfigs(ctx) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to load configuration");
goto err;
}
/* Creates a command which registers configs outside OnLoad() function. */
if (RedisModule_CreateCommand(ctx,"block.register.configs.outside.onload", registerBlockCheck, "write", 0, 0, 0) == REDISMODULE_ERR) {
RedisModule_Log(ctx, "warning", "Failed to register command");
goto err;
}

return REDISMODULE_OK;
err:
cleanup(ctx);
return REDISMODULE_ERR;
}

int RedisModule_OnUnload(RedisModuleCtx *ctx) {
REDISMODULE_NOT_USED(ctx);
cleanup(ctx);
return REDISMODULE_OK;
}

@ -122,7 +122,7 @@ proc assert_aof_manifest_content {manifest_path content} {
assert_equal [llength $lines] [llength $content]

for { set i 0 } { $i < [llength $lines] } {incr i} {
assert_equal [lindex $lines $i] [lindex $content $i]
assert {[string first [lindex $content $i] [lindex $lines $i]] != -1}
}
}

@ -5,8 +5,9 @@ proc cluster_config_consistent {} {
for {set j 0} {$j < [llength $::servers]} {incr j} {
if {$j == 0} {
set base_cfg [R $j cluster slots]
set base_secret [R $j debug internal_secret]
} else {
if {[R $j cluster slots] != $base_cfg} {
if {[R $j cluster slots] != $base_cfg || [R $j debug internal_secret] != $base_secret} {
return 0
}
}

@ -0,0 +1,71 @@
proc num_unique_secrets {num_nodes} {
set secrets [list]
for {set i 0} {$i < $num_nodes} {incr i} {
lappend secrets [R $i debug internal_secret]
}
set num_secrets [llength [lsort -unique $secrets]]
return $num_secrets
}

proc wait_for_secret_sync {maxtries delay num_nodes} {
wait_for_condition $maxtries $delay {
[num_unique_secrets $num_nodes] eq 1
} else {
fail "Failed waiting for secrets to sync"
}
}

start_cluster 10 10 {tags {external:skip cluster}} {
test "Test internal secret sync" {
wait_for_secret_sync 50 100 20
}

set first_shard_host [srv 0 host]
set first_shard_port [srv 0 port]

if {$::verbose} {
puts {cluster internal secret:}
puts [R 1 debug internal_secret]
}

test "Join a node to the cluster and make sure it gets the same secret" {
start_server {tags {"external:skip"} overrides {cluster-enabled {yes}}} {
r cluster meet $first_shard_host $first_shard_port
wait_for_condition 50 100 {
[r debug internal_secret] eq [R 1 debug internal_secret]
} else {
puts [r debug internal_secret]
puts [R 1 debug internal_secret]
fail "Secrets not match"
}
}
}

test "Join another cluster, make sure clusters sync on the internal secret" {
start_server {tags {"external:skip"} overrides {cluster-enabled {yes}}} {
set new_shard_host [srv 0 host]
set new_shard_port [srv 0 port]
start_server {tags {"external:skip"} overrides {cluster-enabled {yes}}} {
r cluster meet $new_shard_host $new_shard_port
wait_for_condition 50 100 {
[r debug internal_secret] eq [r -1 debug internal_secret]
} else {
puts [r debug internal_secret]
puts [r -1 debug internal_secret]
fail "Secrets not match"
}
if {$::verbose} {
puts {new cluster internal secret:}
puts [r -1 debug internal_secret]
}
r cluster meet $first_shard_host $first_shard_port
wait_for_secret_sync 50 100 22
if {$::verbose} {
puts {internal secret after join to bigger cluster:}
puts [r -1 debug internal_secret]
}
}
}
}
}

@ -334,6 +334,31 @@ proc test_all_keysizes { {replMode 0} } {
run_cmd_verify_hist {$server HSET h2 2 2} {db0_HASH:2=1}
run_cmd_verify_hist {$server HDEL h2 1} {db0_HASH:1=1}
run_cmd_verify_hist {$server HDEL h2 2} {}
# HGETDEL
run_cmd_verify_hist {$server FLUSHALL} {}
run_cmd_verify_hist {$server HSETEX h2 FIELDS 1 1 1} {db0_HASH:1=1}
run_cmd_verify_hist {$server HSETEX h2 FIELDS 1 2 2} {db0_HASH:2=1}
run_cmd_verify_hist {$server HGETDEL h2 FIELDS 1 1} {db0_HASH:1=1}
run_cmd_verify_hist {$server HGETDEL h2 FIELDS 1 3} {db0_HASH:1=1}
run_cmd_verify_hist {$server HGETDEL h2 FIELDS 1 2} {}
# HGETEX
run_cmd_verify_hist {$server FLUSHALL} {}
run_cmd_verify_hist {$server HSETEX h1 FIELDS 2 f1 1 f2 1} {db0_HASH:2=1}
run_cmd_verify_hist {$server HGETEX h1 PXAT 1 FIELDS 1 f1} {db0_HASH:1=1}
run_cmd_verify_hist {$server HSETEX h1 FIELDS 1 f3 1} {db0_HASH:2=1}
run_cmd_verify_hist {$server HGETEX h1 PX 50 FIELDS 1 f2} {db0_HASH:2=1}
run_cmd_verify_hist {} {db0_HASH:1=1} 1
run_cmd_verify_hist {$server HGETEX h1 PX 50 FIELDS 1 f3} {db0_HASH:1=1}
run_cmd_verify_hist {} {} 1
# HSETEX
run_cmd_verify_hist {$server FLUSHALL} {}
run_cmd_verify_hist {$server HSETEX h1 FIELDS 2 f1 1 f2 1} {db0_HASH:2=1}
run_cmd_verify_hist {$server HSETEX h1 PXAT 1 FIELDS 1 f1 v1} {db0_HASH:1=1}
run_cmd_verify_hist {$server HSETEX h1 FIELDS 1 f3 1} {db0_HASH:2=1}
run_cmd_verify_hist {$server HSETEX h1 PX 50 FIELDS 1 f2 v2} {db0_HASH:2=1}
run_cmd_verify_hist {} {db0_HASH:1=1} 1
run_cmd_verify_hist {$server HSETEX h1 PX 50 FIELDS 1 f3 v3} {db0_HASH:1=1}
run_cmd_verify_hist {} {} 1
# HMSET
run_cmd_verify_hist {$server FLUSHALL} {}
run_cmd_verify_hist {$server HMSET h1 1 1 2 2 3 3} {db0_HASH:2=1}

@ -1,3 +1,16 @@
#
# Copyright (c) 2009-Present, Redis Ltd.
# All rights reserved.
#
# Copyright (c) 2024-present, Valkey contributors.
# All rights reserved.
#
# Licensed under your choice of the Redis Source Available License 2.0
# (RSALv2) or the Server Side Public License v1 (SSPLv1).
#
# Portions of this file are available under BSD3 terms; see REDISCONTRIBUTIONS for more information.
#

proc test_memory_efficiency {range} {
r flushall
set rd [redis_deferring_client]

@ -37,15 +50,19 @@ start_server {tags {"memefficiency external:skip"}} {
}

run_solo {defrag} {
proc wait_for_defrag_stop {maxtries delay} {
proc wait_for_defrag_stop {maxtries delay {expect_frag 0}} {
wait_for_condition $maxtries $delay {
[s active_defrag_running] eq 0
[s active_defrag_running] eq 0 && ($expect_frag == 0 || [s allocator_frag_ratio] <= $expect_frag)
} else {
after 120 ;# serverCron only updates the info once in 100ms
puts [r info memory]
puts [r info stats]
puts [r memory malloc-stats]
fail "defrag didn't stop."
if {$expect_frag != 0} {
fail "defrag didn't stop or failed to achieve expected frag ratio ([s allocator_frag_ratio] > $expect_frag)"
} else {
fail "defrag didn't stop."
}
}
}

@ -102,7 +119,7 @@ run_solo {defrag} {
r config set active-defrag-cycle-max 75

# Wait for the active defrag to stop working.
wait_for_defrag_stop 2000 100
wait_for_defrag_stop 2000 100 1.1

# Test the fragmentation is lower.
after 120 ;# serverCron only updates the info once in 100ms

@ -124,7 +141,6 @@ run_solo {defrag} {
puts [r latency latest]
puts [r latency history active-defrag-cycle]
}
assert {$frag < 1.1}
# due to high fragmentation, 100hz, and active-defrag-cycle-max set to 75,
# we expect max latency to be not much higher than 7.5ms but due to rare slowness threshold is set higher
if {!$::no_latency} {

@ -142,6 +158,11 @@ run_solo {defrag} {
# reset stats and load the AOF file
r config resetstat
r config set key-load-delay -25 ;# sleep on average 1/25 usec
# Note: This test is checking if defrag is working DURING AOF loading (while
# timers are not active). So we don't give any extra time, and we deactivate
# defrag immediately after the AOF loading is complete. During loading,
# defrag will get invoked less often, causing starvation prevention. We
# should expect longer latency measurements.
r debug loadaof
r config set activedefrag no
# measure hits and misses right after aof loading

@ -246,7 +267,7 @@ run_solo {defrag} {
}

# wait for the active defrag to stop working
wait_for_defrag_stop 500 100
wait_for_defrag_stop 500 100 1.05

# test the fragmentation is lower
after 120 ;# serverCron only updates the info once in 100ms

@ -256,7 +277,6 @@ run_solo {defrag} {
puts "frag [s allocator_frag_ratio]"
puts "frag_bytes [s allocator_frag_bytes]"
}
assert_lessthan_equal [s allocator_frag_ratio] 1.05
}
# Flush all scripts to make sure we don't crash after defragging them
r script flush sync

@ -362,7 +382,7 @@ run_solo {defrag} {
}

# wait for the active defrag to stop working
wait_for_defrag_stop 500 100
wait_for_defrag_stop 500 100 1.1

# test the fragmentation is lower
after 120 ;# serverCron only updates the info once in 100ms

@ -384,7 +404,6 @@ run_solo {defrag} {
puts [r latency latest]
puts [r latency history active-defrag-cycle]
}
assert {$frag < 1.1}
# due to high fragmentation, 100hz, and active-defrag-cycle-max set to 75,
# we expect max latency to be not much higher than 7.5ms but due to rare slowness threshold is set higher
if {!$::no_latency} {

@ -464,7 +483,7 @@ run_solo {defrag} {
}

# wait for the active defrag to stop working
wait_for_defrag_stop 500 100
wait_for_defrag_stop 500 100 1.05

# test the fragmentation is lower
after 120 ;# serverCron only updates the info once in 100ms

@ -474,7 +493,6 @@ run_solo {defrag} {
puts "frag [s allocator_frag_ratio]"
puts "frag_bytes [s allocator_frag_bytes]"
}
assert_lessthan_equal [s allocator_frag_ratio] 1.05
}

# Publishes some message to all the pubsub clients to make sure that

@ -572,7 +590,7 @@ run_solo {defrag} {
}

# wait for the active defrag to stop working
wait_for_defrag_stop 500 100
wait_for_defrag_stop 500 100 1.5

# test the fragmentation is lower
after 120 ;# serverCron only updates the info once in 100ms

@ -582,7 +600,6 @@ run_solo {defrag} {
puts "frag [s allocator_frag_ratio]"
puts "frag_bytes [s allocator_frag_bytes]"
}
assert_lessthan_equal [s allocator_frag_ratio] 1.5
}
}

@ -682,7 +699,13 @@ run_solo {defrag} {
}

# wait for the active defrag to stop working
wait_for_defrag_stop 500 100
if {$io_threads == 1} {
wait_for_defrag_stop 500 100 1.05
} else {
# TODO: When multithreading is enabled, argv may be created in the io thread
# and kept in the main thread, which can cause fragmentation to become worse.
wait_for_defrag_stop 500 100 1.1
}

# test the fragmentation is lower
after 120 ;# serverCron only updates the info once in 100ms

@ -692,14 +715,6 @@ run_solo {defrag} {
puts "frag [s allocator_frag_ratio]"
puts "frag_bytes [s allocator_frag_bytes]"
}

if {$io_threads == 1} {
assert_lessthan_equal [s allocator_frag_ratio] 1.05
} else {
# TODO: When multithreading is enabled, argv may be created in the io thread
# and kept in the main thread, which can cause fragmentation to become worse.
assert_lessthan_equal [s allocator_frag_ratio] 1.1
}
}
}

@ -763,7 +778,7 @@ run_solo {defrag} {
}

# wait for the active defrag to stop working
wait_for_defrag_stop 500 100
wait_for_defrag_stop 500 100 1.1

# test the fragmentation is lower
after 120 ;# serverCron only updates the info once in 100ms

@ -789,7 +804,6 @@ run_solo {defrag} {
puts [r latency history active-defrag-cycle]
puts [r memory malloc-stats]
}
assert {$frag < 1.1}
# due to high fragmentation, 100hz, and active-defrag-cycle-max set to 75,
# we expect max latency to be not much higher than 7.5ms but due to rare slowness threshold is set higher
if {!$::no_latency} {

@ -884,7 +898,7 @@ run_solo {defrag} {
}

# wait for the active defrag to stop working
wait_for_defrag_stop 500 100
wait_for_defrag_stop 500 100 1.1

# test the fragmentation is lower
after 120 ;# serverCron only updates the info once in 100ms

@ -896,7 +910,6 @@ run_solo {defrag} {
puts "hits: $hits"
puts "misses: $misses"
}
assert {$frag < 1.1}
assert {$misses < 10000000} ;# when defrag doesn't stop, we have some 30m misses, when it does, we have 2m misses
}

@ -44,3 +44,27 @@ start_server {tags {"modules external:skip"} overrides {enable-module-command no
assert_error "ERR *MODULE command not allowed*" {r module load $testmodule}
}
}

start_server {tags {"modules external:skip"} overrides {enable-debug-command no}} {
r module load $testmodule

test {debug command disabled} {
assert_equal {no} [r test.candebug]
}
}

start_server {tags {"modules external:skip"} overrides {enable-debug-command yes}} {
r module load $testmodule

test {debug command enabled} {
assert_equal {yes} [r test.candebug]
}
}

start_server {tags {"modules external:skip"} overrides {enable-debug-command local}} {
r module load $testmodule

test {debug commands are enabled for local connection} {
assert_equal {yes} [r test.candebug]
}
}

@ -136,4 +136,100 @@ start_server {tags {"modules"}} {

assert_equal 1 [llength $keys]
}

if {[string match {*jemalloc*} [s mem_allocator]] && [r debug mallctl arenas.page] <= 8192} {
test {Reduce defrag CPU usage when module data can't be defragged} {
r flushdb
r config set hz 100
r config set activedefrag no
r config set active-defrag-threshold-lower 5
r config set active-defrag-cycle-min 25
r config set active-defrag-cycle-max 75
r config set active-defrag-ignore-bytes 100kb

# Populate memory with interleaving fields of the same size.
set n 20000
set dummy "[string repeat x 400]"
set rd [redis_deferring_client]
for {set i 0} {$i < $n} {incr i} { $rd datatype.set k$i 1 $dummy }
for {set i 0} {$i < [expr $n]} {incr i} { $rd read } ;# Discard replies

after 120 ;# serverCron only updates the info once in 100ms
if {$::verbose} {
puts "used [s allocator_allocated]"
puts "rss [s allocator_active]"
puts "frag [s allocator_frag_ratio]"
puts "frag_bytes [s allocator_frag_bytes]"
}
assert_lessthan [s allocator_frag_ratio] 1.05

for {set i 0} {$i < $n} {incr i 2} { $rd del k$i }
for {set j 0} {$j < $n} {incr j 2} { $rd read } ; # Discard del replies
after 120 ;# serverCron only updates the info once in 100ms
assert_morethan [s allocator_frag_ratio] 1.4

catch {r config set activedefrag yes} e
if {[r config get activedefrag] eq "activedefrag yes"} {
# wait for the active defrag to start working (decision once a second)
wait_for_condition 50 100 {
[s total_active_defrag_time] ne 0
} else {
after 120 ;# serverCron only updates the info once in 100ms
puts [r info memory]
puts [r info stats]
puts [r memory malloc-stats]
fail "defrag not started."
}
assert_morethan [s allocator_frag_ratio] 1.4

# The CPU usage of defrag will drop to active-defrag-cycle-min
wait_for_condition 1000 50 {
[s active_defrag_running] == 25
} else {
fail "Unable to reduce the defragmentation speed."
}

# Fuzzy test to restore defragmentation speed to normal
set end_time [expr {[clock seconds] + 10}]
set speed_restored 0
while {[clock seconds] < $end_time} {
switch [expr {int(rand() * 3)}] {
0 {
# Randomly delete a key
set random_key [r RANDOMKEY]
if {$random_key != ""} {
r DEL $random_key
}
}
1 {
# Randomly overwrite a key
set random_key [r RANDOMKEY]
if {$random_key != ""} {
r datatype.set $random_key 1 $dummy
}
}
2 {
# Randomly generate a new key
set random_key "key_[expr {int(rand() * 10000)}]"
r datatype.set $random_key 1 $dummy
}
}

# Wait for defragmentation speed to restore.
if {[s active_defrag_running] > 25} {
set speed_restored 1
break;
}
}
assert_equal $speed_restored 1

# After the traffic disappears, the defragmentation speed will decrease again.
wait_for_condition 1000 50 {
[s active_defrag_running] == 25
} else {
fail "Unable to reduce the defragmentation speed after traffic disappears."
}
}
}
}
}

@ -1,7 +1,7 @@
set testmodule [file normalize tests/modules/defragtest.so]

start_server {tags {"modules"} overrides {{save ""}}} {
r module load $testmodule 10000
r module load $testmodule 50000
r config set hz 100
r config set active-defrag-ignore-bytes 1
r config set active-defrag-threshold-lower 0

@ -41,14 +41,32 @@ start_server {tags {"modules"} overrides {{save ""}}} {
}

test {Module defrag: global defrag works} {
r config set activedefrag no
wait_for_condition 100 50 {
[s active_defrag_running] eq 0
} else {
fail "Unable to wait for active defrag to stop"
}

r flushdb
r frag.resetstats
r frag.create_frag_global
r config set activedefrag yes

wait_for_condition 100 50 {
[getInfoProperty [r info defragtest_stats] defragtest_defrag_ended] > 0
} else {
fail "Unable to wait for a complete defragmentation cycle to finish"
}

after 2000
set info [r info defragtest_stats]
assert {[getInfoProperty $info defragtest_global_attempts] > 0}
assert {[getInfoProperty $info defragtest_global_strings_attempts] > 0}
assert {[getInfoProperty $info defragtest_global_strings_pauses] > 0}
assert {[getInfoProperty $info defragtest_global_dicts_attempts] > 0}
assert {[getInfoProperty $info defragtest_global_dicts_defragged] > 0}
assert_morethan [getInfoProperty $info defragtest_defrag_started] 0
assert_morethan [getInfoProperty $info defragtest_defrag_ended] 0
assert_morethan [getInfoProperty $info defragtest_global_dicts_resumes] [getInfoProperty $info defragtest_defrag_ended]
}
}
}

@ -0,0 +1,287 @@
tags {modules} {
set testmodule [file normalize tests/modules/internalsecret.so]

set modules [list loadmodule $testmodule]
start_cluster 1 0 [list config_lines $modules] {
set r [srv 0 client]

test {Internal command without internal connection fails as an unknown command} {
assert_error {*unknown command*with args beginning with:*} {r internalauth.internalcommand}
}

test {Wrong internalsecret fails authentication} {
assert_error {*WRONGPASS invalid internal password*} {r auth "internal connection" 123}
}

test {Internal connection basic flow} {
# A non-internal connection cannot execute internal commands, and they
# seem non-existent to it.
assert_error {*unknown command*} {r internalauth.internalcommand}

# Authenticate as an internal connection
assert_equal {OK} [r debug mark-internal-client]

# Now, internal commands are available.
assert_equal {OK} [r internalauth.internalcommand]
}
}

start_server {} {
r module load $testmodule

test {Internal secret is not available in non-cluster mode} {
# In non-cluster mode, the internal secret does not exist, nor is the
# internal auth command available
assert_error {*unknown command*} {r internalauth.internalcommand}
assert_error {*ERR no internal secret available*} {r internalauth.getinternalsecret}
assert_error {*Cannot authenticate as an internal connection on non-cluster instances*} {r auth "internal connection" somepassword}
}

test {marking and un-marking a connection as internal via a debug command} {
# After marking the connection as an internal one via a debug command,
# internal commands succeed.
r debug mark-internal-client
assert_equal {OK} [r internalauth.internalcommand]

# After unmarking the connection, internal commands fail.
r debug mark-internal-client unmark
assert_error {*unknown command*} {r internalauth.internalcommand}
}
}

start_server {} {
r module load $testmodule

test {Test `COMMAND *` commands with\without internal connections} {
# ------------------ Non-internal connection ------------------
# `COMMAND DOCS <cmd>` returns empty response.
assert_equal {} [r command docs internalauth.internalcommand]

# `COMMAND INFO <cmd>` should reply with null for the internal command
assert_equal {{}} [r command info internalauth.internalcommand]

# `COMMAND GETKEYS/GETKEYSANDFLAGS <cmd> <args>` returns an invalid command error
assert_error {*Invalid command specified*} {r command getkeys internalauth.internalcommand}
assert_error {*Invalid command specified*} {r command getkeysandflags internalauth.internalcommand}

# -------------------- Internal connection --------------------
# Non-empty responses once the connection is marked internal.
assert_equal {OK} [r debug mark-internal-client]

# `COMMAND DOCS <cmd>` returns a correct response.
assert_match {*internalauth.internalcommand*} [r command docs internalauth.internalcommand]

# `COMMAND INFO <cmd>` should reply with a full response for the internal command
assert_match {*internalauth.internalcommand*} [r command info internalauth.internalcommand]

# `COMMAND GETKEYS/GETKEYSANDFLAGS <cmd> <args>` returns a key error (not related to the internal connection)
assert_error {*ERR The command has no key arguments*} {r command getkeys internalauth.internalcommand}
assert_error {*ERR The command has no key arguments*} {r command getkeysandflags internalauth.internalcommand}
}
}

start_server {} {
r module load $testmodule

test {No authentication needed for internal connections} {
# Authenticate with a user that does not have permissions to any command
r acl setuser David on >123 &* ~* -@all +auth +internalauth.getinternalsecret +debug +internalauth.internalcommand
assert_equal {OK} [r auth David 123]

assert_equal {OK} [r debug mark-internal-client]
# Execute a command for which David does not have permission
assert_equal {OK} [r internalauth.internalcommand]
}
}

start_server {} {
r module load $testmodule

test {RM_Call of internal commands without the user flag succeeds for all connections} {
# Succeeds even before authenticating as an internal connection.
assert_equal {OK} [r internalauth.noninternal_rmcall internalauth.internalcommand]
}

test {Internal commands via RM_Call succeed for non-internal connections depending on the user flag} {
# A non-internal connection that calls rm_call of an internal command
# without the user flag should succeed.
assert_equal {OK} [r internalauth.noninternal_rmcall internalauth.internalcommand]
|
||||
|
||||
# A non-internal connection that calls rm_call of an internal command
|
||||
# with a user flag should fail.
|
||||
assert_error {*unknown command*} {r internalauth.noninternal_rmcall_withuser internalauth.internalcommand}
|
||||
}
|
||||
|
||||
test {Internal connections override the user flag} {
|
||||
# Authenticate as an internal connection
|
||||
assert_equal {OK} [r debug mark-internal-client]
|
||||
|
||||
assert_equal {OK} [r internalauth.noninternal_rmcall internalauth.internalcommand]
|
||||
assert_equal {OK} [r internalauth.noninternal_rmcall_withuser internalauth.internalcommand]
|
||||
}
|
||||
}
|
||||
|
||||
start_server {} {
r module load $testmodule

test {RM_Call with the user-flag after setting thread-safe-context from an internal connection should fail} {
# Authenticate as an internal connection
assert_equal {OK} [r debug mark-internal-client]

# New threadSafeContexts do not inherit the internal flag.
assert_error {*unknown command*} {r internalauth.noninternal_rmcall_detachedcontext_withuser internalauth.internalcommand}
}
}

start_server {} {
r module load $testmodule

r config set appendonly yes
r config set appendfsync always
waitForBgrewriteaof r

test {AOF executes internal commands successfully} {
# Authenticate as an internal connection
assert_equal {OK} [r debug mark-internal-client]

# Call an internal write command
assert_equal {OK} [r internalauth.internal_rmcall_replicated set x 5]

# Reload the server from the AOF
r debug loadaof

# Check that the internal command was executed successfully
assert_equal {5} [r get x]
}
}

start_server {} {
r module load $testmodule

test {Internal commands are not allowed from scripts} {
# Internal commands are not allowed from scripts
assert_error {*not allowed from script*} {r eval {redis.call('internalauth.internalcommand')} 0}

# Even after authenticating as an internal connection
assert_equal {OK} [r debug mark-internal-client]
assert_error {*not allowed from script*} {r eval {redis.call('internalauth.internalcommand')} 0}
}
}

start_cluster 1 1 [list config_lines $modules] {
set master [srv 0 client]
set slave [srv -1 client]

test {Setup master} {
# Authenticate as an internal connection
set reply [$master internalauth.getinternalsecret]
assert_equal {OK} [$master auth "internal connection" $reply]
}

test {Slaves successfully execute internal commands from the replication link} {
assert {[s -1 role] eq {slave}}
wait_for_condition 1000 50 {
[s -1 master_link_status] eq {up}
} else {
fail "Master link status is not up"
}

# Execute an internal command on the master that sets `x` to `5`.
assert_equal {OK} [$master internalauth.internal_rmcall_replicated set x 5]

# Wait for replica to have the key
$slave readonly
wait_for_condition 1000 50 {
[$slave exists x] eq "1"
} else {
fail "Test key was not replicated"
}

# See that the slave has the same value for `x`.
assert_equal {5} [$slave get x]
}
}

start_server {} {
r module load $testmodule

test {Internal commands are not reported in the monitor output for non-internal connections when unsuccessful} {
set rd [redis_deferring_client]
$rd monitor
$rd read ; # Discard the OK
assert_error {*unknown command*} {r internalauth.internalcommand}

# Assert that the monitor output does not contain the internal command
r ping
assert_match {*ping*} [$rd read]
$rd close
}

test {Internal commands are not reported in the monitor output for non-internal connections when successful} {
# Authenticate as an internal connection
assert_equal {OK} [r debug mark-internal-client]

set rd [redis_deferring_client]
$rd monitor
$rd read ; # Discard the OK
assert_equal {OK} [r internalauth.internalcommand]

# Assert that the monitor output does not contain the internal command
r ping
assert_match {*ping*} [$rd read]
$rd close
}

test {Internal commands are reported in the monitor output for internal connections} {
set rd [redis_deferring_client]
$rd debug mark-internal-client
assert_equal {OK} [$rd read]
$rd monitor
$rd read ; # Discard the OK
assert_equal {OK} [r internalauth.internalcommand]

# Assert that the monitor output contains the internal command
assert_match {*internalauth.internalcommand*} [$rd read]
$rd close
}

test {Internal commands are reported in the slowlog} {
# Set up the slowlog to log all commands
r config set slowlog-log-slower-than 0

# Execute an internal command
r slowlog reset
r internalauth.internalcommand

# The slowlog should contain the internal command
set log [r slowlog get 1]
assert_match {*internalauth.internalcommand*} $log
}

test {Internal commands are reported in the latency report} {
# The latency report should contain the internal command
set report [r latency histogram internalauth.internalcommand]
assert_match {*internalauth.internalcommand*} $report
}

test {Internal commands are reported in the command stats report} {
# The INFO report should contain the internal command for both internal
# and non-internal connections.
set report [r info commandstats]
assert_match {*internalauth.internalcommand*} $report

set report [r info latencystats]
assert_match {*internalauth.internalcommand*} $report

# Un-mark the connection as internal
r debug mark-internal-client unmark
assert_error {*unknown command*} {r internalauth.internalcommand}

# We still expect to see the internal command in the report
set report [r info commandstats]
assert_match {*internalauth.internalcommand*} $report

set report [r info latencystats]
assert_match {*internalauth.internalcommand*} $report
}
}
}
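
# To summarize the visibility rules exercised above: internal commands are
# hidden from COMMAND introspection and from MONITOR on non-internal
# connections, but they still surface in the server-wide reports. A sketch of
# how an operator could audit them from any connection:
#   r slowlog get
#   r info commandstats   ;# expected to include a cmdstat_internalauth.internalcommand entry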

@ -319,7 +319,7 @@ start_server {tags {"modules"}} {

# missing LoadConfigs call
catch {exec src/redis-server --loadmodule "$testmodule" noload --moduleconfigs.string "hello"} err
assert_match {*Module Configurations were not set, likely a missing LoadConfigs call. Unloading the module.*} $err
assert_match {*Module Configurations were not set, missing LoadConfigs call. Unloading the module.*} $err

# successful
start_server [list overrides [list loadmodule "$testmodule" moduleconfigs.string "bootedup" moduleconfigs.enum two moduleconfigs.flags "two four"]] {

@ -342,5 +342,45 @@ start_server {tags {"modules"}} {
assert_equal [r config get unprefix-enum-alias] "unprefix-enum-alias one"
}
}

test {loadmodule CONFIG values take precedence over module loadex ARGS values} {
# Load module with conflicting CONFIG and ARGS values
r module loadex $testmodule \
CONFIG moduleconfigs.string goo \
CONFIG moduleconfigs.memory_numeric 2mb \
ARGS override-default

# Verify CONFIG values took precedence over the values that override-default would have caused the module to set
assert_equal [r config get moduleconfigs.string] "moduleconfigs.string goo"
assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 2097152"

r module unload moduleconfigs
}

# Test: Ensure that modified configuration values from ARGS are correctly written to the config file
test {Modified ARGS values are persisted after config rewrite when set through CONFIG commands} {
# Load module with non-default ARGS values
r module loadex $testmodule ARGS override-default

# Verify the initial values were overwritten
assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 123"
assert_equal [r config get moduleconfigs.string] "moduleconfigs.string foo"

# Set new values to simulate user configuration changes
r config set moduleconfigs.memory_numeric 1mb
r config set moduleconfigs.string "modified_value"

# Verify that the changes took effect
assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 1048576"
assert_equal [r config get moduleconfigs.string] "moduleconfigs.string modified_value"

# Perform a config rewrite
r config rewrite

restart_server 0 true false
assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 1048576"
assert_equal [r config get moduleconfigs.string] "moduleconfigs.string modified_value"
r module unload moduleconfigs
}
}
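
# A sketch of what the rewritten config file might contain after the CONFIG
# REWRITE above (the path is illustrative, and CONFIG REWRITE may normalize
# values, e.g. 1mb as 1048576):
#   loadmodule /path/to/moduleconfigs.so
#   moduleconfigs.memory_numeric 1048576
#   moduleconfigs.string "modified_value"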

@ -414,6 +414,58 @@ start_server {tags {"pubsub network"}} {
assert_equal "pmessage * __keyspace@${db}__:myhash del" [$rd1 read]
r debug set-active-expire 1


# Test HSETEX, HGETEX and HGETDEL notifications
r hsetex myhash FIELDS 3 f4 v4 f5 v5 f6 v6
assert_equal "pmessage * __keyspace@${db}__:myhash hset" [$rd1 read]

# hgetex sets a TTL in the past
r hgetex myhash PX 0 FIELDS 1 f4
assert_equal "pmessage * __keyspace@${db}__:myhash hdel" [$rd1 read]

# hgetex sets a TTL
r hgetex myhash EXAT [expr {[clock seconds] + 999999}] FIELDS 1 f5
assert_equal "pmessage * __keyspace@${db}__:myhash hexpire" [$rd1 read]

# hgetex persists a field
r hgetex myhash PERSIST FIELDS 1 f5
assert_equal "pmessage * __keyspace@${db}__:myhash hpersist" [$rd1 read]

# hgetdel deletes a field
r hgetdel myhash FIELDS 1 f5
assert_equal "pmessage * __keyspace@${db}__:myhash hdel" [$rd1 read]

# hsetex sets a field and its expiry time
r hsetex myhash EXAT [expr {[clock seconds] + 999999}] FIELDS 1 f6 v6
assert_equal "pmessage * __keyspace@${db}__:myhash hset" [$rd1 read]
assert_equal "pmessage * __keyspace@${db}__:myhash hexpire" [$rd1 read]

# hsetex sets a field with a TTL in the past
r hsetex myhash PX 0 FIELDS 1 f6 v6
assert_equal "pmessage * __keyspace@${db}__:myhash hset" [$rd1 read]
assert_equal "pmessage * __keyspace@${db}__:myhash hdel" [$rd1 read]
assert_equal "pmessage * __keyspace@${db}__:myhash del" [$rd1 read]

# Test that we get an `hexpired` notification when a hash field is
# removed by lazy expiry via the hgetdel command
r debug set-active-expire 0
r hsetex myhash PX 10 FIELDS 1 f1 v1
assert_equal "pmessage * __keyspace@${db}__:myhash hset" [$rd1 read]
assert_equal "pmessage * __keyspace@${db}__:myhash hexpire" [$rd1 read]

# Set another field
r hsetex myhash FIELDS 1 f2 v2
assert_equal "pmessage * __keyspace@${db}__:myhash hset" [$rd1 read]
# Wait until the field expires
after 20
r hgetdel myhash FIELDS 1 f1
assert_equal "pmessage * __keyspace@${db}__:myhash hexpired" [$rd1 read]
# Get and delete the only remaining field
r hgetdel myhash FIELDS 1 f2
assert_equal "pmessage * __keyspace@${db}__:myhash hdel" [$rd1 read]
assert_equal "pmessage * __keyspace@${db}__:myhash del" [$rd1 read]
r debug set-active-expire 1

$rd1 close
} {0} {needs:debug}
} ;# foreach
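
# A minimal sketch of consuming these events outside the test harness,
# assuming keyspace notifications are enabled broadly with KEA:
#   r config set notify-keyspace-events KEA
#   set rd [redis_deferring_client]
#   $rd psubscribe "__keyspace@*__:myhash"
#   $rd read                                ;# subscription confirmation
#   r hsetex myhash EX 100 FIELDS 1 f v     ;# emits hset, then hexpire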

@ -109,9 +109,25 @@ proc test_scan {type} {

after 2

assert_error "*unknown type name*" {r scan 0 type "string1"}
# TODO: remove this in redis 8.0
set cur 0
set keys {}
while 1 {
set res [r scan $cur type "string1"]
set cur [lindex $res 0]
set k [lindex $res 1]
lappend keys {*}$k
if {$cur == 0} break
}

assert_equal 0 [llength $keys]
# make sure that expired keys have been removed by the scan command
assert_equal 1000 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]

# TODO: uncomment in redis 8.0
#assert_error "*unknown type name*" {r scan 0 type "string1"}
# expired keys will not be touched by the scan command
assert_equal 1001 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
#assert_equal 1001 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
r debug set-active-expire 1
} {OK} {needs:debug}

@ -175,8 +191,11 @@ proc test_scan {type} {

assert_equal 1000 [llength $keys]

# make sure that expired keys have been removed by the scan command
assert_equal 1000 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
# TODO: uncomment in redis 8.0
# make sure that only expired keys matching the type will be removed by the scan command
assert_equal 1001 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
#assert_equal 1001 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]

r debug set-active-expire 1
} {OK} {needs:debug}

@ -855,6 +855,430 @@ start_server {tags {"external:skip needs:debug"}} {
assert_equal [r HINCRBYFLOAT h1 f1 2.5] 12.5
assert_range [r HPTTL h1 FIELDS 1 f1] 1 20
}

test "HGETDEL - delete field with ttl ($type)" {
r debug set-active-expire 0
r del h1

# Test deleting the only field in a hash. Due to lazy expiry,
# the reply will be null and both the field and the key will be deleted.
r hsetex h1 PX 5 FIELDS 1 f1 10
after 15
assert_equal [r hgetdel h1 fields 1 f1] "{}"
assert_equal [r exists h1] 0

# Test deleting one field among many. f2 will expire lazily.
r hsetex h1 FIELDS 3 f1 10 f2 20 f3 value3
r hpexpire h1 5 FIELDS 1 f2
after 15
assert_equal [r hgetdel h1 fields 2 f2 f3] "{} value3"
assert_equal [lsort [r hgetall h1]] [lsort "f1 10"]

# Try to delete the last field, along with non-existing fields
assert_equal [r hgetdel h1 fields 4 f1 f2 f3 f4] "10 {} {} {}"
r debug set-active-expire 1
}

test "HGETEX - input validation ($type)" {
r del h1
assert_error "*wrong number of arguments*" {r HGETEX}
assert_error "*wrong number of arguments*" {r HGETEX h1}
assert_error "*wrong number of arguments*" {r HGETEX h1 FIELDS}
assert_error "*wrong number of arguments*" {r HGETEX h1 FIELDS 0}
assert_error "*wrong number of arguments*" {r HGETEX h1 FIELDS 1}
assert_error "*argument FIELDS is missing*" {r HGETEX h1 XFIELDX 1 a}
assert_error "*argument FIELDS is missing*" {r HGETEX h1 PXAT 1 1}
assert_error "*argument FIELDS is missing*" {r HGETEX h1 PERSIST 1 FIELDS 1 a}
assert_error "*must match the number of arguments*" {r HGETEX h1 FIELDS 2 a}
assert_error "*Number of fields must be a positive integer*" {r HGETEX h1 FIELDS 0 a}
assert_error "*Number of fields must be a positive integer*" {r HGETEX h1 FIELDS -1 a}
assert_error "*Number of fields must be a positive integer*" {r HGETEX h1 FIELDS 9223372036854775808 a}
}
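
# For contrast with the invalid calls above, a well-formed HGETEX (sketch):
# fetch f1 and f2 and give both a 60-second TTL in a single round trip.
#   r hgetex h1 EX 60 FIELDS 2 f1 f2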

test "HGETEX - input validation (expire time) ($type)" {
assert_error "*value is not an integer or out of range*" {r HGETEX h1 EX bla FIELDS 1 a}
assert_error "*value is not an integer or out of range*" {r HGETEX h1 EX 9223372036854775808 FIELDS 1 a}
assert_error "*value is not an integer or out of range*" {r HGETEX h1 EXAT 9223372036854775808 FIELDS 1 a}
assert_error "*invalid expire time, must be >= 0*" {r HGETEX h1 PX -1 FIELDS 1 a}
assert_error "*invalid expire time, must be >= 0*" {r HGETEX h1 PXAT -1 FIELDS 1 a}
assert_error "*invalid expire time*" {r HGETEX h1 EX -1 FIELDS 1 a}
assert_error "*invalid expire time*" {r HGETEX h1 EX [expr (1<<48)] FIELDS 1 a}
assert_error "*invalid expire time*" {r HGETEX h1 EX [expr (1<<46) - [clock seconds] + 100 ] FIELDS 1 a}
assert_error "*invalid expire time*" {r HGETEX h1 EXAT [expr (1<<46) + 100 ] FIELDS 1 a}
assert_error "*invalid expire time*" {r HGETEX h1 PX [expr (1<<46) - [clock milliseconds] + 100 ] FIELDS 1 a}
assert_error "*invalid expire time*" {r HGETEX h1 PXAT [expr (1<<46) + 100 ] FIELDS 1 a}
}

test "HGETEX - get without setting ttl ($type)" {
r del h1
r hset h1 a 1 b 2 c strval
assert_equal [r hgetex h1 fields 1 a] "1"
assert_equal [r hgetex h1 fields 2 a b] "1 2"
assert_equal [r hgetex h1 fields 3 a b c] "1 2 strval"
assert_equal [r HTTL h1 FIELDS 3 a b c] "$T_NO_EXPIRY $T_NO_EXPIRY $T_NO_EXPIRY"
}

test "HGETEX - get and set the ttl ($type)" {
r del h1
r hset h1 a 1 b 2 c strval
assert_equal [r hgetex h1 EX 10000 fields 1 a] "1"
assert_range [r HTTL h1 FIELDS 1 a] 9000 10000
assert_equal [r hgetex h1 EX 10000 fields 1 c] "strval"
assert_range [r HTTL h1 FIELDS 1 c] 9000 10000
}

test "HGETEX - Test 'EX' flag ($type)" {
r del myhash
r hset myhash field1 value1 field2 value2 field3 value3
assert_equal [r hgetex myhash EX 1000 FIELDS 1 field1] [list "value1"]
assert_range [r httl myhash FIELDS 1 field1] 1 1000
}

test "HGETEX - Test 'EXAT' flag ($type)" {
r del myhash
r hset myhash field1 value1 field2 value2 field3 value3
assert_equal [r hgetex myhash EXAT 4000000000 FIELDS 1 field2] [list "value2"]
assert_range [expr [r httl myhash FIELDS 1 field2] + [clock seconds]] 3900000000 4000000000
}

test "HGETEX - Test 'PX' flag ($type)" {
r del myhash
r hset myhash field1 value1 field2 value2 field3 value3
assert_equal [r hgetex myhash PX 1000000 FIELDS 1 field3] [list "value3"]
assert_range [r httl myhash FIELDS 1 field3] 900 1000
}

test "HGETEX - Test 'PXAT' flag ($type)" {
r del myhash
r hset myhash field1 value1 field2 value2 field3 value3
assert_equal [r hgetex myhash PXAT 4000000000000 FIELDS 1 field3] [list "value3"]
assert_range [expr [r httl myhash FIELDS 1 field3] + [clock seconds]] 3900000000 4000000000
}

test "HGETEX - Test 'PERSIST' flag ($type)" {
r del myhash
r debug set-active-expire 0

r hsetex myhash PX 5000 FIELDS 3 f1 v1 f2 v2 f3 v3
assert_not_equal [r httl myhash FIELDS 1 f1] "$T_NO_EXPIRY"
assert_not_equal [r httl myhash FIELDS 1 f2] "$T_NO_EXPIRY"
assert_not_equal [r httl myhash FIELDS 1 f3] "$T_NO_EXPIRY"

# Persist f1 and verify it no longer has a TTL
assert_equal [r hgetex myhash PERSIST FIELDS 1 f1] "v1"
assert_equal [r httl myhash FIELDS 1 f1] "$T_NO_EXPIRY"

# Persist the rest of the fields
assert_equal [r hgetex myhash PERSIST FIELDS 2 f2 f3] "v2 v3"
assert_equal [r httl myhash FIELDS 2 f2 f3] "$T_NO_EXPIRY $T_NO_EXPIRY"

# Redo the operation. It should be a no-op as the fields are already persisted.
assert_equal [r hgetex myhash PERSIST FIELDS 2 f2 f3] "v2 v3"
assert_equal [r httl myhash FIELDS 2 f2 f3] "$T_NO_EXPIRY $T_NO_EXPIRY"

# Final sanity check: the fields exist and have no attached TTL.
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v1 f2 v2 f3 v3"]
assert_equal [r httl myhash FIELDS 3 f1 f2 f3] "$T_NO_EXPIRY $T_NO_EXPIRY $T_NO_EXPIRY"
r debug set-active-expire 1
}

test "HGETEX - Test setting ttl in the past will delete the key ($type)" {
r del myhash
r hset myhash f1 v1 f2 v2 f3 v3

# hgetex without setting ttl
assert_equal [lsort [r hgetex myhash fields 3 f1 f2 f3]] [lsort "v1 v2 v3"]
assert_equal [r httl myhash FIELDS 3 f1 f2 f3] "$T_NO_EXPIRY $T_NO_EXPIRY $T_NO_EXPIRY"

# set an expired ttl and verify the key is deleted
r hgetex myhash PXAT 1 fields 3 f1 f2 f3
assert_equal [r exists myhash] 0
}

test "HGETEX - Test active expiry ($type)" {
r del myhash
r debug set-active-expire 0

r hset myhash f1 v1 f2 v2 f3 v3 f4 v4 f5 v5
assert_equal [lsort [r hgetex myhash PXAT 1 FIELDS 5 f1 f2 f3 f4 f5]] [lsort "v1 v2 v3 v4 v5"]

r debug set-active-expire 1
wait_for_condition 50 20 { [r EXISTS myhash] == 0 } else { fail "'myhash' should be expired" }
}

test "HGETEX - A field with TTL overridden with another value (TTL discarded) ($type)" {
r del myhash
r hset myhash f1 v1 f2 v2 f3 v3
r hgetex myhash PX 10000 FIELDS 1 f1
r hgetex myhash EX 100 FIELDS 1 f2

# f2's TTL will be discarded
r hset myhash f2 v22
assert_equal [r hget myhash f2] "v22"
assert_equal [r httl myhash FIELDS 2 f2 f3] "$T_NO_EXPIRY $T_NO_EXPIRY"

# The other field is not affected (still has a TTL)
assert_not_equal [r httl myhash FIELDS 1 f1] "$T_NO_EXPIRY"
}

test "HGETEX - Test with lazy expiry ($type)" {
r del myhash
r debug set-active-expire 0

r hsetex myhash PX 1 FIELDS 2 f1 v1 f2 v2
after 5
assert_equal [r hgetex myhash FIELDS 2 f1 f2] "{} {}"
assert_equal [r exists myhash] 0

r debug set-active-expire 1
}

test "HSETEX - input validation ($type)" {
assert_error {*wrong number of arguments*} {r hsetex myhash}
assert_error {*wrong number of arguments*} {r hsetex myhash fields}
assert_error {*wrong number of arguments*} {r hsetex myhash fields 1}
assert_error {*wrong number of arguments*} {r hsetex myhash fields 2 a b}
assert_error {*wrong number of arguments*} {r hsetex myhash fields 2 a b c}
assert_error {*wrong number of arguments*} {r hsetex myhash fields 2 a b c d e}
assert_error {*wrong number of arguments*} {r hsetex myhash fields 3 a b c d}
assert_error {*wrong number of arguments*} {r hsetex myhash fields 3 a b c d e}
assert_error {*wrong number of arguments*} {r hsetex myhash fields 3 a b c d e f g}
assert_error {*wrong number of arguments*} {r hsetex myhash fields 3 a b}
assert_error {*wrong number of arguments*} {r hsetex myhash fields 1 a b unknown}
assert_error {*unknown argument*} {r hsetex myhash nx fields 1 a b}
assert_error {*unknown argument*} {r hsetex myhash 1 fields 1 a b}

# Only one of FNX or FXX
assert_error {*Only one of FXX or FNX arguments *} {r hsetex myhash fxx fxx EX 100 fields 1 a b}
assert_error {*Only one of FXX or FNX arguments *} {r hsetex myhash fxx fnx EX 100 fields 1 a b}
assert_error {*Only one of FXX or FNX arguments *} {r hsetex myhash fnx fxx EX 100 fields 1 a b}
assert_error {*Only one of FXX or FNX arguments *} {r hsetex myhash fnx fnx EX 100 fields 1 a b}
assert_error {*Only one of FXX or FNX arguments *} {r hsetex myhash fxx fnx fxx EX 100 fields 1 a b}
assert_error {*Only one of FXX or FNX arguments *} {r hsetex myhash fnx fxx fnx EX 100 fields 1 a b}

# Only one of EX, PX, EXAT, PXAT or KEEPTTL can be specified
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash EX 100 PX 1000 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash EX 100 EXAT 100 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash EXAT 100 EX 1000 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash EXAT 100 PX 1000 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash PX 100 EXAT 100 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash PX 100 PXAT 100 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash PXAT 100 EX 100 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash PXAT 100 EXAT 100 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash EX 100 KEEPTTL fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash KEEPTTL EX 100 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash EX 100 EX 100 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash EXAT 100 EXAT 100 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash PX 10 PX 10 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash PXAT 10 PXAT 10 fields 1 a b}
assert_error {*Only one of EX, PX, EXAT, PXAT or KEEPTTL arguments*} {r hsetex myhash KEEPTTL KEEPTTL fields 1 a b}

# missing expire time
assert_error {*not an integer or out of range*} {r hsetex myhash ex fields 1 a b}
assert_error {*not an integer or out of range*} {r hsetex myhash px fields 1 a b}
assert_error {*not an integer or out of range*} {r hsetex myhash exat fields 1 a b}
assert_error {*not an integer or out of range*} {r hsetex myhash pxat fields 1 a b}

# expire time of more than 2^48
assert_error {*invalid expire time*} {r hsetex myhash EXAT [expr (1<<48)] 1 a b}
assert_error {*invalid expire time*} {r hsetex myhash PXAT [expr (1<<48)] 1 a b}
assert_error {*invalid expire time*} {r hsetex myhash EX [expr (1<<48) - [clock seconds] + 1000 ] 1 a b}
assert_error {*invalid expire time*} {r hsetex myhash PX [expr (1<<48) - [clock milliseconds] + 1000 ] 1 a b}

# invalid expire time
assert_error {*invalid expire time*} {r hsetex myhash EXAT -1 1 a b}
assert_error {*not an integer or out of range*} {r hsetex myhash EXAT 9223372036854775808 1 a b}
assert_error {*not an integer or out of range*} {r hsetex myhash EXAT x 1 a b}

# invalid numfields arg
assert_error {*invalid number of fields*} {r hsetex myhash fields x a b}
assert_error {*invalid number of fields*} {r hsetex myhash fields 9223372036854775808 a b}
assert_error {*invalid number of fields*} {r hsetex myhash fields 0 a b}
assert_error {*invalid number of fields*} {r hsetex myhash fields -1 a b}
}
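
# For contrast, a well-formed HSETEX combining the flags validated above
# (sketch): set two fields only if neither exists yet, with a 60-second TTL.
#   r hsetex myhash FNX EX 60 FIELDS 2 f1 v1 f2 v2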

test "HSETEX - Basic test ($type)" {
r del myhash

# set a field
assert_equal [r hsetex myhash FIELDS 1 f1 v1] 1
assert_equal [r hget myhash f1] "v1"

# override it
assert_equal [r hsetex myhash FIELDS 1 f1 v11] 1
assert_equal [r hget myhash f1] "v11"

# set multiple fields
assert_equal [r hsetex myhash FIELDS 2 f1 v1 f2 v2] 1
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v1 f2 v2"]
assert_equal [r hsetex myhash FIELDS 3 f1 v111 f2 v222 f3 v333] 1
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v111 f2 v222 f3 v333"]
}

test "HSETEX - Test FXX flag ($type)" {
r del myhash

# Key is empty, the command fails due to FXX
assert_equal [r hsetex myhash FXX FIELDS 2 f1 v1 f2 v2] 0
# Verify it did not leave an empty key behind
assert_equal [r exists myhash] 0

# Command fails and the fields are unchanged
r hset myhash f1 v1
assert_equal [r hsetex myhash FXX FIELDS 2 f1 v1 f2 v2] 0
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v1"]

# Command executed successfully
assert_equal [r hsetex myhash FXX FIELDS 1 f1 v11] 1
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v11"]

# Try with multiple fields
r hset myhash f2 v2
assert_equal [r hsetex myhash FXX FIELDS 2 f1 v111 f2 v222] 1
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v111 f2 v222"]

# Try with expiry
assert_equal [r hsetex myhash FXX EX 100 FIELDS 2 f1 v1 f2 v2] 1
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v1 f2 v2"]
assert_range [r httl myhash FIELDS 1 f1] 80 100
assert_range [r httl myhash FIELDS 1 f2] 80 100

# Try with expiry, the FXX arg comes after the TTL
assert_equal [r hsetex myhash PX 5000 FXX FIELDS 2 f1 v1 f2 v2] 1
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v1 f2 v2"]
assert_range [r hpttl myhash FIELDS 1 f1] 4500 5000
assert_range [r hpttl myhash FIELDS 1 f2] 4500 5000
}

test "HSETEX - Test FXX flag with lazy expire ($type)" {
r del myhash
r debug set-active-expire 0

r hsetex myhash PX 10 FIELDS 1 f1 v1
after 15
assert_equal [r hsetex myhash FXX FIELDS 1 f1 v11] 0
assert_equal [r exists myhash] 0
r debug set-active-expire 1
}

test "HSETEX - Test FNX flag ($type)" {
r del myhash

# Command successful on an empty key
assert_equal [r hsetex myhash FNX FIELDS 1 f1 v1] 1

# Command fails and the fields are unchanged
assert_equal [r hsetex myhash FNX FIELDS 2 f1 v1 f2 v2] 0
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v1"]

# Command executed successfully
assert_equal [r hsetex myhash FNX FIELDS 2 f2 v2 f3 v3] 1
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v1 f2 v2 f3 v3"]
assert_equal [r hsetex myhash FXX FIELDS 3 f1 v11 f2 v22 f3 v33] 1
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v11 f2 v22 f3 v33"]

# Try with expiry
r del myhash
assert_equal [r hsetex myhash FNX EX 100 FIELDS 2 f1 v1 f2 v2] 1
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v1 f2 v2"]
assert_range [r httl myhash FIELDS 1 f1] 80 100
assert_range [r httl myhash FIELDS 1 f2] 80 100

# Try with expiry, the FNX arg comes after the TTL
assert_equal [r hsetex myhash PX 5000 FNX FIELDS 1 f3 v3] 1
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v1 f2 v2 f3 v3"]
assert_range [r hpttl myhash FIELDS 1 f3] 4500 5000
}

test "HSETEX - Test 'EX' flag ($type)" {
r del myhash
r hset myhash f1 v1 f2 v2
assert_equal [r hsetex myhash EX 1000 FIELDS 1 f3 v3 ] 1
assert_range [r httl myhash FIELDS 1 f3] 900 1000
}

test "HSETEX - Test 'EXAT' flag ($type)" {
r del myhash
r hset myhash f1 v1 f2 v2
assert_equal [r hsetex myhash EXAT 4000000000 FIELDS 1 f3 v3] 1
assert_range [expr [r httl myhash FIELDS 1 f3] + [clock seconds]] 3900000000 4000000000
}

test "HSETEX - Test 'PX' flag ($type)" {
r del myhash
assert_equal [r hsetex myhash PX 1000000 FIELDS 1 f3 v3] 1
assert_range [r httl myhash FIELDS 1 f3] 990 1000
}

test "HSETEX - Test 'PXAT' flag ($type)" {
r del myhash
r hset myhash f1 v2 f2 v2 f3 v3
assert_equal [r hsetex myhash PXAT 4000000000000 FIELDS 1 f2 v2] 1
assert_range [expr [r httl myhash FIELDS 1 f2] + [clock seconds]] 3900000000 4000000000
}

test "HSETEX - Test 'KEEPTTL' flag ($type)" {
r del myhash

r hsetex myhash FIELDS 2 f1 v1 f2 v2
r hsetex myhash PX 20000 FIELDS 1 f2 v2

# f1 does not have a TTL
assert_equal [r httl myhash FIELDS 1 f1] "$T_NO_EXPIRY"

# f2 has a TTL
assert_not_equal [r httl myhash FIELDS 1 f2] "$T_NO_EXPIRY"

# Validate KEEPTTL preserves the TTL
assert_equal [r hsetex myhash KEEPTTL FIELDS 1 f2 v22] 1
assert_equal [r hget myhash f2] "v22"
assert_not_equal [r httl myhash FIELDS 1 f2] "$T_NO_EXPIRY"

# Try with multiple fields. First, set the fields and a TTL
r hsetex myhash EX 10000 FIELDS 3 f1 v1 f2 v2 f3 v3

# Update the fields with the KEEPTTL flag
r hsetex myhash KEEPTTL FIELDS 3 f1 v111 f2 v222 f3 v333

# Verify the values are set and the TTLs are untouched
assert_equal [lsort [r hgetall myhash]] [lsort "f1 v111 f2 v222 f3 v333"]
assert_range [r httl myhash FIELDS 1 f1] 9000 10000
assert_range [r httl myhash FIELDS 1 f2] 9000 10000
assert_range [r httl myhash FIELDS 1 f3] 9000 10000
}

test "HSETEX - Test no expiry flag discards TTL ($type)" {
r del myhash

r hsetex myhash FIELDS 1 f1 v1
r hsetex myhash PX 100000 FIELDS 1 f2 v2
assert_range [r hpttl myhash FIELDS 1 f2] 1 100000

assert_equal [r hsetex myhash FIELDS 2 f1 v1 f2 v2] 1
assert_equal [r httl myhash FIELDS 2 f1 f2] "$T_NO_EXPIRY $T_NO_EXPIRY"
}

test "HSETEX - Test with active expiry" {
r del myhash
r debug set-active-expire 0

r hsetex myhash PX 10 FIELDS 5 f1 v1 f2 v2 f3 v3 f4 v4 f5 v5
r debug set-active-expire 1
wait_for_condition 50 20 { [r EXISTS myhash] == 0 } else { fail "'myhash' should be expired" }
}

test "HSETEX - Set time in the past ($type)" {
r del myhash

# Try on an empty key
assert_equal [r hsetex myhash EXAT [expr {[clock seconds] - 1}] FIELDS 2 f1 v1 f2 v2] 1
assert_equal [r hexists myhash field1] 0

# Try with existing fields
r hset myhash f1 v1 f2 v2
assert_equal [r hsetex myhash EXAT [expr {[clock seconds] - 1}] FIELDS 2 f1 v1 f2 v2] 1
assert_equal [r hexists myhash field1] 0
}
}

test "Statistics - Hashes with HFEs ($type)" {

@ -879,6 +1303,13 @@ start_server {tags {"external:skip needs:debug"}} {
r hdel myhash3 f2
assert_match [get_stat_subexpiry r] 2

# hash4: 2 fields, 1 with TTL. HGETDEL field with TTL. subexpiry decr -1
r hset myhash4 f1 v1 f2 v2
r hpexpire myhash4 100 FIELDS 1 f2
assert_match [get_stat_subexpiry r] 3
r hgetdel myhash4 FIELDS 1 f2
assert_match [get_stat_subexpiry r] 2

# Expired fields of hash1 and hash2. subexpiry decr -2
wait_for_condition 50 50 {
[get_stat_subexpiry r] == 0

@ -887,6 +1318,21 @@ start_server {tags {"external:skip needs:debug"}} {
}
}

test "HFE commands against wrong type" {
r set wrongtype somevalue
assert_error "WRONGTYPE Operation against a key*" {r hexpire wrongtype 10 fields 1 f1}
assert_error "WRONGTYPE Operation against a key*" {r hexpireat wrongtype 10 fields 1 f1}
assert_error "WRONGTYPE Operation against a key*" {r hpexpire wrongtype 10 fields 1 f1}
assert_error "WRONGTYPE Operation against a key*" {r hpexpireat wrongtype 10 fields 1 f1}
assert_error "WRONGTYPE Operation against a key*" {r hexpiretime wrongtype fields 1 f1}
assert_error "WRONGTYPE Operation against a key*" {r hpexpiretime wrongtype fields 1 f1}
assert_error "WRONGTYPE Operation against a key*" {r httl wrongtype fields 1 f1}
assert_error "WRONGTYPE Operation against a key*" {r hpttl wrongtype fields 1 f1}
assert_error "WRONGTYPE Operation against a key*" {r hpersist wrongtype fields 1 f1}
assert_error "WRONGTYPE Operation against a key*" {r hgetex wrongtype fields 1 f1}
assert_error "WRONGTYPE Operation against a key*" {r hsetex wrongtype fields 1 f1 v1}
}

r config set hash-max-listpack-entries 512
}

@ -1048,6 +1494,54 @@ start_server {tags {"external:skip needs:debug"}} {
fail "Field f2 of hash h2 wasn't deleted"
}

# HSETEX
r hsetex h3 FIELDS 1 f1 v1
r hsetex h3 FXX FIELDS 1 f1 v11
r hsetex h3 FNX FIELDS 1 f2 v22
r hsetex h3 KEEPTTL FIELDS 1 f2 v22

# The next one will fail due to the FNX arg and won't be replicated
r hsetex h3 FNX FIELDS 2 f1 v1 f2 v2

# Commands with EX/PX/PXAT/EXAT will be replicated as PXAT
r hsetex h3 EX 10000 FIELDS 1 f1 v111
r hsetex h3 PX 10000 FIELDS 1 f1 v111
r hsetex h3 PXAT [expr [clock milliseconds]+100000] FIELDS 1 f1 v111
r hsetex h3 EXAT [expr [clock seconds]+100000] FIELDS 1 f1 v111

# The following commands will set and then delete the fields because
# the TTL is in the past. HDELs will be propagated.
r hsetex h3 PX 0 FIELDS 1 f1 v111
r hsetex h3 PX 0 FIELDS 3 f1 v2 f2 v2 f3 v3

# HGETEX
r hsetex h4 FIELDS 3 f1 v1 f2 v2 f3 v3
# No change to the expiry, so it won't be replicated.
r hgetex h4 FIELDS 1 f1

# Commands with EX/PX/PXAT/EXAT will be replicated as an
# HPEXPIREAT command.
r hgetex h4 EX 10000 FIELDS 1 f1
r hgetex h4 PX 10000 FIELDS 1 f1
r hgetex h4 PXAT [expr [clock milliseconds]+100000] FIELDS 1 f1
r hgetex h4 EXAT [expr [clock seconds]+100000] FIELDS 1 f1

# The following commands will delete the fields because the TTL is
# in the past. HDELs will be propagated.
r hgetex h4 PX 0 FIELDS 1 f1
# HDELs will be propagated for f2 and f3 as only those exist.
r hgetex h4 PX 0 FIELDS 3 f1 f2 f3

# HGETEX with the PERSIST flag will be replicated as HPERSIST
r hsetex h4 EX 1000 FIELDS 1 f4 v4
r hgetex h4 PERSIST FIELDS 1 f4

# Nothing will be replicated as f4 is already persisted.
r hgetex h4 PERSIST FIELDS 1 f4

# Replicated as HDEL
r hgetdel h4 FIELDS 1 f4

# Assert that each TTL-related command is persisted with an absolute timestamp in the AOF
assert_aof_content $aof {
{select *}

@ -1068,6 +1562,33 @@ start_server {tags {"external:skip needs:debug"}} {
{hdel h1 f2}
{hdel h2 f1}
{hdel h2 f2}
{hsetex h3 FIELDS 1 f1 v1}
{hsetex h3 FXX FIELDS 1 f1 v11}
{hsetex h3 FNX FIELDS 1 f2 v22}
{hsetex h3 KEEPTTL FIELDS 1 f2 v22}
{hsetex h3 PXAT * 1 f1 v111}
{hsetex h3 PXAT * 1 f1 v111}
{hsetex h3 PXAT * 1 f1 v111}
{hsetex h3 PXAT * 1 f1 v111}
{hdel h3 f1}
{multi}
{hdel h3 f1}
{hdel h3 f2}
{hdel h3 f3}
{exec}
{hsetex h4 FIELDS 3 f1 v1 f2 v2 f3 v3}
{hpexpireat h4 * FIELDS 1 f1}
{hpexpireat h4 * FIELDS 1 f1}
{hpexpireat h4 * FIELDS 1 f1}
{hpexpireat h4 * FIELDS 1 f1}
{hdel h4 f1}
{multi}
{hdel h4 f2}
{hdel h4 f3}
{exec}
{hsetex h4 PXAT * FIELDS 1 f4 v4}
{hpersist h4 FIELDS 1 f4}
{hdel h4 f4}
}
}
} {} {needs:debug}
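
# Note the pattern asserted above: when a single HSETEX/HGETEX call removes
# several fields at once, the resulting HDELs are wrapped in MULTI/EXEC so
# the AOF replays them atomically, e.g.:
#   multi
#   hdel h3 f1
#   hdel h3 f2
#   hdel h3 f3
#   exec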

@ -1135,6 +1656,16 @@ start_server {tags {"external:skip needs:debug"}} {
r hpexpire h2 1 FIELDS 2 f1 f2
after 200

r hsetex h3 EX 100000 FIELDS 2 f1 v1 f2 v2
r hsetex h3 EXAT [expr [clock seconds] + 1000] FIELDS 2 f1 v1 f2 v2
r hsetex h3 PX 100000 FIELDS 2 f1 v1 f2 v2
r hsetex h3 PXAT [expr [clock milliseconds]+100000] FIELDS 2 f1 v1 f2 v2

r hgetex h3 EX 100000 FIELDS 2 f1 f2
r hgetex h3 EXAT [expr [clock seconds] + 1000] FIELDS 2 f1 f2
r hgetex h3 PX 100000 FIELDS 2 f1 f2
r hgetex h3 PXAT [expr [clock milliseconds]+100000] FIELDS 2 f1 f2

assert_aof_content $aof {
{select *}
{hset h1 f1 v1 f2 v2 f3 v3 f4 v4 f5 v5 f6 v6}

@ -1146,6 +1677,14 @@ start_server {tags {"external:skip needs:debug"}} {
{hpexpireat h2 * FIELDS 2 f1 f2}
{hdel h2 *}
{hdel h2 *}
{hsetex h3 PXAT * FIELDS 2 f1 v1 f2 v2}
{hsetex h3 PXAT * FIELDS 2 f1 v1 f2 v2}
{hsetex h3 PXAT * FIELDS 2 f1 v1 f2 v2}
{hsetex h3 PXAT * FIELDS 2 f1 v1 f2 v2}
{hpexpireat h3 * FIELDS 2 f1 f2}
{hpexpireat h3 * FIELDS 2 f1 f2}
{hpexpireat h3 * FIELDS 2 f1 f2}
{hpexpireat h3 * FIELDS 2 f1 f2}
}

array set keyAndFields1 [dumpAllHashes r]

@ -1265,6 +1804,23 @@ start_server {tags {"external:skip needs:debug"}} {
$primary hpexpireat h5 [expr [clock milliseconds]-100000] FIELDS 1 f
$primary hset h9 f v

$primary hsetex h10 EX 100000 FIELDS 1 f v
$primary hsetex h11 EXAT [expr [clock seconds] + 1000] FIELDS 1 f v
$primary hsetex h12 PX 100000 FIELDS 1 f v
$primary hsetex h13 PXAT [expr [clock milliseconds]+100000] FIELDS 1 f v
$primary hsetex h14 PXAT 1 FIELDS 1 f v

$primary hsetex h15 FIELDS 1 f v
$primary hgetex h15 EX 100000 FIELDS 1 f
$primary hsetex h16 FIELDS 1 f v
$primary hgetex h16 EXAT [expr [clock seconds] + 1000] FIELDS 1 f
$primary hsetex h17 FIELDS 1 f v
$primary hgetex h17 PX 100000 FIELDS 1 f
$primary hsetex h18 FIELDS 1 f v
$primary hgetex h18 PXAT [expr [clock milliseconds]+100000] FIELDS 1 f
$primary hsetex h19 FIELDS 1 f v
$primary hgetex h19 PXAT 1 FIELDS 1 f

# Wait for replica to get the keys and TTLs
assert {[$primary wait 1 0] == 1}

@ -1273,5 +1829,102 @@ start_server {tags {"external:skip needs:debug"}} {
assert_equal [dumpAllHashes $primary] [dumpAllHashes $replica]
}
}

test "Test HSETEX command replication" {
r flushall
set repl [attach_to_replication_stream]

# Create a field and delete it in a single command due to the
# timestamp being in the past. It will be propagated as HDEL.
r hsetex h1 PXAT 1 FIELDS 1 f1 v1

# The following ones will be propagated with the PXAT arg
r hsetex h1 EX 100000 FIELDS 1 f1 v1
r hsetex h1 EXAT [expr [clock seconds] + 1000] FIELDS 1 f1 v1
r hsetex h1 PX 100000 FIELDS 1 f1 v1
r hsetex h1 PXAT [expr [clock milliseconds]+100000] FIELDS 1 f1 v1

# Propagated with the KEEPTTL flag
r hsetex h1 KEEPTTL FIELDS 1 f1 v1

# The following commands will fail and won't be propagated
r hsetex h1 FNX FIELDS 1 f1 v11
r hsetex h1 FXX FIELDS 1 f2 v2

# Propagated with the FNX and FXX flags
r hsetex h1 FNX FIELDS 1 f2 v2
r hsetex h1 FXX FIELDS 1 f2 v22

assert_replication_stream $repl {
{select *}
{hdel h1 f1}
{hsetex h1 PXAT * FIELDS 1 f1 v1}
{hsetex h1 PXAT * FIELDS 1 f1 v1}
{hsetex h1 PXAT * FIELDS 1 f1 v1}
{hsetex h1 PXAT * FIELDS 1 f1 v1}
{hsetex h1 KEEPTTL FIELDS 1 f1 v1}
{hsetex h1 FNX FIELDS 1 f2 v2}
{hsetex h1 FXX FIELDS 1 f2 v22}
}
close_replication_stream $repl
} {} {needs:repl}
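
# A worked example of the PXAT rewrite (illustrative numbers): if the primary
# executes
#   HSETEX h1 EX 100 FIELDS 1 f1 v1
# at Unix time 1700000000000 ms, replicas receive approximately
#   HSETEX h1 PXAT 1700000100000 FIELDS 1 f1 v1
# so a replica applying the command later cannot stretch the TTL.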

test "Test HGETEX command replication" {
r flushall
r debug set-active-expire 0
set repl [attach_to_replication_stream]

# If no fields are found, the command won't be replicated
r hgetex h1 EX 10000 FIELDS 1 f0
r hgetex h1 PERSIST FIELDS 1 f0

# A get without setting the expiry will not be replicated
r hsetex h1 FIELDS 1 f0 v0
r hgetex h1 FIELDS 1 f0

# A lazily expired field will be replicated as HDEL
r hsetex h1 PX 10 FIELDS 1 f1 v1
after 15
r hgetex h1 EX 1000 FIELDS 1 f1

# If the new TTL is in the past, it will be replicated as HDEL
r hsetex h1 EX 10000 FIELDS 1 f2 v2
r hgetex h1 EXAT 1 FIELDS 1 f2

# One field will expire lazily and the other will be deleted because
# its TTL is in the past. It'll be propagated as two HDELs.
r hsetex h1 PX 10 FIELDS 1 f3 v3
after 15
r hsetex h1 FIELDS 1 f4 v4
r hgetex h1 EXAT 1 FIELDS 2 f3 f4

# A TTL update will be replicated as HPEXPIREAT
r hsetex h1 FIELDS 1 f5 v5
r hgetex h1 EX 10000 FIELDS 1 f5

# If the PERSIST flag is used, it will be replicated as HPERSIST
r hsetex h1 EX 10000 FIELDS 1 f6 v6
r hgetex h1 PERSIST FIELDS 1 f6

assert_replication_stream $repl {
{select *}
{hsetex h1 FIELDS 1 f0 v0}
{hsetex h1 PXAT * FIELDS 1 f1 v1}
{hdel h1 f1}
{hsetex h1 PXAT * FIELDS 1 f2 v2}
{hdel h1 f2}
{hsetex h1 PXAT * FIELDS 1 f3 v3}
{hsetex h1 FIELDS 1 f4 v4}
{multi}
{hdel h1 f3}
{hdel h1 f4}
{exec}
{hsetex h1 FIELDS 1 f5 v5}
{hpexpireat h1 * FIELDS 1 f5}
{hsetex h1 PXAT * FIELDS 1 f6 v6}
{hpersist h1 FIELDS 1 f6}
}
close_replication_stream $repl
} {} {needs:repl}
}
}

@ -371,6 +371,7 @@ start_server {tags {"hash"}} {
assert_error "WRONGTYPE Operation against a key*" {r hsetnx wrongtype field1 val1}
assert_error "WRONGTYPE Operation against a key*" {r hlen wrongtype}
assert_error "WRONGTYPE Operation against a key*" {r hscan wrongtype 0}
assert_error "WRONGTYPE Operation against a key*" {r hgetdel wrongtype fields 1 a}
}

test {HMGET - small hash} {

@ -710,6 +711,89 @@ start_server {tags {"hash"}} {
r config set hash-max-listpack-value $original_max_value
}

test {HGETDEL input validation} {
r del key1
assert_error "*wrong number of arguments*" {r hgetdel}
assert_error "*wrong number of arguments*" {r hgetdel key1}
assert_error "*wrong number of arguments*" {r hgetdel key1 FIELDS}
assert_error "*wrong number of arguments*" {r hgetdel key1 FIELDS 0}
assert_error "*wrong number of arguments*" {r hgetdel key1 FIELDX}
assert_error "*argument FIELDS is missing*" {r hgetdel key1 XFIELDX 1 a}
assert_error "*numfields*parameter*must match*number of arguments*" {r hgetdel key1 FIELDS 2 a}
assert_error "*numfields*parameter*must match*number of arguments*" {r hgetdel key1 FIELDS 2 a b c}
assert_error "*Number of fields must be a positive integer*" {r hgetdel key1 FIELDS 0 a}
assert_error "*Number of fields must be a positive integer*" {r hgetdel key1 FIELDS -1 a}
assert_error "*Number of fields must be a positive integer*" {r hgetdel key1 FIELDS b a}
assert_error "*Number of fields must be a positive integer*" {r hgetdel key1 FIELDS 9223372036854775808 a}
}

foreach type {listpack ht} {
set orig_config [lindex [r config get hash-max-listpack-entries] 1]
r del key1

if {$type == "listpack"} {
r config set hash-max-listpack-entries $orig_config
r hset key1 f1 1 f2 2 f3 3 strfield strval
assert_encoding listpack key1
} else {
r config set hash-max-listpack-entries 0
r hset key1 f1 1 f2 2 f3 3 strfield strval
assert_encoding hashtable key1
}

test {HGETDEL basic test} {
r del key1
r hset key1 f1 1 f2 2 f3 3 strfield strval
assert_equal [r hgetdel key1 fields 1 f2] 2
assert_equal [r hlen key1] 3
assert_equal [r hget key1 f1] 1
assert_equal [r hget key1 f2] ""
assert_equal [r hget key1 f3] 3
assert_equal [r hget key1 strfield] strval

assert_equal [r hgetdel key1 fields 1 f1] 1
assert_equal [lsort [r hgetall key1]] [lsort "f3 3 strfield strval"]
assert_equal [r hgetdel key1 fields 1 f3] 3
assert_equal [r hgetdel key1 fields 1 strfield] strval
assert_equal [r hgetall key1] ""
assert_equal [r exists key1] 0
}

test {HGETDEL test with non-existing fields} {
r del key1
r hset key1 f1 1 f2 2 f3 3
assert_equal [r hgetdel key1 fields 4 x1 x2 x3 x4] "{} {} {} {}"
assert_equal [r hgetdel key1 fields 4 x1 x2 f3 x4] "{} {} 3 {}"
assert_equal [lsort [r hgetall key1]] [lsort "f1 1 f2 2"]
assert_equal [r hgetdel key1 fields 3 f1 f2 f3] "1 2 {}"
assert_equal [r hgetdel key1 fields 3 f1 f2 f3] "{} {} {}"
}

r config set hash-max-listpack-entries $orig_config
}

test {HGETDEL propagated as HDEL command to replica} {
set repl [attach_to_replication_stream]
r hset key1 f1 v1 f2 v2 f3 v3 f4 v4 f5 v5
r hgetdel key1 fields 1 f1
r hgetdel key1 fields 2 f2 f3

# make sure non-existing fields are not replicated
r hgetdel key1 fields 2 f7 f8

# delete more fields
r hgetdel key1 fields 3 f4 f5 f6

assert_replication_stream $repl {
{select *}
{hset key1 f1 v1 f2 v2 f3 v3 f4 v4 f5 v5}
{hdel key1 f1}
{hdel key1 f2 f3}
{hdel key1 f4 f5 f6}
}
close_replication_stream $repl
} {} {needs:repl}
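
# Usage sketch: HGETDEL collapses a read-then-delete pair into one atomic
# command. Instead of
#   set v [r hget key1 f1]; r hdel key1 f1
# a single call suffices:
#   set v [r hgetdel key1 fields 1 f1]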

test {Hash ziplist regression test for large keys} {
r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk a
r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk b

@ -464,12 +464,6 @@ start_server {tags {"string"}} {
assert_equal "" [r getrange mykey 5 3]
assert_equal " World" [r getrange mykey 5 5000]
assert_equal "Hello World" [r getrange mykey -5000 10000]
assert_equal "" [r getrange mykey 0 -100]
assert_equal "" [r getrange mykey 1 -100]
assert_equal "" [r getrange mykey -1 -100]
assert_equal "" [r getrange mykey -100 -99]
assert_equal "" [r getrange mykey -100 -100]
assert_equal "" [r getrange mykey -100 -101]
}

test "GETRANGE against integer-encoded value" {

@ -480,12 +474,6 @@ start_server {tags {"string"}} {
assert_equal "" [r getrange mykey 5 3]
assert_equal "4" [r getrange mykey 3 5000]
assert_equal "1234" [r getrange mykey -5000 10000]
assert_equal "" [r getrange mykey 0 -100]
assert_equal "" [r getrange mykey 1 -100]
assert_equal "" [r getrange mykey -1 -100]
assert_equal "" [r getrange mykey -100 -99]
assert_equal "" [r getrange mykey -100 -100]
assert_equal "" [r getrange mykey -100 -101]
}

test "GETRANGE fuzzing" {