diff --git a/src/commands.def b/src/commands.def
index 93deaf04b..ef42fb8da 100644
--- a/src/commands.def
+++ b/src/commands.def
@@ -7735,41 +7735,6 @@ struct COMMAND_ARG RESTORE_ASKING_Args[] = {
 #define SAVE_Keyspecs NULL
 #endif
 
-/********** SFLUSH ********************/
-
-#ifndef SKIP_CMD_HISTORY_TABLE
-/* SFLUSH history */
-#define SFLUSH_History NULL
-#endif
-
-#ifndef SKIP_CMD_TIPS_TABLE
-/* SFLUSH tips */
-#define SFLUSH_Tips NULL
-#endif
-
-#ifndef SKIP_CMD_KEY_SPECS_TABLE
-/* SFLUSH key specs */
-#define SFLUSH_Keyspecs NULL
-#endif
-
-/* SFLUSH data argument table */
-struct COMMAND_ARG SFLUSH_data_Subargs[] = {
-{MAKE_ARG("slot-start",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
-{MAKE_ARG("slot-last",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
-};
-
-/* SFLUSH flush_type argument table */
-struct COMMAND_ARG SFLUSH_flush_type_Subargs[] = {
-{MAKE_ARG("async",ARG_TYPE_PURE_TOKEN,-1,"ASYNC",NULL,NULL,CMD_ARG_NONE,0,NULL)},
-{MAKE_ARG("sync",ARG_TYPE_PURE_TOKEN,-1,"SYNC",NULL,NULL,CMD_ARG_NONE,0,NULL)},
-};
-
-/* SFLUSH argument table */
-struct COMMAND_ARG SFLUSH_Args[] = {
-{MAKE_ARG("data",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,2,NULL),.subargs=SFLUSH_data_Subargs},
-{MAKE_ARG("flush-type",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,2,NULL),.subargs=SFLUSH_flush_type_Subargs},
-};
-
 /********** SHUTDOWN ********************/
 
 #ifndef SKIP_CMD_HISTORY_TABLE
@@ -11165,7 +11130,6 @@ struct COMMAND_STRUCT redisCommandTable[] = {
 {MAKE_CMD("restore-asking","An internal command for migrating keys in a cluster.","O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).","3.0.0",CMD_DOC_SYSCMD,NULL,NULL,"server",COMMAND_GROUP_SERVER,RESTORE_ASKING_History,3,RESTORE_ASKING_Tips,0,restoreCommand,-4,CMD_WRITE|CMD_DENYOOM|CMD_ASKING,ACL_CATEGORY_KEYSPACE|ACL_CATEGORY_DANGEROUS,RESTORE_ASKING_Keyspecs,1,NULL,7),.args=RESTORE_ASKING_Args},
 {MAKE_CMD("role","Returns the replication role.","O(1)","2.8.12",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,ROLE_History,0,ROLE_Tips,0,roleCommand,1,CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_FAST|CMD_SENTINEL,ACL_CATEGORY_ADMIN|ACL_CATEGORY_DANGEROUS,ROLE_Keyspecs,0,NULL,0)},
 {MAKE_CMD("save","Synchronously saves the database(s) to disk.","O(N) where N is the total number of keys in all databases","1.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SAVE_History,0,SAVE_Tips,0,saveCommand,1,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_NO_MULTI,0,SAVE_Keyspecs,0,NULL,0)},
-{MAKE_CMD("sflush","Remove all keys from selected range of slots.","O(N)+O(k) where N is the number of keys and k is the number of slots.","8.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SFLUSH_History,0,SFLUSH_Tips,0,sflushCommand,-3,CMD_WRITE,ACL_CATEGORY_KEYSPACE|ACL_CATEGORY_DANGEROUS,SFLUSH_Keyspecs,0,NULL,2),.args=SFLUSH_Args},
 {MAKE_CMD("shutdown","Synchronously saves the database(s) to disk and shuts down the Redis server.","O(N) when saving, where N is the total number of keys in all databases when saving data, otherwise O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SHUTDOWN_History,1,SHUTDOWN_Tips,0,shutdownCommand,-1,CMD_ADMIN|CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_NO_MULTI|CMD_SENTINEL|CMD_ALLOW_BUSY,0,SHUTDOWN_Keyspecs,0,NULL,4),.args=SHUTDOWN_Args},
 {MAKE_CMD("slaveof","Sets a Redis server as a replica of another, or promotes it to being a master.","O(1)","1.0.0",CMD_DOC_DEPRECATED,"`REPLICAOF`","5.0.0","server",COMMAND_GROUP_SERVER,SLAVEOF_History,0,SLAVEOF_Tips,0,replicaofCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_STALE,0,SLAVEOF_Keyspecs,0,NULL,1),.args=SLAVEOF_Args},
 {MAKE_CMD("slowlog","A container for slow log commands.","Depends on subcommand.","2.2.12",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SLOWLOG_History,0,SLOWLOG_Tips,0,NULL,-2,0,0,SLOWLOG_Keyspecs,0,NULL,0),.subcommands=SLOWLOG_Subcommands},
diff --git a/src/commands/sflush.json b/src/commands/sflush.json
index b076e33d9..bac27ad2d 100644
--- a/src/commands/sflush.json
+++ b/src/commands/sflush.json
@@ -7,7 +7,8 @@
         "arity": -3,
         "function": "sflushCommand",
         "command_flags": [
-            "WRITE"
+            "WRITE",
+            "EXPERIMENTAL"
         ],
         "acl_categories": [
             "KEYSPACE",
diff --git a/tests/cluster/tests/19-cluster-nodes-slots.tcl b/tests/cluster/tests/19-cluster-nodes-slots.tcl
index 9780322af..77faec912 100644
--- a/tests/cluster/tests/19-cluster-nodes-slots.tcl
+++ b/tests/cluster/tests/19-cluster-nodes-slots.tcl
@@ -13,72 +13,6 @@ test "Cluster should start ok" {
 set master1 [Rn 0]
 set master2 [Rn 1]
 
-test "SFLUSH - Errors and output validation" {
-    assert_match "* 0-8191*" [$master1 CLUSTER NODES]
-    assert_match "* 8192-16383*" [$master2 CLUSTER NODES]
-    assert_match "*0 8191*" [$master1 CLUSTER SLOTS]
-    assert_match "*8192 16383*" [$master2 CLUSTER SLOTS]
-
-    # make master1 non-continuous slots
-    $master1 cluster DELSLOTSRANGE 1000 2000
-
-    # Test SFLUSH errors validation
-    assert_error {ERR wrong number of arguments*} {$master1 SFLUSH 4}
-    assert_error {ERR wrong number of arguments*} {$master1 SFLUSH 4 SYNC}
-    assert_error {ERR Invalid or out of range slot} {$master1 SFLUSH x 4}
-    assert_error {ERR Invalid or out of range slot} {$master1 SFLUSH 0 12x}
-    assert_error {ERR Slot 3 specified multiple times} {$master1 SFLUSH 2 4 3 5}
-    assert_error {ERR start slot number 8 is greater than*} {$master1 SFLUSH 8 4}
-    assert_error {ERR wrong number of arguments*} {$master1 SFLUSH 4 8 10}
-    assert_error {ERR wrong number of arguments*} {$master1 SFLUSH 0 999 2001 8191 ASYNCX}
-
-    # Test SFLUSH output validation
-    assert_match "" [$master1 SFLUSH 2 4]
-    assert_match "" [$master1 SFLUSH 0 4]
-    assert_match "" [$master2 SFLUSH 0 4]
-    assert_match "" [$master1 SFLUSH 1 8191]
-    assert_match "" [$master1 SFLUSH 0 8190]
-    assert_match "" [$master1 SFLUSH 0 998 2001 8191]
-    assert_match "" [$master1 SFLUSH 1 999 2001 8191]
-    assert_match "" [$master1 SFLUSH 0 999 2001 8190]
-    assert_match "" [$master1 SFLUSH 0 999 2002 8191]
-    assert_match "{0 999} {2001 8191}" [$master1 SFLUSH 0 999 2001 8191]
-    assert_match "{0 999} {2001 8191}" [$master1 SFLUSH 0 8191]
-    assert_match "{0 999} {2001 8191}" [$master1 SFLUSH 0 4000 4001 8191]
-    assert_match "" [$master2 SFLUSH 8193 16383]
-    assert_match "" [$master2 SFLUSH 8192 16382]
-    assert_match "{8192 16383}" [$master2 SFLUSH 8192 16383]
-    assert_match "{8192 16383}" [$master2 SFLUSH 8192 16383 SYNC]
-    assert_match "{8192 16383}" [$master2 SFLUSH 8192 16383 ASYNC]
-    assert_match "{8192 16383}" [$master2 SFLUSH 8192 9000 9001 16383]
-    assert_match "{8192 16383}" [$master2 SFLUSH 8192 9000 9001 16383 SYNC]
-    assert_match "{8192 16383}" [$master2 SFLUSH 8192 9000 9001 16383 ASYNC]
-
-    # restore master1 continuous slots
-    $master1 cluster ADDSLOTSRANGE 1000 2000
-}
-
-test "SFLUSH - Deletes the keys with argument /SYNC/ASYNC" {
-    foreach op {"" "SYNC" "ASYNC"} {
-        for {set i 0} {$i < 100} {incr i} {
-            catch {$master1 SET key$i val$i}
-            catch {$master2 SET key$i val$i}
-        }
-
-        assert {[$master1 DBSIZE] > 0}
-        assert {[$master2 DBSIZE] > 0}
-        if {$op eq ""} {
-            assert_match "{0 8191}" [ $master1 SFLUSH 0 8191]
-        } else {
-            assert_match "{0 8191}" [ $master1 SFLUSH 0 8191 $op]
-        }
-        assert {[$master1 DBSIZE] == 0}
-        assert {[$master2 DBSIZE] > 0}
-        assert_match "{8192 16383}" [ $master2 SFLUSH 8192 16383]
-        assert {[$master2 DBSIZE] == 0}
-    }
-}
-
 test "Continuous slots distribution" {
     assert_match "* 0-8191*" [$master1 CLUSTER NODES]
     assert_match "* 8192-16383*" [$master2 CLUSTER NODES]
diff --git a/tests/support/server.tcl b/tests/support/server.tcl
index 43b74d061..0db72cbfe 100644
--- a/tests/support/server.tcl
+++ b/tests/support/server.tcl
@@ -241,6 +241,11 @@ proc tags_acceptable {tags err_return} {
         return 0
     }
 
+    if { [lsearch $tags "experimental"] >=0 && [lsearch $::allowtags "experimental"] == -1 } {
+        set err "experimental test not allowed"
+        return 0
+    }
+
     return 1
 }
 
diff --git a/tests/unit/cluster/multi-slot-operations.tcl b/tests/unit/cluster/multi-slot-operations.tcl
index cc7bb7ae0..5d2d03e85 100644
--- a/tests/unit/cluster/multi-slot-operations.tcl
+++ b/tests/unit/cluster/multi-slot-operations.tcl
@@ -107,3 +107,76 @@ test "DELSLOTSRANGE command with several boundary conditions test suite" {
     assert_match "*9829 11000*12001 12100*12201 13104*" [$master4 CLUSTER SLOTS]
 }
 } cluster_allocate_with_continuous_slots_local
+
+start_cluster 2 0 {tags {external:skip cluster experimental}} {
+
+set master1 [srv 0 "client"]
+set master2 [srv -1 "client"]
+
+test "SFLUSH - Errors and output validation" {
+    assert_match "* 0-8191*" [$master1 CLUSTER NODES]
+    assert_match "* 8192-16383*" [$master2 CLUSTER NODES]
+    assert_match "*0 8191*" [$master1 CLUSTER SLOTS]
+    assert_match "*8192 16383*" [$master2 CLUSTER SLOTS]
+
+    # make master1 non-continuous slots
+    $master1 cluster DELSLOTSRANGE 1000 2000
+
+    # Test SFLUSH errors validation
+    assert_error {ERR wrong number of arguments*} {$master1 SFLUSH 4}
+    assert_error {ERR wrong number of arguments*} {$master1 SFLUSH 4 SYNC}
+    assert_error {ERR Invalid or out of range slot} {$master1 SFLUSH x 4}
+    assert_error {ERR Invalid or out of range slot} {$master1 SFLUSH 0 12x}
+    assert_error {ERR Slot 3 specified multiple times} {$master1 SFLUSH 2 4 3 5}
+    assert_error {ERR start slot number 8 is greater than*} {$master1 SFLUSH 8 4}
+    assert_error {ERR wrong number of arguments*} {$master1 SFLUSH 4 8 10}
+    assert_error {ERR wrong number of arguments*} {$master1 SFLUSH 0 999 2001 8191 ASYNCX}
+
+    # Test SFLUSH output validation
+    assert_match "" [$master1 SFLUSH 2 4]
+    assert_match "" [$master1 SFLUSH 0 4]
+    assert_match "" [$master2 SFLUSH 0 4]
+    assert_match "" [$master1 SFLUSH 1 8191]
+    assert_match "" [$master1 SFLUSH 0 8190]
+    assert_match "" [$master1 SFLUSH 0 998 2001 8191]
+    assert_match "" [$master1 SFLUSH 1 999 2001 8191]
+    assert_match "" [$master1 SFLUSH 0 999 2001 8190]
+    assert_match "" [$master1 SFLUSH 0 999 2002 8191]
+    assert_match "{0 999} {2001 8191}" [$master1 SFLUSH 0 999 2001 8191]
+    assert_match "{0 999} {2001 8191}" [$master1 SFLUSH 0 8191]
+    assert_match "{0 999} {2001 8191}" [$master1 SFLUSH 0 4000 4001 8191]
+    assert_match "" [$master2 SFLUSH 8193 16383]
+    assert_match "" [$master2 SFLUSH 8192 16382]
+    assert_match "{8192 16383}" [$master2 SFLUSH 8192 16383]
+    assert_match "{8192 16383}" [$master2 SFLUSH 8192 16383 SYNC]
+    assert_match "{8192 16383}" [$master2 SFLUSH 8192 16383 ASYNC]
+    assert_match "{8192 16383}" [$master2 SFLUSH 8192 9000 9001 16383]
+    assert_match "{8192 16383}" [$master2 SFLUSH 8192 9000 9001 16383 SYNC]
+    assert_match "{8192 16383}" [$master2 SFLUSH 8192 9000 9001 16383 ASYNC]
+
+    # restore master1 continuous slots
+    $master1 cluster ADDSLOTSRANGE 1000 2000
+}
+
+test "SFLUSH - Deletes the keys with argument /SYNC/ASYNC" {
+    foreach op {"" "SYNC" "ASYNC"} {
+        for {set i 0} {$i < 100} {incr i} {
+            catch {$master1 SET key$i val$i}
+            catch {$master2 SET key$i val$i}
+        }
+
+        assert {[$master1 DBSIZE] > 0}
+        assert {[$master2 DBSIZE] > 0}
+        if {$op eq ""} {
+            assert_match "{0 8191}" [ $master1 SFLUSH 0 8191]
+        } else {
+            assert_match "{0 8191}" [ $master1 SFLUSH 0 8191 $op]
+        }
+        assert {[$master1 DBSIZE] == 0}
+        assert {[$master2 DBSIZE] > 0}
+        assert_match "{8192 16383}" [ $master2 SFLUSH 8192 16383]
+        assert {[$master2 DBSIZE] == 0}
+    }
+}
+
+}
diff --git a/utils/generate-command-code.py b/utils/generate-command-code.py
index 2d7cc5b0d..76c8c3b15 100755
--- a/utils/generate-command-code.py
+++ b/utils/generate-command-code.py
@@ -517,6 +517,11 @@ class Subcommand(Command):
 
 
 def create_command(name, desc):
+    flags = desc.get("command_flags")
+    if flags and "EXPERIMENTAL" in flags:
+        print("Command %s is experimental, skipping..." % name)
+        return
+
     if desc.get("container"):
         cmd = Subcommand(name.upper(), desc)
         subcommands.setdefault(desc["container"].upper(), {})[name] = cmd