diff --git a/include/pika_client_conn.h b/include/pika_client_conn.h index 6b5dbab419..9008d848c8 100644 --- a/include/pika_client_conn.h +++ b/include/pika_client_conn.h @@ -99,8 +99,7 @@ class PikaClientConn : public net::RedisConn { void AddKeysToWatch(const std::vector& db_keys); void RemoveWatchedKeys(); void SetTxnFailedFromKeys(const std::vector& db_keys); - void SetAllTxnFailed(); - void SetTxnFailedFromDBs(std::string db_name); + void SetTxnFailedIfKeyExists(const std::string target_db_name = ""); void ExitTxn(); bool IsInTxn(); bool IsTxnInitFailed(); diff --git a/pikatests.sh b/pikatests.sh index 9e1df19563..7d3163c40e 100755 --- a/pikatests.sh +++ b/pikatests.sh @@ -45,7 +45,7 @@ function setup_pika_bin { exit 1 fi cp $PIKA_BIN src/redis-server - cp conf/pika.conf tests/assets/default.conf + cp tests/conf/pika.conf tests/assets/default.conf } diff --git a/src/pika_client_conn.cc b/src/pika_client_conn.cc index c5f0a09844..02233968db 100644 --- a/src/pika_client_conn.cc +++ b/src/pika_client_conn.cc @@ -186,28 +186,28 @@ std::shared_ptr PikaClientConn::DoCmd(const PikaCmdArgsType& argv, const st } } - // Process Command - c_ptr->Execute(); - time_stat_->process_done_ts_ = pstd::NowMicros(); - auto cmdstat_map = g_pika_cmd_table_manager->GetCommandStatMap(); - (*cmdstat_map)[opt].cmd_count.fetch_add(1); - (*cmdstat_map)[opt].cmd_time_consuming.fetch_add(time_stat_->total_time()); - if (c_ptr->res().ok() && c_ptr->is_write() && name() != kCmdNameExec) { if (c_ptr->name() == kCmdNameFlushdb) { auto flushdb = std::dynamic_pointer_cast(c_ptr); - SetTxnFailedFromDBs(flushdb->GetFlushDBname()); + SetTxnFailedIfKeyExists(flushdb->GetFlushDBname()); } else if (c_ptr->name() == kCmdNameFlushall) { - SetAllTxnFailed(); + SetTxnFailedIfKeyExists(); } else { auto table_keys = c_ptr->current_key(); for (auto& key : table_keys) { - key = c_ptr->db_name().append(key); + key = c_ptr->db_name().append("_").append(key); } SetTxnFailedFromKeys(table_keys); } } + // Process 
Command + c_ptr->Execute(); + time_stat_->process_done_ts_ = pstd::NowMicros(); + auto cmdstat_map = g_pika_cmd_table_manager->GetCommandStatMap(); + (*cmdstat_map)[opt].cmd_count.fetch_add(1); + (*cmdstat_map)[opt].cmd_time_consuming.fetch_add(time_stat_->total_time()); + if (g_pika_conf->slowlog_slower_than() >= 0) { ProcessSlowlog(argv, c_ptr->GetDoDuration()); } @@ -386,32 +386,42 @@ void PikaClientConn::SetTxnFailedFromKeys(const std::vector& db_key auto involved_conns = std::vector>{}; involved_conns = dispatcher->GetInvolvedTxn(db_keys); for (auto& conn : involved_conns) { - if (auto c = std::dynamic_pointer_cast(conn); c != nullptr && c.get() != this) { + if (auto c = std::dynamic_pointer_cast(conn); c != nullptr) { c->SetTxnWatchFailState(true); } } } } -void PikaClientConn::SetAllTxnFailed() { +// if key in target_db exists, then the key been watched multi will be failed +void PikaClientConn::SetTxnFailedIfKeyExists(std::string target_db_name) { auto dispatcher = dynamic_cast(server_thread()); - if (dispatcher != nullptr) { - auto involved_conns = dispatcher->GetAllTxns(); - for (auto& conn : involved_conns) { - if (auto c = std::dynamic_pointer_cast(conn); c != nullptr && c.get() != this) { - c->SetTxnWatchFailState(true); - } - } + if (dispatcher == nullptr) { + return; } -} + auto involved_conns = dispatcher->GetAllTxns(); + for (auto& conn : involved_conns) { + std::shared_ptr c; + if (c = std::dynamic_pointer_cast(conn); c == nullptr) { + continue; + } -void PikaClientConn::SetTxnFailedFromDBs(std::string db_name) { - auto dispatcher = dynamic_cast(server_thread()); - if (dispatcher != nullptr) { - auto involved_conns = dispatcher->GetDBTxns(db_name); - for (auto& conn : involved_conns) { - if (auto c = std::dynamic_pointer_cast(conn); c != nullptr && c.get() != this) { - c->SetTxnWatchFailState(true); + for (const auto& db_key : c->watched_db_keys_) { + size_t pos = db_key.find('_'); + if (pos == std::string::npos) { + continue; + } + + auto db_name 
= db_key.substr(0, pos); + auto key = db_key.substr(pos + 1); + + if (target_db_name == "" || target_db_name == "all" || target_db_name == db_name) { + auto db = g_pika_server->GetDB(db_name); + // if watched key exists, set watch state to failed + if (db->storage()->Exists({key}) > 0) { + c->SetTxnWatchFailState(true); + break; + } } } } diff --git a/src/pika_transaction.cc b/src/pika_transaction.cc index cdf47856df..66ccb6ca98 100644 --- a/src/pika_transaction.cc +++ b/src/pika_transaction.cc @@ -57,14 +57,14 @@ void ExecCmd::Do() { if (cmd->name() == kCmdNameFlushall) { auto flushall = std::dynamic_pointer_cast(cmd); flushall->FlushAllWithoutLock(); - client_conn->SetAllTxnFailed(); + client_conn->SetTxnFailedIfKeyExists(); } else if (cmd->name() == kCmdNameFlushdb) { auto flushdb = std::dynamic_pointer_cast(cmd); flushdb->FlushAllDBsWithoutLock(); if (cmd->res().ok()) { cmd->res().SetRes(CmdRes::kOk); } - client_conn->SetTxnFailedFromDBs(each_cmd_info.db_->GetDBName()); + client_conn->SetTxnFailedIfKeyExists(each_cmd_info.db_->GetDBName()); } else { cmd->Do(); if (cmd->res().ok() && cmd->is_write()) { @@ -258,7 +258,7 @@ void WatchCmd::DoInitial() { size_t pos = 1; while (pos < argv_.size()) { keys_.emplace_back(argv_[pos]); - db_keys_.push_back(db_name() + argv_[pos++]); + db_keys_.push_back(db_name() + "_" + argv_[pos++]); } } diff --git a/tests/assets/default.conf b/tests/assets/default.conf index 468d253e89..1a7b815885 100644 --- a/tests/assets/default.conf +++ b/tests/assets/default.conf @@ -34,10 +34,17 @@ slow-cmd-thread-pool-size : 1 # Slow cmd list e.g. hgetall, mset slow-cmd-list : -# The number of sync-thread for data replication from master, those are the threads work on slave nodes -# and are used to execute commands sent from master node when replicating. +# The number of threads to write DB in slaveNode when replicating. +# It's preferable to set slave's sync-thread-num value close to master's thread-pool-size. 
sync-thread-num : 6 +# The num of threads to write binlog in slaveNode when replicating, +# each DB could only bind to one sync-binlog-thread to write binlog in maximum +#[NOTICE] It's highly recommended to set sync-binlog-thread-num equal to conf item 'database'(then each DB could have an exclusive thread to write binlog), +# eg. if you use 8 DBs(databases_ is 8), sync-binlog-thread-num is preferable to be 8 +# Valid range of sync-binlog-thread-num is [1, databases], the final value of it is Min(sync-binlog-thread-num, databases) +sync-binlog-thread-num : 1 + # Directory to store log files of Pika, which contains multiple types of logs, # Including: INFO, WARNING, ERROR log, as well as binglog(write2fine) file which # is used for replication. @@ -101,6 +108,8 @@ instance-mode : classic # The default database id is DB 0. You can select a different one on # a per-connection by using SELECT. The db id range is [0, 'databases' value -1]. # The value range of this parameter is [1, 8]. +# [NOTICE] It's RECOMMENDED to set sync-binlog-thread-num equal to DB num(databases), +# if you've changed the value of databases, remember to check if the value of sync-binlog-thread-num is proper. databases : 1 # The number of followers of a master. Only [0, 1, 2, 3, 4] is valid at present. # By default, this num is set to 0, which means this feature is [not enabled] @@ -308,6 +317,11 @@ max-write-buffer-num : 2 # whether the key exists. Setting this value too high may hurt performance. min-write-buffer-number-to-merge : 1 +# The total size of wal files, when it reaches this limit, rocksdb will force the flush of column-families +# whose memtables are backed by the oldest live WAL file. Also used to control the rocksdb open time when +# process restart. +max-total-wal-size : 1073741824 + # rocksdb level0_stop_writes_trigger level0-stop-writes-trigger : 36 @@ -466,9 +480,14 @@ default-slot-num : 1024 # The cache will be sharded into 2^blob-num-shard-bits shards.
# blob-num-shard-bits : -1 -# Rsync Rate limiting configuration 200MB/s +# Rsync Rate limiting configuration [Default value is 200MB/s] +# [USED BY SLAVE] The transmitting speed(Rsync Rate) In full replication is controlled BY SLAVE NODE, You should modify the throttle-bytes-per-second in slave's pika.conf if you wanna change the rsync rate limit. +# [Dynamic Change Supported] send command 'config set throttle-bytes-per-second new_value' to SLAVE NODE can dynamically adjust rsync rate during full sync(use config rewrite can persist the changes). throttle-bytes-per-second : 207200000 - +# Rsync timeout in full sync stage[Default value is 1000 ms], unnecessary retries will happen if this value is too small. +# [Dynamic Change Supported] similar to throttle-bytes-per-second, rsync-timeout-ms can be dynamically changed by configset command +# [USED BY SLAVE] Similar to throttle-bytes-per-second, you should change rsync-timeout-ms's value in slave's conf file if it is needed to adjust. +rsync-timeout-ms : 1000 # The valid range for max-rsync-parallel-num is [1, 4]. # If an invalid value is provided, max-rsync-parallel-num will automatically be reset to 4. max-rsync-parallel-num : 4 diff --git a/tests/conf/pika.conf b/tests/conf/pika.conf index e3966c4925..2a2d3dbac5 100644 --- a/tests/conf/pika.conf +++ b/tests/conf/pika.conf @@ -7,7 +7,9 @@ # Port 10221 is used for Rsync, and port 11221 is used for Replication, while the listening port is 9221. port : 9221 -db-instance-num : 3 +db-instance-num : 3 +rocksdb-ttl-second : 86400 * 7; +rocksdb-periodic-second : 86400 * 3; # Random value identifying the Pika server, its string length must be 40. # If not set, Pika will generate a random string with a length of 40 random characters. @@ -25,10 +27,24 @@ thread-num : 1 # are dedicated to handling user requests. 
thread-pool-size : 12 -# The number of sync-thread for data replication from master, those are the threads work on slave nodes -# and are used to execute commands sent from master node when replicating. +# Size of the low level thread pool, The threads within this pool +# are dedicated to handling slow user requests. +slow-cmd-thread-pool-size : 1 + +# Slow cmd list e.g. hgetall, mset +slow-cmd-list : + +# The number of threads to write DB in slaveNode when replicating. +# It's preferable to set slave's sync-thread-num value close to master's thread-pool-size. sync-thread-num : 6 +# The num of threads to write binlog in slaveNode when replicating, +# each DB could only bind to one sync-binlog-thread to write binlog in maximum +#[NOTICE] It's highly recommended to set sync-binlog-thread-num equal to conf item 'database'(then each DB could have an exclusive thread to write binlog), +# eg. if you use 8 DBs(databases_ is 8), sync-binlog-thread-num is preferable to be 8 +# Valid range of sync-binlog-thread-num is [1, databases], the final value of it is Min(sync-binlog-thread-num, databases) +sync-binlog-thread-num : 1 + # Directory to store log files of Pika, which contains multiple types of logs, # Including: INFO, WARNING, ERROR log, as well as binglog(write2fine) file which # is used for replication. @@ -70,7 +86,7 @@ requirepass : # [NOTICE] The value of this parameter must match the "requirepass" setting on the master. masterauth : -# The [password of user], which is empty by default.(Deprecated) +# The [password of user], which is empty by default. # [NOTICE] If this user password is the same as admin password (including both being empty), # the value of this parameter will be ignored and all users are considered as administrators, # in this scenario, users are not subject to the restrictions imposed by the userblacklist. @@ -92,7 +108,9 @@ instance-mode : classic # The default database id is DB 0.
You can select a different one on # a per-connection by using SELECT. The db id range is [0, 'databases' value -1]. # The value range of this parameter is [1, 8]. -databases : 1 +# [NOTICE] It's RECOMMENDED to set sync-binlog-thread-num equal to DB num(databases), +# if you've changed the value of databases, remember to check if the value of sync-binlog-thread-num is proper. +databases : 3 # The number of followers of a master. Only [0, 1, 2, 3, 4] is valid at present. # By default, this num is set to 0, which means this feature is [not enabled] @@ -219,6 +237,11 @@ slave-priority : 100 # [NOTICE]: compact-interval is prior than compact-cron. #compact-interval : +# The disable_auto_compactions option is [true | false] +disable_auto_compactions : false + +# Rocksdb max_subcompactions +max-subcompactions : 1 # The minimum disk usage ratio for checking resume. # If the disk usage ratio is lower than min-check-resume-ratio, it will not check resume, only higher will check resume. # Its default value is 0.7. @@ -269,6 +292,7 @@ max-cache-statistic-keys : 0 # a small compact is triggered automatically if the small compaction feature is enabled. # small-compaction-threshold default value is 5000 and the value range is [1, 100000]. small-compaction-threshold : 5000 +small-compaction-duration-threshold : 10000 # The maximum total size of all live memtables of the RocksDB instance that owned by Pika. # Flushing from memtable to disk will be triggered if the actual memory usage of RocksDB @@ -283,6 +307,30 @@ max-write-buffer-size : 10737418240 # If max-write-buffer-num > 3, writing will be slowed down. max-write-buffer-num : 2 +# `min_write_buffer_number_to_merge` is the minimum number of memtables +# that need to be merged before placing the order. For example, if the +# option is set to 2, immutable memtables will only be flushed if there +# are two of them - a single immutable memtable will never be flushed. 
+# If multiple memtables are merged together, less data will be written +# to storage because the two updates are merged into a single key. However, +# each Get() must linearly traverse all unmodifiable memtables and check +# whether the key exists. Setting this value too high may hurt performance. +min-write-buffer-number-to-merge : 1 + +# The total size of wal files, when it reaches this limit, rocksdb will force the flush of column-families +# whose memtables are backed by the oldest live WAL file. Also used to control the rocksdb open time when +# process restart. +max-total-wal-size : 1073741824 + +# rocksdb level0_stop_writes_trigger +level0-stop-writes-trigger : 36 + +# rocksdb level0_slowdown_writes_trigger +level0-slowdown-writes-trigger : 20 + +# rocksdb level0_file_num_compaction_trigger +level0-file-num-compaction-trigger : 4 + # The maximum size of the response package to client to prevent memory # exhaustion caused by commands like 'keys *' and 'Scan' which can generate huge response. # Supported Units [K|M|G]. The default unit is in [bytes]. @@ -328,6 +376,12 @@ max-bytes-for-level-multiplier : 10 # slotmigrate [yes | no] slotmigrate : no +# slotmigrate thread num +slotmigrate-thread-num : 1 + +# thread-migrate-keys-num 1/8 of the write_buffer_size_ +thread-migrate-keys-num : 64 + # BlockBasedTable block_size, default 4k # block-size: 4096 @@ -346,6 +400,12 @@ slotmigrate : no # The slot number of pika when used with codis.
default-slot-num : 1024 +# enable-partitioned-index-filters [yes | no] +# When `cache-index-and-filter-blocks` is enabled, `pin_l0_filter_and_index_blocks_in_cache` +# and `cache-index-and-filter-blocks` is suggested to be enabled +# https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters +# enable-partitioned-index-filters: default no + # whether or not index and filter blocks is stored in block cache # cache-index-and-filter-blocks: no @@ -364,6 +424,10 @@ default-slot-num : 1024 # https://github.com/EighteenZi/rocksdb_wiki/blob/master/Rate-Limiter.md #######################################################################E####### +# rate limiter mode +# 0: Read 1: Write 2: ReadAndWrite +# rate-limiter-mode : default 1 + # rate limiter bandwidth, default 2000MB/s #rate-limiter-bandwidth : 2097152000 @@ -416,8 +480,16 @@ default-slot-num : 1024 # The cache will be sharded into 2^blob-num-shard-bits shards. # blob-num-shard-bits : -1 -# Rsync Rate limiting configuration 200MB/s +# Rsync Rate limiting configuration [Default value is 200MB/s] +# [USED BY SLAVE] The transmitting speed(Rsync Rate) In full replication is controlled BY SLAVE NODE, You should modify the throttle-bytes-per-second in slave's pika.conf if you wanna change the rsync rate limit. +# [Dynamic Change Supported] send command 'config set throttle-bytes-per-second new_value' to SLAVE NODE can dynamically adjust rsync rate during full sync(use config rewrite can persist the changes). throttle-bytes-per-second : 207200000 +# Rsync timeout in full sync stage[Default value is 1000 ms], unnecessary retries will happen if this value is too small. +# [Dynamic Change Supported] similar to throttle-bytes-per-second, rsync-timeout-ms can be dynamically changed by configset command +# [USED BY SLAVE] Similar to throttle-bytes-per-second, you should change rsync-timeout-ms's value in slave's conf file if it is needed to adjust. 
+rsync-timeout-ms : 1000 +# The valid range for max-rsync-parallel-num is [1, 4]. +# If an invalid value is provided, max-rsync-parallel-num will automatically be reset to 4. max-rsync-parallel-num : 4 # The synchronization mode of Pika primary/secondary replication is determined by ReplicationID. ReplicationID in one replication_cluster are the same @@ -432,7 +504,7 @@ cache-num : 16 # cache-model 0:cache_none 1:cache_read cache-model : 1 # cache-type: string, set, zset, list, hash, bit -cache-type: string, set, zset, list, hash +cache-type: string, set, zset, list, hash, bit # Maximum number of keys in the zset redis cache # On the disk DB, a zset field may have many fields. In the memory cache, we limit the maximum @@ -498,18 +570,19 @@ cache-lfu-decay-time: 1 # # aclfile : ../conf/users.acl +# (experimental) # It is possible to change the name of dangerous commands in a shared environment. # For instance the CONFIG command may be renamed into something Warning: To prevent # data inconsistency caused by different configuration files, do not use the rename # command to modify write commands on the primary and secondary servers. If necessary, # ensure that the configuration files of the primary and secondary servers are consistent # In addition, when using the command rename, you must not use "" to modify the command, -# for example, rename-command: FLUSHALL "360flushall" is incorrect; instead, use -# rename-command: FLUSHALL 360flushall is correct. After the rename command is executed, +# for example, rename-command: FLUSHDB "360flushdb" is incorrect; instead, use +# rename-command: FLUSHDB 360flushdb is correct. 
After the rename command is executed, # it is most appropriate to use a numeric string with uppercase or lowercase letters -# for example: rename-command : FLUSHALL joYAPNXRPmcarcR4ZDgC81TbdkSmLAzRPmcarcR +# for example: rename-command : FLUSHDB joYAPNXRPmcarcR4ZDgC81TbdkSmLAzRPmcarcR +# Warning: Currently only applies to flushdb, slaveof, bgsave, shutdown, config command +# Warning: Ensure that the Settings of rename-command on the master and slave servers are consistent # # Example: -# -# rename-command : FLUSHALL 360flushall -# rename-command : FLUSHDB 360flushdb \ No newline at end of file +# rename-command : FLUSHDB 360flushdb diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 6f363654c4..2dc7499837 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -15,7 +15,6 @@ set ::all_tests { unit/printver unit/basic unit/scan - unit/multi unit/quit unit/pubsub unit/slowlog @@ -32,6 +31,7 @@ set ::all_tests { unit/type/zset unit/type/string unit/type/hash + unit/multi unit/type/stream # unit/expire # unit/protocol @@ -79,7 +79,7 @@ set ::force_failure 0 set ::timeout 600; # 10 minutes without progresses will quit the test. set ::last_progress [clock seconds] set ::active_servers {} ; # Pids of active Redis instances. - +set ::tls 0 # Set to 1 when we are running in client mode. The Redis test uses a # server-client model to run tests simultaneously. The server instance # runs the specified number of client instances that will actually run tests. @@ -179,6 +179,26 @@ proc cleanup {} { if {!$::quiet} {puts "OK"} } +proc redis_client {args} { + set level 0 + if {[llength $args] > 0 && [string is integer [lindex $args 0]]} { + set level [lindex $args 0] + set args [lrange $args 1 end] + } + + # create client that won't defers reading reply + set client [redis [srv $level "host"] [srv $level "port"] 0 $::tls] + + # select the right db and read the response (OK), or at least ping + # the server if we're in a singledb mode. 
+ if {$::singledb} { + $client ping + } else { + $client select 9 + } + return $client +} + proc test_server_main {} { cleanup set tclsh [info nameofexecutable] diff --git a/tests/unit/multi.tcl b/tests/unit/multi.tcl index 9e7102f708..5ebd1cbfca 100644 --- a/tests/unit/multi.tcl +++ b/tests/unit/multi.tcl @@ -1,5 +1,15 @@ +proc wait_for_dbsize {size} { + set r2 [redis_client] + wait_for_condition 50 100 { + [$r2 dbsize] == $size + } else { + fail "Target dbsize not reached" + } + $r2 close +} + start_server {tags {"multi"}} { - test {MUTLI / EXEC basics} { + test {MULTI / EXEC basics} { r del mylist r rpush mylist a r rpush mylist b @@ -47,83 +57,144 @@ start_server {tags {"multi"}} { } {*ERR WATCH*} test {EXEC fails if there are errors while queueing commands #1} { - r del foo1 foo2 + r del foo1{t} foo2{t} r multi - r set foo1 bar1 + r set foo1{t} bar1 catch {r non-existing-command} - r set foo2 bar2 + r set foo2{t} bar2 catch {r exec} e assert_match {EXECABORT*} $e - list [r exists foo1] [r exists foo2] + list [r exists foo1{t}] [r exists foo2{t}] } {0 0} -# This parameter is not available in Pika -# test {EXEC fails if there are errors while queueing commands #2} { -# set rd [redis_deferring_client] -# r del foo1 foo2 -# r multi -# r set foo1 bar1 -# $rd config set maxmemory 1 -# assert {[$rd read] eq {OK}} -# catch {r lpush mylist myvalue} -# $rd config set maxmemory 0 -# assert {[$rd read] eq {OK}} -# r set foo2 bar2 -# catch {r exec} e -# assert_match {EXECABORT*} $e -# $rd close -# list [r exists foo1] [r exists foo2] -# } {0 0} + # Pika not support parameter maxmemory + # test {EXEC fails if there are errors while queueing commands #2} { + # set rd [redis_deferring_client] + # r del foo1{t} foo2{t} + # r multi + # r set foo1{t} bar1 + # $rd config set maxmemory 1 + # assert {[$rd read] eq {OK}} + # catch {r lpush mylist{t} myvalue} + # $rd config set maxmemory 0 + # assert {[$rd read] eq {OK}} + # r set foo2{t} bar2 + # catch {r exec} e + # assert_match 
{EXECABORT*} $e + # $rd close + # list [r exists foo1{t}] [r exists foo2{t}] + # } {0 0} {needs:config-maxmemory} test {If EXEC aborts, the client MULTI state is cleared} { - r del foo1 foo2 + r del foo1{t} foo2{t} r multi - r set foo1 bar1 + r set foo1{t} bar1 catch {r non-existing-command} - r set foo2 bar2 + r set foo2{t} bar2 catch {r exec} e assert_match {EXECABORT*} $e r ping } {PONG} test {EXEC works on WATCHed key not modified} { - r watch x y z - r watch k + r watch x{t} y{t} z{t} + r watch k{t} r multi r ping r exec } {PONG} -# The return value of Pika is inconsistent with Redis -# test {EXEC fail on WATCHed key modified (1 key of 1 watched)} { -# r set x 30 -# r watch x -# r set x 40 -# r multi -# r ping -# r exec -# } {} - -# The return value of Pika is inconsistent with Redis -# test {EXEC fail on WATCHed key modified (1 key of 5 watched)} { -# r set x 30 -# r watch a b x k z -# r set x 40 -# r multi -# r ping -# r exec -# } {} - -# The return value of Pika is inconsistent with Redis -# test {EXEC fail on WATCHed key modified by SORT with STORE even if the result is empty} { -# r flushdb -# r lpush foo barsync" -# r watch foo -# r sort emptylist store foo -# r multi -# r ping -# r exec -# } {} + test {EXEC fail on WATCHed key modified (1 key of 1 watched)} { + r set x 30 + r watch x + r set x 40 + r multi + r ping + r exec + } {} + + test {EXEC fail on WATCHed key modified (1 key of 5 watched)} { + r set x{t} 30 + r watch a{t} b{t} x{t} k{t} z{t} + r set x{t} 40 + r multi + r ping + r exec + } {} + + # Pika does not support the sort command + # test {EXEC fail on WATCHed key modified by SORT with STORE even if the result is empty} { + # r flushdb + # r lpush foo bar + # r watch foo + # r sort emptylist store foo + # r multi + # r ping + # r exec + # } {} + + # Pika does not support the debug command + # test {EXEC fail on lazy expired WATCHed key} { + # r del key + # r debug set-active-expire 0 + + # for {set j 0} {$j < 10} {incr j} { + # r set key 1 
px 100 + # r watch key + # after 101 + # r multi + # r incr key + + # set res [r exec] + # if {$res eq {}} break + # } + # if {$::verbose} { puts "EXEC fail on lazy expired WATCHed key attempts: $j" } + + # r debug set-active-expire 1 + # set _ $res + # } {} {needs:debug} + + # Pika does not support the debug command + # test {WATCH stale keys should not fail EXEC} { + # r del x + # r debug set-active-expire 0 + # r set x foo px 1 + # after 2 + # r watch x + # r multi + # r ping + # assert_equal {PONG} [r exec] + # r debug set-active-expire 1 + # } {OK} {needs:debug} + + # Pika does not support the debug command + # test {Delete WATCHed stale keys should not fail EXEC} { + # r del x + # r debug set-active-expire 0 + # r set x foo px 1 + # after 2 + # r watch x + # # EXISTS triggers lazy expiry/deletion + # assert_equal 0 [r exists x] + # r multi + # r ping + # assert_equal {PONG} [r exec] + # r debug set-active-expire 1 + # } {OK} {needs:debug} + + # Pika does not support the debug command + # test {FLUSHDB while watching stale keys should not fail EXEC} { + # r del x + # r debug set-active-expire 0 + # r set x foo px 1 + # after 2 + # r watch x + # r flushdb + # r multi + # r ping + # assert_equal {PONG} [r exec] + # r debug set-active-expire 1 + # } {OK} {needs:debug} test {After successful EXEC key is no longer watched} { r set x 30 @@ -164,15 +235,14 @@ start_server {tags {"multi"}} { r unwatch } {OK} -# The return value of Pika is inconsistent with Redis -# test {FLUSHALL is able to touch the watched keys} { -# r set x 30 -# r watch x -# r flushall -# r multi -# r ping -# r exec -# } {} + test {FLUSHALL is able to touch the watched keys} { + r set x 30 + r watch x + r flushall + r multi + r ping + r exec + } {} test {FLUSHALL does not touch non affected keys} { r del x @@ -183,15 +253,14 @@ start_server {tags {"multi"}} { r exec } {PONG} -# The return value of Pika is inconsistent with Redis -# test {FLUSHDB is able to touch the watched keys} { -# r set x 30 
-# r watch x -# r flushdb -# r multi -# r ping -# r exec -# } {} + test {FLUSHDB is able to touch the watched keys} { + r set x 30 + r watch x + r flushdb + r multi + r ping + r exec + } {} test {FLUSHDB does not touch non affected keys} { r del x @@ -202,43 +271,119 @@ start_server {tags {"multi"}} { r exec } {PONG} -# The return value of Pika is inconsistent with Redis -# test {WATCH is able to remember the DB a key belongs to} { -# r select 5 -# r set x 30 -# r watch x -# r select 1 -# r set x 10 -# r select 5 -# r multi -# r ping -# set res [r exec] -# # Restore original DB -# r select 9 -# set res -# } {PONG} - -# The return value of Pika is inconsistent with Redis -# test {WATCH will consider touched keys target of EXPIRE} { -# r del x -# r set x foo -# r watch x -# r expire x 10 -# r multi -# r ping -# r exec -# } {} - - test {WATCH will not consider touched expired keys} { + # # Pika does not support the swapdb command + # test {SWAPDB is able to touch the watched keys that exist} { + # r flushall + # r select 0 + # r set x 30 + # r watch x ;# make sure x (set to 30) doesn't change (SWAPDB will "delete" it) + # r swapdb 0 1 + # r multi + # r ping + # r exec + # } {} {singledb:skip} + + # # Pika does not support the swapdb command + # test {SWAPDB is able to touch the watched keys that do not exist} { + # r flushall + # r select 1 + # r set x 30 + # r select 0 + # r watch x ;# make sure the key x (currently missing) doesn't change (SWAPDB will create it) + # r swapdb 0 1 + # r multi + # r ping + # r exec + # } {} {singledb:skip} + + # # Pika does not support the swapdb command + # test {SWAPDB does not touch watched stale keys} { + # r flushall + # r select 1 + # r debug set-active-expire 0 + # r set x foo px 1 + # after 2 + # r watch x + # r swapdb 0 1 ; # expired key replaced with no key => no change + # r multi + # r ping + # assert_equal {PONG} [r exec] + # r debug set-active-expire 1 + # } {OK} {singledb:skip needs:debug} + + # # Pika does not support 
the swapdb command + # test {SWAPDB does not touch non-existing key replaced with stale key} { + # r flushall + # r select 0 + # r debug set-active-expire 0 + # r set x foo px 1 + # after 2 + # r select 1 + # r watch x + # r swapdb 0 1 ; # no key replaced with expired key => no change + # r multi + # r ping + # assert_equal {PONG} [r exec] + # r debug set-active-expire 1 + # } {OK} {singledb:skip needs:debug} + + # Pika does not support the swapdb command + # test {SWAPDB does not touch stale key replaced with another stale key} { + # r flushall + # r debug set-active-expire 0 + # r select 1 + # r set x foo px 1 + # r select 0 + # r set x bar px 1 + # after 2 + # r select 1 + # r watch x + # r swapdb 0 1 ; # no key replaced with expired key => no change + # r multi + # r ping + # assert_equal {PONG} [r exec] + # r debug set-active-expire 1 + # } {OK} {singledb:skip needs:debug} + + test {WATCH is able to remember the DB a key belongs to} { + r select 0 + r set x 30 + r watch x + r select 1 + r set x 10 + r select 0 + r multi + r ping + set res [r exec] + r select 2 + set res + } {PONG} + + test {WATCH will consider touched keys target of EXPIRE} { r del x r set x foo - r expire x 1 r watch x - after 1100 + r expire x 10 r multi r ping r exec - } {PONG} + } {} + + # wait_for_dbsize command not support + # test {WATCH will consider touched expired keys} { + # r flushall + # r del x + # r set x foo + # r expire x 1 + # r watch x + + # # Wait for the keys to expire. 
+ # wait_for_dbsize 0 + + # r multi + # r ping + # r exec + # } {} test {DISCARD should clear the WATCH dirty flag on the client} { r watch x @@ -261,61 +406,559 @@ start_server {tags {"multi"}} { r exec } {11} -# Pika does not support the sync command -# test {MULTI / EXEC is propagated correctly (single write command)} { -# set repl [attach_to_replication_stream] -# r multi -# r set foo bar -# r exec -# assert_replication_stream $repl { -# {select *} -# {multi} -# {set foo bar} -# {exec} -# } -# close_replication_stream $repl -# } - -# Pika does not support the sync command -# test {MULTI / EXEC is propagated correctly (empty transaction)} { -# set repl [attach_to_replication_stream] -# r multi -# r exec -# r set foo bar -# assert_replication_stream $repl { -# {select *} -# {set foo bar} -# } -# close_replication_stream $repl -# } - -# Pika does not support the sync command -# test {MULTI / EXEC is propagated correctly (read-only commands)} { -# r set foo value1 -# set repl [attach_to_replication_stream] -# r multi -# r get foo -# r exec -# r set foo value2 -# assert_replication_stream $repl { -# {select *} -# {set foo value2} -# } -# close_replication_stream $repl -# } - -# Pika does not support the sync command -# test {MULTI / EXEC is propagated correctly (write command, no effect)} { -# r del bar foo bar -# set repl [attach_to_replication_stream] -# r multi -# r del foo -# r exec -# assert_replication_stream $repl { -# {select *} -# {multi} -# {exec} -# } -# close_replication_stream $repl -# } + # Pika does not support the sync command + # test {MULTI / EXEC is not propagated (single write command)} { + # set repl [attach_to_replication_stream] + # r multi + # r set foo bar + # r exec + # r set foo2 bar + # assert_replication_stream $repl { + # {select *} + # {set foo bar} + # {set foo2 bar} + # } + # close_replication_stream $repl + # } {} {needs:repl} + + # Pika does not support the sync command + # test {MULTI / EXEC is propagated correctly (multiple 
commands)} { + # set repl [attach_to_replication_stream] + # r multi + # r set foo{t} bar + # r get foo{t} + # r set foo2{t} bar2 + # r get foo2{t} + # r set foo3{t} bar3 + # r get foo3{t} + # r exec + + # assert_replication_stream $repl { + # {multi} + # {select *} + # {set foo{t} bar} + # {set foo2{t} bar2} + # {set foo3{t} bar3} + # {exec} + # } + # close_replication_stream $repl + # } {} {needs:repl} + + # Pika does not support the sync command + # test {MULTI / EXEC is propagated correctly (multiple commands with SELECT)} { + # set repl [attach_to_replication_stream] + # r multi + # r select 1 + # r set foo{t} bar + # r get foo{t} + # r select 2 + # r set foo2{t} bar2 + # r get foo2{t} + # r select 3 + # r set foo3{t} bar3 + # r get foo3{t} + # r exec + + # assert_replication_stream $repl { + # {multi} + # {select *} + # {set foo{t} bar} + # {select *} + # {set foo2{t} bar2} + # {select *} + # {set foo3{t} bar3} + # {exec} + # } + # close_replication_stream $repl + # } {} {needs:repl singledb:skip} + + # Pika does not support the sync command + # test {MULTI / EXEC is propagated correctly (empty transaction)} { + # set repl [attach_to_replication_stream] + # r multi + # r exec + # r set foo bar + # assert_replication_stream $repl { + # {select *} + # {set foo bar} + # } + # close_replication_stream $repl + # } {} {needs:repl} + + # Pika does not support the sync command + # test {MULTI / EXEC is propagated correctly (read-only commands)} { + # r set foo value1 + # set repl [attach_to_replication_stream] + # r multi + # r get foo + # r exec + # r set foo value2 + # assert_replication_stream $repl { + # {select *} + # {set foo value2} + # } + # close_replication_stream $repl + # } {} {needs:repl} + + # Pika does not support the sync command + # test {MULTI / EXEC is propagated correctly (write command, no effect)} { + # r del bar + # r del foo + # set repl [attach_to_replication_stream] + # r multi + # r del foo + # r exec + + # # add another command so that 
when we see it we know multi-exec wasn't + # # propagated + # r incr foo + + # assert_replication_stream $repl { + # {select *} + # {incr foo} + # } + # close_replication_stream $repl + # } {} {needs:repl} + + # Pika does not support the sync command + # test {MULTI / EXEC with REPLICAOF} { + # # This test verifies that if we demote a master to replica inside a transaction, the + # # entire transaction is not propagated to the already-connected replica + # set repl [attach_to_replication_stream] + # r set foo bar + # r multi + # r set foo2 bar + # r replicaof localhost 9999 + # r set foo3 bar + # r exec + # catch {r set foo4 bar} e + # assert_match {READONLY*} $e + # assert_replication_stream $repl { + # {select *} + # {set foo bar} + # } + # r replicaof no one + # } {OK} {needs:repl cluster:skip} + + # Pika does not support the "config set maxmemory" command + # test {DISCARD should not fail during OOM} { + # set rd [redis_deferring_client] + # $rd config set maxmemory 1 + # assert {[$rd read] eq {OK}} + # r multi + # catch {r set x 1} e + # assert_match {OOM*} $e + # r discard + # $rd config set maxmemory 0 + # assert {[$rd read] eq {OK}} + # $rd close + # r ping + # } {PONG} {needs:config-maxmemory} + + # Pika does not support the "config set lua-time-limit" command + # test {MULTI and script timeout} { + # # check that if MULTI arrives during timeout, it is either refused, or + # # allowed to pass, and we don't end up executing half of the transaction + # set rd1 [redis_deferring_client] + # set r2 [redis_client] + # r config set lua-time-limit 10 + # r set xx 1 + # $rd1 eval {while true do end} 0 + # after 200 + # catch { $r2 multi; } e + # catch { $r2 incr xx; } e + # r script kill + # after 200 ; # Give some time to Lua to call the hook again... 
+ # catch { $r2 incr xx; } e + # catch { $r2 exec; } e + # assert_match {EXECABORT*previous errors*} $e + # set xx [r get xx] + # # make sure that either the whole transaction passed or none of it (we actually expect none) + # assert { $xx == 1 || $xx == 3} + # # check that the connection is no longer in multi state + # set pong [$r2 ping asdf] + # assert_equal $pong "asdf" + # $rd1 close; $r2 close + # } + + # Pika does not support the "config set lua-time-limit" command + # test {EXEC and script timeout} { + # # check that if EXEC arrives during timeout, we don't end up executing + # # half of the transaction, and also that we exit the multi state + # set rd1 [redis_deferring_client] + # set r2 [redis_client] + # r config set lua-time-limit 10 + # r set xx 1 + # catch { $r2 multi; } e + # catch { $r2 incr xx; } e + # $rd1 eval {while true do end} 0 + # after 200 + # catch { $r2 incr xx; } e + # catch { $r2 exec; } e + # assert_match {EXECABORT*BUSY*} $e + # r script kill + # after 200 ; # Give some time to Lua to call the hook again... + # set xx [r get xx] + # # make sure that either the whole transaction passed or none of it (we actually expect none) + # assert { $xx == 1 || $xx == 3} + # # check that the connection is no longer in multi state + # set pong [$r2 ping asdf] + # assert_equal $pong "asdf" + # $rd1 close; $r2 close + # } + + # Pika does not support the "config set lua-time-limit" command + # test {MULTI-EXEC body and script timeout} { + # # check that we don't run an incomplete transaction due to some commands + # # arriving during busy script + # set rd1 [redis_deferring_client] + # set r2 [redis_client] + # r config set lua-time-limit 10 + # r set xx 1 + # catch { $r2 multi; } e + # catch { $r2 incr xx; } e + # $rd1 eval {while true do end} 0 + # after 200 + # catch { $r2 incr xx; } e + # r script kill + # after 200 ; # Give some time to Lua to call the hook again... 
+ # catch { $r2 exec; } e + # assert_match {EXECABORT*previous errors*} $e + # set xx [r get xx] + # # make sure that either the whole transaction passed or none of it (we actually expect none) + # assert { $xx == 1 || $xx == 3} + # # check that the connection is no longer in multi state + # set pong [$r2 ping asdf] + # assert_equal $pong "asdf" + # $rd1 close; $r2 close + # } + + # Pika does not support the "config set lua-time-limit" command + # test {just EXEC and script timeout} { + # # check that if EXEC arrives during timeout, we don't end up executing + # # actual commands during busy script, and also that we exit the multi state + # set rd1 [redis_deferring_client] + # set r2 [redis_client] + # r config set lua-time-limit 10 + # r set xx 1 + # catch { $r2 multi; } e + # catch { $r2 incr xx; } e + # $rd1 eval {while true do end} 0 + # after 200 + # catch { $r2 exec; } e + # assert_match {EXECABORT*BUSY*} $e + # r script kill + # after 200 ; # Give some time to Lua to call the hook again... 
+ # set xx [r get xx] + # # make sure we didn't execute the transaction + # assert { $xx == 1} + # # check that the connection is no longer in multi state + # set pong [$r2 ping asdf] + # assert_equal $pong "asdf" + # $rd1 close; $r2 close + # } + + # Pika does not support the "config set min-replicas-to-write" command + # test {exec with write commands and state change} { + # # check that exec that contains write commands fails if server state changed since they were queued + # set r1 [redis_client] + # r set xx 1 + # r multi + # r incr xx + # $r1 config set min-replicas-to-write 2 + # catch {r exec} e + # assert_match {*EXECABORT*NOREPLICAS*} $e + # set xx [r get xx] + # # make sure that the INCR wasn't executed + # assert { $xx == 1} + # $r1 config set min-replicas-to-write 0 + # $r1 close + # } {0} {needs:repl} + + # Pika does not support the "config set replica-serve-stale-data" command + # test {exec with read commands and stale replica state change} { + # # check that exec that contains read commands fails if server state changed since they were queued + # r config set replica-serve-stale-data no + # set r1 [redis_client] + # r set xx 1 + + # # check that GET and PING are disallowed on stale replica, even if the replica becomes stale only after queuing. 
+ # r multi + # r get xx + # $r1 replicaof localhost 0 + # catch {r exec} e + # assert_match {*EXECABORT*MASTERDOWN*} $e + + # # reset + # $r1 replicaof no one + + # r multi + # r ping + # $r1 replicaof localhost 0 + # catch {r exec} e + # assert_match {*EXECABORT*MASTERDOWN*} $e + + # # check that when replica is not stale, GET is allowed + # # while we're at it, let's check that multi is allowed on stale replica too + # r multi + # $r1 replicaof no one + # r get xx + # set xx [r exec] + # # make sure that the INCR was executed + # assert { $xx == 1 } + # $r1 close + # } {0} {needs:repl cluster:skip} + + # Pika does not support the "config set maxmemory" command + # test {EXEC with only read commands should not be rejected when OOM} { + # set r2 [redis_client] + + # r set x value + # r multi + # r get x + # r ping + + # # enforcing OOM + # $r2 config set maxmemory 1 + + # # finish the multi transaction with exec + # assert { [r exec] == {value PONG} } + + # # releasing OOM + # $r2 config set maxmemory 0 + # $r2 close + # } {0} {needs:config-maxmemory} + + # Pika does not support the "config set maxmemory" command + # test {EXEC with at least one use-memory command should fail} { + # set r2 [redis_client] + + # r multi + # r set x 1 + # r get x + + # # enforcing OOM + # $r2 config set maxmemory 1 + + # # finish the multi transaction with exec + # catch {r exec} e + # assert_match {EXECABORT*OOM*} $e + + # # releasing OOM + # $r2 config set maxmemory 0 + # $r2 close + # } {0} {needs:config-maxmemory} + + # Pika does not support the xgroup command + # test {Blocking commands ignores the timeout} { + # r xgroup create s{t} g $ MKSTREAM + + # set m [r multi] + # r blpop empty_list{t} 0 + # r brpop empty_list{t} 0 + # r brpoplpush empty_list1{t} empty_list2{t} 0 + # r blmove empty_list1{t} empty_list2{t} LEFT LEFT 0 + # r bzpopmin empty_zset{t} 0 + # r bzpopmax empty_zset{t} 0 + # r xread BLOCK 0 STREAMS s{t} $ + # r xreadgroup group g c BLOCK 0 STREAMS s{t} > + # set 
res [r exec] + + # list $m $res + # } {OK {{} {} {} {} {} {} {} {}}} + + # Pika does not support the SYNC command + # test {MULTI propagation of PUBLISH} { + # set repl [attach_to_replication_stream] + + # r multi + # r publish bla bla + # r exec + + # assert_replication_stream $repl { + # {select *} + # {publish bla bla} + # } + # close_replication_stream $repl + # } {} {needs:repl cluster:skip} + + # Pika does not support the SYNC command + # test {MULTI propagation of SCRIPT LOAD} { + # set repl [attach_to_replication_stream] + + # # make sure that SCRIPT LOAD inside MULTI isn't propagated + # r multi + # r script load {redis.call('set', KEYS[1], 'foo')} + # r set foo bar + # set res [r exec] + # set sha [lindex $res 0] + + # assert_replication_stream $repl { + # {select *} + # {set foo bar} + # } + # close_replication_stream $repl + # } {} {needs:repl} + + # Pika does not support the SYNC command + # test {MULTI propagation of EVAL} { + # set repl [attach_to_replication_stream] + + # # make sure that EVAL inside MULTI is propagated in a transaction in effects + # r multi + # r eval {redis.call('set', KEYS[1], 'bar')} 1 bar + # r exec + + # assert_replication_stream $repl { + # {select *} + # {set bar bar} + # } + # close_replication_stream $repl + # } {} {needs:repl} + + # Pika does not support the SYNC command + # test {MULTI propagation of SCRIPT FLUSH} { + # set repl [attach_to_replication_stream] + + # # make sure that SCRIPT FLUSH isn't propagated + # r multi + # r script flush + # r set foo bar + # r exec + + # assert_replication_stream $repl { + # {select *} + # {set foo bar} + # } + # close_replication_stream $repl + # } {} {needs:repl} + + # Pika does not support the SYNC command + # tags {"stream"} { + # test {MULTI propagation of XREADGROUP} { + # set repl [attach_to_replication_stream] + + # r XADD mystream * foo bar + # r XADD mystream * foo2 bar2 + # r XADD mystream * foo3 bar3 + # r XGROUP CREATE mystream mygroup 0 + + # # make sure the XCLAIM 
(propagated by XREADGROUP) is indeed inside MULTI/EXEC + # r multi + # r XREADGROUP GROUP mygroup consumer1 COUNT 2 STREAMS mystream ">" + # r XREADGROUP GROUP mygroup consumer1 STREAMS mystream ">" + # r exec + + # assert_replication_stream $repl { + # {select *} + # {xadd *} + # {xadd *} + # {xadd *} + # {xgroup CREATE *} + # {multi} + # {xclaim *} + # {xclaim *} + # {xgroup SETID * ENTRIESREAD *} + # {xclaim *} + # {xgroup SETID * ENTRIESREAD *} + # {exec} + # } + # close_replication_stream $repl + # } {} {needs:repl} + # } + + # Pika does not support the SAVE command + foreach {cmd} {SAVE SHUTDOWN} { + # The return value of Pika is inconsistent with Redis + # test "MULTI with $cmd" { + # r del foo + # r multi + # r set foo bar + # catch {r $cmd} e1 + # catch {r exec} e2 + # assert_match {*Command not allowed inside a transaction*} $e1 + # assert_match {EXECABORT*} $e2 + # r get foo + # } {} + } + + # Pika does not support the BGREWRITEAOF command + # test "MULTI with BGREWRITEAOF" { + # set forks [s total_forks] + # r multi + # r set foo bar + # r BGREWRITEAOF + # set res [r exec] + # assert_match "*rewriting scheduled*" [lindex $res 1] + # wait_for_condition 50 100 { + # [s total_forks] > $forks + # } else { + # fail "aofrw didn't start" + # } + # waitForBgrewriteaof r + # } {} {external:skip} + + # Pika does not support the "config set appendonly" command + # test "MULTI with config set appendonly" { + # set lines [count_log_lines 0] + # set forks [s total_forks] + # r multi + # r set foo bar + # r config set appendonly yes + # r exec + # verify_log_message 0 "*AOF background was scheduled*" $lines + # wait_for_condition 50 100 { + # [s total_forks] > $forks + # } else { + # fail "aofrw didn't start" + # } + # waitForBgrewriteaof r + # } {} {external:skip} + + # Pika does not support the "config set maxmemory" command + # test "MULTI with config error" { + # r multi + # r set foo bar + # r config set maxmemory bla + + # # letting the redis parser read it, 
it'll throw an exception instead of + # # reply with an array that contains an error, so we switch to reading + # # raw RESP instead + # r readraw 1 + + # set res [r exec] + # assert_equal $res "*2" + # set res [r read] + # assert_equal $res "+OK" + # set res [r read] + # r readraw 0 + # set _ $res + # } {*CONFIG SET failed*} + + test "Flushall while watching several keys by one client" { + r flushall + r mset a{t} a b{t} b + r watch b{t} a{t} + r flushall + r ping + } } + +# Pika does not support AOF +# start_server {overrides {appendonly {yes} appendfilename {appendonly.aof} appendfsync always} tags {external:skip}} { +# test {MULTI with FLUSHALL and AOF} { +# set aof [get_last_incr_aof_path r] +# r multi +# r set foo bar +# r flushall +# r exec +# assert_aof_content $aof { +# {multi} +# {select *} +# {set *} +# {flushall} +# {exec} +# } +# r get foo +# } {} +# }