From 807a1c48dd689767dfb287ba2eedbc6ad503c4d6 Mon Sep 17 00:00:00 2001
From: QlQl <2458371920@qq.com>
Date: Fri, 24 May 2024 00:17:55 +0800
Subject: [PATCH 01/25] one worker thread, one list

---
 src/net/include/thread_pool.h | 29 +++++++----
 src/net/src/thread_pool.cc    | 91 +++++++++++++++++++++--------------
 2 files changed, 76 insertions(+), 44 deletions(-)

diff --git a/src/net/include/thread_pool.h b/src/net/include/thread_pool.h
index bd2e6d8609..8909a1c071 100644
--- a/src/net/include/thread_pool.h
+++ b/src/net/include/thread_pool.h
@@ -9,11 +9,12 @@
 #include <pthread.h>
 #include <atomic>
 #include <string>
+#include <vector>

 #include "net/include/net_define.h"
 #include "net/include/random.h"
 #include "pstd/include/pstd_mutex.h"

 namespace net {

 using TaskFunc = void (*)(void*);
@@ -30,7 +31,13 @@ class ThreadPool : public pstd::noncopyable {
  public:
   class Worker {
    public:
-    explicit Worker(ThreadPool* tp) : start_(false), thread_pool_(tp){};
+    struct Arg {
+      Arg(void* p, int i) : arg(p), idx(i) {}
+      void* arg;
+      int idx;
+    };
+
+    explicit Worker(ThreadPool* tp, int idx = 0) : start_(false), thread_pool_(tp), idx_(idx), arg_(tp, idx){};
     static void* WorkerMain(void* arg);

     int start();
@@ -41,6 +48,8 @@ class ThreadPool : public pstd::noncopyable {
     std::atomic<bool> start_;
     ThreadPool* const thread_pool_;
     std::string worker_name_;
+    int idx_;
+    Arg arg_;
   };

   explicit ThreadPool(size_t worker_num, size_t max_queue_size, std::string thread_pool_name = "ThreadPool");
@@ -60,7 +69,7 @@ class ThreadPool : public pstd::noncopyable {
   std::string thread_pool_name();

  private:
-  void runInThread();
+  void runInThread(const int idx = 0);

 public:
  struct AdaptationContext {
@@ -96,12 +105,14 @@ class ThreadPool : public pstd::noncopyable {
     // it's okay for other platforms to be no-ops
   }

-  Node* CreateMissingNewerLinks(Node* head);
+  Node* CreateMissingNewerLinks(Node* head, int* cnt);
   bool LinkOne(Node* node, std::atomic<Node*>* newest_node);

-  std::atomic<Node*> newest_node_;
+  int task_idx_;
+std::vector<std::atomic<Node*>> asd;
+  std::vector<std::atomic<Node*>> newest_node_;
   std::atomic<size_t> node_cnt_;  // for task
-  std::atomic<Node*> time_newest_node_;
+  std::vector<std::atomic<Node*>> time_newest_node_;
   std::atomic<size_t> time_node_cnt_;  // for time task

   const int queue_slow_size_;  // default value: min(worker_num_ * 10, max_queue_size_)
@@ -112,14 +123,14 @@ class ThreadPool : public pstd::noncopyable {

   AdaptationContext adp_ctx;

-  size_t worker_num_;
+  const size_t worker_num_;
   std::string thread_pool_name_;
   std::vector<Worker*> workers_;
   std::atomic<bool> running_;
   std::atomic<bool> should_stop_;

-  pstd::Mutex mu_;
-  pstd::CondVar rsignal_;
+  std::vector<pstd::Mutex> mu_;
+  std::vector<pstd::CondVar> rsignal_;
 };

 }  // namespace net
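[Editor's note] For readers following the header changes above, here is a minimal, self-contained sketch of the "one worker thread, one list" shape this patch introduces. MiniPool and everything in it are invented stand-ins (std::thread/std::mutex instead of the pthread/pstd primitives), not the patch's actual classes:

```cpp
#include <atomic>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

class MiniPool {
 public:
  explicit MiniPool(size_t n) : queues_(n), mus_(n), cvs_(n) {
    for (size_t i = 0; i < n; ++i) {
      // Like Worker::Arg{tp, idx}: each thread knows its own index and only
      // ever touches queues_[i]/mus_[i]/cvs_[i], so producers targeting
      // different indices never contend on one shared lock.
      threads_.emplace_back([this, i] { Run(i); });
    }
  }
  ~MiniPool() {
    stop_ = true;
    for (size_t i = 0; i < cvs_.size(); ++i) {
      std::lock_guard<std::mutex> g(mus_[i]);  // publish stop_ to each waiter
      cvs_[i].notify_all();
    }
    for (auto& t : threads_) t.join();
  }
  void Schedule(std::function<void()> task) {
    size_t i = next_++ % queues_.size();  // round-robin, like ++task_idx_ % worker_num_
    {
      std::lock_guard<std::mutex> g(mus_[i]);
      queues_[i].push(std::move(task));
    }
    cvs_[i].notify_one();  // wake only the worker that owns queue i
  }

 private:
  void Run(size_t i) {
    while (true) {
      std::unique_lock<std::mutex> lk(mus_[i]);
      cvs_[i].wait(lk, [&] { return stop_ || !queues_[i].empty(); });
      if (stop_ && queues_[i].empty()) return;  // drain before exiting
      auto task = std::move(queues_[i].front());
      queues_[i].pop();
      lk.unlock();
      task();
    }
  }

  std::vector<std::queue<std::function<void()>>> queues_;
  std::vector<std::mutex> mus_;
  std::vector<std::condition_variable> cvs_;
  std::vector<std::thread> threads_;
  std::atomic<size_t> next_{0};
  std::atomic<bool> stop_{false};
};

int main() {
  MiniPool pool(4);
  for (int i = 0; i < 8; ++i) pool.Schedule([i] { /* do work for task i */ });
  return 0;  // destructor drains pending tasks and joins the workers
}
```

The patch itself keeps lock-free linked lists per worker rather than locked queues; the sketch only shows the per-index ownership idea.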
diff --git a/src/net/src/thread_pool.cc b/src/net/src/thread_pool.cc
index 0a55ff9e0a..a1f615c831 100644
--- a/src/net/src/thread_pool.cc
+++ b/src/net/src/thread_pool.cc
@@ -14,15 +14,16 @@
 namespace net {

-void* ThreadPool::Worker::WorkerMain(void* arg) {
-  auto tp = static_cast<ThreadPool*>(arg);
-  tp->runInThread();
+void* ThreadPool::Worker::WorkerMain(void* p) {
+  auto arg = static_cast<Arg*>(p);
+  auto tp = static_cast<ThreadPool*>(arg->arg);
+  tp->runInThread(arg->idx);
   return nullptr;
 }

 int ThreadPool::Worker::start() {
   if (!start_.load()) {
-    if (pthread_create(&thread_id_, nullptr, &WorkerMain, thread_pool_) != 0) {
+    if (pthread_create(&thread_id_, nullptr, &WorkerMain, &arg_) != 0) {
       return -1;
     } else {
       start_.store(true);
@@ -44,9 +45,9 @@ int ThreadPool::Worker::stop() {
 }

 ThreadPool::ThreadPool(size_t worker_num, size_t max_queue_size, std::string thread_pool_name)
-    : newest_node_(nullptr),
+    : newest_node_(worker_num),
       node_cnt_(0),
-      time_newest_node_(nullptr),
+      time_newest_node_(worker_num),
       time_node_cnt_(0),
       queue_slow_size_(std::min(worker_num * 10, max_queue_size)),
       max_queue_size_(max_queue_size),
@@ -56,7 +57,14 @@ ThreadPool::ThreadPool(size_t worker_num, size_t max_queue_size, std::string thr
       worker_num_(worker_num),
       thread_pool_name_(std::move(thread_pool_name)),
       running_(false),
-      should_stop_(false) {}
+      should_stop_(false),
+      mu_(worker_num),
+      rsignal_(worker_num) {
+  for (size_t i = 0; i < worker_num_; ++i) {
+    newest_node_[i] = nullptr;
+    time_newest_node_[i] = nullptr;
+  }
+}

 ThreadPool::~ThreadPool() { stop_thread_pool(); }

@@ -64,7 +72,7 @@ int ThreadPool::start_thread_pool() {
   if (!running_.load()) {
     should_stop_.store(false);
     for (size_t i = 0; i < worker_num_; ++i) {
-      workers_.push_back(new Worker(this));
+      workers_.push_back(new Worker(this, i));
       int res = workers_[i]->start();
       if (res != 0) {
         return kCreateThreadError;
@@ -79,7 +87,9 @@ int ThreadPool::stop_thread_pool() {
   int res = 0;
   if (running_.load()) {
     should_stop_.store(true);
-    rsignal_.notify_all();
+    for (auto& r : rsignal_) {
+      r.notify_all();
+    }
     for (const auto worker : workers_) {
       res = worker->stop();
       if (res != 0) {
@@ -107,12 +117,13 @@ void ThreadPool::Schedule(TaskFunc func, void* arg) {
   if (node_cnt_.load(std::memory_order_relaxed) >= queue_slow_size_) {
     std::this_thread::yield();
   }
-  // std::unique_lock lock(mu_);
+
   if (LIKELY(!should_stop())) {
     auto node = new Node(func, arg);
+    auto idx = ++task_idx_ % worker_num_;
-    LinkOne(node, &newest_node_);
+    LinkOne(node, &newest_node_[idx]);
     node_cnt_++;
-    rsignal_.notify_one();
+    rsignal_[idx].notify_one();
   }
 }

@@ -124,12 +135,12 @@ void ThreadPool::DelaySchedule(uint64_t timeout, TaskFunc func, void* arg) {
   uint64_t unow = std::chrono::duration_cast<std::chrono::microseconds>(now.time_since_epoch()).count();
   uint64_t exec_time = unow + timeout * 1000;

-  // std::unique_lock lock(mu_);
   if (LIKELY(!should_stop())) {
+    auto idx = ++task_idx_ % worker_num_;
     auto node = new Node(exec_time, func, arg);
-    LinkOne(node, &time_newest_node_);
+    LinkOne(node, &time_newest_node_[idx]);
     time_node_cnt_++;
-    rsignal_.notify_all();
+    rsignal_[idx].notify_all();
   }
 }

@@ -143,15 +154,20 @@ void ThreadPool::cur_time_queue_size(size_t* qsize) { *qsize = time_node_cnt_.lo

 std::string ThreadPool::thread_pool_name() { return thread_pool_name_; }

-void ThreadPool::runInThread() {
+void ThreadPool::runInThread(const int idx) {
   Node* tmp = nullptr;
   Node* last = nullptr;
   Node* time_last = nullptr;

+  auto& newest_node = newest_node_[idx];
+  auto& time_newest_node = time_newest_node_[idx];
+  auto& mu = mu_[idx];
+  auto& rsignal = rsignal_[idx];
+
   while (LIKELY(!should_stop())) {
-    std::unique_lock lock(mu_);
-    rsignal_.wait(lock, [this]() {
-      return newest_node_.load(std::memory_order_relaxed) != nullptr ||
-             UNLIKELY(time_newest_node_.load(std::memory_order_relaxed) != nullptr) || UNLIKELY(should_stop());
+    std::unique_lock lock(mu);
+    rsignal.wait(lock, [this, &newest_node, &time_newest_node]() {
+      return newest_node.load(std::memory_order_relaxed) != nullptr ||
+             UNLIKELY(time_newest_node.load(std::memory_order_relaxed) != nullptr) || UNLIKELY(should_stop());
     });
     lock.unlock();

@@ -160,26 +176,26 @@ void ThreadPool::runInThread() {
       break;
     }

-    last = newest_node_.exchange(nullptr);
-    time_last = time_newest_node_.exchange(nullptr);
+    last = newest_node.exchange(nullptr);
+    time_last = time_newest_node.exchange(nullptr);
     if (last == nullptr && LIKELY(time_last == nullptr)) {
      // 1. loop for short time
      for (uint32_t tries = 0; tries < 200; ++tries) {
-        if (newest_node_.load(std::memory_order_acquire) != nullptr) {
-          last = newest_node_.exchange(nullptr);
+        if (newest_node.load(std::memory_order_acquire) != nullptr) {
+          last = newest_node.exchange(nullptr);
           if (last != nullptr) {
             goto exec;
           }
         }
-        if (UNLIKELY(time_newest_node_.load(std::memory_order_acquire) != nullptr)) {
-          time_last = time_newest_node_.exchange(nullptr);
+        if (UNLIKELY(time_newest_node.load(std::memory_order_acquire) != nullptr)) {
+          time_last = time_newest_node.exchange(nullptr);
           if (time_last != nullptr) {
             goto exec;
           }
         }
         AsmVolatilePause();
       }
-
+      // 2. spin a little longer, yielding between tries
       const size_t kMaxSlowYieldsWhileSpinning = 3;
       auto& yield_credit = adp_ctx.value;
@@ -198,16 +214,16 @@ void ThreadPool::runInThread() {

         while ((iter_begin - spin_begin) <= std::chrono::microseconds(max_yield_usec_)) {
           std::this_thread::yield();
-          if (newest_node_.load(std::memory_order_acquire) != nullptr) {
-            last = newest_node_.exchange(nullptr);
+          if (newest_node.load(std::memory_order_acquire) != nullptr) {
+            last = newest_node.exchange(nullptr);
             if (last != nullptr) {
               would_spin_again = true;
               // success
               break;
             }
           }
-          if (UNLIKELY(time_newest_node_.load(std::memory_order_acquire) != nullptr)) {
-            time_last = time_newest_node_.exchange(nullptr);
+          if (UNLIKELY(time_newest_node.load(std::memory_order_acquire) != nullptr)) {
+            time_last = time_newest_node.exchange(nullptr);
             if (time_last != nullptr) {
               would_spin_again = true;
               // success
               break;
             }
           }
@@ -243,7 +259,9 @@ void ThreadPool::runInThread() {
  exec:
     // do all normal tasks older than this task pointed last
     if (LIKELY(last != nullptr)) {
-      auto first = CreateMissingNewerLinks(last);
+      int cnt = 1;
+      auto first = CreateMissingNewerLinks(last, &cnt);
+      // node_cnt_ -= cnt;
       assert(!first->is_time_task);
       do {
         first->Exec();
@@ -256,7 +274,8 @@ void ThreadPool::runInThread() {

     // do all time tasks older than this task pointed time_last
     if (UNLIKELY(time_last != nullptr)) {
-      auto time_first = CreateMissingNewerLinks(time_last);
+      int cnt = 1;
+      auto time_first = CreateMissingNewerLinks(time_last, &cnt);
       do {
         // time task may block normal task
         auto now = std::chrono::system_clock::now();
         uint64_t unow = std::chrono::duration_cast<std::chrono::microseconds>(now.time_since_epoch()).count();
         if (LIKELY(exec_time <= unow)) {
           time_first->Exec();
         } else {
           lock.lock();
-          rsignal_.wait_for(lock, std::chrono::microseconds(exec_time - unow));
+          rsignal.wait_for(lock, std::chrono::microseconds(exec_time - unow));
           lock.unlock();
           time_first->Exec();
         }
@@ -282,14 +301,16 @@ void ThreadPool::runInThread() {
   }
 }

-ThreadPool::Node* ThreadPool::CreateMissingNewerLinks(Node* head) {
+ThreadPool::Node* ThreadPool::CreateMissingNewerLinks(Node* head, int* cnt) {
   assert(head != nullptr);
+  assert(cnt != nullptr && *cnt == 1);
   Node* next = nullptr;
   while (true) {
     next = head->link_older;
     if (next == nullptr) {
       return head;
     }
+    ++(*cnt);
     next->link_newer = head;
     head = next;
   }

From 38eb16deaa77351d58667d263e1d523166257034 Mon Sep 17 00:00:00 2001
From: baerwang
Date: Fri, 7 Jun 2024 20:32:39 +0800
Subject: [PATCH 02/25] ci: Accelerated compilation (#2706)

---
 .github/workflows/pika.yml | 35 +++++++++++++----------------------
 1 file changed, 13 insertions(+), 22 deletions(-)

diff --git a/.github/workflows/pika.yml b/.github/workflows/pika.yml
index b00701bebe..a2edf0b1e1 100644
--- a/.github/workflows/pika.yml
+++ b/.github/workflows/pika.yml
@@ -26,8 +26,12 @@ jobs:
         with:
           go-version: 1.19

+      - name: ccache
+        uses: hendrikmuhs/ccache-action@v1.2.13
+        with:
+          key: ubuntu-latest
+
      - name: Install Deps
-        if: ${{ steps.cache.output.cache-hit != 'true' }}
         run: |
           sudo apt-get install -y autoconf libprotobuf-dev protobuf-compiler
           sudo apt-get install -y clang-tidy-12

       - name: Configure CMake
         # Configure CMake in a 'build' subdirectory. `CMAKE_BUILD_TYPE` is only required if you are using a single-configuration generator such as make.
         # See https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html?highlight=cmake_build_type
-        run: cmake -B build -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} -DUSE_PIKA_TOOLS=ON -DCMAKE_CXX_FLAGS_DEBUG=-fsanitize=address
-
-      - name: Cache Build
-        uses: actions/cache@v3
-        id: cache-ubuntu
-        with:
-          key: ${{ runner.os }}-build-ubuntu-${{ hashFiles('**/CMakeLists.txt') }}
-          path: |
-            ${{ github.workspace }}/buildtrees
-            ${{ github.workspace }}/deps
+        run: cmake -B build -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} -DUSE_PIKA_TOOLS=ON -DCMAKE_CXX_FLAGS_DEBUG=-fsanitize=address -D CMAKE_C_COMPILER_LAUNCHER=ccache -D CMAKE_CXX_COMPILER_LAUNCHER=ccache

       - name: Build
         # Build your program with the given configuration
@@ -166,25 +161,21 @@ jobs:
         with:
           go-version: 1.19

+      - name: ccache
+        uses: hendrikmuhs/ccache-action@v1.2.13
+        with:
+          key: macos-12
+
       - name: Install Deps
         run: |
           brew update
           brew install --overwrite python@3.12 autoconf protobuf llvm wget git
           brew install gcc@10 automake cmake make binutils
+
       - name: Configure CMake
         run: |
           export CC=/usr/local/opt/gcc@10/bin/gcc-10
-          cmake -B build -DCMAKE_C_COMPILER=/usr/local/opt/gcc@10/bin/gcc-10 -DUSE_PIKA_TOOLS=ON -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} -DCMAKE_CXX_FLAGS_DEBUG=-fsanitize=address
-
-      - uses: actions/cache@v3
-        with:
-          path: ${{ github.workspace }}/deps
-          key: ${{ runner.os }}-deps-${{ hashFiles('**/CMakeLists.txt') }}
-
-      - uses: actions/cache@v3
-        with:
-          path: ${{ github.workspace }}/buildtrees
-          key: ${{ runner.os }}-buildtrees-${{ hashFiles('**/CMakeLists.txt') }}
+          cmake -B build -DCMAKE_C_COMPILER=/usr/local/opt/gcc@10/bin/gcc-10 -DUSE_PIKA_TOOLS=ON -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} -DCMAKE_CXX_FLAGS_DEBUG=-fsanitize=address -D CMAKE_C_COMPILER_LAUNCHER=ccache -D CMAKE_CXX_COMPILER_LAUNCHER=ccache

       - name: Build
         run: |

From 6f93095e26fceaea8823db8adebabb9e09633df6 Mon Sep 17 00:00:00 2001
From: baerwang
Date: Tue, 11 Jun 2024 14:29:17 +0800
Subject: [PATCH 03/25] ci: PR title regex restriction Chinese (#2718)

---
 .github/pr-title-checker-config.json    | 2 +-
 .github/workflows/pr-title-checker.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/pr-title-checker-config.json b/.github/pr-title-checker-config.json
index 0e5a4d829b..e04b246137 100644
--- a/.github/pr-title-checker-config.json
+++ b/.github/pr-title-checker-config.json
@@ -4,7 +4,7 @@
     "color": "B60205"
   },
   "CHECKS": {
-    "regexp": "^(feat|fix|test|refactor|chore|upgrade|style|docs|perf|build|ci|revert)(\\(.*\\))?:.*",
+    "regexp": "^(feat|fix|test|refactor|chore|upgrade|style|docs|perf|build|ci|revert)(\\(.*\\))?:[^\u4e00-\u9fa5]+$",
     "ignoreLabels": [
       "ignore-title"
     ]
diff --git a/.github/workflows/pr-title-checker.yaml b/.github/workflows/pr-title-checker.yaml
index 34b8709d8b..89839f97d6 100644
--- a/.github/workflows/pr-title-checker.yaml
+++ b/.github/workflows/pr-title-checker.yaml
@@ -12,7 +12,7 @@ jobs:
   check:
     runs-on: ubuntu-latest
     steps:
-      - uses: thehanimo/pr-title-checker@v1.4.1
+      - uses: thehanimo/pr-title-checker@v1.4.2
         with:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           pass_on_octokit_error: false

From 41655266d93fbe59505ea630941c1425426e5efd Mon Sep 17 00:00:00 2001
From: JayLiu <38887641+luky116@users.noreply.github.com>
Date: Wed, 12 Jun 2024 16:54:04 +0800
Subject: [PATCH 04/25] fix: fix acl bug (#2714)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* fix: modify cmdId assignment time to assigning after registering cmdtable (#2692)

* change cmdId assignment time to assign after initializing cmdtable

* modify getCmdId in PikaCmdTableManager, and remove the redundant header includes from pika_command.cc

* fix acl bug

---------

Co-authored-by: Kaijie Gu <2459548460@qq.com>
Co-authored-by: liuyuecai
---
 src/acl.cc | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/src/acl.cc b/src/acl.cc
index bf0f4758f2..bd6862aa81 100644
--- a/src/acl.cc
+++ b/src/acl.cc
@@ -489,15 +489,14 @@ void Acl::InitLimitUser(const std::string& bl, bool limit_exist) {
   auto u = GetUser(DefaultLimitUser);
   if (limit_exist) {
     if (!bl.empty()) {
-      u->SetUser("+@all");
       for(auto& cmd : blacklist) {
         cmd = pstd::StringTrim(cmd, " ");
         u->SetUser("-" + cmd);
       }
       u->SetUser("on");
-      if (!pass.empty()) {
-        u->SetUser(">"+pass);
-      }
+    }
+    if (!pass.empty()) {
+      u->SetUser(">"+pass);
     }
   } else {
     if (pass.empty()) {
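[Editor's note] The next patch loads a max-total-wal-size item from pika.conf and hands it to RocksDB. For context, a minimal sketch of the underlying RocksDB option it maps onto; the path and size below are placeholders, not Pika's actual wiring:

```cpp
#include <cassert>

#include "rocksdb/db.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Once live WAL files exceed this many bytes, RocksDB flushes the column
  // families whose memtables are backed by the oldest WAL so it can be freed.
  options.max_total_wal_size = 1ULL << 30;  // 1 GiB, mirroring the conf item

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/wal_demo", &db);  // placeholder path
  assert(s.ok());
  delete db;
  return 0;
}
```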
From ea56da0334f7eed411417ad8fb7d9073be759c14 Mon Sep 17 00:00:00 2001
From: wangshao1 <30471730+wangshao1@users.noreply.github.com>
Date: Wed, 12 Jun 2024 17:05:22 +0800
Subject: [PATCH 05/25] fix: fix pkpatternmatchdel bug (#2717)

* fix pkpatternmatchdel error

* support load max-total-wal-size from conf file

---
 conf/pika.conf                   | 5 +++++
 src/pika_admin.cc                | 1 +
 src/pika_command.cc              | 2 +-
 src/pika_conf.cc                 | 6 ++++++
 src/storage/src/redis_strings.cc | 2 +-
 5 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/conf/pika.conf b/conf/pika.conf
index a99c30b30d..1a7b815885 100644
--- a/conf/pika.conf
+++ b/conf/pika.conf
@@ -317,6 +317,11 @@ max-write-buffer-num : 2
 # whether the key exists. Setting this value too high may hurt performance.
min-write-buffer-number-to-merge : 1

+# The total size of WAL files. When this limit is reached, RocksDB forces a flush of the column families
+# whose memtables are backed by the oldest live WAL file. It also bounds how long a RocksDB open takes when
+# the process restarts.
+max-total-wal-size : 1073741824
+
 # rocksdb level0_stop_writes_trigger
 level0-stop-writes-trigger : 36

diff --git a/src/pika_admin.cc b/src/pika_admin.cc
index 2996d3a005..d374b6faf5 100644
--- a/src/pika_admin.cc
+++ b/src/pika_admin.cc
@@ -3043,6 +3043,7 @@ void PKPatternMatchDelCmd::DoInitial() {
   pattern_ = argv_[1];
 }

+// TODO: may lead to inconsistency between rediscache and db, because currently it only cleans the db
 void PKPatternMatchDelCmd::Do() {
   int ret = 0;
   rocksdb::Status s = db_->storage()->PKPatternMatchDel(type_, pattern_, &ret);
diff --git a/src/pika_command.cc b/src/pika_command.cc
index 7a934aebe6..81c23c2533 100644
--- a/src/pika_command.cc
+++ b/src/pika_command.cc
@@ -130,7 +130,7 @@ void InitCmdTable(CmdTable* cmd_table) {
   cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePadding, std::move(paddingptr)));

   std::unique_ptr<Cmd> pkpatternmatchdelptr =
-      std::make_unique<PKPatternMatchDelCmd>(kCmdNamePKPatternMatchDel, 3, kCmdFlagsWrite | kCmdFlagsAdmin);
+      std::make_unique<PKPatternMatchDelCmd>(kCmdNamePKPatternMatchDel, 2, kCmdFlagsWrite | kCmdFlagsAdmin);
   cmd_table->insert(
       std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePKPatternMatchDel, std::move(pkpatternmatchdelptr)));
   std::unique_ptr<Cmd> dummyptr = std::make_unique<DummyCmd>(kCmdDummy, 0, kCmdFlagsWrite);
diff --git a/src/pika_conf.cc b/src/pika_conf.cc
index 2b4152e0f1..c88e77478b 100644
--- a/src/pika_conf.cc
+++ b/src/pika_conf.cc
@@ -339,6 +339,12 @@ int PikaConf::Load() {
     max_write_buffer_size_ = PIKA_CACHE_SIZE_DEFAULT;  // 10Gb
   }

+  // max-total-wal-size
+  GetConfInt64("max-total-wal-size", &max_total_wal_size_);
+  if (max_total_wal_size_ < 0) {
+    max_total_wal_size_ = 0;
+  }
+
   // rate-limiter-mode
   rate_limiter_mode_ = 1;
   GetConfInt("rate-limiter-mode", &rate_limiter_mode_);
diff --git a/src/storage/src/redis_strings.cc b/src/storage/src/redis_strings.cc
index dd476aa687..471320ae55 100644
--- a/src/storage/src/redis_strings.cc
+++ b/src/storage/src/redis_strings.cc
@@ -1597,7 +1597,7 @@ rocksdb::Status Redis::PKPatternMatchDel(const std::string& pattern, int32_t* re
       meta_value = iter->value().ToString();
       ParsedStringsValue parsed_strings_value(&meta_value);
       if (!parsed_strings_value.IsStale() &&
-          (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0)) {
+          (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) {
         batch.Delete(key);
       }
     } else if (meta_type == DataType::kLists) {
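[Editor's note] Patch 06 below sizes its per-type statistics from DataType::kNones instead of a hard-coded 5, so that adding the stream type cannot silently truncate the vector again. A small self-contained sketch of that trailing-sentinel idiom; the enum and struct here are stand-ins, not Pika's definitions:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for storage::DataType; the final enumerator counts the real ones.
enum class DataType : int { kStrings, kHashes, kLists, kZSets, kSets, kStreams, kNones };

struct KeyInfo {
  uint64_t keys = 0;
  uint64_t expires = 0;
};

int main() {
  // One slot per real type; any enumerator added before kNones is picked up
  // automatically, which is exactly what the fix relies on.
  std::vector<KeyInfo> key_infos(static_cast<size_t>(DataType::kNones));
  std::cout << key_infos.size() << " key-info slots\n";  // prints 6
  return 0;
}
```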
From 80a3486d31a74c019ed4c6d57bcf464162d9fdb0 Mon Sep 17 00:00:00 2001
From: wangshao1 <30471730+wangshao1@users.noreply.github.com>
Date: Wed, 12 Jun 2024 17:12:08 +0800
Subject: [PATCH 06/25] fix: keyspace error for stream type (#2705)

* fix keyspace error for stream type

* fix dbsize bug

* fix keyscaninfo init error

---------

Co-authored-by: wangshaoyi
---
 include/pika_db.h          |  4 ++--
 src/pika_admin.cc          | 11 ++++++++---
 src/pika_db.cc             | 13 ++-----------
 src/storage/src/redis.cc   |  2 +-
 src/storage/src/storage.cc |  2 +-
 5 files changed, 14 insertions(+), 18 deletions(-)

diff --git a/include/pika_db.h b/include/pika_db.h
index bcaf3f8b16..c3d4fce211 100644
--- a/include/pika_db.h
+++ b/include/pika_db.h
@@ -24,11 +24,11 @@ struct KeyScanInfo {
   time_t start_time = 0;
   std::string s_start_time;
   int32_t duration = -3;
-  std::vector<storage::KeyInfo> key_infos;  // the order is strings, hashes, lists, zsets, sets
+  std::vector<storage::KeyInfo> key_infos;  // the order is strings, hashes, lists, zsets, sets, streams
   bool key_scaning_ = false;
   KeyScanInfo()
       : s_start_time("0"),
-        key_infos({{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}})
+        key_infos({{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}})
   {}
 };

diff --git a/src/pika_admin.cc b/src/pika_admin.cc
index d374b6faf5..18b5e89873 100644
--- a/src/pika_admin.cc
+++ b/src/pika_admin.cc
@@ -1201,7 +1201,7 @@ void InfoCmd::InfoKeyspace(std::string& info) {
       key_scan_info = db_item.second->GetKeyScanInfo();
       key_infos = key_scan_info.key_infos;
       duration = key_scan_info.duration;
-      if (key_infos.size() != 5) {
+      if (key_infos.size() != (size_t)(storage::DataType::kNones)) {
         info.append("info keyspace error\r\n");
         return;
       }
@@ -1227,6 +1227,8 @@ void InfoCmd::InfoKeyspace(std::string& info) {
                  << ", invalid_keys=" << key_infos[3].invaild_keys << "\r\n";
       tmp_stream << db_name << " Sets_keys=" << key_infos[4].keys << ", expires=" << key_infos[4].expires
                  << ", invalid_keys=" << key_infos[4].invaild_keys << "\r\n\r\n";
+      tmp_stream << db_name << " Streams_keys=" << key_infos[5].keys << ", expires=" << key_infos[5].expires
+                 << ", invalid_keys=" << key_infos[5].invaild_keys << "\r\n\r\n";
     }
   }
   info.append(tmp_stream.str());
@@ -2840,11 +2842,14 @@ void DbsizeCmd::Do() {
   }
   KeyScanInfo key_scan_info = dbs->GetKeyScanInfo();
   std::vector<storage::KeyInfo> key_infos = key_scan_info.key_infos;
-  if (key_infos.size() != 5) {
+  if (key_infos.size() != (size_t)(storage::DataType::kNones)) {
     res_.SetRes(CmdRes::kErrOther, "keyspace error");
     return;
   }
-  uint64_t dbsize = key_infos[0].keys + key_infos[1].keys + key_infos[2].keys + key_infos[3].keys + key_infos[4].keys;
+  uint64_t dbsize = 0;
+  for (auto info : key_infos) {
+    dbsize += info.keys;
+  }
   res_.AppendInteger(static_cast<int64_t>(dbsize));
 }
diff --git a/src/pika_db.cc b/src/pika_db.cc
index 328ab5443b..efe004c122 100644
--- a/src/pika_db.cc
+++ b/src/pika_db.cc
@@ -95,20 +95,11 @@ bool DB::IsKeyScaning() {

 void DB::RunKeyScan() {
   Status s;
-  std::vector<storage::KeyInfo> new_key_infos(5);
+  std::vector<storage::KeyInfo> new_key_infos;

   InitKeyScan();
   std::shared_lock l(dbs_rw_);
-  std::vector<storage::KeyInfo> tmp_key_infos;
-  s = GetKeyNum(&tmp_key_infos);
-  if (s.ok()) {
-    for (size_t idx = 0; idx < tmp_key_infos.size(); ++idx) {
-      new_key_infos[idx].keys += tmp_key_infos[idx].keys;
-      new_key_infos[idx].expires += tmp_key_infos[idx].expires;
-      new_key_infos[idx].avg_ttl += tmp_key_infos[idx].avg_ttl;
-      new_key_infos[idx].invaild_keys += tmp_key_infos[idx].invaild_keys;
-    }
-  }
+  s = GetKeyNum(&new_key_infos);

   key_scan_info_.duration = static_cast<int32_t>(time(nullptr) - key_scan_info_.start_time);
   std::lock_guard lm(key_scan_protector_);
diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc
index 66604aec7e..b5bfb66bd4 100644
--- a/src/storage/src/redis.cc
+++ b/src/storage/src/redis.cc
@@ -387,7 +387,7 @@ Status Redis::ScanKeyNum(std::vector<KeyInfo>* key_infos) {
   if (!s.ok()) {
     return s;
   }
-  s = ScanSetsKeyNum(&((*key_infos)[5]));
+  s = ScanStreamsKeyNum(&((*key_infos)[5]));
   if (!s.ok()) {
     return s;
   }
diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc
index e17e5ffb55..eff2a82176 100644
--- a/src/storage/src/storage.cc
+++ b/src/storage/src/storage.cc
@@ -1823,7 +1823,7 @@ uint64_t Storage::GetProperty(const std::string& property) {

 Status Storage::GetKeyNum(std::vector<KeyInfo>* key_infos) {
   KeyInfo key_info;
-  key_infos->resize(5);
+  key_infos->resize(size_t(DataType::kNones));
   for (const auto& db : insts_) {
     std::vector<KeyInfo> db_key_infos;
     // check the scanner was stopped or not, before scanning the next db
From 1379748ab74d6fea17297dfd5a944d345c7efed9 Mon Sep 17 00:00:00 2001
From: baerwang
Date: Wed, 12 Jun 2024 18:11:09 +0800
Subject: [PATCH 07/25] fix: release macos changed (#2722)

---
 .github/workflows/release.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 30b749faa0..d281b2e33a 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -16,7 +16,7 @@ jobs:
       include:
         - os: ubuntu-latest
           name: ${{ github.event.repository.name }}-${{ github.ref_name }}-ubuntu-amd64.tar.gz
-        - os: macos-latest
+        - os: macos-12
           name: ${{ github.event.repository.name }}-${{ github.ref_name }}-macos-amd64.tar.gz

     runs-on: ${{ matrix.os }}

From 84cdb183f6a336fd53d66b7b327330c7935e5076 Mon Sep 17 00:00:00 2001
From: Qx
Date: Thu, 13 Jun 2024 10:56:35 +0800
Subject: [PATCH 08/25] feat: Supports compilation on FreeBSD14 (#2711)

---
 CMakeLists.txt                |  5 ++++-
 include/pika_server.h         |  4 +++-
 src/net/CMakeLists.txt        |  2 +-
 src/net/src/net_interfaces.cc |  8 ++++++--
 src/pika_monotonic_time.cc    | 15 +++++++++++++--
 src/pika_rsync_service.cc     |  4 ++++
 src/pstd/src/rsync.cc         |  5 +++++
 src/storage/src/coding.h      |  7 +++++--
 8 files changed, 41 insertions(+), 9 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6a263cd599..41b2d897f0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -56,6 +56,9 @@ endif()
 if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
   set(CMAKE_CXX_FLAGS "-pthread")
   add_definitions(-DOS_MACOSX)
+elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
+  set(CMAKE_CXX_FLAGS "-pthread")
+  add_definitions(-DOS_FREEBSD)
 elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
   if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
     set(CMAKE_EXE_LINKER_FLAGS "-stdlib=libc++ -fuse-ld=lld -lc++ -lc++abi ${CMAKE_EXE_LINKER_FLAGS}")
@@ -66,7 +69,7 @@ elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
   endif()
   add_definitions(-DOS_LINUX)
 else()
-  message(FATAL_ERROR "only support linux or macOs")
+  message(FATAL_ERROR "only support linux or macOs or FreeBSD")
 endif()

 if(HOST_ARCH MATCHES "x86_64" OR HOST_ARCH MATCHES "i386")
diff --git a/include/pika_server.h b/include/pika_server.h
index 2e26678d05..4811c54045 100644
--- a/include/pika_server.h
+++ b/include/pika_server.h
@@ -7,12 +7,14 @@
 #define PIKA_SERVER_H_

 #include
-#if defined(__APPLE__)
+#if defined(__APPLE__) || defined(__FreeBSD__)
 #  include <sys/mount.h>
 #  include <sys/param.h>
 #else
 #  include <sys/statfs.h>
 #endif
+
 #include
 #include
diff --git a/src/net/CMakeLists.txt b/src/net/CMakeLists.txt
index d25c06163e..dc38d0d3d8 100644
--- a/src/net/CMakeLists.txt
+++ b/src/net/CMakeLists.txt
@@ -15,7 +15,7 @@ add_subdirectory(examples)

 if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
   list(FILTER DIR_SRCS EXCLUDE REGEX ".net_kqueue.*")
-elseif(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+elseif(${CMAKE_SYSTEM_NAME} MATCHES "Darwin" OR ${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
   list(FILTER DIR_SRCS EXCLUDE REGEX ".net_epoll.*")
 endif()

diff --git a/src/net/src/net_interfaces.cc b/src/net/src/net_interfaces.cc
index fd8b1a7906..89061dd5b1 100644
--- a/src/net/src/net_interfaces.cc
+++ b/src/net/src/net_interfaces.cc
@@ -12,10 +12,14 @@
 #include
 #include

-#if defined(__APPLE__)
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#  include
+#  include
 #  include
+#  include
 #  include
 #  include
+#  include
 #  include

 #  include "pstd/include/pstd_defer.h"
@@ -31,7 +35,7 @@
 #include "pstd/include/xdebug.h"

 std::string GetDefaultInterface() {
-#if defined(__APPLE__)
+#if defined(__APPLE__) || defined(__FreeBSD__)
   std::string name("lo0");

   int fd = socket(AF_INET, SOCK_DGRAM, 0);
diff --git a/src/pika_monotonic_time.cc b/src/pika_monotonic_time.cc
index e1c8c51496..1c3f6e820d 100644
--- a/src/pika_monotonic_time.cc
+++ b/src/pika_monotonic_time.cc
@@ -3,7 +3,7 @@
 // LICENSE file in the root directory of this source tree. An additional grant
 // of patent rights can be found in the PATENTS file in the same directory.

-#ifdef __APPLE__  // Mac
+#if defined(__APPLE__)  // Mac
 #include <mach/mach_time.h>

 #include "include/pika_monotonic_time.h"
@@ -17,7 +17,18 @@ monotime getMonotonicUs() {
   return nanos / 1000;
 }

-#elif __linux__  // Linux
+#elif defined(__FreeBSD__)  // FreeBSD
+#include <time.h>
+
+#include "include/pika_monotonic_time.h"
+
+monotime getMonotonicUs() {
+  struct timespec ts;
+  clock_gettime(CLOCK_MONOTONIC, &ts);
+  return (ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
+}
+
+#elif defined(__linux__)  // Linux

 #ifdef __x86_64__  // x86_64
diff --git a/src/pika_rsync_service.cc b/src/pika_rsync_service.cc
index 5f1d8c0e6b..5071a1cfc1 100644
--- a/src/pika_rsync_service.cc
+++ b/src/pika_rsync_service.cc
@@ -15,6 +15,10 @@
 #include "include/pika_conf.h"
 #include "include/pika_define.h"

+#ifdef __FreeBSD__
+#  include <sys/wait.h>
+#endif
+
 extern std::unique_ptr<PikaConf> g_pika_conf;

 PikaRsyncService::PikaRsyncService(const std::string& raw_path, const int port) : raw_path_(raw_path), port_(port) {
diff --git a/src/pstd/src/rsync.cc b/src/pstd/src/rsync.cc
index 44ca330aff..5748cfa5ac 100644
--- a/src/pstd/src/rsync.cc
+++ b/src/pstd/src/rsync.cc
@@ -8,6 +8,11 @@
 #include "pstd/include/rsync.h"
 #include "pstd/include/xdebug.h"

+#ifdef __FreeBSD__
+#  include <sys/types.h>
+#  include <sys/wait.h>
+#endif
+
 namespace pstd {
 // Clean files for rsync info, such as the lock, log, pid, conf file
 static bool CleanRsyncInfo(const std::string& path) { return pstd::DeleteDirIfExist(path + kRsyncSubDir); }
diff --git a/src/storage/src/coding.h b/src/storage/src/coding.h
index 001e9d76ee..824bf7a080 100644
--- a/src/storage/src/coding.h
+++ b/src/storage/src/coding.h
@@ -6,17 +6,20 @@
 #ifndef SRC_CODING_H_
 #define SRC_CODING_H_

+#undef STORAGE_PLATFORM_IS_LITTLE_ENDIAN
+
 #if defined(__APPLE__)
 #  include <machine/endian.h>  // __BYTE_ORDER
 #  define __BYTE_ORDER __DARWIN_BYTE_ORDER
 #  define __LITTLE_ENDIAN __DARWIN_LITTLE_ENDIAN
 #elif defined(__FreeBSD__)
-#  include <sys/endian.h>  // __BYTE_ORDER
+#  include <sys/endian.h>
+#  include <sys/types.h>
+#  define STORAGE_PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
 #else
 #  include <endian.h>  // __BYTE_ORDER
 #endif

-#undef STORAGE_PLATFORM_IS_LITTLE_ENDIAN
 #ifndef STORAGE_PLATFORM_IS_LITTLE_ENDIAN
 #  define STORAGE_PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
 #endif

From d562abd6fd84e4167b0fec3c0e94ca3ef945b9bf Mon Sep 17 00:00:00 2001
From: chejinge <945997690@qq.com>
Date: Mon, 17 Jun 2024 09:18:14 +0800
Subject: [PATCH 09/25] fix: ttl will default to 0 when keys have a ttl (#2730)

* fix: ttl will default to 0 when keys have a ttl

---------

Co-authored-by: chejinge
---
 src/pika_kv.cc                        |  2 +-
 src/storage/include/storage/storage.h |  1 +
 src/storage/src/redis.h               |  1 +
 src/storage/src/redis_strings.cc      | 66 +++++++++++++++++++--------
 src/storage/src/storage.cc            |  7 ++++-
 5 files changed, 57 insertions(+), 20 deletions(-)

diff --git a/src/pika_kv.cc b/src/pika_kv.cc
index bba495a967..2d0e5c8744 100644
--- a/src/pika_kv.cc
+++ b/src/pika_kv.cc
@@ -533,7 +533,7 @@ void MgetCmd::Do() {
     cache_miss_keys_ = keys_;
   }
   db_value_status_array_.clear();
-  s_ = db_->storage()->MGet(cache_miss_keys_, &db_value_status_array_);
+  s_ = db_->storage()->MGetWithTTL(cache_miss_keys_, &db_value_status_array_);
   if (!s_.ok()) {
     if (s_.IsInvalidArgument()) {
       res_.SetRes(CmdRes::kMultiKey);
diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h
index 779e52cc3e..0b520f5800 100644
--- a/src/storage/include/storage/storage.h
+++ b/src/storage/include/storage/storage.h
@@ -1114,6
+1114,7 @@ class Storage {
   // For scan keys in data base
   std::atomic<bool> scan_keynum_exit_ = {false};
+  Status MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl);
 };

 }  // namespace storage
diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h
index 84f95b67e5..ad8906ba0c 100644
--- a/src/storage/src/redis.h
+++ b/src/storage/src/redis.h
@@ -156,6 +156,7 @@ class Redis {
   Status Get(const Slice& key, std::string* value);
   Status MGet(const Slice& key, std::string* value);
   Status GetWithTTL(const Slice& key, std::string* value, int64_t* ttl);
+  Status MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl);
   Status GetBit(const Slice& key, int64_t offset, int32_t* ret);
   Status Getrange(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret);
   Status GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset,
diff --git a/src/storage/src/redis_strings.cc b/src/storage/src/redis_strings.cc
index 471320ae55..cab41de9aa 100644
--- a/src/storage/src/redis_strings.cc
+++ b/src/storage/src/redis_strings.cc
@@ -357,38 +357,68 @@ Status Redis::MGet(const Slice& key, std::string* value) {
   return s;
 }

+void ClearValueAndSetTTL(std::string* value, int64_t* ttl, int64_t ttl_value) {
+  value->clear();
+  *ttl = ttl_value;
+}
+
+int64_t CalculateTTL(int64_t expiry_time) {
+  int64_t current_time;
+  rocksdb::Env::Default()->GetCurrentTime(&current_time);
+  return expiry_time - current_time >= 0 ? expiry_time - current_time : -2;
+}
+
+Status HandleParsedStringsValue(ParsedStringsValue& parsed_strings_value, std::string* value, int64_t* ttl) {
+  if (parsed_strings_value.IsStale()) {
+    ClearValueAndSetTTL(value, ttl, -2);
+    return Status::NotFound("Stale");
+  } else {
+    parsed_strings_value.StripSuffix();
+    int64_t expiry_time = parsed_strings_value.Etime();
+    *ttl = (expiry_time == 0) ? -1 : CalculateTTL(expiry_time);
+  }
+  return Status::OK();
+}
+
 Status Redis::GetWithTTL(const Slice& key, std::string* value, int64_t* ttl) {
   value->clear();
   BaseKey base_key(key);
   Status s = db_->Get(default_read_options_, base_key.Encode(), value);
   std::string meta_value = *value;
+
   if (s.ok() && !ExpectedMetaValue(DataType::kStrings, meta_value)) {
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kStrings)] + " get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
+
   if (s.ok()) {
     ParsedStringsValue parsed_strings_value(value);
-    if (parsed_strings_value.IsStale()) {
-      value->clear();
-      *ttl = -2;
-      return Status::NotFound("Stale");
-    } else {
-      parsed_strings_value.StripSuffix();
-      *ttl = parsed_strings_value.Etime();
-      if (*ttl == 0) {
-        *ttl = -1;
-      } else {
-        int64_t curtime;
-        rocksdb::Env::Default()->GetCurrentTime(&curtime);
-        *ttl = *ttl - curtime >= 0 ? *ttl - curtime : -2;
-      }
-    }
+    return HandleParsedStringsValue(parsed_strings_value, value, ttl);
   } else if (s.IsNotFound()) {
-    value->clear();
-    *ttl = -2;
+    ClearValueAndSetTTL(value, ttl, -2);
+  }
+
+  return s;
+}
+
+Status Redis::MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl) {
+  value->clear();
+  BaseKey base_key(key);
+  Status s = db_->Get(default_read_options_, base_key.Encode(), value);
+  std::string meta_value = *value;
+
+  if (s.ok() && !ExpectedMetaValue(DataType::kStrings, meta_value)) {
+    s = Status::NotFound();
+  }
+
+  if (s.ok()) {
+    ParsedStringsValue parsed_strings_value(value);
+    return HandleParsedStringsValue(parsed_strings_value, value, ttl);
+  } else if (s.IsNotFound()) {
+    ClearValueAndSetTTL(value, ttl, -2);
   }

   return s;
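[Editor's note] The helpers above fold Redis's TTL reply conventions into one place: -1 for a key with no expiry, -2 for a stale or missing key. A tiny self-contained sketch of that contract; the plain function below is illustrative, not the storage-layer code:

```cpp
#include <cassert>
#include <cstdint>
#include <ctime>

// Mirrors the sentinel scheme used above: an etime of 0 means "no expiry"
// (-1), an etime in the past means "already stale" (-2), otherwise the
// number of seconds left to live.
int64_t RemainingTTL(int64_t etime, int64_t now) {
  if (etime == 0) return -1;
  return etime - now >= 0 ? etime - now : -2;
}

int main() {
  int64_t now = std::time(nullptr);
  assert(RemainingTTL(0, now) == -1);         // persistent key
  assert(RemainingTTL(now + 30, now) == 30);  // 30 seconds to live
  assert(RemainingTTL(now - 1, now) == -2);   // expired
  return 0;
}
```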
diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc
index eff2a82176..ff4378367d 100644
--- a/src/storage/src/storage.cc
+++ b/src/storage/src/storage.cc
@@ -155,6 +155,11 @@ Status Storage::GetWithTTL(const Slice& key, std::string* value, int64_t* ttl) {
   return inst->GetWithTTL(key, value, ttl);
 }

+Status Storage::MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl) {
+  auto& inst = GetDBInstance(key);
+  return inst->MGetWithTTL(key, value, ttl);
+}
+
 Status Storage::GetSet(const Slice& key, const Slice& value, std::string* old_value) {
   auto& inst = GetDBInstance(key);
   return inst->GetSet(key, value, old_value);
@@ -208,7 +213,7 @@ Status Storage::MGetWithTTL(const std::vector<std::string>& keys, std::vector<ValueStatus>* vss) {
     auto& inst = GetDBInstance(key);
     int64_t ttl = 0;
-    s = inst->GetWithTTL(key, &value, &ttl);
+    s = inst->MGetWithTTL(key, &value, &ttl);
     if (s.ok()) {
       vss->push_back({value, Status::OK(), ttl});
     } else if (s.IsNotFound()) {
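[Editor's note] PKSETEXAT carries an absolute unix timestamp, while a cache API wants a relative TTL in seconds. The conversion below is the core of the next patch's cache fix, including the "already expired" branch; the helper name is illustrative, not Pika's code:

```cpp
#include <cassert>
#include <cstdint>
#include <ctime>

// Converts an absolute expiry timestamp into a relative TTL;
// a result <= 0 means the key is already expired and should be
// evicted from the cache instead of re-inserted.
int64_t ToRelativeExpire(int64_t time_stamp, int64_t now) { return time_stamp - now; }

int main() {
  int64_t now = std::time(nullptr);
  assert(ToRelativeExpire(now + 60, now) == 60);  // cache for 60s
  assert(ToRelativeExpire(now - 5, now) <= 0);    // delete from cache instead
  return 0;
}
```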
From 3bcccd0898a9925d75bd3e8233ea42ed2267fd63 Mon Sep 17 00:00:00 2001
From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com>
Date: Mon, 17 Jun 2024 20:35:31 +0800
Subject: [PATCH 10/25] fix: pksetexat should update cache (#2736)

* fix: pksetexat should update cache

* fix: handle error when expire < 0

---
 include/pika_kv.h   |  2 ++
 src/pika_command.cc |  2 +-
 src/pika_kv.cc      | 15 +++++++++++++++
 3 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/include/pika_kv.h b/include/pika_kv.h
index 277a27422f..204fdb1ff2 100644
--- a/include/pika_kv.h
+++ b/include/pika_kv.h
@@ -792,6 +792,8 @@ class PKSetexAtCmd : public Cmd {
     return res;
   }
   void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
   void Split(const HintKeys& hint_keys) override {};
   void Merge() override {};
   Cmd* Clone() override { return new PKSetexAtCmd(*this); }
diff --git a/src/pika_command.cc b/src/pika_command.cc
index 81c23c2533..a40cb77f35 100644
--- a/src/pika_command.cc
+++ b/src/pika_command.cc
@@ -370,7 +370,7 @@ void InitCmdTable(CmdTable* cmd_table) {
   cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameScanx, std::move(scanxptr)));
   ////PKSetexAtCmd
   std::unique_ptr<Cmd> pksetexatptr = std::make_unique<PKSetexAtCmd>(
-      kCmdNamePKSetexAt, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsSlow);
+      kCmdNamePKSetexAt, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
   cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePKSetexAt, std::move(pksetexatptr)));
   ////PKScanRange
   std::unique_ptr<Cmd> pkscanrangeptr = std::make_unique<PKScanRangeCmd>(
diff --git a/src/pika_kv.cc b/src/pika_kv.cc
index 2d0e5c8744..ccc7ea1cfa 100644
--- a/src/pika_kv.cc
+++ b/src/pika_kv.cc
@@ -1704,6 +1704,21 @@ void PKSetexAtCmd::Do() {
   }
 }

+void PKSetexAtCmd::DoThroughDB() {
+  Do();
+}
+
+void PKSetexAtCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    auto expire = time_stamp_ - static_cast<int64_t>(std::time(nullptr));
+    if (expire <= 0) [[unlikely]] {
+      db_->cache()->Del({key_});
+      return;
+    }
+    db_->cache()->Setxx(key_, value_, expire);
+  }
+}
+
 void PKScanRangeCmd::DoInitial() {
   if (!CheckArg(argv_.size())) {
     res_.SetRes(CmdRes::kWrongNum, kCmdNamePKScanRange);

From f95f867c6b62cd23e25750771b2408923f6bd058 Mon Sep 17 00:00:00 2001
From: cheniujh <41671101+cheniujh@users.noreply.github.com>
Date: Tue, 18 Jun 2024 14:48:26 +0800
Subject: [PATCH 11/25] fix: Revised RocksDB-Related Parameters in Pika (#2728)

* 1 set the default value of rate-limiter to 1024GB and make it dynamically changeable
  2 allow user to config max-background-flushes and max-background-compactions to -1 while max-background-jobs is given
  3 add conf item delayed-write-rate and make it dynamically changeable
  4 add conf item max-compaction-bytes and make it dynamically changeable
  5 revised the comment of rate-limiter-auto-tuned in pika.conf

* fix bugs

---------

Co-authored-by: cjh <1271435567@qq.com>
---
 conf/pika.conf      | 51 +++++++++++++++++++++++++++++++++----------
 include/pika_conf.h | 36 +++++++++++++++++++++++++++++---
 src/pika_admin.cc   | 51 ++++++++++++++++++++++++++++++++++++++++++++-
 src/pika_conf.cc    | 26 +++++++++++++++++++----
 src/pika_server.cc  |  4 ++--
 5 files changed, 147 insertions(+), 21 deletions(-)

diff --git a/conf/pika.conf b/conf/pika.conf
index 1a7b815885..1396caf5e5 100644
--- a/conf/pika.conf
+++ b/conf/pika.conf
@@ -240,7 +240,8 @@ slave-priority : 100
 # The disable_auto_compactions option is [true | false]
 disable_auto_compactions : false

-# Rocksdb max_subcompactions
+# Rocksdb max_subcompactions: increasing this value can speed up the execution of a single compaction task.
+# It is recommended to increase it if large compactions show up in your instance.
 max-subcompactions : 1
 # The minimum disk usage ratio for checking resume.
 # If the disk usage ratio is lower than min-check-resume-ratio, it will not check resume, only higher will check resume.
@@ -352,17 +353,42 @@ compression : snappy
 # https://github.com/facebook/rocksdb/wiki/Compression
 #compression_per_level : [none:none:snappy:lz4:lz4]

+# The total number of RocksDB background threads (the sum of max-background-compactions and max-background-flushes).
+# If max-background-jobs has a valid value AND both 'max-background-flushes' and 'max-background-compactions' are set to -1,
+# then max-background-flushes and max-background-compactions are auto-configured by RocksDB; specifically,
+# 1/4 of max-background-jobs goes to max-background-flushes and the rest (3/4) goes to max-background-compactions.
+# 'max-background-jobs' default value is 3 and the value range is [2, 12].
+max-background-jobs : 3
+
 # The number of background flushing threads.
-# max-background-flushes default value is 1 and the value range is [1, 4].
-max-background-flushes : 1
+# max-background-flushes default value is -1 and the value range is [1, 4] or -1.
+# If 'max-background-flushes' is set to -1, 'max-background-compactions' should also be set to -1,
+# which lets RocksDB auto-configure both based on the value of 'max-background-jobs'.
+max-background-flushes : -1
+
+# [NOTICE] you MUST NOT set one of max-background-flushes or max-background-compactions to -1 while setting
+# the other to some value that is not -1. They SHOULD both be -1, or both be configured manually.

 # The number of background compacting threads.
-# max-background-compactions default value is 2 and the value range is [1, 8].
-max-background-compactions : 2
+# max-background-compactions default value is -1 and the value range is [1, 8] or -1.
+# If 'max-background-compactions' is set to -1, 'max-background-flushes' should also be set to -1,
+# which lets RocksDB auto-configure both based on the value of 'max-background-jobs'.
+max-background-compactions : -1
+
+# RocksDB delayed-write-rate, default is 0 (inferred from the rate limiter by RocksDB).
+# Ref from RocksDB: whenever stall conditions are triggered, RocksDB reduces the write rate to delayed_write_rate,
+# and could possibly reduce it to even lower than delayed_write_rate if estimated pending compaction bytes keep accumulating.
+# If the value is 0, RocksDB infers a value from the `rate_limiter` value if it is not empty, or uses 16MB if `rate_limiter` is empty.
+# Note that if users change the rate in `rate_limiter` after the DB is opened, delayed_write_rate won't be adjusted.
+# [Support Dynamically changeable] send 'config set delayed-write-rate' to a running pika to change its value dynamically
+delayed-write-rate : 0
+
+# RocksDB will try to limit the number of bytes in one compaction to be lower than max-compaction-bytes,
+# but this is NOT guaranteed.
+# The default value is -1, which means 25 * target-file-size-base (RocksDB's default value).
+max-compaction-bytes : -1

-# The number of background threads.
-# max-background-jobs default value is 3 and the value range is [2, 12].
-max-background-jobs : 3

 # maximum value of RocksDB cached open file descriptors
 max-cache-files : 5000
@@ -428,14 +454,17 @@ default-slot-num : 1024
 # 0: Read 1: Write 2: ReadAndWrite
 # rate-limiter-mode : default 1

-# rate limiter bandwidth, default 2000MB/s
-#rate-limiter-bandwidth : 2097152000
+# rate limiter bandwidth, units in bytes, default 1024GB/s (effectively no limit)
+# [Support Dynamically changeable] send 'config set rate-limiter-bandwidth' to a running pika to change its value dynamically
+#rate-limiter-bandwidth : 1099511627776

 #rate-limiter-refill-period-us : 100000
 #
 #rate-limiter-fairness: 10

-# rate limiter auto tune https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html. the default value is false.
+# If auto-tuned is true: enables dynamic adjustment of the rate limit within the range
+# [rate-limiter-bandwidth / 20, rate-limiter-bandwidth], according to the recent demand for background I/O.
+# Rate limiter auto tune: https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html. The default value is true.
#rate-limiter-auto-tuned : true

################################## RocksDB Blob Configure #####################
diff --git a/include/pika_conf.h b/include/pika_conf.h
index e0cb81062d..d55b45e027 100644
--- a/include/pika_conf.h
+++ b/include/pika_conf.h
@@ -255,6 +255,12 @@ class PikaConf : public pstd::BaseConf {
     std::shared_lock l(rwlock_);
     return target_file_size_base_;
   }
+
+  uint64_t max_compaction_bytes() {
+    std::shared_lock l(rwlock_);
+    return static_cast<uint64_t>(max_compaction_bytes_);
+  }
+
   int max_cache_statistic_keys() {
     std::shared_lock l(rwlock_);
     return max_cache_statistic_keys_;
@@ -279,6 +285,10 @@ class PikaConf : public pstd::BaseConf {
     std::shared_lock l(rwlock_);
     return max_background_jobs_;
   }
+  uint64_t delayed_write_rate() {
+    std::shared_lock l(rwlock_);
+    return static_cast<uint64_t>(delayed_write_rate_);
+  }
   int max_cache_files() {
     std::shared_lock l(rwlock_);
     return max_cache_files_;
@@ -723,6 +733,24 @@ class PikaConf : public pstd::BaseConf {
     arena_block_size_ = value;
   }

+  void SetRateLmiterBandwidth(int64_t value) {
+    std::lock_guard l(rwlock_);
+    TryPushDiffCommands("rate-limiter-bandwidth", std::to_string(value));
+    rate_limiter_bandwidth_ = value;
+  }
+
+  void SetDelayedWriteRate(int64_t value) {
+    std::lock_guard l(rwlock_);
+    TryPushDiffCommands("delayed-write-rate", std::to_string(value));
+    delayed_write_rate_ = value;
+  }
+
+  void SetMaxCompactionBytes(int64_t value) {
+    std::lock_guard l(rwlock_);
+    TryPushDiffCommands("max-compaction-bytes", std::to_string(value));
+    max_compaction_bytes_ = value;
+  }
+
   void SetLogLevel(const std::string& value) {
     std::lock_guard l(rwlock_);
     TryPushDiffCommands("loglevel", value);
@@ -862,9 +890,10 @@ class PikaConf : public pstd::BaseConf {
   int max_cache_statistic_keys_ = 0;
   int small_compaction_threshold_ = 0;
   int small_compaction_duration_threshold_ = 0;
-  int max_background_flushes_ = 1;
-  int max_background_compactions_ = 2;
+  int max_background_flushes_ = -1;
+  int max_background_compactions_ = -1;
   int max_background_jobs_ = 0;
+  int64_t delayed_write_rate_ = 0;
   int max_cache_files_ = 0;
   std::atomic<int> rocksdb_ttl_second_ = 0;
   std::atomic<int> rocksdb_periodic_second_ = 0;
@@ -908,6 +937,7 @@ class PikaConf : public pstd::BaseConf {
   //
   bool write_binlog_ = false;
   int target_file_size_base_ = 0;
+  int64_t max_compaction_bytes_ = 0;
   int binlog_file_size_ = 0;

   // cache
@@ -942,7 +972,7 @@ class PikaConf : public pstd::BaseConf {
   std::shared_mutex rwlock_;

   // Rsync Rate limiting configuration
-  int throttle_bytes_per_second_ = 207200000;
+  int throttle_bytes_per_second_ = 200 << 20;  // 200MB/s
   int max_rsync_parallel_num_ = kMaxRsyncParallelNum;
   std::atomic_int64_t rsync_timeout_ms_ = 1000;
 };
diff --git a/src/pika_admin.cc b/src/pika_admin.cc
index 18b5e89873..bb52159dd6 100644
--- a/src/pika_admin.cc
+++ b/src/pika_admin.cc
@@ -1929,6 +1929,18 @@ void ConfigCmd::ConfigGet(std::string& ret) {
     EncodeNumber(&config_body, g_pika_conf->rate_limiter_bandwidth());
   }

+  if (pstd::stringmatch(pattern.data(), "delayed-write-rate", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "delayed-write-rate");
+    EncodeNumber(&config_body, g_pika_conf->delayed_write_rate());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "max-compaction-bytes", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "max-compaction-bytes");
+    EncodeNumber(&config_body, g_pika_conf->max_compaction_bytes());
+  }
+
   if (pstd::stringmatch(pattern.data(), "rate-limiter-refill-period-us", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "rate-limiter-refill-period-us");
@@ -2342,6 +2354,43 @@ void ConfigCmd::ConfigSet(std::shared_ptr<DB> db) {
     }
     g_pika_conf->SetDisableAutoCompaction(value);
     res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "rate-limiter-bandwidth") {
+    int64_t new_bandwidth = 0;
+    if (pstd::string2int(value.data(), value.size(), &new_bandwidth) == 0 || new_bandwidth <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'rate-limiter-bandwidth'\r\n");
+      return;
+    }
+    g_pika_server->storage_options().options.rate_limiter->SetBytesPerSecond(new_bandwidth);
+    g_pika_conf->SetRateLmiterBandwidth(new_bandwidth);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "delayed-write-rate") {
+    int64_t new_delayed_write_rate = 0;
+    if (pstd::string2int(value.data(), value.size(), &new_delayed_write_rate) == 0 || new_delayed_write_rate <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'delayed-write-rate'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"delayed_write_rate", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set delayed-write-rate wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetDelayedWriteRate(new_delayed_write_rate);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "max-compaction-bytes") {
+    int64_t new_max_compaction_bytes = 0;
+    if (pstd::string2int(value.data(), value.size(), &new_max_compaction_bytes) == 0 || new_max_compaction_bytes <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-compaction-bytes'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"max_compaction_bytes", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set max-compaction-bytes wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetMaxCompactionBytes(new_max_compaction_bytes);
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "max-client-response-size") {
     if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) {
       res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-client-response-size'\r\n");
@@ -2461,7 +2510,7 @@ void ConfigCmd::ConfigSet(std::shared_ptr<DB> db) {
     g_pika_conf->SetMaxCacheFiles(static_cast<int>(ival));
     res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "max-background-compactions") {
-    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0 || ival <= 0) {
       res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-background-compactions'\r\n");
       return;
     }
diff --git a/src/pika_conf.cc b/src/pika_conf.cc
index c88e77478b..4ca6710b60 100644
--- a/src/pika_conf.cc
+++ b/src/pika_conf.cc
@@ -355,7 +355,7 @@ int PikaConf::Load() {
   // rate-limiter-bandwidth
   GetConfInt64("rate-limiter-bandwidth", &rate_limiter_bandwidth_);
   if (rate_limiter_bandwidth_ <= 0) {
-    rate_limiter_bandwidth_ = 2000 * 1024 * 1024;  // 2000MB/s
+    rate_limiter_bandwidth_ = 1024LL << 30;  // 1024GB/s
   }

   // rate-limiter-refill-period-us
@@ -372,6 +372,7 @@ int PikaConf::Load() {

   std::string at;
   GetConfStr("rate-limiter-auto-tuned", &at);
+  // rate_limiter_auto_tuned_ will be true if the user didn't configure it
   rate_limiter_auto_tuned_ = at == "yes" || at.empty();

   // max_write_buffer_num
@@ -393,6 +394,12 @@ int PikaConf::Load() {
    target_file_size_base_ = 1048576;  // 10Mb
  }

+  GetConfInt64("max-compaction-bytes", &max_compaction_bytes_);
+  if (max_compaction_bytes_ <= 0) {
+    // RocksDB's default is 25 * target_file_size_base_
+    max_compaction_bytes_ = target_file_size_base_ * 25;
+  }
+
   max_cache_statistic_keys_ = 0;
   GetConfInt("max-cache-statistic-keys", &max_cache_statistic_keys_);
   if (max_cache_statistic_keys_ <= 0) {
@@ -418,8 +425,9 @@ int PikaConf::Load() {
     small_compaction_duration_threshold_ = 1000000;
   }

+  // max-background-flushes and max-background-compactions should both be -1, or both be set explicitly
   GetConfInt("max-background-flushes", &max_background_flushes_);
-  if (max_background_flushes_ <= 0) {
+  if (max_background_flushes_ <= 0 && max_background_flushes_ != -1) {
     max_background_flushes_ = 1;
   }
   if (max_background_flushes_ >= 6) {
@@ -427,7 +435,7 @@ int PikaConf::Load() {
   }

   GetConfInt("max-background-compactions", &max_background_compactions_);
-  if (max_background_compactions_ <= 0) {
+  if (max_background_compactions_ <= 0 && max_background_compactions_ != -1) {
     max_background_compactions_ = 2;
   }
   if (max_background_compactions_ >= 8) {
@@ -443,6 +451,13 @@ int PikaConf::Load() {
     max_background_jobs_ = (8 + 6);
   }

+  GetConfInt64("delayed-write-rate", &delayed_write_rate_);
+  if (delayed_write_rate_ <= 0) {
+    // 0 lets RocksDB infer the value from the rate limiter (by default the rate limiter is 1024GB/s, so delayed_write_rate becomes 512GB/s);
+    // if the rate limiter is nullptr, RocksDB would set it to 16MB
+    delayed_write_rate_ = 0;
+  }
+
   max_cache_files_ = 5000;
   GetConfInt("max-cache-files", &max_cache_files_);
   if (max_cache_files_ < -1) {
@@ -651,7 +666,7 @@ int PikaConf::Load() {
   // throttle-bytes-per-second
   GetConfInt("throttle-bytes-per-second", &throttle_bytes_per_second_);
   if (throttle_bytes_per_second_ <= 0) {
-    throttle_bytes_per_second_ = 207200000;
+    throttle_bytes_per_second_ = 200LL << 20;  // 200MB
   }

   GetConfInt("max-rsync-parallel-num", &max_rsync_parallel_num_);
@@ -749,6 +764,9 @@ int PikaConf::ConfigRewrite() {
   SetConfInt("max-cache-files", max_cache_files_);
   SetConfInt("max-background-compactions", max_background_compactions_);
   SetConfInt("max-background-jobs", max_background_jobs_);
+  SetConfInt64("rate-limiter-bandwidth", rate_limiter_bandwidth_);
+  SetConfInt64("delayed-write-rate", delayed_write_rate_);
+  SetConfInt64("max-compaction-bytes", max_compaction_bytes_);
   SetConfInt("max-write-buffer-num", max_write_buffer_num_);
   SetConfInt64("write-buffer-size", write_buffer_size_);
   SetConfInt("min-write-buffer-number-to-merge", min_write_buffer_number_to_merge_);
diff --git a/src/pika_server.cc b/src/pika_server.cc
index b5fa4f56d9..5c3aae16df 100644
--- a/src/pika_server.cc
+++ b/src/pika_server.cc
@@ -1299,10 +1299,12 @@ void PikaServer::InitStorageOptions() {
   storage_options_.options.max_bytes_for_level_base = g_pika_conf->level0_file_num_compaction_trigger() * g_pika_conf->write_buffer_size();
   storage_options_.options.max_subcompactions = g_pika_conf->max_subcompactions();
   storage_options_.options.target_file_size_base = g_pika_conf->target_file_size_base();
+  storage_options_.options.max_compaction_bytes = g_pika_conf->max_compaction_bytes();
   storage_options_.options.max_background_flushes = g_pika_conf->max_background_flushes();
   storage_options_.options.max_background_compactions = g_pika_conf->max_background_compactions();
   storage_options_.options.disable_auto_compactions = g_pika_conf->disable_auto_compactions();
   storage_options_.options.max_background_jobs = g_pika_conf->max_background_jobs();
+
  storage_options_.options.delayed_write_rate = g_pika_conf->delayed_write_rate();
   storage_options_.options.max_open_files = g_pika_conf->max_cache_files();
   storage_options_.options.max_bytes_for_level_multiplier = g_pika_conf->max_bytes_for_level_multiplier();
   storage_options_.options.optimize_filters_for_hits = g_pika_conf->optimize_filters_for_hits();
@@ -1337,7 +1339,6 @@ void PikaServer::InitStorageOptions() {
     storage_options_.table_options.block_cache =
         rocksdb::NewLRUCache(storage_options_.block_cache_size, static_cast<int>(g_pika_conf->num_shard_bits()));
   }
-
   storage_options_.options.rate_limiter =
       std::shared_ptr<rocksdb::RateLimiter>(
           rocksdb::NewGenericRateLimiter(
               static_cast<rocksdb::RateLimiter::Mode>(g_pika_conf->rate_limiter_mode()),
               g_pika_conf->rate_limiter_auto_tuned()
           ));
-
   // For Storage small compaction
   storage_options_.statistics_max_size = g_pika_conf->max_cache_statistic_keys();
   storage_options_.small_compaction_threshold = g_pika_conf->small_compaction_threshold();
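[Editor's note] For reference, a compact sketch of the RocksDB rate-limiter construction the patch above parameterizes; the numbers below are illustrative defaults, not Pika's configuration:

```cpp
#include <memory>

#include "rocksdb/options.h"
#include "rocksdb/rate_limiter.h"

int main() {
  rocksdb::Options options;
  // Same factory the patch calls: bandwidth, refill period, fairness,
  // mode, and the auto-tuned flag that adapts within [rate/20, rate].
  options.rate_limiter = std::shared_ptr<rocksdb::RateLimiter>(rocksdb::NewGenericRateLimiter(
      /*rate_bytes_per_sec=*/1024LL << 30,  // illustrative: effectively "no limit"
      /*refill_period_us=*/100 * 1000,
      /*fairness=*/10,
      rocksdb::RateLimiter::Mode::kWritesOnly,
      /*auto_tuned=*/true));
  // The limit can be changed at runtime, which is what
  // 'config set rate-limiter-bandwidth' relies on:
  options.rate_limiter->SetBytesPerSecond(512LL << 20);  // 512MB/s
  return 0;
}
```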
"; - return 1; - } else { - std::unique_lock lo(mgrtone_queue_mutex_); - mgrtone_queue_.emplace_back(kpair); - NotifyRequestMigrate(); - } + std::unique_lock lo(mgrtone_queue_mutex_); + mgrtone_queue_.emplace_back(kpair); + NotifyRequestMigrate(); } return 1; @@ -934,7 +935,9 @@ void *PikaMigrateThread::ThreadMain() { { std::unique_lock lw(workers_mutex_); while (!should_exit_ && is_task_success_ && send_num_ != response_num_) { - workers_cond_.wait(lw); + if (workers_cond_.wait_for(lw, std::chrono::seconds(60)) == std::cv_status::timeout) { + break; + } } } LOG(INFO) << "PikaMigrateThread::ThreadMain send_num:" << send_num_ << " response_num:" << response_num_; diff --git a/src/pika_slot_command.cc b/src/pika_slot_command.cc index 21e325100d..9340a6ebb2 100644 --- a/src/pika_slot_command.cc +++ b/src/pika_slot_command.cc @@ -1440,7 +1440,6 @@ void SlotsMgrtExecWrapperCmd::Do() { int ret = g_pika_server->SlotsMigrateOne(key_, db_); switch (ret) { case 0: - case -2: res_.AppendInteger(0); res_.AppendInteger(0); return; From e7edec67fe675cf0b22e76e9b0e15ec6012b408c Mon Sep 17 00:00:00 2001 From: JayLiu <38887641+luky116@users.noreply.github.com> Date: Wed, 19 Jun 2024 15:35:52 +0800 Subject: [PATCH 13/25] docs: modify run pika in docker readme (#2743) * modify readme * modify readme * modify readme * modify readme --------- Co-authored-by: liuyuecai --- README.md | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 37de45f596..a97f29e074 100644 --- a/README.md +++ b/README.md @@ -252,14 +252,21 @@ Users can directly download the latest binary version package from [releases](ht * #### 3.1 Running with Docker - ```bash + Modify the following configuration items of conf/pika.conf file: + ``` + log-path : /data/log/ + db-path : /data/db/ + db-sync-path : /data/dbsync/ + dump-path : /data/dump/ + ``` + + And then execute the following statement to start pika in docker: + ```bash docker run -d \ --restart=always \ -p 9221:9221 \ - -v :/pika/log \ - -v :/pika/db \ - -v :/pika/dump \ - -v :/pika/dbsync \ + -v "$(pwd)/conf":"/pika/conf" \ + -v "/tmp/pika-data":"/data" \ pikadb/pika:v3.3.6 redis-cli -p 9221 "info" From c21fd6eef4c29ce9aaae456306ba1688bcfd1212 Mon Sep 17 00:00:00 2001 From: QlQl <2458371920@qq.com> Date: Wed, 19 Jun 2024 22:06:02 +0800 Subject: [PATCH 14/25] mutil threads per link and mutil links --- src/net/include/thread_pool.h | 6 ++++-- src/net/src/thread_pool.cc | 35 +++++++++++++++++++---------------- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/src/net/include/thread_pool.h b/src/net/include/thread_pool.h index 8909a1c071..9935512a9f 100644 --- a/src/net/include/thread_pool.h +++ b/src/net/include/thread_pool.h @@ -108,8 +108,10 @@ class ThreadPool : public pstd::noncopyable { Node* CreateMissingNewerLinks(Node* head, int* cnt); bool LinkOne(Node* node, std::atomic* newest_node); - int task_idx_; -std::vector> asd; + uint16_t task_idx_; + + const uint8_t nworkers_per_link_ = 2; // numer of workers per link + const uint8_t nlinks_; // number of links (upper around) std::vector> newest_node_; std::atomic node_cnt_; // for task std::vector> time_newest_node_; diff --git a/src/net/src/thread_pool.cc b/src/net/src/thread_pool.cc index a1f615c831..5ca9723947 100644 --- a/src/net/src/thread_pool.cc +++ b/src/net/src/thread_pool.cc @@ -45,9 +45,11 @@ int ThreadPool::Worker::stop() { } ThreadPool::ThreadPool(size_t worker_num, size_t max_queue_size, std::string thread_pool_name) - : newest_node_(worker_num), + 
: nlinks_((worker_num + nworkers_per_link_ - 1) / nworkers_per_link_), + // : nlinks_(worker_num), + newest_node_(nlinks_), node_cnt_(0), - time_newest_node_(worker_num), + time_newest_node_(nlinks_), time_node_cnt_(0), queue_slow_size_(std::min(worker_num * 10, max_queue_size)), max_queue_size_(max_queue_size), @@ -58,9 +60,9 @@ ThreadPool::ThreadPool(size_t worker_num, size_t max_queue_size, std::string thr thread_pool_name_(std::move(thread_pool_name)), running_(false), should_stop_(false), - mu_(worker_num), - rsignal_(worker_num) { - for (size_t i = 0; i < worker_num_; ++i) { + mu_(nlinks_), + rsignal_(nlinks_) { + for (size_t i = 0; i < nlinks_; ++i) { newest_node_[i] = nullptr; time_newest_node_[i] = nullptr; } @@ -71,7 +73,7 @@ ThreadPool::~ThreadPool() { stop_thread_pool(); } int ThreadPool::start_thread_pool() { if (!running_.load()) { should_stop_.store(false); - for (size_t i = 0; i < worker_num_; ++i) { + for (size_t i = 0; i < nlinks_; ++i) { workers_.push_back(new Worker(this, i)); int res = workers_[i]->start(); if (res != 0) { @@ -120,10 +122,10 @@ void ThreadPool::Schedule(TaskFunc func, void* arg) { if (LIKELY(!should_stop())) { auto node = new Node(func, arg); - auto idx = ++task_idx_ % worker_num_; - LinkOne(node, &newest_node_[idx]); + auto idx = ++task_idx_; + LinkOne(node, &newest_node_[idx % nlinks_]); node_cnt_++; - rsignal_[idx].notify_one(); + rsignal_[idx % nlinks_].notify_one(); } } @@ -136,11 +138,11 @@ void ThreadPool::DelaySchedule(uint64_t timeout, TaskFunc func, void* arg) { uint64_t exec_time = unow + timeout * 1000; if (LIKELY(!should_stop())) { - auto idx = ++task_idx_ % worker_num_; + auto idx = ++task_idx_; auto node = new Node(exec_time, func, arg); - LinkOne(node, &newest_node_[idx]); + LinkOne(node, &newest_node_[idx % nlinks_]); time_node_cnt_++; - rsignal_[idx].notify_all(); + rsignal_[idx % nlinks_].notify_all(); } } @@ -158,10 +160,11 @@ void ThreadPool::runInThread(const int idx) { Node* tmp = nullptr; Node* last = nullptr; Node* time_last = nullptr; - auto& newest_node = newest_node_[idx]; - auto& time_newest_node = time_newest_node_[idx]; - auto& mu = mu_[idx]; - auto& rsignal = rsignal_[idx]; + + auto& newest_node = newest_node_[idx % nlinks_]; + auto& time_newest_node = time_newest_node_[idx % nlinks_]; + auto& mu = mu_[idx % nlinks_]; + auto& rsignal = rsignal_[idx % nlinks_]; while (LIKELY(!should_stop())) { std::unique_lock lock(mu); From 09e9673b25dba0fbd72a5301dadc8cc6da96cabd Mon Sep 17 00:00:00 2001 From: chejinge <945997690@qq.com> Date: Wed, 19 Jun 2024 22:44:40 +0800 Subject: [PATCH 15/25] feat:thread purge (#2697) * feat:thread purge --------- Co-authored-by: chejinge --- conf/pika.conf | 5 ++++ include/pika_client_processor.h | 2 -- include/pika_conf.h | 10 ++++++++ include/pika_repl_bgworker.h | 3 +++ include/pika_server.h | 9 +++++-- src/net/include/backend_thread.h | 1 + src/net/include/client_thread.h | 1 + src/net/include/net_thread.h | 2 +- src/net/include/server_thread.h | 2 ++ src/net/src/backend_thread.cc | 2 ++ src/net/src/client_thread.cc | 2 ++ src/net/src/dispatch_thread.cc | 2 +- src/net/src/holy_thread.h | 2 ++ src/net/src/net_thread_name.h | 2 +- src/net/src/net_util.cc | 1 + src/net/src/net_util.h | 1 + src/net/src/thread_pool.cc | 4 ++- src/pika_admin.cc | 20 +++++++++++++++ src/pika_client_processor.cc | 18 ------------- src/pika_conf.cc | 13 +++++++--- src/pika_repl_client.cc | 10 ++++++-- src/pika_repl_server.cc | 3 ++- src/pika_server.cc | 44 ++++++++++++++++++++++---------- src/rsync_client.cc | 1 + 
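The reworked constructor above groups workers into links: `nlinks_` is `worker_num` divided by `nworkers_per_link_`, rounded up, and producers spread tasks round-robin via `task_idx_ % nlinks_`. Below is a minimal sketch of the same layout using plain mutex-guarded queues rather than the pool's lock-free `LinkOne` list; `StripedQueue` and its members are illustrative names, not Pika's.

```cpp
#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <queue>
#include <vector>

// N workers share one "link" (queue + mutex + condvar); producers stripe
// tasks across links round-robin, so contention stays within a link.
class StripedQueue {
 public:
  StripedQueue(size_t worker_num, size_t workers_per_link)
      : nlinks_((worker_num + workers_per_link - 1) / workers_per_link),  // ceil division
        queues_(nlinks_), mus_(nlinks_), cvs_(nlinks_) {}

  void Push(std::function<void()> task) {
    size_t idx = next_++ % nlinks_;  // wraps, like the uint16_t task_idx_ above
    {
      std::lock_guard<std::mutex> lk(mus_[idx]);
      queues_[idx].push(std::move(task));
    }
    cvs_[idx].notify_one();
  }

  std::function<void()> Pop(size_t worker_idx) {
    size_t idx = worker_idx % nlinks_;  // each worker is pinned to one link
    std::unique_lock<std::mutex> lk(mus_[idx]);
    cvs_[idx].wait(lk, [&] { return !queues_[idx].empty(); });
    auto task = std::move(queues_[idx].front());
    queues_[idx].pop();
    return task;
  }

 private:
  const size_t nlinks_;
  std::atomic<size_t> next_{0};
  std::vector<std::queue<std::function<void()>>> queues_;
  std::vector<std::mutex> mus_;
  std::vector<std::condition_variable> cvs_;
};
```

The payoff is contention scoping: two workers on the same link contend only with each other and with producers that hash to that link, not with every thread in the pool.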
src/rsync_server.cc | 3 ++- 25 files changed, 115 insertions(+), 48 deletions(-) diff --git a/conf/pika.conf b/conf/pika.conf index 1396caf5e5..3fcb5d5158 100644 --- a/conf/pika.conf +++ b/conf/pika.conf @@ -27,6 +27,11 @@ thread-num : 1 # are dedicated to handling user requests. thread-pool-size : 12 +# This parameter is used to control whether to separate fast and slow commands. +# When slow-cmd-pool is set to yes, fast and slow commands are separated. +# When set to no, they are not separated. +slow-cmd-pool : no + # Size of the low level thread pool, The threads within this pool # are dedicated to handling slow user requests. slow-cmd-thread-pool-size : 1 diff --git a/include/pika_client_processor.h b/include/pika_client_processor.h index a2c628394e..dccd4ef96c 100644 --- a/include/pika_client_processor.h +++ b/include/pika_client_processor.h @@ -19,12 +19,10 @@ class PikaClientProcessor { int Start(); void Stop(); void SchedulePool(net::TaskFunc func, void* arg); - void ScheduleBgThreads(net::TaskFunc func, void* arg, const std::string& hash_str); size_t ThreadPoolCurQueueSize(); size_t ThreadPoolMaxQueueSize(); private: std::unique_ptr pool_; - std::vector> bg_threads_; }; #endif // PIKA_CLIENT_PROCESSOR_H_ diff --git a/include/pika_conf.h b/include/pika_conf.h index d55b45e027..e93a5e7e5b 100644 --- a/include/pika_conf.h +++ b/include/pika_conf.h @@ -186,6 +186,10 @@ class PikaConf : public pstd::BaseConf { std::shared_lock l(rwlock_); return slotmigrate_; } + bool slow_cmd_pool() { + std::shared_lock l(rwlock_); + return slow_cmd_pool_; + } std::string server_id() { std::shared_lock l(rwlock_); return server_id_; @@ -584,6 +588,11 @@ class PikaConf : public pstd::BaseConf { TryPushDiffCommands("slotmigrate", value ? "yes" : "no"); slotmigrate_.store(value); } + void SetSlowCmdPool(const bool value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slow-cmd-pool", value ? 
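The new `slow-cmd-pool` switch above controls whether slow commands are dispatched to their own small pool. As a hedged illustration of what the option buys (none of these names are Pika's real interfaces), the routing rule amounts to:

```cpp
#include <functional>
#include <string>
#include <unordered_set>

using Task = std::function<void()>;

// Hypothetical router: slow-listed commands go to a dedicated pool so a
// long-running scan cannot occupy the threads serving fast traffic.
struct CommandRouter {
  bool slow_pool_enabled = false;               // slow-cmd-pool : yes/no
  std::unordered_set<std::string> slow_cmds;    // the configured slow-command list

  void Schedule(const std::string& cmd, Task task,
                const std::function<void(Task)>& fast_pool,
                const std::function<void(Task)>& slow_pool) const {
    if (slow_pool_enabled && slow_cmds.count(cmd) != 0) {
      slow_pool(std::move(task));  // slow commands queue only behind each other
    } else {
      fast_pool(std::move(task));  // default path, same as slow-cmd-pool : no
    }
  }
};
```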
"yes" : "no"); + slow_cmd_pool_.store(value); + } void SetSlotMigrateThreadNum(const int value) { std::lock_guard l(rwlock_); TryPushDiffCommands("slotmigrate-thread-num", std::to_string(value)); @@ -872,6 +881,7 @@ class PikaConf : public pstd::BaseConf { std::string bgsave_path_; std::string bgsave_prefix_; std::string pidfile_; + std::atomic slow_cmd_pool_; std::string compression_; std::string compression_per_level_; diff --git a/include/pika_repl_bgworker.h b/include/pika_repl_bgworker.h index 2401d72009..e9d6a1b034 100644 --- a/include/pika_repl_bgworker.h +++ b/include/pika_repl_bgworker.h @@ -28,6 +28,9 @@ class PikaReplBgWorker { void QueueClear(); static void HandleBGWorkerWriteBinlog(void* arg); static void HandleBGWorkerWriteDB(void* arg); + void SetThreadName(const std::string& thread_name) { + bg_thread_.set_thread_name(thread_name); + } BinlogItem binlog_item_; net::RedisParser redis_parser_; diff --git a/include/pika_server.h b/include/pika_server.h index 4811c54045..480ba5c17e 100644 --- a/include/pika_server.h +++ b/include/pika_server.h @@ -97,6 +97,7 @@ class PikaServer : public pstd::noncopyable { bool force_full_sync(); void SetForceFullSync(bool v); void SetDispatchQueueLimit(int queue_limit); + void SetSlowCmdThreadPoolFlag(bool flag); storage::StorageOptions storage_options(); std::unique_ptr& pika_dispatch_thread() { return pika_dispatch_thread_; @@ -170,7 +171,6 @@ class PikaServer : public pstd::noncopyable { void FinishMetaSync(); bool MetaSyncDone(); void ResetMetaSyncStatus(); - void SetLoopDBStateMachine(bool need_loop); int GetMetaSyncTimestamp(); void UpdateMetaSyncTimestamp(); void UpdateMetaSyncTimestampWithoutLock(); @@ -181,7 +181,7 @@ class PikaServer : public pstd::noncopyable { * PikaClientProcessor Process Task */ void ScheduleClientPool(net::TaskFunc func, void* arg, bool is_slow_cmd); - void ScheduleClientBgThreads(net::TaskFunc func, void* arg, const std::string& hash_str); + // for info debug size_t ClientProcessorThreadPoolCurQueueSize(); size_t ClientProcessorThreadPoolMaxQueueSize(); @@ -644,6 +644,11 @@ class PikaServer : public pstd::noncopyable { * acl */ std::unique_ptr<::Acl> acl_ = nullptr; + + /* + * fast and slow thread pools + */ + bool slow_cmd_thread_pool_flag_; }; #endif diff --git a/src/net/include/backend_thread.h b/src/net/include/backend_thread.h index 6e39583014..b374ec86c6 100644 --- a/src/net/include/backend_thread.h +++ b/src/net/include/backend_thread.h @@ -110,6 +110,7 @@ class BackendThread : public Thread { */ int StartThread() override; int StopThread() override; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } pstd::Status Write(int fd, const std::string& msg); pstd::Status Close(int fd); // Try to connect fd noblock, if return EINPROGRESS or EAGAIN or EWOULDBLOCK diff --git a/src/net/include/client_thread.h b/src/net/include/client_thread.h index 25846555c2..c57174724d 100644 --- a/src/net/include/client_thread.h +++ b/src/net/include/client_thread.h @@ -110,6 +110,7 @@ class ClientThread : public Thread { */ int StartThread() override; int StopThread() override; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } pstd::Status Write(const std::string& ip, int port, const std::string& msg); pstd::Status Close(const std::string& ip, int port); diff --git a/src/net/include/net_thread.h b/src/net/include/net_thread.h index ac700819a5..ff96811e91 100644 --- a/src/net/include/net_thread.h +++ b/src/net/include/net_thread.h @@ -34,7 +34,7 
@@ class Thread : public pstd::noncopyable { std::string thread_name() const { return thread_name_; } - void set_thread_name(const std::string& name) { thread_name_ = name; } + virtual void set_thread_name(const std::string& name) { thread_name_ = name; } protected: std::atomic_bool should_stop_; diff --git a/src/net/include/server_thread.h b/src/net/include/server_thread.h index d0d6d63612..b8defbf2a6 100644 --- a/src/net/include/server_thread.h +++ b/src/net/include/server_thread.h @@ -150,6 +150,8 @@ class ServerThread : public Thread { // Move into server thread virtual void MoveConnIn(std::shared_ptr conn, const NotifyType& type) = 0; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } + virtual void KillAllConns() = 0; virtual bool KillConn(const std::string& ip_port) = 0; diff --git a/src/net/src/backend_thread.cc b/src/net/src/backend_thread.cc index b0eaa53687..27389293d7 100644 --- a/src/net/src/backend_thread.cc +++ b/src/net/src/backend_thread.cc @@ -48,6 +48,8 @@ int BackendThread::StartThread() { if (res) { return res; } + set_thread_name("BackendThread"); + return Thread::StartThread(); } diff --git a/src/net/src/client_thread.cc b/src/net/src/client_thread.cc index 916fd8f6ee..5561d6d3c0 100644 --- a/src/net/src/client_thread.cc +++ b/src/net/src/client_thread.cc @@ -47,6 +47,8 @@ int ClientThread::StartThread() { if (res) { return res; } + set_thread_name("ClientThread"); + return Thread::StartThread(); } diff --git a/src/net/src/dispatch_thread.cc b/src/net/src/dispatch_thread.cc index d98c44b68b..922688c178 100644 --- a/src/net/src/dispatch_thread.cc +++ b/src/net/src/dispatch_thread.cc @@ -66,7 +66,7 @@ int DispatchThread::StartThread() { // Adding timer tasks and run timertaskThread timerTaskThread_.AddTimerTask("blrpop_blocking_info_scan", 250, true, [this] { this->ScanExpiredBlockedConnsOfBlrpop(); }); - + timerTaskThread_.set_thread_name("TimerTaskThread"); timerTaskThread_.StartThread(); return ServerThread::StartThread(); } diff --git a/src/net/src/holy_thread.h b/src/net/src/holy_thread.h index 0b4f0d700b..312de4c84f 100644 --- a/src/net/src/holy_thread.h +++ b/src/net/src/holy_thread.h @@ -35,6 +35,8 @@ class HolyThread : public ServerThread { int StopThread() override; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } + void set_keepalive_timeout(int timeout) override { keepalive_timeout_ = timeout; } int conn_num() const override; diff --git a/src/net/src/net_thread_name.h b/src/net/src/net_thread_name.h index e85cd1a6df..5d8dc78db8 100644 --- a/src/net/src/net_thread_name.h +++ b/src/net/src/net_thread_name.h @@ -26,7 +26,7 @@ inline bool SetThreadName(pthread_t id, const std::string& name) { #else inline bool SetThreadName(pthread_t id, const std::string& name) { // printf ("no pthread_setname\n"); - return false; + return pthread_setname_np(name.c_str()) == 0; } #endif } // namespace net diff --git a/src/net/src/net_util.cc b/src/net/src/net_util.cc index 6f1f4692d0..7efbb0f6cd 100644 --- a/src/net/src/net_util.cc +++ b/src/net/src/net_util.cc @@ -126,6 +126,7 @@ int TimerTaskThread::StartThread() { // if there is no timer task registered, no need of start the thread return -1; } + set_thread_name("TimerTask"); LOG(INFO) << "TimerTaskThread Starting..."; return Thread::StartThread(); } diff --git a/src/net/src/net_util.h b/src/net/src/net_util.h index a6fcbdc932..fe96e0a950 100644 --- a/src/net/src/net_util.h +++ b/src/net/src/net_util.h @@ -80,6 +80,7 @@ class TimerTaskThread 
: public Thread { ~TimerTaskThread() override; int StartThread() override; int StopThread() override; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } uint32_t AddTimerTask(const std::string& task_name, int interval_ms, bool repeat_exec, const std::function &task){ return timer_task_manager_.AddTimerTask(task_name, interval_ms, repeat_exec, task); diff --git a/src/net/src/thread_pool.cc b/src/net/src/thread_pool.cc index 4ea4b82125..8e20694244 100644 --- a/src/net/src/thread_pool.cc +++ b/src/net/src/thread_pool.cc @@ -8,6 +8,7 @@ #include +#include #include namespace net { @@ -24,7 +25,8 @@ int ThreadPool::Worker::start() { return -1; } else { start_.store(true); - SetThreadName(thread_id_, thread_pool_->thread_pool_name() + "Worker"); + std::string thread_id_str = std::to_string(reinterpret_cast(thread_id_)); + SetThreadName(thread_id_, thread_pool_->thread_pool_name() + "_Worker_" + thread_id_str); } } return 0; diff --git a/src/pika_admin.cc b/src/pika_admin.cc index bb52159dd6..c47a90649b 100644 --- a/src/pika_admin.cc +++ b/src/pika_admin.cc @@ -1602,6 +1602,12 @@ void ConfigCmd::ConfigGet(std::string& ret) { EncodeString(&config_body, g_pika_conf->slotmigrate() ? "yes" : "no"); } + if (pstd::stringmatch(pattern.data(), "slow-cmd-pool", 1)) { + elements += 2; + EncodeString(&config_body, "slow-cmd-pool"); + EncodeString(&config_body, g_pika_conf->slow_cmd_pool() ? "yes" : "no"); + } + if (pstd::stringmatch(pattern.data(), "slotmigrate-thread-num", 1)!= 0) { elements += 2; EncodeString(&config_body, "slotmigrate-thread-num"); @@ -2143,6 +2149,7 @@ void ConfigCmd::ConfigSet(std::shared_ptr db) { "requirepass", "masterauth", "slotmigrate", + "slow-cmd-pool", "slotmigrate-thread-num", "thread-migrate-keys-num", "userpass", @@ -2302,6 +2309,19 @@ void ConfigCmd::ConfigSet(std::shared_ptr db) { } g_pika_conf->SetSlotMigrate(slotmigrate); res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slow_cmd_pool") { + bool SlowCmdPool; + if (value == "yes") { + SlowCmdPool = true; + } else if (value == "no") { + SlowCmdPool = false; + } else { + res_.AppendStringRaw( "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slow-cmd-pool'\r\n"); + return; + } + g_pika_conf->SetSlowCmdPool(SlowCmdPool); + g_pika_server->SetSlowCmdThreadPoolFlag(SlowCmdPool); + res_.AppendStringRaw("+OK\r\n"); } else if (set_item == "slowlog-log-slower-than") { if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-log-slower-than'\r\n"); diff --git a/src/pika_client_processor.cc b/src/pika_client_processor.cc index 8a26ccd4a4..5a1c60cee0 100644 --- a/src/pika_client_processor.cc +++ b/src/pika_client_processor.cc @@ -9,10 +9,6 @@ PikaClientProcessor::PikaClientProcessor(size_t worker_num, size_t max_queue_size, const std::string& name_prefix) { pool_ = std::make_unique(worker_num, max_queue_size, name_prefix + "Pool"); - for (size_t i = 0; i < worker_num; ++i) { - bg_threads_.push_back(std::make_unique(max_queue_size)); - bg_threads_.back()->set_thread_name(name_prefix + "BgThread"); - } } PikaClientProcessor::~PikaClientProcessor() { @@ -24,29 +20,15 @@ int PikaClientProcessor::Start() { if (res != net::kSuccess) { return res; } - for (auto& bg_thread : bg_threads_) { - res = bg_thread->StartThread(); - if (res != net::kSuccess) { - return res; - } - } return res; } void PikaClientProcessor::Stop() { pool_->stop_thread_pool(); - for (auto & bg_thread : 
bg_threads_) { - bg_thread->StopThread(); - } } void PikaClientProcessor::SchedulePool(net::TaskFunc func, void* arg) { pool_->Schedule(func, arg); } -void PikaClientProcessor::ScheduleBgThreads(net::TaskFunc func, void* arg, const std::string& hash_str) { - std::size_t index = std::hash{}(hash_str) % bg_threads_.size(); - bg_threads_[index]->Schedule(func, arg); -} - size_t PikaClientProcessor::ThreadPoolCurQueueSize() { size_t cur_size = 0; if (pool_) { diff --git a/src/pika_conf.cc b/src/pika_conf.cc index 4ca6710b60..3d54e3e895 100644 --- a/src/pika_conf.cc +++ b/src/pika_conf.cc @@ -66,6 +66,11 @@ int PikaConf::Load() { GetConfStr("slotmigrate", &smgrt); slotmigrate_.store(smgrt == "yes" ? true : false); + // slow cmd thread pool + std::string slowcmdpool; + GetConfStr("slow-cmd-pool", &slowcmdpool); + slow_cmd_pool_.store(slowcmdpool == "yes" ? true : false); + int binlog_writer_num = 1; GetConfInt("binlog-writer-num", &binlog_writer_num); if (binlog_writer_num <= 0 || binlog_writer_num > 24) { @@ -154,11 +159,11 @@ int PikaConf::Load() { } GetConfInt("slow-cmd-thread-pool-size", &slow_cmd_thread_pool_size_); - if (slow_cmd_thread_pool_size_ <= 0) { - slow_cmd_thread_pool_size_ = 12; + if (slow_cmd_thread_pool_size_ < 0) { + slow_cmd_thread_pool_size_ = 8; } - if (slow_cmd_thread_pool_size_ > 100) { - slow_cmd_thread_pool_size_ = 100; + if (slow_cmd_thread_pool_size_ > 50) { + slow_cmd_thread_pool_size_ = 50; } std::string slow_cmd_list; diff --git a/src/pika_repl_client.cc b/src/pika_repl_client.cc index 352fbdf7e5..2d53be265c 100644 --- a/src/pika_repl_client.cc +++ b/src/pika_repl_client.cc @@ -28,10 +28,16 @@ PikaReplClient::PikaReplClient(int cron_interval, int keepalive_timeout) { client_thread_ = std::make_unique(cron_interval, keepalive_timeout); client_thread_->set_thread_name("PikaReplClient"); for (int i = 0; i < g_pika_conf->sync_binlog_thread_num(); i++) { - write_binlog_workers_.emplace_back(std::make_unique(PIKA_SYNC_BUFFER_SIZE)); + auto new_binlog_worker = std::make_unique(PIKA_SYNC_BUFFER_SIZE); + std::string binlog_worker_name = "ReplBinlogWorker" + std::to_string(i); + new_binlog_worker->SetThreadName(binlog_worker_name); + write_binlog_workers_.emplace_back(std::move(new_binlog_worker)); } for (int i = 0; i < g_pika_conf->sync_thread_num(); ++i) { - write_db_workers_.emplace_back(std::make_unique(PIKA_SYNC_BUFFER_SIZE)); + auto new_db_worker = std::make_unique(PIKA_SYNC_BUFFER_SIZE); + std::string db_worker_name = "ReplWriteDBWorker" + std::to_string(i); + new_db_worker->SetThreadName(db_worker_name); + write_db_workers_.emplace_back(std::move(new_db_worker)); } } diff --git a/src/pika_repl_server.cc b/src/pika_repl_server.cc index a99fc18047..b92d239b18 100644 --- a/src/pika_repl_server.cc +++ b/src/pika_repl_server.cc @@ -17,7 +17,7 @@ extern PikaServer* g_pika_server; extern std::unique_ptr g_pika_rm; PikaReplServer::PikaReplServer(const std::set& ips, int port, int cron_interval) { - server_tp_ = std::make_unique(PIKA_REPL_SERVER_TP_SIZE, 100000); + server_tp_ = std::make_unique(PIKA_REPL_SERVER_TP_SIZE, 100000, "PikaReplServer"); pika_repl_server_thread_ = std::make_unique(ips, port, cron_interval); pika_repl_server_thread_->set_thread_name("PikaReplServer"); } @@ -27,6 +27,7 @@ PikaReplServer::~PikaReplServer() { } int PikaReplServer::Start() { + pika_repl_server_thread_->set_thread_name("PikaReplServer"); int res = pika_repl_server_thread_->StartThread(); if (res != net::kSuccess) { LOG(FATAL) << "Start Pika Repl Server Thread Error: " << res diff --git 
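The `slow-cmd-thread-pool-size` bounds above (negative values fall back to 8, anything over 50 is capped) follow the clamp pattern used throughout `PikaConf::Load`. Restated as a hypothetical helper for clarity:

```cpp
#include <algorithm>

// Clamp a configured pool size: negatives take the default, large values are capped.
// The 8/50 bounds mirror the slow-cmd-thread-pool-size hunk above.
int ClampPoolSize(int configured, int def = 8, int max = 50) {
  if (configured < 0) return def;
  return std::min(configured, max);
}
```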
a/src/pika_server.cc b/src/pika_server.cc index 5c3aae16df..eaa73e5749 100644 --- a/src/pika_server.cc +++ b/src/pika_server.cc @@ -43,6 +43,7 @@ void DoPurgeDir(void* arg) { PikaServer::PikaServer() : exit_(false), + slow_cmd_thread_pool_flag_(g_pika_conf->slow_cmd_pool()), last_check_compact_time_({0, 0}), last_check_resume_time_({0, 0}), repl_state_(PIKA_REPL_NO_CONNECT), @@ -100,6 +101,7 @@ PikaServer::PikaServer() } acl_ = std::make_unique<::Acl>(); + SetSlowCmdThreadPoolFlag(g_pika_conf->slow_cmd_pool()); } PikaServer::~PikaServer() { @@ -166,12 +168,6 @@ void PikaServer::Start() { LOG(FATAL) << "Start PikaClientProcessor Error: " << ret << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); } - ret = pika_slow_cmd_thread_pool_->start_thread_pool(); - if (ret != net::kSuccess) { - dbs_.clear(); - LOG(FATAL) << "Start PikaLowLevelThreadPool Error: " << ret - << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); - } ret = pika_dispatch_thread_->StartThread(); if (ret != net::kSuccess) { dbs_.clear(); @@ -205,6 +201,24 @@ void PikaServer::Start() { LOG(INFO) << "Goodbye..."; } +void PikaServer::SetSlowCmdThreadPoolFlag(bool flag) { + slow_cmd_thread_pool_flag_ = flag; + int ret = 0; + if (flag) { + ret = pika_slow_cmd_thread_pool_->start_thread_pool(); + if (ret != net::kSuccess) { + dbs_.clear(); + LOG(ERROR) << "Start PikaLowLevelThreadPool Error: " << ret + << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); + } + } else { + while (SlowCmdThreadPoolCurQueueSize() != 0) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + pika_slow_cmd_thread_pool_->stop_thread_pool(); + } +} + void PikaServer::Exit() { g_pika_server->DisableCompact(); exit_mutex_.unlock(); @@ -707,17 +721,13 @@ void PikaServer::SetFirstMetaSync(bool v) { } void PikaServer::ScheduleClientPool(net::TaskFunc func, void* arg, bool is_slow_cmd) { - if (is_slow_cmd) { + if (is_slow_cmd && g_pika_conf->slow_cmd_pool()) { pika_slow_cmd_thread_pool_->Schedule(func, arg); return; } pika_client_processor_->SchedulePool(func, arg); } -void PikaServer::ScheduleClientBgThreads(net::TaskFunc func, void* arg, const std::string& hash_str) { - pika_client_processor_->ScheduleBgThreads(func, arg, hash_str); -} - size_t PikaServer::ClientProcessorThreadPoolCurQueueSize() { if (!pika_client_processor_) { return 0; @@ -749,11 +759,13 @@ size_t PikaServer::SlowCmdThreadPoolMaxQueueSize() { } void PikaServer::BGSaveTaskSchedule(net::TaskFunc func, void* arg) { + bgsave_thread_.set_thread_name("BGSaveTask"); bgsave_thread_.StartThread(); bgsave_thread_.Schedule(func, arg); } void PikaServer::PurgelogsTaskSchedule(net::TaskFunc func, void* arg) { + purge_thread_.set_thread_name("PurgelogsTask"); purge_thread_.StartThread(); purge_thread_.Schedule(func, arg); } @@ -764,6 +776,7 @@ void PikaServer::PurgeDir(const std::string& path) { } void PikaServer::PurgeDirTaskSchedule(void (*function)(void*), void* arg) { + purge_thread_.set_thread_name("PurgeDirTask"); purge_thread_.StartThread(); purge_thread_.Schedule(function, arg); } @@ -814,6 +827,7 @@ void PikaServer::TryDBSync(const std::string& ip, int port, const std::string& d } void PikaServer::KeyScanTaskSchedule(net::TaskFunc func, void* arg) { + key_scan_thread_.set_thread_name("KeyScanTask"); key_scan_thread_.StartThread(); key_scan_thread_.Schedule(func, arg); } @@ -1453,6 +1467,7 @@ void PikaServer::Bgslotsreload(const std::shared_ptr& db) { LOG(INFO) << "Start slot reloading"; // Start 
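`SetSlowCmdThreadPoolFlag` above stops the slow pool only after its queue drains, so disabling the feature at runtime cannot drop already-queued commands. A generic sketch of that drain-then-stop step, assuming only a pool type exposing `cur_queue_size()` and `stop_thread_pool()` (the 100ms polling interval mirrors the change above):

```cpp
#include <chrono>
#include <thread>

// Wait for queued work to finish, then stop the pool.
template <typename Pool>
void DrainThenStop(Pool& pool) {
  while (pool.cur_queue_size() != 0) {
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
  }
  pool.stop_thread_pool();
}
```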
new thread if needed + bgsave_thread_.set_thread_name("SlotsReload"); bgsave_thread_.StartThread(); bgsave_thread_.Schedule(&DoBgslotsreload, static_cast(this)); } @@ -1520,6 +1535,7 @@ void PikaServer::Bgslotscleanup(std::vector cleanupSlots, const std::shared LOG(INFO) << "Start slot cleanup, slots: " << slotsStr << std::endl; // Start new thread if needed + bgslots_cleanup_thread_.set_thread_name("SlotsCleanup"); bgslots_cleanup_thread_.StartThread(); bgslots_cleanup_thread_.Schedule(&DoBgslotscleanup, static_cast(this)); } @@ -1624,7 +1640,7 @@ void DoBgslotscleanup(void* arg) { void PikaServer::ResetCacheAsync(uint32_t cache_num, std::shared_ptr db, cache::CacheConfig *cache_cfg) { if (PIKA_CACHE_STATUS_OK == db->cache()->CacheStatus() || PIKA_CACHE_STATUS_NONE == db->cache()->CacheStatus()) { - + common_bg_thread_.set_thread_name("ResetCacheTask"); common_bg_thread_.StartThread(); BGCacheTaskArg *arg = new BGCacheTaskArg(); arg->db = db; @@ -1648,7 +1664,7 @@ void PikaServer::ClearCacheDbAsync(std::shared_ptr db) { LOG(WARNING) << "can not clear cache in status: " << db->cache()->CacheStatus(); return; } - + common_bg_thread_.set_thread_name("CacheClearThread"); common_bg_thread_.StartThread(); BGCacheTaskArg *arg = new BGCacheTaskArg(); arg->db = db; @@ -1716,7 +1732,7 @@ void PikaServer::ClearCacheDbAsyncV2(std::shared_ptr db) { LOG(WARNING) << "can not clear cache in status: " << db->cache()->CacheStatus(); return; } - + common_bg_thread_.set_thread_name("V2CacheClearThread"); common_bg_thread_.StartThread(); BGCacheTaskArg *arg = new BGCacheTaskArg(); arg->db = db; diff --git a/src/rsync_client.cc b/src/rsync_client.cc index 0cf683ba75..7def7cbadc 100644 --- a/src/rsync_client.cc +++ b/src/rsync_client.cc @@ -28,6 +28,7 @@ RsyncClient::RsyncClient(const std::string& dir, const std::string& db_name) parallel_num_(g_pika_conf->max_rsync_parallel_num()) { wo_mgr_.reset(new WaitObjectManager()); client_thread_ = std::make_unique(3000, 60, wo_mgr_.get()); + client_thread_->set_thread_name("RsyncClientThread"); work_threads_.resize(GetParallelNum()); finished_work_cnt_.store(0); } diff --git a/src/rsync_server.cc b/src/rsync_server.cc index ea339af59c..5696719980 100644 --- a/src/rsync_server.cc +++ b/src/rsync_server.cc @@ -31,7 +31,7 @@ void RsyncWriteResp(RsyncService::RsyncResponse& response, std::shared_ptr& ips, const int port) { - work_thread_ = std::make_unique(2, 100000); + work_thread_ = std::make_unique(2, 100000, "RsyncServerWork"); rsync_server_thread_ = std::make_unique(ips, port, 1 * 1000, this); } @@ -46,6 +46,7 @@ void RsyncServer::Schedule(net::TaskFunc func, void* arg) { int RsyncServer::Start() { LOG(INFO) << "start RsyncServer ..."; + rsync_server_thread_->set_thread_name("RsyncServerThread"); int res = rsync_server_thread_->StartThread(); if (res != net::kSuccess) { LOG(FATAL) << "Start rsync Server Thread Error. 
ret_code: " << res << " message: " From 55de8b392b99bb86b9bd1a699e7e922e794c6600 Mon Sep 17 00:00:00 2001 From: cheniujh <41671101+cheniujh@users.noreply.github.com> Date: Thu, 20 Jun 2024 17:51:24 +0800 Subject: [PATCH 16/25] fix: Pika can not exec full-sync when multi slaves connect to the same master within a short time (#2746) * use int64_t instead of int32_t --------- Co-authored-by: cjh <1271435567@qq.com> --- src/pika_server.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/pika_server.cc b/src/pika_server.cc index eaa73e5749..ccdd64499f 100644 --- a/src/pika_server.cc +++ b/src/pika_server.cc @@ -820,7 +820,8 @@ void PikaServer::TryDBSync(const std::string& ip, int port, const std::string& d std::string logger_filename = sync_db->Logger()->filename(); if (pstd::IsDir(bgsave_info.path) != 0 || !pstd::FileExists(NewFileName(logger_filename, bgsave_info.offset.b_offset.filenum)) || - top - bgsave_info.offset.b_offset.filenum > kDBSyncMaxGap) { + static_cast(top) - static_cast(bgsave_info.offset.b_offset.filenum) > + static_cast(kDBSyncMaxGap)) { // Need Bgsave first db->BgSaveDB(); } From 6f85ab51bc71dafe62e9ba35f3c521e76a881f7f Mon Sep 17 00:00:00 2001 From: wangshao1 <30471730+wangshao1@users.noreply.github.com> Date: Thu, 20 Jun 2024 21:22:20 +0800 Subject: [PATCH 17/25] fix: keyspace causes heap-buffer-overflow (#2749) * fix keyspace error about heap-buffer-overflow * fix by ai review comments --------- Co-authored-by: wangshaoyi --- src/pika_admin.cc | 13 +++++++------ src/storage/src/base_value_format.h | 1 + src/storage/src/redis.cc | 4 ++-- src/storage/src/storage.cc | 2 +- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/pika_admin.cc b/src/pika_admin.cc index c47a90649b..9e974bd7c1 100644 --- a/src/pika_admin.cc +++ b/src/pika_admin.cc @@ -1190,9 +1190,9 @@ void InfoCmd::InfoKeyspace(std::string& info) { if (argv_.size() > 1 && strcasecmp(argv_[1].data(), kAllSection.data()) == 0) { tmp_stream << "# Start async statistics\r\n"; } else if (argv_.size() == 3 && strcasecmp(argv_[1].data(), kKeyspaceSection.data()) == 0) { - tmp_stream << "# Start async statistics\r\n"; + tmp_stream << "# Start async statistics\r\n"; } else { - tmp_stream << "# Use \"info keyspace 1\" to do async statistics\r\n"; + tmp_stream << "# Use \"info keyspace 1\" to do async statistics\r\n"; } std::shared_lock rwl(g_pika_server->dbs_rw_); for (const auto& db_item : g_pika_server->dbs_) { @@ -1201,7 +1201,8 @@ void InfoCmd::InfoKeyspace(std::string& info) { key_scan_info = db_item.second->GetKeyScanInfo(); key_infos = key_scan_info.key_infos; duration = key_scan_info.duration; - if (key_infos.size() != (size_t)(storage::DataType::kNones)) { + if (key_infos.size() != (size_t)(storage::DataTypeNum)) { + LOG(ERROR) << "key_infos size is not equal with expected, potential data inconsistency"; info.append("info keyspace error\r\n"); return; } @@ -1216,7 +1217,7 @@ void InfoCmd::InfoKeyspace(std::string& info) { tmp_stream << "# Duration: " << std::to_string(duration) + "s" << "\r\n"; } - + tmp_stream << db_name << " Strings_keys=" << key_infos[0].keys << ", expires=" << key_infos[0].expires << ", invalid_keys=" << key_infos[0].invaild_keys << "\r\n"; tmp_stream << db_name << " Hashes_keys=" << key_infos[1].keys << ", expires=" << key_infos[1].expires @@ -2911,8 +2912,8 @@ void DbsizeCmd::Do() { } KeyScanInfo key_scan_info = dbs->GetKeyScanInfo(); std::vector key_infos = key_scan_info.key_infos; - if (key_infos.size() != (size_t)(storage::DataType::kNones)) { - 
res_.SetRes(CmdRes::kErrOther, "keyspace error"); + if (key_infos.size() != (size_t)(storage::DataTypeNum)) { + res_.SetRes(CmdRes::kErrOther, "Mismatch in expected data types and actual key info count"); return; } uint64_t dbsize = 0; diff --git a/src/storage/src/base_value_format.h b/src/storage/src/base_value_format.h index 4663d3df12..3f0f181f97 100644 --- a/src/storage/src/base_value_format.h +++ b/src/storage/src/base_value_format.h @@ -19,6 +19,7 @@ namespace storage { enum class DataType : uint8_t { kStrings = 0, kHashes = 1, kSets = 2, kLists = 3, kZSets = 4, kStreams = 5, kNones = 6, kAll = 7 }; +constexpr int DataTypeNum = int(DataType::kNones); constexpr char DataTypeTag[] = { 'k', 'h', 's', 'l', 'z', 'x', 'n', 'a'}; constexpr char* DataTypeStrings[] = { "string", "hash", "set", "list", "zset", "streams", "none", "all"}; diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc index b5bfb66bd4..8b796c111d 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -352,7 +352,7 @@ void Redis::SetCompactRangeOptions(const bool is_canceled) { default_compact_range_options_.canceled = new std::atomic(is_canceled); } else { default_compact_range_options_.canceled->store(is_canceled); - } + } } Status Redis::GetProperty(const std::string& property, uint64_t* out) { @@ -365,7 +365,7 @@ Status Redis::GetProperty(const std::string& property, uint64_t* out) { } Status Redis::ScanKeyNum(std::vector* key_infos) { - key_infos->resize(5); + key_infos->resize(DataTypeNum); rocksdb::Status s; s = ScanStringsKeyNum(&((*key_infos)[0])); if (!s.ok()) { diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index ff4378367d..53454cec53 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -1828,7 +1828,7 @@ uint64_t Storage::GetProperty(const std::string& property) { Status Storage::GetKeyNum(std::vector* key_infos) { KeyInfo key_info; - key_infos->resize(size_t(DataType::kNones)); + key_infos->resize(DataTypeNum); for (const auto& db : insts_) { std::vector db_key_infos; // check the scanner was stopped or not, before scanning the next db From 1c1c113da2e1ca312f2ca9403e44cb402c63fbad Mon Sep 17 00:00:00 2001 From: chejinge <945997690@qq.com> Date: Thu, 20 Jun 2024 21:26:21 +0800 Subject: [PATCH 18/25] fix:not correct used bgsave_info_ (#2745) * fix:not correct used bgsave_info_ * fix:not correct used bgsave_info_ --------- Co-authored-by: chejinge --- include/pika_server.h | 29 ++++++++++++++--------------- src/pika_server.cc | 8 ++++---- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/include/pika_server.h b/include/pika_server.h index 480ba5c17e..02aaad1bfa 100644 --- a/include/pika_server.h +++ b/include/pika_server.h @@ -310,8 +310,7 @@ class PikaServer : public pstd::noncopyable { bool SlotsMigrateBatch(const std::string &ip, int64_t port, int64_t time_out, int64_t slots, int64_t keys_num, const std::shared_ptr& db); void GetSlotsMgrtSenderStatus(std::string *ip, int64_t* port, int64_t *slot, bool *migrating, int64_t *moved, int64_t *remained); bool SlotsMigrateAsyncCancel(); - std::shared_mutex bgsave_protector_; - BgSaveInfo bgsave_info_; + std::shared_mutex bgslots_protector_; /* * BGSlotsReload used @@ -337,28 +336,28 @@ class PikaServer : public pstd::noncopyable { BGSlotsReload bgslots_reload_; BGSlotsReload bgslots_reload() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); return bgslots_reload_; } bool GetSlotsreloading() { - std::lock_guard ml(bgsave_protector_); + 
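The new `DataTypeNum` constant above pins the count of real data types to the enum itself, which is plausibly what the old hard-coded `resize(5)` got wrong once streams became a sixth type. A compile-time restatement (enum copied from base_value_format.h):

```cpp
#include <cstdint>

enum class DataType : uint8_t { kStrings, kHashes, kSets, kLists, kZSets, kStreams, kNones, kAll };

// kNones is the first non-real entry, so its value is the number of real types.
constexpr int DataTypeNum = static_cast<int>(DataType::kNones);

static_assert(DataTypeNum == 6, "size key-info buffers by the number of real data types");
```

Deriving buffer sizes from the enum means adding a seventh type updates every `resize` call automatically.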
std::lock_guard ml(bgslots_protector_); return bgslots_reload_.reloading; } void SetSlotsreloading(bool reloading) { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_reload_.reloading = reloading; } void SetSlotsreloadingCursor(int64_t cursor) { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_reload_.cursor = cursor; } int64_t GetSlotsreloadingCursor() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); return bgslots_reload_.cursor; } void SetSlotsreloadingEndTime() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_reload_.end_time = time(nullptr); } void Bgslotsreload(const std::shared_ptr& db); @@ -399,33 +398,33 @@ class PikaServer : public pstd::noncopyable { net::BGThread bgslots_cleanup_thread_; BGSlotsCleanup bgslots_cleanup() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); return bgslots_cleanup_; } bool GetSlotscleaningup() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); return bgslots_cleanup_.cleaningup; } void SetSlotscleaningup(bool cleaningup) { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_cleanup_.cleaningup = cleaningup; } void SetSlotscleaningupCursor(int64_t cursor) { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_cleanup_.cursor = cursor; } void SetCleanupSlots(std::vector cleanup_slots) { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_cleanup_.cleanup_slots.swap(cleanup_slots); } std::vector GetCleanupSlots() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); return bgslots_cleanup_.cleanup_slots; } void Bgslotscleanup(std::vector cleanup_slots, const std::shared_ptr& db); void StopBgslotscleanup() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_cleanup_.cleaningup = false; std::vector cleanup_slots; bgslots_cleanup_.cleanup_slots.swap(cleanup_slots); diff --git a/src/pika_server.cc b/src/pika_server.cc index ccdd64499f..450a180012 100644 --- a/src/pika_server.cc +++ b/src/pika_server.cc @@ -1449,8 +1449,8 @@ bool PikaServer::SlotsMigrateAsyncCancel() { void PikaServer::Bgslotsreload(const std::shared_ptr& db) { // Only one thread can go through { - std::lock_guard ml(bgsave_protector_); - if (bgslots_reload_.reloading || bgsave_info_.bgsaving) { + std::lock_guard ml(bgslots_protector_); + if (bgslots_reload_.reloading || db->IsBgSaving()) { return; } bgslots_reload_.reloading = true; @@ -1514,8 +1514,8 @@ void DoBgslotsreload(void* arg) { void PikaServer::Bgslotscleanup(std::vector cleanupSlots, const std::shared_ptr& db) { // Only one thread can go through { - std::lock_guard ml(bgsave_protector_); - if (bgslots_cleanup_.cleaningup || bgslots_reload_.reloading || bgsave_info_.bgsaving) { + std::lock_guard ml(bgslots_protector_); + if (bgslots_cleanup_.cleaningup || bgslots_reload_.reloading || db->IsBgSaving()) { return; } bgslots_cleanup_.cleaningup = true; From 8dea10f4262c0703c71635f3a171c03d12e19495 Mon Sep 17 00:00:00 2001 From: wangshao1 <30471730+wangshao1@users.noreply.github.com> Date: Fri, 21 Jun 2024 16:02:20 +0800 Subject: [PATCH 19/25] fix repleat get meta from rocksdb in ttl/persist/expire/expireat api (#2744) Co-authored-by: wangshaoyi --- src/storage/src/redis.h | 72 +++---- src/storage/src/redis_hashes.cc | 241 
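The rename above is more than cosmetic: slot reload/cleanup bookkeeping now has its own `bgslots_protector_`, and "is a bgsave running?" is asked of the DB itself (`db->IsBgSaving()`) rather than read from `bgsave_info_` under an unrelated lock. A stripped-down sketch of the resulting guard, with illustrative names:

```cpp
#include <mutex>
#include <shared_mutex>

// Slot-task state guarded by its own mutex, decoupled from bgsave state.
struct SlotTasks {
  std::shared_mutex bgslots_protector_;
  bool reloading = false;
  bool cleaningup = false;

  // Only one slot task may run, and never concurrently with a bgsave.
  bool TryStartReload(bool db_is_bgsaving) {
    std::lock_guard<std::shared_mutex> g(bgslots_protector_);
    if (reloading || cleaningup || db_is_bgsaving) return false;
    reloading = true;
    return true;
  }
};
```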
++++++++++++++------- src/storage/src/redis_lists.cc | 240 +++++++++++++++------ src/storage/src/redis_sets.cc | 276 +++++++++++++++++------- src/storage/src/redis_streams.cc | 56 +++-- src/storage/src/redis_strings.cc | 356 +++++++++++++++++++------------ src/storage/src/redis_zsets.cc | 276 +++++++++++++++++------- src/storage/src/storage.cc | 5 +- 8 files changed, 1026 insertions(+), 496 deletions(-) diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h index ad8906ba0c..d818fc3e71 100644 --- a/src/storage/src/redis.h +++ b/src/storage/src/redis.h @@ -117,36 +117,36 @@ class Redis { Status ScanStreamsKeyNum(KeyInfo* key_info); // Keys Commands - virtual Status StringsExpire(const Slice& key, int64_t ttl); - virtual Status HashesExpire(const Slice& key, int64_t ttl); - virtual Status ListsExpire(const Slice& key, int64_t ttl); - virtual Status ZsetsExpire(const Slice& key, int64_t ttl); - virtual Status SetsExpire(const Slice& key, int64_t ttl); - - virtual Status StringsDel(const Slice& key); - virtual Status HashesDel(const Slice& key); - virtual Status ListsDel(const Slice& key); - virtual Status ZsetsDel(const Slice& key); - virtual Status SetsDel(const Slice& key); - virtual Status StreamsDel(const Slice& key); - - virtual Status StringsExpireat(const Slice& key, int64_t timestamp); - virtual Status HashesExpireat(const Slice& key, int64_t timestamp); - virtual Status ListsExpireat(const Slice& key, int64_t timestamp); - virtual Status SetsExpireat(const Slice& key, int64_t timestamp); - virtual Status ZsetsExpireat(const Slice& key, int64_t timestamp); - - virtual Status StringsPersist(const Slice& key); - virtual Status HashesPersist(const Slice& key); - virtual Status ListsPersist(const Slice& key); - virtual Status ZsetsPersist(const Slice& key); - virtual Status SetsPersist(const Slice& key); - - virtual Status StringsTTL(const Slice& key, int64_t* timestamp); - virtual Status HashesTTL(const Slice& key, int64_t* timestamp); - virtual Status ListsTTL(const Slice& key, int64_t* timestamp); - virtual Status ZsetsTTL(const Slice& key, int64_t* timestamp); - virtual Status SetsTTL(const Slice& key, int64_t* timestamp); + virtual Status StringsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta = {}); + virtual Status HashesExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta = {}); + virtual Status ListsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta = {}); + virtual Status ZsetsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta = {}); + virtual Status SetsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta = {}); + + virtual Status StringsDel(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status HashesDel(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status ListsDel(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status ZsetsDel(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status SetsDel(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status StreamsDel(const Slice& key, std::string&& prefetch_meta = {}); + + virtual Status StringsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta = {}); + virtual Status HashesExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta = {}); + virtual Status ListsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta = {}); + virtual Status SetsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta = {}); + 
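The `prefetch_meta` parameter threaded through these signatures lets a caller that has already read the meta value hand it over (moved), so the TTL/Expire/Persist paths no longer pay a second RocksDB point lookup for the same key. A self-contained sketch of the convention; `FakeDB` and the "decoding" are stand-ins, not the real storage layer:

```cpp
#include <cstdint>
#include <map>
#include <string>
#include <utility>

struct FakeDB {
  std::map<std::string, std::string> meta;
  bool Get(const std::string& key, std::string* out) const {  // one point lookup
    auto it = meta.find(key);
    if (it == meta.end()) return false;
    *out = it->second;
    return true;
  }
};

// Callers with a prefetched meta value move it in; the lookup only happens
// when the string is empty, matching the pattern in the hunks above.
bool HashesTTLLike(const FakeDB& db, const std::string& key, int64_t* ttl,
                   std::string&& prefetch_meta = {}) {
  std::string meta_value(std::move(prefetch_meta));
  if (meta_value.empty() && !db.Get(key, &meta_value)) {
    return false;  // key does not exist
  }
  *ttl = static_cast<int64_t>(meta_value.size());  // stand-in for decoding the stored TTL
  return true;
}
```

An empty string doubles as the "not fetched yet" marker, which works because a present meta value is never empty.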
virtual Status ZsetsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta = {}); + + virtual Status StringsPersist(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status HashesPersist(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status ListsPersist(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status ZsetsPersist(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status SetsPersist(const Slice& key, std::string&& prefetch_meta = {}); + + virtual Status StringsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta = {}); + virtual Status HashesTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta = {}); + virtual Status ListsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta = {}); + virtual Status ZsetsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta = {}); + virtual Status SetsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta = {}); // Strings Commands Status Append(const Slice& key, const Slice& value, int32_t* ret); @@ -200,7 +200,7 @@ class Redis { Status HIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret); Status HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by, std::string* new_value); Status HKeys(const Slice& key, std::vector* fields); - Status HLen(const Slice& key, int32_t* ret); + Status HLen(const Slice& key, int32_t* ret, std::string&& prefetch_meta = {}); Status HMGet(const Slice& key, const std::vector& fields, std::vector* vss); Status HMSet(const Slice& key, const std::vector& fvs); Status HSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res); @@ -246,7 +246,7 @@ class Redis { // Sets Commands Status SAdd(const Slice& key, const std::vector& members, int32_t* ret); - Status SCard(const Slice& key, int32_t* ret); + Status SCard(const Slice& key, int32_t* ret, std::string&& prefetch_meta = {}); Status SDiff(const std::vector& keys, std::vector* members); Status SDiffstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); Status SInter(const std::vector& keys, std::vector* members); @@ -269,7 +269,7 @@ class Redis { Status LIndex(const Slice& key, int64_t index, std::string* element); Status LInsert(const Slice& key, const BeforeOrAfter& before_or_after, const std::string& pivot, const std::string& value, int64_t* ret); - Status LLen(const Slice& key, uint64_t* len); + Status LLen(const Slice& key, uint64_t* len, std::string&& prefetch_meta = {}); Status LPop(const Slice& key, int64_t count, std::vector* elements); Status LPush(const Slice& key, const std::vector& values, uint64_t* ret); Status LPushx(const Slice& key, const std::vector& values, uint64_t* len); @@ -285,7 +285,7 @@ class Redis { // Zsets Commands Status ZAdd(const Slice& key, const std::vector& score_members, int32_t* ret); - Status ZCard(const Slice& key, int32_t* card); + Status ZCard(const Slice& key, int32_t* card, std::string&& prefetch_meta = {}); Status ZCount(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret); Status ZIncrby(const Slice& key, const Slice& member, double increment, double* ret); Status ZRange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members); @@ -323,7 +323,7 @@ class Redis { Status XAdd(const Slice& key, const std::string& serialized_message, StreamAddTrimArgs& args); Status XDel(const Slice& key, const std::vector& ids, int32_t& count); Status 
XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& count); - Status XRange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages); + Status XRange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages, std::string&& prefetch_meta = {}); Status XRevrange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages); Status XLen(const Slice& key, int32_t& len); Status XRead(const StreamReadGroupReadArgs& args, std::vector>& results, @@ -333,7 +333,7 @@ class Redis { rocksdb::ReadOptions& read_options); // get and parse the stream meta if found // @return ok only when the stream meta exists - Status GetStreamMeta(StreamMetaValue& tream_meta, const rocksdb::Slice& key, rocksdb::ReadOptions& read_options); + Status GetStreamMeta(StreamMetaValue& tream_meta, const rocksdb::Slice& key, rocksdb::ReadOptions& read_options, std::string&& prefetch_meta = {}); // Before calling this function, the caller should ensure that the ids are valid Status DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta, diff --git a/src/storage/src/redis_hashes.cc b/src/storage/src/redis_hashes.cc index e256757e43..03a3c1c9b8 100644 --- a/src/storage/src/redis_hashes.cc +++ b/src/storage/src/redis_hashes.cc @@ -88,7 +88,10 @@ Status Redis::HDel(const Slice& key, const std::vector& fields, int if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -149,7 +152,10 @@ Status Redis::HGet(const Slice& key, const Slice& field, std::string* value) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -186,7 +192,10 @@ Status Redis::HGetall(const Slice& key, std::vector* fvs) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -226,7 +235,10 @@ Status Redis::HGetallWithTTL(const Slice& key, std::vector* fvs, int if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + 
key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -281,7 +293,10 @@ Status Redis::HIncrby(const Slice& key, const Slice& field, int64_t value, int64 if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -370,7 +385,10 @@ Status Redis::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -445,14 +463,16 @@ Status Redis::HKeys(const Slice& key, std::vector* fields) { ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - BaseMetaKey base_meta_key(key); Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -477,17 +497,25 @@ Status Redis::HKeys(const Slice& key, std::vector* fields) { return s; } -Status Redis::HLen(const Slice& key, int32_t* ret) { +Status Redis::HLen(const Slice& key, int32_t* ret, std::string&& prefetch_meta) { *ret = 0; - std::string meta_value; - - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + std::string meta_value(std::move(prefetch_meta)); + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + BaseMetaKey base_meta_key(key); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return 
Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -523,7 +551,10 @@ Status Redis::HMGet(const Slice& key, const std::vector& fields, st if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -584,7 +615,10 @@ Status Redis::HMSet(const Slice& key, const std::vector& fvs) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -656,7 +690,10 @@ Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -728,7 +765,10 @@ Status Redis::HSetnx(const Slice& key, const Slice& field, const Slice& value, i if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -788,7 +828,10 @@ Status Redis::HVals(const Slice& key, std::vector* values) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -848,7 +891,10 @@ Status Redis::HScan(const Slice& key, int64_t cursor, const std::string& pattern if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " 
+ key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -922,7 +968,10 @@ Status Redis::HScanx(const Slice& key, const std::string& start_field, const std if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -983,14 +1032,16 @@ Status Redis::PKHScanRange(const Slice& key, const Slice& field_start, const std return Status::InvalidArgument("error in given range"); } - BaseMetaKey base_meta_key(key); Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1052,14 +1103,16 @@ Status Redis::PKHRScanRange(const Slice& key, const Slice& field_start, const st return Status::InvalidArgument("error in given range"); } - BaseMetaKey base_meta_key(key); Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1103,17 +1156,25 @@ Status Redis::PKHRScanRange(const Slice& key, const Slice& field_start, const st return Status::OK(); } -Status Redis::HashesExpire(const Slice& key, int64_t ttl) { - std::string meta_value; +Status Redis::HashesExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect 
type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1135,17 +1196,25 @@ Status Redis::HashesExpire(const Slice& key, int64_t ttl) { return s; } -Status Redis::HashesDel(const Slice& key) { - std::string meta_value; +Status Redis::HashesDel(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1164,17 +1233,25 @@ Status Redis::HashesDel(const Slice& key) { return s; } -Status Redis::HashesExpireat(const Slice& key, int64_t timestamp) { - std::string meta_value; +Status Redis::HashesExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", 
get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1195,17 +1272,25 @@ Status Redis::HashesExpireat(const Slice& key, int64_t timestamp) { return s; } -Status Redis::HashesPersist(const Slice& key) { - std::string meta_value; +Status Redis::HashesPersist(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1227,16 +1312,24 @@ Status Redis::HashesPersist(const Slice& key) { return s; } -Status Redis::HashesTTL(const Slice& key, int64_t* timestamp) { - std::string meta_value; - +Status Redis::HashesTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + Status s; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { diff --git a/src/storage/src/redis_lists.cc b/src/storage/src/redis_lists.cc index 1998a76d23..db007ee2cf 100644 --- a/src/storage/src/redis_lists.cc +++ b/src/storage/src/redis_lists.cc @@ -72,7 +72,10 @@ Status Redis::LIndex(const Slice& key, int64_t index, std::string* element) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return 
Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -113,7 +116,10 @@ Status Redis::LInsert(const Slice& key, const BeforeOrAfter& before_or_after, co if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -211,17 +217,25 @@ Status Redis::LInsert(const Slice& key, const BeforeOrAfter& before_or_after, co return s; } -Status Redis::LLen(const Slice& key, uint64_t* len) { +Status Redis::LLen(const Slice& key, uint64_t* len, std::string&& prefetch_meta) { *len = 0; - std::string meta_value; + Status s; - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + // meta_value is empty means no meta value get before, + // we should get meta first + std::string meta_value(std::move(prefetch_meta)); + if (meta_value.empty()) { + BaseMetaKey base_meta_key(key); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -253,7 +267,10 @@ Status Redis::LPop(const Slice& key, int64_t count, std::vector* el if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -308,7 +325,10 @@ Status Redis::LPush(const Slice& key, const std::vector& values, ui if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if 
   if (s.ok()) {
@@ -361,7 +381,10 @@ Status Redis::LPushx(const Slice& key, const std::vector<std::string>& values, u
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kLists)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -402,7 +425,10 @@ Status Redis::LRange(const Slice& key, int64_t start, int64_t stop, std::vector<
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kLists)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -459,7 +485,10 @@ Status Redis::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kLists)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -526,7 +555,10 @@ Status Redis::LRem(const Slice& key, int64_t count, const Slice& value, uint64_t
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kLists)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -655,7 +687,10 @@ Status Redis::LSet(const Slice& key, int64_t index, const Slice& value) {
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kLists)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -696,7 +731,10 @@ Status Redis::LTrim(const Slice& key, int64_t start, int64_t stop) {
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -766,7 +804,10 @@ Status Redis::RPop(const Slice& key, int64_t count, std::vector* el if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -820,7 +861,10 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + destination.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -872,7 +916,10 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri if (ExpectedStale(source_meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + source.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(source_meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + source.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(source_meta_value))]); } } if (s.ok()) { @@ -907,7 +954,10 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri if (ExpectedStale(destination_meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + destination.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(destination_meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(destination_meta_value))]); } } if (s.ok()) { @@ -961,7 +1011,10 @@ Status Redis::RPush(const Slice& key, const std::vector& values, ui if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1014,7 +1067,10 @@ Status Redis::RPushx(const Slice& key, const std::vector& values, u if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - 
return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1041,17 +1097,25 @@ Status Redis::RPushx(const Slice& key, const std::vector& values, u return s; } -Status Redis::ListsExpire(const Slice& key, int64_t ttl) { - std::string meta_value; +Status Redis::ListsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1073,17 +1137,25 @@ Status Redis::ListsExpire(const Slice& key, int64_t ttl) { return s; } -Status Redis::ListsDel(const Slice& key) { - std::string meta_value; +Status Redis::ListsDel(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1102,17 +1174,25 @@ Status Redis::ListsDel(const Slice& key) { return s; } -Status Redis::ListsExpireat(const Slice& key, int64_t timestamp) { - std::string meta_value; +Status 
@@ -1102,17 +1174,25 @@ Status Redis::ListsDel(const Slice& key) {
   return s;
 }
 
-Status Redis::ListsExpireat(const Slice& key, int64_t timestamp) {
-  std::string meta_value;
+Status Redis::ListsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
   ScopeRecordLock l(lock_mgr_, key);
-
   BaseMetaKey base_meta_key(key);
-  Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
-  if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) {
-    if (ExpectedStale(meta_value)) {
-      s = Status::NotFound();
-    } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+  Status s;
+
+  // meta_value is empty means no meta value was fetched before,
+  // so we should fetch the meta first
+  if (meta_value.empty()) {
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+            DataTypeStrings[static_cast<int>(DataType::kLists)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
     }
   }
   if (s.ok()) {
@@ -1133,16 +1213,25 @@ Status Redis::ListsExpireat(const Slice& key, int64_t timestamp) {
   return s;
 }
 
-Status Redis::ListsPersist(const Slice& key) {
-  std::string meta_value;
+Status Redis::ListsPersist(const Slice& key, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
   ScopeRecordLock l(lock_mgr_, key);
   BaseMetaKey base_meta_key(key);
-  Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
-  if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) {
-    if (ExpectedStale(meta_value)) {
-      s = Status::NotFound();
-    } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+  Status s;
+
+  // meta_value is empty means no meta value was fetched before,
+  // so we should fetch the meta first
+  if (meta_value.empty()) {
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+            DataTypeStrings[static_cast<int>(DataType::kLists)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
     }
   }
   if (s.ok()) {
@@ -1152,8 +1241,8 @@ Status Redis::ListsPersist(const Slice& key) {
     } else if (parsed_lists_meta_value.Count() == 0) {
       return Status::NotFound();
     } else {
-      uint64_t timestamp = parsed_lists_meta_value.Etime();
-      if (timestamp == 0) {
+      // Check if the list has a set expiration time before attempting to persist
+      if (parsed_lists_meta_value.Etime() == 0) {
         return Status::NotFound("Not have an associated timeout");
       } else {
         parsed_lists_meta_value.SetEtime(0);
@@ -1164,16 +1253,24 @@ Status Redis::ListsPersist(const Slice& key) {
   return s;
 }
 
-Status Redis::ListsTTL(const Slice& key, int64_t* timestamp) {
-  std::string meta_value;
-
+Status Redis::ListsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
   BaseMetaKey base_meta_key(key);
-  Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
-  if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) {
-    if (ExpectedStale(meta_value)) {
-      s = Status::NotFound();
-    } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+  Status s;
+
+  // meta_value is empty means no meta value was fetched before,
+  // so we should fetch the meta first
+  if (meta_value.empty()) {
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+            DataTypeStrings[static_cast<int>(DataType::kLists)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
     }
   }
   if (s.ok()) {
@@ -1185,6 +1282,7 @@ Status Redis::ListsTTL(const Slice& key, int64_t* timestamp) {
       *timestamp = -2;
       return Status::NotFound();
     } else {
+      // Return -1 for lists with no set expiration, and calculate remaining time for others
       *timestamp = parsed_lists_meta_value.Etime();
       if (*timestamp == 0) {
         *timestamp = -1;
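
The -2/-1 replies produced by ListsTTL above follow the usual Redis TTL convention: -2 when the key does not exist (or is stale), -1 when it exists without an expiration, otherwise the remaining lifetime derived from the stored absolute expire-at time. A small sketch of that mapping (TtlFromEtime is a hypothetical helper; Etime() is assumed to be absolute seconds with 0 meaning "never expires"):

    #include <cstdint>
    #include <ctime>

    int64_t TtlFromEtime(bool key_exists, int64_t etime) {
      if (!key_exists) return -2;  // missing or stale key
      if (etime == 0) return -1;   // no expiration was ever set
      int64_t now = static_cast<int64_t>(std::time(nullptr));
      return etime - now;          // remaining seconds; <= 0 if already expired
    }
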
diff --git a/src/storage/src/redis_sets.cc b/src/storage/src/redis_sets.cc
index 9fc400d039..db5044b440 100644
--- a/src/storage/src/redis_sets.cc
+++ b/src/storage/src/redis_sets.cc
@@ -83,7 +83,10 @@ rocksdb::Status Redis::SAdd(const Slice& key, const std::vector<std::string>& me
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -146,19 +149,25 @@ rocksdb::Status Redis::SAdd(const Slice& key, const std::vector<std::string>& me
   return db_->Write(default_write_options_, &batch);
 }
 
-rocksdb::Status Redis::SCard(const Slice& key, int32_t* ret) {
+rocksdb::Status Redis::SCard(const Slice& key, int32_t* ret, std::string&& meta) {
   *ret = 0;
-  std::string meta_value;
-
-  BaseMetaKey base_meta_key(key);
-  rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
-  if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
-    if (ExpectedStale(meta_value)) {
-      s = Status::NotFound();
-    } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+  std::string meta_value(std::move(meta));
+  rocksdb::Status s;
+  if (meta_value.empty()) {
+    BaseMetaKey base_meta_key(key);
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+            DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
     }
   }
+
   if (s.ok()) {
@@ -195,7 +204,10 @@ rocksdb::Status Redis::SDiff(const std::vector<std::string>& keys, std::vector<s
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + keys[idx] + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + keys[idx] + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -214,7 +226,10 @@ rocksdb::Status Redis::SDiff(const std::vector<std::string>& keys, std::vector<s
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + keys[0] + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + keys[0] + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -280,7 +295,10 @@ rocksdb::Status Redis::SDiffstore(const Slice& destination, const std::vector<st
     if (ExpectedStale(meta_value)) {
      s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + keys[idx] + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + keys[idx] + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -300,7 +318,10 @@ rocksdb::Status Redis::SDiffstore(const Slice& destination, const std::vector<st
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + keys[0] + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + keys[0] + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -346,7 +367,10 @@ rocksdb::Status Redis::SDiffstore(const Slice& destination, const std::vector<st
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + destination.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + destination.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -401,7 +425,10 @@ rocksdb::Status Redis::SInter(const std::vector<std::string>& keys, std::vector<
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + keys[idx] + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + keys[idx] + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -424,7 +451,10 @@ rocksdb::Status Redis::SInter(const std::vector<std::string>& keys, std::vector<
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + keys[0] + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + keys[0] + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -496,7 +526,10 @@ rocksdb::Status Redis::SInterstore(const Slice& destination, const std::vector<s
     if (ExpectedStale(meta_value)) {
      s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + keys[idx] + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + keys[idx] + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -523,7 +556,10 @@ rocksdb::Status Redis::SInterstore(const Slice& destination, const std::vector<s
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + keys[0] + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + keys[0] + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -575,7 +611,10 @@ rocksdb::Status Redis::SInterstore(const Slice& destination, const std::vector<s
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + destination.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + destination.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -624,7 +663,10 @@ rocksdb::Status Redis::SIsmember(const Slice& key, const Slice& member, int32_t*
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -661,14 +703,20 @@ rocksdb::Status Redis::SMembers(const Slice& key, std::vector<std::string>* memb
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
     }
   }
   if (s.ok()) {
@@ -709,7 +757,10 @@ Status Redis::SMembersWithTTL(const Slice& key,
     if (ExpectedStale(meta_value)) {
       s = Status::NotFound();
     } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -768,7 +819,10 @@ rocksdb::Status Redis::SMove(const Slice& source, const Slice& destination, cons if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + source.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + source.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -811,7 +865,10 @@ rocksdb::Status Redis::SMove(const Slice& source, const Slice& destination, cons if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + destination.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -872,7 +929,10 @@ rocksdb::Status Redis::SPop(const Slice& key, std::vector* members, if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -986,7 +1046,10 @@ rocksdb::Status Redis::SRandmember(const Slice& key, int32_t count, std::vector< if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1057,7 +1120,10 @@ rocksdb::Status Redis::SRem(const Slice& key, const std::vector& me if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1121,7 +1187,10 @@ rocksdb::Status Redis::SUnion(const std::vector& keys, std::vector< if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + 
"get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1178,7 +1247,10 @@ rocksdb::Status Redis::SUnionstore(const Slice& destination, const std::vector(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1217,7 +1289,10 @@ rocksdb::Status Redis::SUnionstore(const Slice& destination, const std::vector(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1274,7 +1349,10 @@ rocksdb::Status Redis::SScan(const Slice& key, int64_t cursor, const std::string if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1329,17 +1407,25 @@ rocksdb::Status Redis::SScan(const Slice& key, int64_t cursor, const std::string return rocksdb::Status::OK(); } -rocksdb::Status Redis::SetsExpire(const Slice& key, int64_t ttl) { - std::string meta_value; +rocksdb::Status Redis::SetsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + rocksdb::Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1361,17 +1447,25 @@ rocksdb::Status Redis::SetsExpire(const Slice& key, int64_t ttl) { return s; } -rocksdb::Status Redis::SetsDel(const Slice& key) { - std::string meta_value; +rocksdb::Status 
-rocksdb::Status Redis::SetsDel(const Slice& key) {
-  std::string meta_value;
+rocksdb::Status Redis::SetsDel(const Slice& key, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
   ScopeRecordLock l(lock_mgr_, key);
-
+  rocksdb::Status s;
   BaseMetaKey base_meta_key(key);
-  rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
-  if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
-    if (ExpectedStale(meta_value)) {
-      s = Status::NotFound();
-    } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+
+  // meta_value is empty means no meta value was fetched before,
+  // so we should fetch the meta first
+  if (meta_value.empty()) {
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+            DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
     }
   }
   if (s.ok()) {
@@ -1390,17 +1484,25 @@ rocksdb::Status Redis::SetsDel(const Slice& key) {
   return s;
 }
 
-rocksdb::Status Redis::SetsExpireat(const Slice& key, int64_t timestamp) {
-  std::string meta_value;
+rocksdb::Status Redis::SetsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
   ScopeRecordLock l(lock_mgr_, key);
-
   BaseMetaKey base_meta_key(key);
-  rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
-  if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
-    if (ExpectedStale(meta_value)) {
-      s = Status::NotFound();
-    } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+  Status s;
+
+  // meta_value is empty means no meta value was fetched before,
+  // so we should fetch the meta first
+  if (meta_value.empty()) {
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+            DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
     }
   }
   if (s.ok()) {
@@ -1421,17 +1523,25 @@ rocksdb::Status Redis::SetsExpireat(const Slice& key, int64_t timestamp) {
   return s;
 }
 
-rocksdb::Status Redis::SetsPersist(const Slice& key) {
-  std::string meta_value;
+rocksdb::Status Redis::SetsPersist(const Slice& key, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
   ScopeRecordLock l(lock_mgr_, key);
-
   BaseMetaKey base_meta_key(key);
-  rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
-  if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
-    if (ExpectedStale(meta_value)) {
-      s = Status::NotFound();
-    } else {
DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + rocksdb::Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1453,16 +1563,24 @@ rocksdb::Status Redis::SetsPersist(const Slice& key) { return s; } -rocksdb::Status Redis::SetsTTL(const Slice& key, int64_t* timestamp) { - std::string meta_value; - +rocksdb::Status Redis::SetsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + rocksdb::Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { diff --git a/src/storage/src/redis_streams.cc b/src/storage/src/redis_streams.cc index 47942244c5..606fb99c05 100644 --- a/src/storage/src/redis_streams.cc +++ b/src/storage/src/redis_streams.cc @@ -171,11 +171,11 @@ Status Redis::XDel(const Slice& key, const std::vector& ids, int32_t& return s; } } - + return db_->Put(default_write_options_, handles_[kMetaCF], BaseMetaKey(key).Encode(), stream_meta.value()); } -Status Redis::XRange(const Slice& key, const StreamScanArgs& args, std::vector& field_values) { +Status Redis::XRange(const Slice& key, const StreamScanArgs& args, std::vector& field_values, std::string&& prefetch_meta) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -184,7 +184,7 @@ Status Redis::XRange(const Slice& key, const StreamScanArgs& args, std::vectorGet(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kStreams, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStreams)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // value is empty means no meta value 
diff --git a/src/storage/src/redis_streams.cc b/src/storage/src/redis_streams.cc
index 47942244c5..606fb99c05 100644
--- a/src/storage/src/redis_streams.cc
+++ b/src/storage/src/redis_streams.cc
@@ -171,11 +171,11 @@ Status Redis::XDel(const Slice& key, const std::vector<streamID>& ids, int32_t&
       return s;
     }
   }
-  
+
   return db_->Put(default_write_options_, handles_[kMetaCF], BaseMetaKey(key).Encode(), stream_meta.value());
 }
 
-Status Redis::XRange(const Slice& key, const StreamScanArgs& args, std::vector<FieldValue>& field_values) {
+Status Redis::XRange(const Slice& key, const StreamScanArgs& args, std::vector<FieldValue>& field_values, std::string&& prefetch_meta) {
   rocksdb::ReadOptions read_options;
   const rocksdb::Snapshot* snapshot;
   ScopeSnapshot ss(db_, &snapshot);
   read_options.snapshot = snapshot;
@@ -184,7 +184,7 @@ Status Redis::XRange(const Slice& key, const StreamScanArgs& args, std::vector<F
   BaseMetaKey base_meta_key(key);
-  Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
-  if (s.ok() && !ExpectedMetaValue(DataType::kStreams, meta_value)) {
-    if (ExpectedStale(meta_value)) {
-      s = Status::NotFound();
-    } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kStreams)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+  Status s;
+
+  // value is empty means no meta value was fetched before,
+  // so we should fetch the meta first
+  if (meta_value.empty()) {
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kStreams, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+            DataTypeStrings[static_cast<int>(DataType::kStreams)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
     }
   }
   if (s.ok()) {
@@ -387,15 +396,24 @@ Status Redis::StreamsDel(const Slice& key) {
 }
 
 Status Redis::GetStreamMeta(StreamMetaValue& stream_meta, const rocksdb::Slice& key,
-                            rocksdb::ReadOptions& read_options) {
-  std::string value;
+                            rocksdb::ReadOptions& read_options, std::string&& prefetch_meta) {
+  std::string value(std::move(prefetch_meta));
   BaseMetaKey base_meta_key(key);
-  auto s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &value);
-  if (s.ok() && !ExpectedMetaValue(DataType::kStreams, value)) {
-    if (ExpectedStale(value)) {
-      s = Status::NotFound();
-    } else {
-      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast<int>(DataType::kStreams)] + "get type: " + DataTypeStrings[static_cast<int>(GetMetaValueType(value))]);
+  Status s;
+
+  // value is empty means no meta value was fetched before,
+  // so we should fetch the meta first
+  if (value.empty()) {
+    s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kStreams, value)) {
+      if (ExpectedStale(value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+            DataTypeStrings[static_cast<int>(DataType::kStreams)] + ", got type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(value))]);
+      }
     }
   }
   if (s.ok()) {
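
Because GetStreamMeta above now also takes the meta value by rvalue reference, a caller that already read and type-checked the meta can forward it and skip the second read; passing an empty string keeps the old behavior. A hypothetical call-site sketch (everything except the GetStreamMeta signature is illustrative):

    // meta was filled by an earlier db_->Get against the meta column family.
    std::string meta = previously_read_meta;
    StreamMetaValue stream_meta;
    // Forwarding the prefetched value: GetStreamMeta skips its own Get.
    Status s = GetStreamMeta(stream_meta, key, read_options, std::move(meta));
    // With no prefetch, the read happens inside GetStreamMeta instead:
    Status s2 = GetStreamMeta(stream_meta, key, read_options, std::string());
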
Status::InvalidArgument("WRONGTYPE, key: " + dest_key + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + dest_key + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -273,7 +282,10 @@ Status Redis::Decrby(const Slice& key, int64_t value, int64_t* ret) { if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -321,7 +333,10 @@ Status Redis::Get(const Slice& key, std::string* value) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -390,7 +405,10 @@ Status Redis::GetWithTTL(const Slice& key, std::string* value, int64_t* ttl) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + " get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + " get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } @@ -435,7 +453,10 @@ Status Redis::GetBit(const Slice& key, int64_t offset, int32_t* ret) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -470,7 +491,10 @@ Status Redis::Getrange(const Slice& key, int64_t start_offset, int64_t end_offse if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -512,7 +536,10 @@ Status 
Redis::GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -571,7 +598,10 @@ Status Redis::GetSet(const Slice& key, const Slice& value, std::string* old_valu if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -600,7 +630,10 @@ Status Redis::Incrby(const Slice& key, int64_t value, int64_t* ret) { if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -652,7 +685,10 @@ Status Redis::Incrbyfloat(const Slice& key, const Slice& value, std::string* ret if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -714,20 +750,14 @@ Status Redis::MSetnx(const std::vector& kvs, int32_t* ret) { for (const auto & kv : kvs) { BaseKey base_key(kv.key); s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + kv.key + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); - } + if (!s.ok() && !s.IsNotFound()) { + return s; } - if (s.ok()) { - ParsedStringsValue parsed_strings_value(&value); - if (!parsed_strings_value.IsStale()) { - exists = true; - break; - } + if (s.ok() && !ExpectedStale(value)) { + exists = true; + break; } + // when we reach here, s is either NotFound, or ok but the value has expired } if (!exists) { s = MSet(kvs);
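Editorial note on the MSetnx hunk above: the rewritten probe treats a stale (expired but not yet compacted) value the same as a missing key, so only a live value can make MSETNX a no-op. A self-contained sketch of that decision loop, with a hypothetical probe callback standing in for db_->Get plus ExpectedStale (not the storage API):

    #include <functional>
    #include <string>
    #include <vector>

    // kStale behaves like kMissing: an expired value can linger in RocksDB
    // until compaction physically removes it, so it must not block the set.
    enum class ProbeResult { kMissing, kStale, kLive };

    bool AnyKeyAlive(const std::vector<std::string>& keys,
                     const std::function<ProbeResult(const std::string&)>& probe) {
      for (const auto& k : keys) {
        if (probe(k) == ProbeResult::kLive) {
          return true;  // one live key is enough to veto the whole MSETNX
        }
      }
      return false;
    }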
Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -795,7 +828,10 @@ Status Redis::SetBit(const Slice& key, int64_t offset, int32_t on, int32_t* ret) if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok() || s.IsNotFound()) { @@ -860,34 +896,22 @@ Status Redis::Setnx(const Slice& key, const Slice& value, int32_t* ret, int64_t BaseKey base_key(key); ScopeRecordLock l(lock_mgr_, key); Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, old_value)) { - if (ExpectedStale(old_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); - } + if (!s.ok() && !s.IsNotFound()) { + return s; + } + if (s.ok() && !ExpectedStale(old_value)) { + return s; + } + // when reaches here, either s is not found or s is ok but expired + s = Status::NotFound(); + + StringsValue strings_value(value); + if (ttl > 0) { + strings_value.SetRelativeTimestamp(ttl); } + s = db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); if (s.ok()) { - ParsedStringsValue parsed_strings_value(&old_value); - if (parsed_strings_value.IsStale()) { - StringsValue strings_value(value); - if (ttl > 0) { - strings_value.SetRelativeTimestamp(ttl); - } - s = db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); - if (s.ok()) { - *ret = 1; - } - } - } else if (s.IsNotFound()) { - StringsValue strings_value(value); - if (ttl > 0) { - strings_value.SetRelativeTimestamp(ttl); - } - s = db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); - if (s.ok()) { - *ret = 1; - } + *ret = 1; } return s; } @@ -904,7 +928,10 @@ Status Redis::Setvx(const Slice& key, const Slice& value, const Slice& new_value if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -945,7 +972,10 @@ Status Redis::Delvx(const Slice& key, const Slice& value, int32_t* ret) { if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", 
expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -981,7 +1011,10 @@ Status Redis::Setrange(const Slice& key, int64_t start_offset, const Slice& valu if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -1087,7 +1120,10 @@ Status Redis::BitPos(const Slice& key, int32_t bit, int64_t* ret) { if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -1131,7 +1167,10 @@ Status Redis::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_ if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -1188,7 +1227,10 @@ Status Redis::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_ if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -1254,17 +1296,25 @@ Status Redis::PKSetexAt(const Slice& key, const Slice& value, int64_t timestamp) return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } -Status Redis::StringsExpire(const Slice& key, int64_t ttl) { - std::string value; +Status Redis::StringsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); BaseKey base_key(key); ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return 
Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + Status s; + // value is empty means no meta value get before, + // we should get meta first + if (value.empty()) { + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } } } if (s.ok()) { @@ -1282,17 +1332,25 @@ Status Redis::StringsExpire(const Slice& key, int64_t ttl) { return s; } -Status Redis::StringsDel(const Slice& key) { - std::string value; +Status Redis::StringsDel(const Slice& key, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseKey base_key(key); - Status s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + Status s; + + // value is empty means no meta value get before, + // we should get meta first + if (value.empty()) { + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } } } if (s.ok()) { @@ -1305,17 +1363,25 @@ Status Redis::StringsDel(const Slice& key) { return s; } -Status Redis::StringsExpireat(const Slice& key, int64_t timestamp) { - std::string value; +Status Redis::StringsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseKey base_key(key); - Status s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + Status s; + + // value is empty means no meta value get before, + // we should get meta first + if (value.empty()) { + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } } } if (s.ok()) { @@ -1334,17 +1400,25 @@ Status Redis::StringsExpireat(const Slice& key, int64_t 
@@ -1334,17 +1400,25 @@ Status Redis::StringsExpireat(const Slice& key, int64_t timestamp) { return s; } -Status Redis::StringsPersist(const Slice& key) { - std::string value; +Status Redis::StringsPersist(const Slice& key, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); BaseKey base_key(key); - Status s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + Status s; + + // value is empty means no meta value was fetched before, + // so we should get the meta first + if (value.empty()) { + s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } } } if (s.ok()) { @@ -1364,17 +1438,25 @@ Status Redis::StringsPersist(const Slice& key) { return s; } -Status Redis::StringsTTL(const Slice& key, int64_t* timestamp) { - std::string value; +Status Redis::StringsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); BaseKey base_key(key); - Status s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + Status s; + + // value is empty means no meta value was fetched before, + // so we should get the meta first + if (value.empty()) { + s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } } } if (s.ok()) { @@ -1430,7 +1512,6 @@ void Redis::ScanStrings() { rocksdb::Status Redis::Exists(const Slice& key) { std::string meta_value; uint64_t llen = 0; - std::string value; int32_t ret = 0; BaseMetaKey base_meta_key(key); std::vector id_messages; @@ -1442,17 +1523,17 @@ rocksdb::Status Redis::Exists(const Slice& key) { auto type = static_cast(static_cast(meta_value[0])); switch (type) { case DataType::kSets: - return SCard(key, &ret); + return SCard(key, &ret, std::move(meta_value)); case DataType::kZSets: - return ZCard(key, &ret); + return ZCard(key, &ret, std::move(meta_value)); case DataType::kHashes: - return HLen(key, &ret); + return HLen(key, &ret, std::move(meta_value)); case DataType::kLists: - return LLen(key, &llen); + return LLen(key, &llen, std::move(meta_value)); - case DataType::kStrings: - return Get(key, &value); case DataType::kStreams: - return XRange(key, arg, id_messages,
std::move(meta_value)); + case DataType::kStrings: + return ExpectedStale(meta_value) ? rocksdb::Status::NotFound() : rocksdb::Status::OK(); default: return rocksdb::Status::NotFound(); } @@ -1468,17 +1549,17 @@ rocksdb::Status Redis::Del(const Slice& key) { auto type = static_cast(static_cast(meta_value[0])); switch (type) { case DataType::kSets: - return SetsDel(key); + return SetsDel(key, std::move(meta_value)); case DataType::kZSets: - return ZsetsDel(key); + return ZsetsDel(key, std::move(meta_value)); case DataType::kHashes: - return HashesDel(key); + return HashesDel(key, std::move(meta_value)); case DataType::kLists: - return ListsDel(key); + return ListsDel(key, std::move(meta_value)); case DataType::kStrings: - return StringsDel(key); + return StringsDel(key, std::move(meta_value)); case DataType::kStreams: - return StreamsDel(key); + return StreamsDel(key, std::move(meta_value)); default: return rocksdb::Status::NotFound(); } @@ -1494,15 +1575,15 @@ rocksdb::Status Redis::Expire(const Slice& key, int64_t ttl) { auto type = static_cast(static_cast(meta_value[0])); switch (type) { case DataType::kSets: - return SetsExpire(key, ttl); + return SetsExpire(key, ttl, std::move(meta_value)); case DataType::kZSets: - return ZsetsExpire(key, ttl); + return ZsetsExpire(key, ttl, std::move(meta_value)); case DataType::kHashes: - return HashesExpire(key, ttl); + return HashesExpire(key, ttl, std::move(meta_value)); case DataType::kLists: - return ListsExpire(key, ttl); + return ListsExpire(key, ttl, std::move(meta_value)); case DataType::kStrings: - return StringsExpire(key, ttl); + return StringsExpire(key, ttl, std::move(meta_value)); default: return rocksdb::Status::NotFound(); } @@ -1518,15 +1599,15 @@ rocksdb::Status Redis::Expireat(const Slice& key, int64_t ttl) { auto type = static_cast(static_cast(meta_value[0])); switch (type) { case DataType::kSets: - return SetsExpireat(key, ttl); + return SetsExpireat(key, ttl, std::move(meta_value)); case DataType::kZSets: - return ZsetsExpireat(key, ttl); + return ZsetsExpireat(key, ttl, std::move(meta_value)); case DataType::kHashes: - return HashesExpireat(key, ttl); + return HashesExpireat(key, ttl, std::move(meta_value)); case DataType::kLists: - return ListsExpireat(key, ttl); + return ListsExpireat(key, ttl, std::move(meta_value)); case DataType::kStrings: - return StringsExpireat(key, ttl); + return StringsExpireat(key, ttl, std::move(meta_value)); default: return rocksdb::Status::NotFound(); } @@ -1542,15 +1623,15 @@ rocksdb::Status Redis::Persist(const Slice& key) { auto type = static_cast(static_cast(meta_value[0])); switch (type) { case DataType::kSets: - return SetsPersist(key); + return SetsPersist(key, std::move(meta_value)); case DataType::kZSets: - return ZsetsPersist(key); + return ZsetsPersist(key, std::move(meta_value)); case DataType::kHashes: - return HashesPersist(key); + return HashesPersist(key, std::move(meta_value)); case DataType::kLists: - return ListsPersist(key); + return ListsPersist(key, std::move(meta_value)); case DataType::kStrings: - return StringsPersist(key); + return StringsPersist(key, std::move(meta_value)); default: return rocksdb::Status::NotFound(); } @@ -1566,15 +1647,15 @@ rocksdb::Status Redis::TTL(const Slice& key, int64_t* timestamp) { auto type = static_cast(static_cast(meta_value[0])); switch (type) { case DataType::kSets: - return SetsTTL(key, timestamp); + return SetsTTL(key, timestamp, std::move(meta_value)); case DataType::kZSets: - return ZsetsTTL(key, timestamp); + return ZsetsTTL(key, 
timestamp, std::move(meta_value)); case DataType::kHashes: - return HashesTTL(key, timestamp); + return HashesTTL(key, timestamp, std::move(meta_value)); case DataType::kLists: - return ListsTTL(key, timestamp); + return ListsTTL(key, timestamp, std::move(meta_value)); case DataType::kStrings: - return StringsTTL(key, timestamp); + return StringsTTL(key, timestamp, std::move(meta_value)); default: return rocksdb::Status::NotFound(); } } @@ -1597,6 +1678,9 @@ rocksdb::Status Redis::IsExist(const storage::Slice& key) { BaseMetaKey base_meta_key(key); rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { + if (ExpectedStale(meta_value)) { + return Status::NotFound(); + } return Status::OK(); } return rocksdb::Status::NotFound(); diff --git a/src/storage/src/redis_zsets.cc b/src/storage/src/redis_zsets.cc index 503d3710dc..ce89afe885 100644 --- a/src/storage/src/redis_zsets.cc +++ b/src/storage/src/redis_zsets.cc @@ -77,7 +77,10 @@ Status Redis::ZPopMax(const Slice& key, const int64_t count, std::vector(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -132,7 +135,10 @@ Status Redis::ZPopMin(const Slice& key, const int64_t count, std::vector(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -198,7 +204,10 @@ Status Redis::ZAdd(const Slice& key, const std::vector& score_membe if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -284,20 +293,28 @@ Status Redis::ZAdd(const Slice& key, const std::vector& score_membe return s; } -Status Redis::ZCard(const Slice& key, int32_t* card) { +Status Redis::ZCard(const Slice& key, int32_t* card, std::string&& prefetch_meta) { *card = 0; - std::string meta_value; - + Status s; - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + // meta_value is empty means no meta value was fetched before, + // so we should get the meta first + std::string meta_value(std::move(prefetch_meta)); + if (meta_value.empty()) { + BaseMetaKey base_meta_key(key); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(),
&meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } + if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -329,7 +346,10 @@ Status Redis::ZCount(const Slice& key, double min, double max, bool left_close, if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -394,7 +414,10 @@ Status Redis::ZIncrby(const Slice& key, const Slice& member, double increment, d if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -470,7 +493,10 @@ Status Redis::ZRange(const Slice& key, int32_t start, int32_t stop, std::vector< if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -525,7 +551,10 @@ Status Redis::ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std:: if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -594,7 +623,10 @@ Status Redis::ZRangebyscore(const Slice& key, double min, double max, bool left_ if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", 
got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -668,7 +700,10 @@ Status Redis::ZRank(const Slice& key, const Slice& member, int32_t* rank) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -727,7 +762,10 @@ Status Redis::ZRem(const Slice& key, const std::vector& members, in if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -787,7 +825,10 @@ Status Redis::ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -852,7 +893,10 @@ Status Redis::ZRemrangebyscore(const Slice& key, double min, double max, bool le if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -931,7 +975,10 @@ Status Redis::ZRevrange(const Slice& key, int32_t start, int32_t stop, std::vect if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -986,7 +1033,10 @@ Status Redis::ZRevrangebyscore(const Slice& key, double min, double max, bool le if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + 
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1060,7 +1110,10 @@ Status Redis::ZRevrank(const Slice& key, const Slice& member, int32_t* rank) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1111,7 +1164,10 @@ Status Redis::ZScore(const Slice& key, const Slice& member, double* score) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1155,7 +1211,10 @@ Status Redis::ZGetAll(const Slice& key, double weight, std::map(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1204,7 +1263,10 @@ Status Redis::ZUnionstore(const Slice& destination, const std::vector(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[idx] + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1255,7 +1317,10 @@ Status Redis::ZUnionstore(const Slice& destination, const std::vector(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1328,7 +1393,10 @@ Status Redis::ZInterstore(const Slice& destination, const std::vector(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[idx] + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1405,7 +1473,10 @@ Status Redis::ZInterstore(const Slice& destination, const std::vector(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + 
destination.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1464,7 +1535,10 @@ Status Redis::ZRangebylex(const Slice& key, const Slice& min, const Slice& max, if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1534,7 +1608,10 @@ Status Redis::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& ma if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1593,17 +1670,25 @@ Status Redis::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& ma return s; } -Status Redis::ZsetsExpire(const Slice& key, int64_t ttl) { - std::string meta_value; +Status Redis::ZsetsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1624,17 +1709,25 @@ Status Redis::ZsetsExpire(const Slice& key, int64_t ttl) { return s; } -Status Redis::ZsetsDel(const Slice& key) { - std::string meta_value; +Status Redis::ZsetsDel(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - 
@@ -1624,17 +1709,25 @@ Status Redis::ZsetsExpire(const Slice& key, int64_t ttl) { return s; } -Status Redis::ZsetsDel(const Slice& key) { - std::string meta_value; +Status Redis::ZsetsDel(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value was fetched before, + // so we should get the meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1653,17 +1746,25 @@ Status Redis::ZsetsDel(const Slice& key) { return s; } -Status Redis::ZsetsExpireat(const Slice& key, int64_t timestamp) { - std::string meta_value; +Status Redis::ZsetsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value was fetched before, + // so we should get the meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1708,7 +1809,10 @@ Status Redis::ZScan(const Slice& key, int64_t cursor, const std::string& pattern if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) {
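ZsetsPersist, whose hunk follows, is a read-modify-write on the meta value: parse it, clear the expiry timestamp, and write it back under the record lock. A toy sketch of that cycle with an invented layout (an 8-byte etime appended to the payload; this is not the real ParsedZSetsMetaValue encoding):

    #include <cstdint>
    #include <cstring>
    #include <string>

    // Toy layout: payload bytes followed by a little-endian uint64 etime.
    uint64_t ReadEtime(const std::string& meta) {
      uint64_t etime = 0;
      std::memcpy(&etime, meta.data() + meta.size() - sizeof(etime), sizeof(etime));
      return etime;
    }

    // Returns false when there is no timeout to remove (the NotFound path).
    bool PersistSketch(std::string* meta) {
      if (meta->size() < sizeof(uint64_t) || ReadEtime(*meta) == 0) {
        return false;
      }
      const uint64_t zero = 0;  // etime == 0 means "no expiry"
      std::memcpy(&(*meta)[meta->size() - sizeof(zero)], &zero, sizeof(zero));
      return true;  // the real code would now Put() the updated meta value
    }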
@@ -1767,17 +1871,25 @@ Status Redis::ZScan(const Slice& key, int64_t cursor, const std::string& pattern return Status::OK(); } -Status Redis::ZsetsPersist(const Slice& key) { - std::string meta_value; +Status Redis::ZsetsPersist(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); BaseMetaKey base_meta_key(key); ScopeRecordLock l(lock_mgr_, key); + Status s; - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + // meta_value is empty means no meta value was fetched before, + // so we should get the meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1799,16 +1911,24 @@ Status Redis::ZsetsPersist(const Slice& key) { return s; } -Status Redis::ZsetsTTL(const Slice& key, int64_t* timestamp) { - std::string meta_value; - +Status Redis::ZsetsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value was fetched before, + // so we should get the meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index 53454cec53..6df8f6eacd 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -238,9 +238,8 @@ Status Storage::MSetnx(const std::vector& kvs, int32_t* ret) { Status s; for (const auto& kv : kvs) { auto& inst = GetDBInstance(kv.key); - std::string value; - s = inst->Get(Slice(kv.key), &value); - if (s.ok() || !s.IsNotFound()) { + s = inst->IsExist(Slice(kv.key)); + if (!s.IsNotFound()) { return s; } } From 3eb2e485be169a889b4f0d0d469f742323564d11 Mon Sep 17 00:00:00 2001 From: wangshao1 <30471730+wangshao1@users.noreply.github.com> Date: Fri, 21 Jun 2024 16:50:19 +0800 Subject: [PATCH 20/25] fix: some streams errors such as pkpatternmatchdel etc (#2726) * fix pkpatternmatchdel error --------- Co-authored-by: wangshaoyi --- src/storage/src/base_filter.h | 26 +- src/storage/src/pika_stream_meta_value.h | 15 +- src/storage/src/redis_strings.cc | 7 +- src/storage/src/storage.cc | 5 +- src/storage/tests/keys_test.cc | 1022 +++++++++++----------- 5 files changed, 548 insertions(+), 527 deletions(-)
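The base_filter.h diff below teaches the meta compaction filter about stream meta values. For orientation, this is the general shape of a RocksDB compaction filter (a generic sketch against the public rocksdb::CompactionFilter interface, not Pika's BaseMetaFilter):

    #include <string>

    #include <rocksdb/compaction_filter.h>

    // Generic sketch: drop any entry whose value is empty. Pika's BaseMetaFilter
    // instead decodes the leading type byte and per-type expiry/version fields
    // before deciding, as the hunks below show.
    class DropEmptyFilter : public rocksdb::CompactionFilter {
     public:
      bool Filter(int /*level*/, const rocksdb::Slice& /*key*/,
                  const rocksdb::Slice& existing_value, std::string* /*new_value*/,
                  bool* /*value_changed*/) const override {
        return existing_value.empty();  // true == drop the entry at compaction
      }
      const char* Name() const override { return "DropEmptyFilter"; }
    };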
diff --git a/src/storage/src/base_filter.h b/src/storage/src/base_filter.h index 3a092c109e..5dd17b09c6 100644 --- a/src/storage/src/base_filter.h +++ b/src/storage/src/base_filter.h @@ -16,6 +16,7 @@ #include "src/base_value_format.h" #include "src/base_meta_value_format.h" #include "src/lists_meta_value_format.h" +#include "src/pika_stream_meta_value.h" #include "src/strings_value_format.h" #include "src/zsets_data_key_format.h" #include "src/debug.h" @@ -36,11 +37,12 @@ class BaseMetaFilter : public rocksdb::CompactionFilter { * The field designs of the remaining zset,set,hash and stream in meta-value * are the same, so the same filtering strategy is used */ + ParsedBaseKey parsed_key(key); auto type = static_cast(static_cast(value[0])); DEBUG("==========================START=========================="); if (type == DataType::kStrings) { ParsedStringsValue parsed_strings_value(value); - DEBUG("[StringsFilter] key: {}, value = {}, timestamp: {}, cur_time: {}", key.ToString().c_str(), + DEBUG("[string type] key: %s, value = %s, timestamp: %llu, cur_time: %llu", parsed_key.Key().ToString().c_str(), parsed_strings_value.UserValue().ToString().c_str(), parsed_strings_value.Etime(), cur_time); if (parsed_strings_value.Etime() != 0 && parsed_strings_value.Etime() < cur_time) { DEBUG("Drop[Stale]"); @@ -49,9 +51,17 @@ class BaseMetaFilter : public rocksdb::CompactionFilter { DEBUG("Reserve"); return false; } + } else if (type == DataType::kStreams) { + ParsedStreamMetaValue parsed_stream_meta_value(value); + DEBUG("[stream meta type], key: %s, entries_added = %llu, first_id: %s, last_id: %s, version: %llu", + parsed_key.Key().ToString().c_str(), parsed_stream_meta_value.entries_added(), + parsed_stream_meta_value.first_id().ToString().c_str(), + parsed_stream_meta_value.last_id().ToString().c_str(), + parsed_stream_meta_value.version()); + return false; } else if (type == DataType::kLists) { ParsedListsMetaValue parsed_lists_meta_value(value); - DEBUG("[ListMetaFilter], key: {}, count = {}, timestamp: {}, cur_time: {}, version: {}", key.ToString().c_str(), + DEBUG("[list meta type], key: %s, count = %llu, timestamp: %llu, cur_time: %llu, version: %llu", parsed_key.Key().ToString().c_str(), parsed_lists_meta_value.Count(), parsed_lists_meta_value.Etime(), cur_time, parsed_lists_meta_value.Version()); @@ -68,8 +78,9 @@ class BaseMetaFilter : public rocksdb::CompactionFilter { return false; } else { ParsedBaseMetaValue parsed_base_meta_value(value); - DEBUG("[MetaFilter] key: {}, count = {}, timestamp: {}, cur_time: {}, version: {}", key.ToString().c_str(), - parsed_base_meta_value.Count(), parsed_base_meta_value.Etime(), cur_time, parsed_base_meta_value.Version()); + DEBUG("[%s meta type] key: %s, count = %d, timestamp: %llu, cur_time: %llu, version: %llu", + DataTypeToString(type), parsed_key.Key().ToString().c_str(), parsed_base_meta_value.Count(), + parsed_base_meta_value.Etime(), cur_time, parsed_base_meta_value.Version()); if (parsed_base_meta_value.Etime() != 0 && parsed_base_meta_value.Etime() < cur_time && parsed_base_meta_value.Version() < cur_time) { @@ -143,7 +154,12 @@ class BaseDataFilter : public rocksdb::CompactionFilter { auto type = static_cast(static_cast(meta_value[0])); if (type != type_) { return true; - } else if (type == DataType::kHashes || type == DataType::kSets || type == DataType::kStreams || type == DataType::kZSets) { + } else if (type == DataType::kStreams) { + ParsedStreamMetaValue parsed_stream_meta_value(meta_value); + meta_not_found_ = false; + cur_meta_version_ = parsed_stream_meta_value.version(); + cur_meta_etime_ = 0; // streams do not support ttl + } else if (type == DataType::kHashes || type == DataType::kSets || type == DataType::kZSets)
{ ParsedBaseMetaValue parsed_base_meta_value(&meta_value); meta_not_found_ = false; cur_meta_version_ = parsed_base_meta_value.Version(); diff --git a/src/storage/src/pika_stream_meta_value.h b/src/storage/src/pika_stream_meta_value.h index e010d5c830..d505eb9094 100644 --- a/src/storage/src/pika_stream_meta_value.h +++ b/src/storage/src/pika_stream_meta_value.h @@ -82,7 +82,8 @@ class StreamMetaValue { value_ = std::move(value); assert(value_.size() == kDefaultStreamValueLength); if (value_.size() != kDefaultStreamValueLength) { - LOG(ERROR) << "Invalid stream meta value length: "; + LOG(ERROR) << "Invalid stream meta value length: " << value_.size() + << " expected: " << kDefaultStreamValueLength; return; } char* pos = &value_[0]; @@ -215,7 +216,8 @@ class ParsedStreamMetaValue { ParsedStreamMetaValue(const Slice& value) { assert(value.size() == kDefaultStreamValueLength); if (value.size() != kDefaultStreamValueLength) { - LOG(ERROR) << "Invalid stream meta value length: "; + LOG(ERROR) << "Invalid stream meta value length: " << value.size() + << " expected: " << kDefaultStreamValueLength; return; } char* pos = const_cast(value.data()); @@ -294,7 +296,7 @@ class StreamCGroupMetaValue { uint64_t needed = kDefaultStreamCGroupValueLength; assert(value_.size() == 0); if (value_.size() != 0) { - LOG(FATAL) << "Init on a existed stream cgroup meta value!"; + LOG(ERROR) << "Init on a existed stream cgroup meta value!"; return; } value_.resize(needed); @@ -314,7 +316,8 @@ class StreamCGroupMetaValue { value_ = std::move(value); assert(value_.size() == kDefaultStreamCGroupValueLength); if (value_.size() != kDefaultStreamCGroupValueLength) { - LOG(FATAL) << "Invalid stream cgroup meta value length: "; + LOG(ERROR) << "Invalid stream cgroup meta value length: " << value_.size() + << " expected: " << kDefaultStreamCGroupValueLength; return; } if (value_.size() == kDefaultStreamCGroupValueLength) { @@ -373,7 +376,7 @@ class StreamConsumerMetaValue { value_ = std::move(value); assert(value_.size() == kDefaultStreamConsumerValueLength); if (value_.size() != kDefaultStreamConsumerValueLength) { - LOG(FATAL) << "Invalid stream consumer meta value length: " << value_.size() + LOG(ERROR) << "Invalid stream consumer meta value length: " << value_.size() << " expected: " << kDefaultStreamConsumerValueLength; return; } @@ -391,7 +394,7 @@ class StreamConsumerMetaValue { pel_ = pel; assert(value_.size() == 0); if (value_.size() != 0) { - LOG(FATAL) << "Invalid stream consumer meta value length: " << value_.size() << " expected: 0"; + LOG(ERROR) << "Invalid stream consumer meta value length: " << value_.size() << " expected: 0"; return; } uint64_t needed = kDefaultStreamConsumerValueLength; diff --git a/src/storage/src/redis_strings.cc b/src/storage/src/redis_strings.cc index 007b92f05a..970695bf4b 100644 --- a/src/storage/src/redis_strings.cc +++ b/src/storage/src/redis_strings.cc @@ -1703,19 +1703,19 @@ rocksdb::Status Redis::PKPatternMatchDel(const std::string& pattern, int32_t* re rocksdb::WriteBatch batch; rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); iter->SeekToFirst(); - key = iter->key().ToString(); while (iter->Valid()) { auto meta_type = static_cast(static_cast(iter->value()[0])); ParsedBaseMetaKey parsed_meta_key(iter->key().ToString()); + key = iter->key().ToString(); + meta_value = iter->value().ToString(); + if (meta_type == DataType::kStrings) { - meta_value = iter->value().ToString(); ParsedStringsValue parsed_strings_value(&meta_value); if
(!parsed_strings_value.IsStale() && (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) { batch.Delete(key); } } else if (meta_type == DataType::kLists) { - meta_value = iter->value().ToString(); ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (!parsed_lists_meta_value.IsStale() && (parsed_lists_meta_value.Count() != 0U) && (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != @@ -1732,7 +1732,6 @@ rocksdb::Status Redis::PKPatternMatchDel(const std::string& pattern, int32_t* re batch.Put(handles_[kMetaCF], key, stream_meta_value.value()); } } else { - meta_value = iter->value().ToString(); ParsedBaseMetaValue parsed_meta_value(&meta_value); if (!parsed_meta_value.IsStale() && (parsed_meta_value.Count() != 0) && (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index 6df8f6eacd..ddeac6dd37 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -1401,11 +1401,14 @@ Status Storage::PKRScanRange(const DataType& data_type, const Slice& key_start, Status Storage::PKPatternMatchDel(const DataType& data_type, const std::string& pattern, int32_t* ret) { Status s; + *ret = 0; for (const auto& inst : insts_) { - s = inst->PKPatternMatchDel(pattern, ret); + int32_t tmp_ret = 0; + s = inst->PKPatternMatchDel(pattern, &tmp_ret); if (!s.ok()) { return s; } + *ret += tmp_ret; } return s; } diff --git a/src/storage/tests/keys_test.cc b/src/storage/tests/keys_test.cc index e7872c713b..4609da95f2 100644 --- a/src/storage/tests/keys_test.cc +++ b/src/storage/tests/keys_test.cc @@ -2095,517 +2095,517 @@ for (const auto& kv : kvs) { db.Compact(DataType::kAll, true); } -// TEST_F(KeysTest, PKPatternMatchDel) { -// int32_t ret; -// uint64_t ret64; -// int32_t delete_count; -// std::vector keys; -// std::map type_status; - -// //=============================== Strings =============================== - -// // ***************** Group 1 Test ***************** -// db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); -// db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY2", "VALUE"); -// db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); -// db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY4", "VALUE"); -// db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE"); -// db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY6", "VALUE"); -// s = db.PKPatternMatchDel(DataType::kStrings, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 6); -// keys.clear(); -// db.Keys(DataType::kStrings, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 2 Test ***************** -// db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); -// db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY2", "VALUE"); -// db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); -// db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY4", "VALUE"); -// db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE"); -// db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY6", "VALUE"); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY1")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY3")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY5")); -// s = db.PKPatternMatchDel(DataType::kStrings, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kStrings, "*", &keys); -// 
ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 3 Test ***************** -// db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY1_0xxx0", "VALUE"); -// db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0", "VALUE"); -// db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY3_0xxx0", "VALUE"); -// db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0", "VALUE"); -// db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY5_0xxx0", "VALUE"); -// db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0", "VALUE"); -// s = db.PKPatternMatchDel(DataType::kStrings, "*0xxx0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kStrings, "*", &keys); -// ASSERT_EQ(keys.size(), 3); -// ASSERT_EQ(keys[0], "GP3_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0"); -// ASSERT_EQ(keys[1], "GP3_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0"); -// ASSERT_EQ(keys[2], "GP3_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0"); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 4 Test ***************** -// db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); -// db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0", "VALUE"); -// db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); -// db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0", "VALUE"); -// db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE"); -// db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0", "VALUE"); -// ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY1")); -// ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY3")); -// ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY5")); -// s = db.PKPatternMatchDel(DataType::kStrings, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kStrings, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 5 Test ***************** -// size_t gp5_total_kv = 23333; -// for (size_t idx = 0; idx < gp5_total_kv; ++idx) { -// db.Set("GP5_PKPATTERNMATCHDEL_STRING_KEY" + std::to_string(idx), "VALUE"); -// } -// s = db.PKPatternMatchDel(DataType::kStrings, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, gp5_total_kv); -// keys.clear(); -// db.Keys(DataType::kStrings, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// //=============================== Set =============================== - -// // ***************** Group 1 Test ***************** -// db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); -// db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); -// db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); -// db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); -// db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); -// db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); -// s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 6); -// keys.clear(); -// db.Keys(DataType::kSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 2 Test ***************** -// db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); -// db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); -// db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); -// db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); -// db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); -// db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY1")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY3")); -// 
ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY5")); -// s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 3 Test ***************** -// db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY1_0xxx0", {"M1"}, &ret); -// db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY2_0ooo0", {"M1"}, &ret); -// db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY3_0xxx0", {"M1"}, &ret); -// db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY4_0ooo0", {"M1"}, &ret); -// db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY5_0xxx0", {"M1"}, &ret); -// db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY6_0ooo0", {"M1"}, &ret); -// s = db.PKPatternMatchDel(DataType::kSets, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kSets, "*", &keys); -// ASSERT_EQ(keys.size(), 3); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY1_0xxx0", keys[0]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY3_0xxx0", keys[1]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY5_0xxx0", keys[2]); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 4 Test ***************** -// db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); -// db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); -// db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); -// db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); -// db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); -// db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); -// db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); -// db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); -// db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); -// s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 5 Test ***************** -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY1_0ooo0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY2_0xxx0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY3_0ooo0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY4_0xxx0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY5_0ooo0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY6_0xxx0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY7_0ooo0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY8_0xxx0", {"M1"}, &ret); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_SET_KEY1_0ooo0")); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_SET_KEY2_0xxx0")); -// db.SRem("GP5_PKPATTERNMATCHDEL_SET_KEY3_0ooo0", {"M1"}, &ret); -// db.SRem("GP5_PKPATTERNMATCHDEL_SET_KEY4_0xxx0", {"M1"}, &ret); -// s = db.PKPatternMatchDel(DataType::kSets, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 2); -// keys.clear(); -// db.Keys(DataType::kSets, "*", &keys); -// ASSERT_EQ(keys.size(), 2); -// ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_SET_KEY6_0xxx0"); -// ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_SET_KEY8_0xxx0"); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 6 Test ***************** -// size_t gp6_total_set = 23333; -// for (size_t idx = 0; idx < gp6_total_set; ++idx) { -// db.SAdd("GP6_PKPATTERNMATCHDEL_SET_KEY" + std::to_string(idx), {"M1"}, &ret); -// } -// s = 
db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, gp6_total_set); -// keys.clear(); -// db.Keys(DataType::kSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// //=============================== Hashes =============================== - -// // ***************** Group 1 Test ***************** -// db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); -// db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); -// db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); -// db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); -// db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); -// db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); -// s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 6); -// keys.clear(); -// db.Keys(DataType::kHashes, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 2 Test ***************** -// db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); -// db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); -// db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); -// db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); -// db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); -// db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY1")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY3")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY5")); -// s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kHashes, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 3 Test ***************** -// db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY1_0xxx0", "FIELD", "VALUE", &ret); -// db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY2_0ooo0", "FIELD", "VALUE", &ret); -// db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY3_0xxx0", "FIELD", "VALUE", &ret); -// db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY4_0ooo0", "FIELD", "VALUE", &ret); -// db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY5_0xxx0", "FIELD", "VALUE", &ret); -// db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY6_0ooo0", "FIELD", "VALUE", &ret); -// s = db.PKPatternMatchDel(DataType::kHashes, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kHashes, "*", &keys); -// ASSERT_EQ(keys.size(), 3); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY1_0xxx0", keys[0]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY3_0xxx0", keys[1]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY5_0xxx0", keys[2]); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 4 Test ***************** -// db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); -// db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); -// db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); -// db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); -// db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); -// db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); -// db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY1", {"FIELD"}, &ret); -// db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY3", {"FIELD"}, &ret); -// 
db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY5", {"FIELD"}, &ret); -// s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kHashes, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 5 Test ***************** -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY1_0ooo0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY2_0xxx0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY3_0ooo0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY4_0xxx0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY5_0ooo0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY6_0xxx0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY7_0ooo0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY8_0xxx0", "FIELD", "VALUE", &ret); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_HASH_KEY1_0ooo0")); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_HASH_KEY2_0xxx0")); -// db.HDel("GP5_PKPATTERNMATCHDEL_HASH_KEY3_0ooo0", {"FIELD"}, &ret); -// db.HDel("GP5_PKPATTERNMATCHDEL_HASH_KEY4_0xxx0", {"FIELD"}, &ret); -// s = db.PKPatternMatchDel(DataType::kHashes, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 2); -// keys.clear(); -// db.Keys(DataType::kHashes, "*", &keys); -// ASSERT_EQ(keys.size(), 2); -// ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_HASH_KEY6_0xxx0"); -// ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_HASH_KEY8_0xxx0"); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 6 Test ***************** -// size_t gp6_total_hash = 23333; -// for (size_t idx = 0; idx < gp6_total_hash; ++idx) { -// db.HSet("GP6_PKPATTERNMATCHDEL_HASH_KEY" + std::to_string(idx), "FIELD", "VALUE", &ret); -// } -// s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, gp6_total_hash); -// keys.clear(); -// db.Keys(DataType::kHashes, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// //=============================== ZSets =============================== - -// // ***************** Group 1 Test ***************** -// db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); -// db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); -// db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); -// db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); -// db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); -// db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); -// s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 6); -// keys.clear(); -// db.Keys(DataType::kZSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 2 Test ***************** -// db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); -// db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); -// db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); -// db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); -// db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); -// db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY1")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY3")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY5")); -// 
s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kZSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 3 Test ***************** -// db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY1_0xxx0", {{1, "M"}}, &ret); -// db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY2_0ooo0", {{1, "M"}}, &ret); -// db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY3_0xxx0", {{1, "M"}}, &ret); -// db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY4_0ooo0", {{1, "M"}}, &ret); -// db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY5_0xxx0", {{1, "M"}}, &ret); -// db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY6_0ooo0", {{1, "M"}}, &ret); -// s = db.PKPatternMatchDel(DataType::kZSets, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kZSets, "*", &keys); -// ASSERT_EQ(keys.size(), 3); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY1_0xxx0", keys[0]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY3_0xxx0", keys[1]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY5_0xxx0", keys[2]); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 4 Test ***************** -// db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); -// db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); -// db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); -// db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); -// db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); -// db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); -// db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY1", {"M"}, &ret); -// db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY3", {"M"}, &ret); -// db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY5", {"M"}, &ret); -// s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kZSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 5 Test ***************** -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY1_0ooo0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY2_0xxx0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY3_0ooo0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY4_0xxx0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY5_0ooo0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY6_0xxx0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY7_0ooo0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY8_0xxx0", {{1, "M"}}, &ret); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_ZSET_KEY1_0ooo0")); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_ZSET_KEY2_0xxx0")); -// db.ZRem("GP5_PKPATTERNMATCHDEL_ZSET_KEY3_0ooo0", {"M"}, &ret); -// db.ZRem("GP5_PKPATTERNMATCHDEL_ZSET_KEY4_0xxx0", {"M"}, &ret); -// s = db.PKPatternMatchDel(DataType::kZSets, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 2); -// keys.clear(); -// db.Keys(DataType::kZSets, "*", &keys); -// ASSERT_EQ(keys.size(), 2); -// ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_ZSET_KEY6_0xxx0"); -// ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_ZSET_KEY8_0xxx0"); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 6 Test ***************** -// size_t gp6_total_zset = 23333; -// for (size_t idx = 0; idx < gp6_total_zset; ++idx) { -// db.ZAdd("GP6_PKPATTERNMATCHDEL_ZSET_KEY" + std::to_string(idx), 
{{1, "M"}}, &ret); -// } -// s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, gp6_total_zset); -// keys.clear(); -// db.Keys(DataType::kZSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// //=============================== List =============================== - -// // ***************** Group 1 Test ***************** -// db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); -// db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); -// db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); -// db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); -// db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); -// db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); -// s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 6); -// keys.clear(); -// db.Keys(DataType::kLists, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 2 Test ***************** -// db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); -// db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); -// db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); -// db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); -// db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); -// db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY1")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY3")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY5")); -// s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kLists, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 3 Test ***************** -// db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY1_0xxx0", {"VALUE"}, &ret64); -// db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY2_0ooo0", {"VALUE"}, &ret64); -// db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY3_0xxx0", {"VALUE"}, &ret64); -// db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY4_0ooo0", {"VALUE"}, &ret64); -// db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY5_0xxx0", {"VALUE"}, &ret64); -// db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY6_0ooo0", {"VALUE"}, &ret64); -// s = db.PKPatternMatchDel(DataType::kLists, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kLists, "*", &keys); -// ASSERT_EQ(keys.size(), 3); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY1_0xxx0", keys[0]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY3_0xxx0", keys[1]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY5_0xxx0", keys[2]); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 4 Test ***************** -// db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); -// db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); -// db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); -// db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); -// db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); -// db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); -// db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY1", 1, "VALUE", &ret64); -// db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY3", 1, "VALUE", &ret64); -// db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY5", 1, "VALUE", &ret64); -// s = 
db.PKPatternMatchDel(DataType::kLists, "*", &delete_count);
-// ASSERT_TRUE(s.ok());
-// ASSERT_EQ(delete_count, 3);
-// keys.clear();
-// db.Keys(DataType::kLists, "*", &keys);
-// ASSERT_EQ(keys.size(), 0);
-
-// // ***************** Group 5 Test *****************
-// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY1_0ooo0", {"VALUE"}, &ret64);
-// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY2_0xxx0", {"VALUE"}, &ret64);
-// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY3_0ooo0", {"VALUE"}, &ret64);
-// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY4_0xxx0", {"VALUE"}, &ret64);
-// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY5_0ooo0", {"VALUE"}, &ret64);
-// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY6_0xxx0", {"VALUE"}, &ret64);
-// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY7_0ooo0", {"VALUE"}, &ret64);
-// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY8_0xxx0", {"VALUE"}, &ret64);
-// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_LIST_KEY1_0ooo0"));
-// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_LIST_KEY2_0xxx0"));
-// db.LRem("GP5_PKPATTERNMATCHDEL_LIST_KEY3_0ooo0", 1, "VALUE", &ret64);
-// db.LRem("GP5_PKPATTERNMATCHDEL_LIST_KEY4_0xxx0", 1, "VALUE", &ret64);
-// s = db.PKPatternMatchDel(DataType::kLists, "*0ooo0", &delete_count);
-// ASSERT_TRUE(s.ok());
-// ASSERT_EQ(delete_count, 2);
-// keys.clear();
-// db.Keys(DataType::kLists, "*", &keys);
-// ASSERT_EQ(keys.size(), 2);
-// ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_LIST_KEY6_0xxx0");
-// ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_LIST_KEY8_0xxx0");
-// type_status.clear();
-// db.Del(keys);
-
-// // ***************** Group 6 Test *****************
-// size_t gp6_total_list = 23333;
-// for (size_t idx = 0; idx < gp6_total_list; ++idx) {
-// db.LPush("GP6_PKPATTERNMATCHDEL_LIST_KEY" + std::to_string(idx), {"VALUE"}, &ret64);
-// }
-// s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count);
-// ASSERT_TRUE(s.ok());
-// ASSERT_EQ(delete_count, gp6_total_hash);
-// keys.clear();
-// db.Keys(DataType::kLists, "*", &keys);
-// ASSERT_EQ(keys.size(), 0);
-
-// sleep(2);
-// db.Compact(DataType::kAll, true);
-// }
+TEST_F(KeysTest, PKPatternMatchDel) {
+  int32_t ret;
+  uint64_t ret64;
+  int32_t delete_count = 0;
+  std::vector<std::string> keys;
+  std::map<storage::DataType, Status> type_status;
+
+  //=============================== Strings ===============================
+
+  // ***************** Group 1 Test *****************
+  db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE");
+  db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY2", "VALUE");
+  db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE");
+  db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY4", "VALUE");
+  db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE");
+  db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY6", "VALUE");
+  s = db.PKPatternMatchDel(DataType::kStrings, "*", &delete_count);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(delete_count, 6);
+  keys.clear();
+  db.Keys(DataType::kStrings, "*", &keys);
+  ASSERT_EQ(keys.size(), 0);
+
+  // ***************** Group 2 Test *****************
+  db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE");
+  db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY2", "VALUE");
+  db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE");
+  db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY4", "VALUE");
+  db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE");
+  db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY6", "VALUE");
+  ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY1"));
+  ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY3"));
+  ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY5"));
+  s =
db.PKPatternMatchDel(DataType::kStrings, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY1_0xxx0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY3_0xxx0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY5_0xxx0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0", "VALUE"); + s = db.PKPatternMatchDel(DataType::kStrings, "*0xxx0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(keys[0], "GP3_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0"); + ASSERT_EQ(keys[1], "GP3_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0"); + ASSERT_EQ(keys[2], "GP3_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0", "VALUE"); + ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY5")); + s = db.PKPatternMatchDel(DataType::kStrings, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + size_t gp5_total_kv = 23333; + for (size_t idx = 0; idx < gp5_total_kv; ++idx) { + db.Set("GP5_PKPATTERNMATCHDEL_STRING_KEY" + std::to_string(idx), "VALUE"); + } + s = db.PKPatternMatchDel(DataType::kStrings, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, gp5_total_kv); + keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + //=============================== Set =============================== + + // ***************** Group 1 Test ***************** + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); + s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY1")); + ASSERT_TRUE(make_expired(&db, 
"GP2_PKPATTERNMATCHDEL_SET_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY5")); + s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY1_0xxx0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY2_0ooo0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY3_0xxx0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY4_0ooo0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY5_0xxx0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY6_0ooo0", {"M1"}, &ret); + s = db.PKPatternMatchDel(DataType::kSets, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY1_0xxx0", keys[0]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY3_0xxx0", keys[1]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY5_0xxx0", keys[2]); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); + db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); + db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); + db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); + s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY1_0ooo0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY2_0xxx0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY3_0ooo0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY4_0xxx0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY5_0ooo0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY6_0xxx0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY7_0ooo0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY8_0xxx0", {"M1"}, &ret); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_SET_KEY1_0ooo0")); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_SET_KEY2_0xxx0")); + db.SRem("GP5_PKPATTERNMATCHDEL_SET_KEY3_0ooo0", {"M1"}, &ret); + db.SRem("GP5_PKPATTERNMATCHDEL_SET_KEY4_0xxx0", {"M1"}, &ret); + s = db.PKPatternMatchDel(DataType::kSets, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 2); + keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_SET_KEY6_0xxx0"); + ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_SET_KEY8_0xxx0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 6 Test ***************** + size_t gp6_total_set = 23333; + for (size_t idx = 0; idx < gp6_total_set; ++idx) { + db.SAdd("GP6_PKPATTERNMATCHDEL_SET_KEY" + std::to_string(idx), {"M1"}, &ret); + } + s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 
gp6_total_set); + keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + //=============================== Hashes =============================== + + // ***************** Group 1 Test ***************** + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); + s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY5")); + s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY1_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY2_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY3_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY4_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY5_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY6_0ooo0", "FIELD", "VALUE", &ret); + s = db.PKPatternMatchDel(DataType::kHashes, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY1_0xxx0", keys[0]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY3_0xxx0", keys[1]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY5_0xxx0", keys[2]); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); + db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY1", {"FIELD"}, &ret); + db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY3", {"FIELD"}, &ret); + db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY5", {"FIELD"}, &ret); + s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + 
ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY1_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY2_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY3_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY4_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY5_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY6_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY7_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY8_0xxx0", "FIELD", "VALUE", &ret); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_HASH_KEY1_0ooo0")); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_HASH_KEY2_0xxx0")); + db.HDel("GP5_PKPATTERNMATCHDEL_HASH_KEY3_0ooo0", {"FIELD"}, &ret); + db.HDel("GP5_PKPATTERNMATCHDEL_HASH_KEY4_0xxx0", {"FIELD"}, &ret); + s = db.PKPatternMatchDel(DataType::kHashes, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 2); + keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_HASH_KEY6_0xxx0"); + ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_HASH_KEY8_0xxx0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 6 Test ***************** + size_t gp6_total_hash = 23333; + for (size_t idx = 0; idx < gp6_total_hash; ++idx) { + db.HSet("GP6_PKPATTERNMATCHDEL_HASH_KEY" + std::to_string(idx), "FIELD", "VALUE", &ret); + } + s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, gp6_total_hash); + keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + //=============================== ZSets =============================== + + // ***************** Group 1 Test ***************** + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); + s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY5")); + s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY1_0xxx0", {{1, "M"}}, &ret); + 
db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY2_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY3_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY4_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY5_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY6_0ooo0", {{1, "M"}}, &ret); + s = db.PKPatternMatchDel(DataType::kZSets, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY1_0xxx0", keys[0]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY3_0xxx0", keys[1]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY5_0xxx0", keys[2]); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); + db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY1", {"M"}, &ret); + db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY3", {"M"}, &ret); + db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY5", {"M"}, &ret); + s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY1_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY2_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY3_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY4_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY5_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY6_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY7_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY8_0xxx0", {{1, "M"}}, &ret); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_ZSET_KEY1_0ooo0")); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_ZSET_KEY2_0xxx0")); + db.ZRem("GP5_PKPATTERNMATCHDEL_ZSET_KEY3_0ooo0", {"M"}, &ret); + db.ZRem("GP5_PKPATTERNMATCHDEL_ZSET_KEY4_0xxx0", {"M"}, &ret); + s = db.PKPatternMatchDel(DataType::kZSets, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 2); + keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_ZSET_KEY6_0xxx0"); + ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_ZSET_KEY8_0xxx0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 6 Test ***************** + size_t gp6_total_zset = 23333; + for (size_t idx = 0; idx < gp6_total_zset; ++idx) { + db.ZAdd("GP6_PKPATTERNMATCHDEL_ZSET_KEY" + std::to_string(idx), {{1, "M"}}, &ret); + } + s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, gp6_total_zset); + keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + //=============================== List =============================== + + // ***************** Group 1 Test ***************** + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); + 
db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); + s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY5")); + s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY1_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY2_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY3_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY4_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY5_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY6_0ooo0", {"VALUE"}, &ret64); + s = db.PKPatternMatchDel(DataType::kLists, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY1_0xxx0", keys[0]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY3_0xxx0", keys[1]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY5_0xxx0", keys[2]); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); + db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY1", 1, "VALUE", &ret64); + db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY3", 1, "VALUE", &ret64); + db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY5", 1, "VALUE", &ret64); + s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY1_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY2_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY3_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY4_0xxx0", {"VALUE"}, &ret64); + 
db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY5_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY6_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY7_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY8_0xxx0", {"VALUE"}, &ret64); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_LIST_KEY1_0ooo0")); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_LIST_KEY2_0xxx0")); + db.LRem("GP5_PKPATTERNMATCHDEL_LIST_KEY3_0ooo0", 1, "VALUE", &ret64); + db.LRem("GP5_PKPATTERNMATCHDEL_LIST_KEY4_0xxx0", 1, "VALUE", &ret64); + s = db.PKPatternMatchDel(DataType::kLists, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 2); + keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_LIST_KEY6_0xxx0"); + ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_LIST_KEY8_0xxx0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 6 Test ***************** + size_t gp6_total_list = 23333; + for (size_t idx = 0; idx < gp6_total_list; ++idx) { + db.LPush("GP6_PKPATTERNMATCHDEL_LIST_KEY" + std::to_string(idx), {"VALUE"}, &ret64); + } + s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, gp6_total_hash); + keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + sleep(2); + db.Compact(DataType::kAll, true); +} // Scan // Note: This test needs to execute at first because all of the data is From e131567a056e39f87f2c95d9e5db735a4aa6f6a3 Mon Sep 17 00:00:00 2001 From: chejinge <945997690@qq.com> Date: Fri, 21 Jun 2024 18:10:21 +0800 Subject: [PATCH 21/25] fix:CI run in branch 4.0 (#2754) Co-authored-by: chejinge --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/codis.yml | 4 ++-- .github/workflows/pika.yml | 4 ++-- .github/workflows/tools_go.yml | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index b946bf5608..976e1ce63f 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -13,10 +13,10 @@ name: "CodeQL" on: push: - branches: [ "unstable", "3.5" ] + branches: [ "unstable", "3.5" , "4.0"] pull_request: # The branches below must be a subset of the branches above - branches: [ "unstable", "3.5" ] + branches: [ "unstable", "3.5" , "4.0"] schedule: - cron: '25 19 * * 6' diff --git a/.github/workflows/codis.yml b/.github/workflows/codis.yml index b254dbeb28..e80a9c82ea 100644 --- a/.github/workflows/codis.yml +++ b/.github/workflows/codis.yml @@ -5,9 +5,9 @@ name: Codis on: push: - branches: [ "unstable", "3.5" ] + branches: [ "unstable", "3.5" , "4.0" ] pull_request: - branches: [ "unstable", "3.5" ] + branches: [ "unstable", "3.5" , "4.0"] jobs: diff --git a/.github/workflows/pika.yml b/.github/workflows/pika.yml index a2edf0b1e1..093f3fd276 100644 --- a/.github/workflows/pika.yml +++ b/.github/workflows/pika.yml @@ -2,9 +2,9 @@ name: Pika on: push: - branches: [ "unstable", "3.5" ] + branches: [ "unstable", "3.5" , "4.0"] pull_request: - branches: [ "unstable", "3.5" ] + branches: [ "unstable", "3.5" , "4.0"] env: # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.) 
diff --git a/.github/workflows/tools_go.yml b/.github/workflows/tools_go.yml
index 125679e3ea..e28c561400 100644
--- a/.github/workflows/tools_go.yml
+++ b/.github/workflows/tools_go.yml
@@ -2,11 +2,11 @@ name: Tools_go_build

 on:
   push:
-    branches: [ "unstable", "3.5" ]
+    branches: [ "unstable", "3.5", "4.0" ]
     paths:
       - 'tools/**'
   pull_request:
-    branches: [ "unstable", "3.5" ]
+    branches: [ "unstable", "3.5", "4.0" ]
     paths:
       - 'tools/**'

From 3d3c6d136ef66fcdabfe973731b59ab3f397ad7d Mon Sep 17 00:00:00 2001
From: saz97 <152467061+saz97@users.noreply.github.com>
Date: Mon, 24 Jun 2024 14:31:25 +0800
Subject: [PATCH 22/25] fix: Add isolation between string and hyperloglog (issue #2719) (#2720)

* use one bit in reserve to add isolation between string and hyperloglog
---
 src/pika_command.cc                    |   2 +-
 src/storage/src/redis.h                |   2 +
 src/storage/src/redis_hyperloglog.cc   |  65 +++++-
 src/storage/src/storage.cc             |  17 +-
 src/storage/src/strings_value_format.h |  27 +++
 tests/assets/default.conf              |   2 +-
 tests/unit/type/hyperloglog.tcl        | 262 +++++++++++++++++++++++++
 7 files changed, 364 insertions(+), 13 deletions(-)
 create mode 100644 tests/unit/type/hyperloglog.tcl

diff --git a/src/pika_command.cc b/src/pika_command.cc
index a40cb77f35..b374218cb6 100644
--- a/src/pika_command.cc
+++ b/src/pika_command.cc
@@ -703,7 +703,7 @@ void InitCmdTable(CmdTable* cmd_table) {
   cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePfCount, std::move(pfcountptr)));
   ////pfmergeCmd
   std::unique_ptr<Cmd> pfmergeptr = std::make_unique<PfMergeCmd>(
-      kCmdNamePfMerge, -3, kCmdFlagsWrite | kCmdFlagsHyperLogLog | kCmdFlagsSlow);
+      kCmdNamePfMerge, -2, kCmdFlagsWrite | kCmdFlagsHyperLogLog | kCmdFlagsSlow);
   cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePfMerge, std::move(pfmergeptr)));

   // GEO
diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h
index d818fc3e71..ccad635263 100644
--- a/src/storage/src/redis.h
+++ b/src/storage/src/redis.h
@@ -154,6 +154,7 @@ class Redis {
   Status BitOp(BitOpType op, const std::string& dest_key, const std::vector<std::string>& src_keys, std::string &value_to_dest, int64_t* ret);
   Status Decrby(const Slice& key, int64_t value, int64_t* ret);
   Status Get(const Slice& key, std::string* value);
+  Status HyperloglogGet(const Slice& key, std::string* value);
   Status MGet(const Slice& key, std::string* value);
   Status GetWithTTL(const Slice& key, std::string* value, int64_t* ttl);
   Status MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl);
@@ -167,6 +168,7 @@ class Redis {
   Status MSet(const std::vector<KeyValue>& kvs);
   Status MSetnx(const std::vector<KeyValue>& kvs, int32_t* ret);
   Status Set(const Slice& key, const Slice& value);
+  Status HyperloglogSet(const Slice& key, const Slice& value);
   Status Setxx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl = 0);
   Status SetBit(const Slice& key, int64_t offset, int32_t value, int32_t* ret);
   Status Setex(const Slice& key, const Slice& value, int64_t ttl);
diff --git a/src/storage/src/redis_hyperloglog.cc b/src/storage/src/redis_hyperloglog.cc
index 52dae42465..c9cd1dd4c1 100644
--- a/src/storage/src/redis_hyperloglog.cc
+++ b/src/storage/src/redis_hyperloglog.cc
@@ -3,11 +3,18 @@
 // LICENSE file in the root directory of this source tree. An additional grant
 // of patent rights can be found in the PATENTS file in the same directory.
-#include "src/redis_hyperloglog.h" + #include #include #include +#include + #include "src/storage_murmur3.h" +#include "storage/storage_define.h" +#include "src/redis.h" +#include "src/mutex.h" +#include "src/redis_hyperloglog.h" +#include "src/scope_record_lock.h" namespace storage { @@ -108,7 +115,59 @@ std::string HyperLogLog::Merge(const HyperLogLog& hll) { return result; } -// ::__builtin_ctz(x): 返回右起第一个‘1’之后的0的个数 +// ::__builtin_ctz(x): return the first number of '0' after the first '1' from the right uint8_t HyperLogLog::Nctz(uint32_t x, int b) { return static_cast(std::min(b, ::__builtin_ctz(x))) + 1; } -} // namespace storage + +bool IsHyperloglogObj(const std::string* internal_value_str) { + size_t kStringsValueSuffixLength = 2 * kTimestampLength + kSuffixReserveLength; + char reserve[16] = {0}; + size_t offset = internal_value_str->size() - kStringsValueSuffixLength; + memcpy(reserve, internal_value_str->data() + offset, kSuffixReserveLength); + + //if first bit in reserve is 0 , then this obj is string; else the obj is hyperloglog + return (reserve[0] & hyperloglog_reserve_flag) != 0;; +} + +Status Redis::HyperloglogGet(const Slice &key, std::string* value) { + value->clear(); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), value); + std::string meta_value = *value; + if (!s.ok()) { + return s; + } + if (!ExpectedMetaValue(DataType::kStrings, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + "hyperloglog " + "get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } else if (!IsHyperloglogObj(value)) { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ",expect type: " + "hyperloglog " + "get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } else { + ParsedStringsValue parsed_strings_value(value); + if (parsed_strings_value.IsStale()) { + value->clear(); + return Status::NotFound("Stale"); + } else { + parsed_strings_value.StripSuffix(); + } + } + return s; +} + +Status Redis::HyperloglogSet(const Slice &key, const Slice &value) { + HyperloglogValue hyperloglog_value(value); + ScopeRecordLock l(lock_mgr_, key); + + BaseKey base_key(key); + return db_->Put(default_write_options_, base_key.Encode(), hyperloglog_value.Encode()); +} + +} // namespace storage \ No newline at end of file diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index ddeac6dd37..a264783927 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -1555,7 +1555,7 @@ Status Storage::PfAdd(const Slice& key, const std::vector& values, std::string registers; std::string result; auto& inst = GetDBInstance(key); - Status s = inst->Get(key, &value); + Status s = inst->HyperloglogGet(key, &value); if (s.ok()) { registers = value; } else if (s.IsNotFound()) { @@ -1573,7 +1573,7 @@ Status Storage::PfAdd(const Slice& key, const std::vector& values, if (previous != now || (s.IsNotFound() && values.empty())) { *update = true; } - s = inst->Set(key, result); + s = inst->HyperloglogSet(key, result); return s; } @@ -1585,19 +1585,20 @@ Status Storage::PfCount(const std::vector& keys, int64_t* result) { std::string value; std::string first_registers; auto& inst = GetDBInstance(keys[0]); - Status s = inst->Get(keys[0], &value); + Status s = inst->HyperloglogGet(keys[0], &value); if (s.ok()) { first_registers = std::string(value.data(), 
value.size());
   } else if (s.IsNotFound()) {
     first_registers = "";
+  } else {
+    return s;
+  }
-
   HyperLogLog first_log(kPrecision, first_registers);
   for (size_t i = 1; i < keys.size(); ++i) {
     std::string value;
     std::string registers;
     auto& inst = GetDBInstance(keys[i]);
-    s = inst->Get(keys[i], &value);
+    s = inst->HyperloglogGet(keys[i], &value);
     if (s.ok()) {
       registers = value;
     } else if (s.IsNotFound()) {
@@ -1622,7 +1623,7 @@ Status Storage::PfMerge(const std::vector<std::string>& keys, std::string& value
   std::string first_registers;
   std::string result;
   auto& inst = GetDBInstance(keys[0]);
-  s = inst->Get(keys[0], &value);
+  s = inst->HyperloglogGet(keys[0], &value);
   if (s.ok()) {
     first_registers = std::string(value.data(), value.size());
   } else if (s.IsNotFound()) {
@@ -1635,7 +1636,7 @@ Status Storage::PfMerge(const std::vector<std::string>& keys, std::string& value
     std::string value;
     std::string registers;
     auto& tmp_inst = GetDBInstance(keys[i]);
-    s = tmp_inst->Get(keys[i], &value);
+    s = tmp_inst->HyperloglogGet(keys[i], &value);
     if (s.ok()) {
       registers = std::string(value.data(), value.size());
     } else if (s.IsNotFound()) {
@@ -1647,7 +1648,7 @@ Status Storage::PfMerge(const std::vector<std::string>& keys, std::string& value
     result = first_log.Merge(log);
   }
   auto& ninst = GetDBInstance(keys[0]);
-  s = ninst->Set(keys[0], result);
+  s = ninst->HyperloglogSet(keys[0], result);
   value_to_dest = std::move(result);
   return s;
 }
diff --git a/src/storage/src/strings_value_format.h b/src/storage/src/strings_value_format.h
index 96b9d4d279..6e001d7475 100644
--- a/src/storage/src/strings_value_format.h
+++ b/src/storage/src/strings_value_format.h
@@ -11,11 +11,15 @@
 #include "src/base_value_format.h"
 #include "storage/storage_define.h"
+
 namespace storage {
 /*
  * | type | value | reserve | cdate | timestamp |
  * | 1B   |       |   16B   |  8B   |     8B    |
+ * The first bit of the reserve field is used to isolate string and hyperloglog
 */
+// 80H = 10000000B
+constexpr uint8_t hyperloglog_reserve_flag = 0x80;
 class StringsValue : public InternalValue {
  public:
   explicit StringsValue(const rocksdb::Slice& user_value) : InternalValue(DataType::kStrings, user_value) {}
@@ -38,6 +42,29 @@
   }
 };

+class HyperloglogValue : public InternalValue {
+ public:
+  explicit HyperloglogValue(const rocksdb::Slice& user_value) : InternalValue(DataType::kStrings, user_value) {}
+  virtual rocksdb::Slice Encode() override {
+    size_t usize = user_value_.size();
+    size_t needed = usize + kSuffixReserveLength + 2 * kTimestampLength + kTypeLength;
+    char* dst = ReAllocIfNeeded(needed);
+    memcpy(dst, &type_, sizeof(type_));
+    dst += sizeof(type_);
+    char* start_pos = dst;
+
+    memcpy(dst, user_value_.data(), usize);
+    dst += usize;
+    reserve_[0] |= hyperloglog_reserve_flag;  // mark this value as a hyperloglog via the highest bit of the first reserve byte
+    memcpy(dst, reserve_, kSuffixReserveLength);
+    dst += kSuffixReserveLength;
+    EncodeFixed64(dst, ctime_);
+    dst += kTimestampLength;
+    EncodeFixed64(dst, etime_);
+    return {start_, needed};
+  }
+};
+
 class ParsedStringsValue : public ParsedInternalValue {
  public:
   // Use this constructor after rocksdb::DB::Get();
diff --git a/tests/assets/default.conf b/tests/assets/default.conf
index 468d253e89..d5d1318f5c 100644
--- a/tests/assets/default.conf
+++ b/tests/assets/default.conf
@@ -567,4 +567,4 @@ cache-lfu-decay-time: 1
 # Warning: Ensure that the Settings of rename-command on the master and slave servers are consistent
 #
 # Example:
-# rename-command : FLUSHDB 360flushdb
+# rename-command : FLUSHDB 360flushdb
\ No newline at end of file
diff --git
a/tests/unit/type/hyperloglog.tcl b/tests/unit/type/hyperloglog.tcl new file mode 100644 index 0000000000..1f719cc4d6 --- /dev/null +++ b/tests/unit/type/hyperloglog.tcl @@ -0,0 +1,262 @@ +start_server {tags {"hll"}} { +# Pika does not support the pfdebug command +# test {HyperLogLog self test passes} { +# catch {r pfselftest} e +# set e +# } {OK} + + test {PFADD without arguments creates an HLL value} { + r pfadd hll + r exists hll + } {1} + + test {Approximated cardinality after creation is zero} { + r pfcount hll + } {0} + + test {PFADD returns 1 when at least 1 reg was modified} { + r pfadd hll a b c + } {1} + + test {PFADD returns 0 when no reg was modified} { + r pfadd hll a b c + } {0} + + test {PFADD works with empty string (regression)} { + r pfadd hll "" + } + + # Note that the self test stresses much better the + # cardinality estimation error. We are testing just the + # command implementation itself here. + test {PFCOUNT returns approximated cardinality of set} { + r del hll + set res {} + r pfadd hll 1 2 3 4 5 + lappend res [r pfcount hll] + # Call it again to test cached value invalidation. + r pfadd hll 6 7 8 8 9 10 + lappend res [r pfcount hll] + set res + } {5 10} + +# This parameter is not available in Pika +# test {HyperLogLogs are promote from sparse to dense} { +# r del hll +# r config set hll-sparse-max-bytes 3000 +# set n 0 +# while {$n < 100} { +# set elements {} +# for {set j 0} {$j < 100} {incr j} {lappend elements [expr rand()]} +# incr n 100 +# r pfadd hll {*}$elements +# set card [r pfcount hll] +# set err [expr {abs($card-$n)}] +# assert {$err < (double($card)/100)*5} +# if {$n < 1000} { +# assert {[r pfdebug encoding hll] eq {sparse}} +# } elseif {$n > 10000} { +# assert {[r pfdebug encoding hll] eq {dense}} +# } +# } +# } + +# Pika does not support the pfdebug command +# test {HyperLogLog sparse encoding stress test} { +# for {set x 0} {$x < 1000} {incr x} { +# r del hll1 hll2 +# set numele [randomInt 100] +# set elements {} +# for {set j 0} {$j < $numele} {incr j} { +# lappend elements [expr rand()] +# } + # Force dense representation of hll2 +# r pfadd hll2 +# r pfdebug todense hll2 +# r pfadd hll1 {*}$elements +# r pfadd hll2 {*}$elements +# assert {[r pfdebug encoding hll1] eq {sparse}} +# assert {[r pfdebug encoding hll2] eq {dense}} +# # Cardinality estimated should match exactly. 
+#            assert {[r pfcount hll1] eq [r pfcount hll2]}
+#        }
+#    }
+
+# The return value of Pika is inconsistent with Redis
+    test {Corrupted sparse HyperLogLogs are detected: Additional at tail} {
+        r del hll
+        r pfadd hll a b c
+        r append hll "hello"
+        set e {}
+        catch {r pfcount hll} e
+        set e
+    } {*WRONGTYPE*}
+
+# The return value of Pika is inconsistent with Redis
+    test {Corrupted sparse HyperLogLogs are detected: Broken magic} {
+        r del hll
+        r pfadd hll a b c
+        r setrange hll 0 "0123"
+        set e {}
+        catch {r pfcount hll} e
+        set e
+    } {*WRONGTYPE*}
+
+# The return value of Pika is inconsistent with Redis
+    test {Corrupted sparse HyperLogLogs are detected: Invalid encoding} {
+        r del hll
+        r pfadd hll a b c
+        r setrange hll 4 "x"
+        set e {}
+        catch {r pfcount hll} e
+        set e
+    } {*WRONGTYPE*}
+
+# The return value of Pika is inconsistent with Redis
+    test {Corrupted dense HyperLogLogs are detected: Wrong length} {
+        r del hll
+        r pfadd hll a b c
+        r setrange hll 4 "\x00"
+        set e {}
+        catch {r pfcount hll} e
+        set e
+    } {*WRONGTYPE*}
+
+# The return value of Pika is inconsistent with Redis
+    test {PFADD, PFCOUNT, PFMERGE type checking works} {
+        r set foo bar
+        catch {r pfadd foo 1} e
+        assert_match {*WRONGTYPE*} $e
+        catch {r pfcount foo} e
+        assert_match {*WRONGTYPE*} $e
+        catch {r pfmerge bar foo} e
+        assert_match {*WRONGTYPE*} $e
+        # catch {r pfmerge foo bar} e
+        # assert_match {*WRONGTYPE*} $e
+    }
+
+    test {PFMERGE results on the cardinality of union of sets} {
+        r del hll hll1 hll2 hll3
+        r pfadd hll1 a b c
+        r pfadd hll2 b c d
+        r pfadd hll3 c d e
+        r pfmerge hll hll1 hll2 hll3
+        r pfcount hll
+    } {5}
+
+# The return value of Pika is inconsistent with Redis
+    test {PFCOUNT multiple-keys merge returns cardinality of union} {
+        r del hll1 hll2 hll3
+        for {set x 1} {$x < 100} {incr x} {
+            # Force dense representation of hll2
+            r pfadd hll1 "foo-$x"
+            r pfadd hll2 "bar-$x"
+            r pfadd hll3 "zap-$x"
+
+            set card [r pfcount hll1 hll2 hll3]
+            set realcard [expr {$x*3}]
+            set err [expr {abs($card-$realcard)}]
+            assert {$err < (double($card)/100)*5}
+        }
+    }
+
+# The return value of Pika is inconsistent with Redis
+#    test {HYPERLOGLOG press test: 5w, 10w, 15w, 20w, 30w, 50w, 100w} {
+#        r del hll1
+#        for {set x 1} {$x <= 1000000} {incr x} {
+#            r pfadd hll1 "foo-$x"
+#            if {$x == 50000} {
+#                set card [r pfcount hll1]
+#                set realcard [expr {$x*1}]
+#                set err [expr {abs($card-$realcard)}]
+#
+#                set d_err [expr {$err * 1.0}]
+#                set d_realcard [expr {$realcard * 1.0}]
+#                set err_percentage [expr {double($d_err / $d_realcard)}]
+#                puts "$x error rate: $err_percentage"
+#                assert {$err < $realcard * 0.01}
+#            }
+#            if {$x == 100000} {
+#                set card [r pfcount hll1]
+#                set realcard [expr {$x*1}]
+#                set err [expr {abs($card-$realcard)}]
+#
+#                set d_err [expr {$err * 1.0}]
+#                set d_realcard [expr {$realcard * 1.0}]
+#                set err_percentage [expr {double($d_err / $d_realcard)}]
+#                puts "$x error rate: $err_percentage"
+#                assert {$err < $realcard * 0.01}
+#            }
+#            if {$x == 150000} {
+#                set card [r pfcount hll1]
+#                set realcard [expr {$x*1}]
+#                set err [expr {abs($card-$realcard)}]
+#
+#                set d_err [expr {$err * 1.0}]
+#                set d_realcard [expr {$realcard * 1.0}]
+#                set err_percentage [expr {double($d_err / $d_realcard)}]
+#                puts "$x error rate: $err_percentage"
+#                assert {$err < $realcard * 0.01}
+#            }
+#            if {$x == 300000} {
+#                set card [r pfcount hll1]
+#                set realcard [expr {$x*1}]
+#                set err [expr {abs($card-$realcard)}]
+#
+#                set d_err [expr {$err * 1.0}]
+#                set d_realcard [expr {$realcard * 1.0}]
+#                set err_percentage [expr {double($d_err / $d_realcard)}]
+#                puts "$x error rate: $err_percentage"
+#                assert {$err < $realcard * 0.01}
+#            }
+#            if {$x == 500000} {
+#                set card [r pfcount hll1]
+#                set realcard [expr {$x*1}]
+#                set err [expr {abs($card-$realcard)}]
+#
+#                set d_err [expr {$err * 1.0}]
+#                set d_realcard [expr {$realcard * 1.0}]
+#                set err_percentage [expr {double($d_err / $d_realcard)}]
+#                puts "$x error rate: $err_percentage"
+#                assert {$err < $realcard * 0.01}
+#            }
+#            if {$x == 1000000} {
+#                set card [r pfcount hll1]
+#                set realcard [expr {$x*1}]
+#                set err [expr {abs($card-$realcard)}]
+#
+#                set d_err [expr {$err * 1.0}]
+#                set d_realcard [expr {$realcard * 1.0}]
+#                set err_percentage [expr {double($d_err / $d_realcard)}]
+#                puts "$x error rate: $err_percentage"
+#                assert {$err < $realcard * 0.03}
+#            }
+#        }
+#    }
+
+# Pika does not support the pfdebug command
+#    test {PFDEBUG GETREG returns the HyperLogLog raw registers} {
+#        r del hll
+#        r pfadd hll 1 2 3
+#        llength [r pfdebug getreg hll]
+#    } {16384}
+
+# Pika does not support the pfdebug command
+#    test {PFDEBUG GETREG returns the HyperLogLog raw registers} {
+#        r del hll
+#        r pfadd hll 1 2 3
+#        llength [r pfdebug getreg hll]
+#    } {16384}
+
+# The return value of Pika is inconsistent with Redis
+    test {PFADD / PFCOUNT cache invalidation works} {
+        r del hll
+        r pfadd hll a b c
+        r pfcount hll
+        assert {[r getrange hll 15 15] eq "\x00"}
+        r pfadd hll a b c
+        assert {[r getrange hll 15 15] eq "\x00"}
+        # r pfadd hll 1 2 3
+        # assert {[r getrange hll 15 15] eq "\x80"}
+    }
+}
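Editor's note: the 5% error bound asserted by the live tests above is deliberately loose. By the standard HyperLogLog analysis, the relative standard error with m registers is 1.04/sqrt(m); for the 16384 registers the commented PFDEBUG GETREG test expects, that works out to

    sigma = 1.04 / sqrt(16384) = 1.04 / 128 ≈ 0.81%

so the 5% threshold is roughly a 6-sigma allowance, while the commented-out press test's 1% threshold is only about 1.2 sigma, which may explain why that test is disabled here.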
From 87309468bfba2c797a0b240e48a555e890bff3cd Mon Sep 17 00:00:00 2001
From: guangkun123
Date: Wed, 26 Jun 2024 10:43:25 +0800
Subject: [PATCH 23/25] Update migrator_thread.cc (#2758)

no need c_str()
---
 tools/pika-port/pika_port_3/migrator_thread.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/pika-port/pika_port_3/migrator_thread.cc b/tools/pika-port/pika_port_3/migrator_thread.cc
index 77e52f2572..87ffcc70d5 100644
--- a/tools/pika-port/pika_port_3/migrator_thread.cc
+++ b/tools/pika-port/pika_port_3/migrator_thread.cc
@@ -60,8 +60,8 @@ void MigratorThread::MigrateStringsDB() {
     std::string cmd;
     argv.push_back("SET");
-    argv.push_back(iter->key().ToString().c_str());
-    argv.push_back(parsed_strings_value.value().ToString().c_str());
+    argv.push_back(iter->key().ToString());
+    argv.push_back(parsed_strings_value.value().ToString());
     if (ts != 0 && ttl > 0) {
       argv.push_back("EX");
       argv.push_back(std::to_string(ttl));
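Editor's note: beyond being redundant, the dropped .c_str() calls were also a potential correctness hazard: rebuilding a std::string from a const char* stops at the first embedded NUL byte, so binary values would be silently truncated in transit. A small self-contained illustration (not Pika code):

#include <iostream>
#include <string>
#include <vector>

int main() {
  // A binary value containing an embedded NUL byte.
  std::string value("ab\0cd", 5);
  std::vector<std::string> argv;
  argv.push_back(value.c_str());  // re-parsed as a C string: truncated to "ab"
  argv.push_back(value);          // copied as std::string: all 5 bytes survive
  std::cout << argv[0].size() << " vs " << argv[1].size() << "\n";  // prints: 2 vs 5
}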
From 8694da02e530bf6e74893a99ff6766258e39c066 Mon Sep 17 00:00:00 2001
From: QlQl <2458371920@qq.com>
Date: Thu, 27 Jun 2024 19:01:01 +0800
Subject: [PATCH 24/25] fix: timer task may block worker thread
---
 src/net/include/thread_pool.h |  3 +++
 src/net/src/thread_pool.cc    | 23 ++++++++++++++++++++++-
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/src/net/include/thread_pool.h b/src/net/include/thread_pool.h
index 9935512a9f..fed8f42ddb 100644
--- a/src/net/include/thread_pool.h
+++ b/src/net/include/thread_pool.h
@@ -94,6 +94,9 @@ class ThreadPool : public pstd::noncopyable {
     inline Node* Next() { return link_newer; }
   };
 
+  // re-push timer tasks that have already been popped
+  void ReDelaySchedule(Node* nodes);
+
   static inline void AsmVolatilePause() {
 #if defined(__i386__) || defined(__x86_64__)
     asm volatile("pause");
diff --git a/src/net/src/thread_pool.cc b/src/net/src/thread_pool.cc
index b0497a8606..5f067b1148 100644
--- a/src/net/src/thread_pool.cc
+++ b/src/net/src/thread_pool.cc
@@ -291,8 +291,16 @@ void ThreadPool::runInThread(const int idx) {
         time_first->Exec();
       } else {
         lock.lock();
-        rsignal.wait_for(lock, std::chrono::microseconds(exec_time - unow));
+        // if a task arrives in the meantime, handle it immediately
+        auto res = rsignal.wait_for(lock, std::chrono::microseconds(exec_time - unow), [this, &newest_node]() {
+          return newest_node.load(std::memory_order_relaxed) != nullptr || UNLIKELY(should_stop());
+        });
         lock.unlock();
+        if (res) {
+          // re-push the timer tasks
+          ReDelaySchedule(time_first);
+          goto retry;
+        }
         time_first->Exec();
       }
       tmp = time_first;
@@ -305,6 +313,19 @@ void ThreadPool::runInThread(const int idx) {
   }
 }
 
+void ThreadPool::ReDelaySchedule(Node* nodes) {
+  while (LIKELY(!should_stop()) && nodes != nullptr) {
+    auto idx = ++task_idx_;
+    auto nxt = nodes->Next();
+    nodes->link_newer = nullptr;
+    // auto node = new Node(exec_time, func, arg);
+    LinkOne(nodes, &newest_node_[idx % nlinks_]);
+    time_node_cnt_++;
+    rsignal_[idx % nlinks_].notify_all();
+    nodes = nxt;
+  }
+}
+
 ThreadPool::Node* ThreadPool::CreateMissingNewerLinks(Node* head, int* cnt) {
   assert(head != nullptr);
   assert(cnt != nullptr && *cnt == 1);

From 6e6b808cad731426b0f7fd5d76abc12fcf8142c0 Mon Sep 17 00:00:00 2001
From: QlQl <2458371920@qq.com>
Date: Sat, 13 Jul 2024 11:44:54 +0800
Subject: [PATCH 25/25] change default queue_slow_size_ and nworkers_per_link_
---
 src/net/include/thread_pool.h | 2 +-
 src/net/src/thread_pool.cc    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/net/include/thread_pool.h b/src/net/include/thread_pool.h
index fed8f42ddb..8d20957194 100644
--- a/src/net/include/thread_pool.h
+++ b/src/net/include/thread_pool.h
@@ -113,7 +113,7 @@ class ThreadPool : public pstd::noncopyable {
 
   uint16_t task_idx_;
-  const uint8_t nworkers_per_link_ = 2;  // number of workers per link
+  const uint8_t nworkers_per_link_ = 1;  // number of workers per link
   const uint8_t nlinks_;  // number of links (rounded up)
   std::vector<std::atomic<Node*>> newest_node_;
   std::atomic node_cnt_;  // for task
diff --git a/src/net/src/thread_pool.cc b/src/net/src/thread_pool.cc
index 5f067b1148..c37f08df15 100644
--- a/src/net/src/thread_pool.cc
+++ b/src/net/src/thread_pool.cc
@@ -52,7 +52,7 @@ ThreadPool::ThreadPool(size_t worker_num, size_t max_queue_size, std::string thr
       node_cnt_(0),
       time_newest_node_(nlinks_),
       time_node_cnt_(0),
-      queue_slow_size_(std::min(worker_num * 10, max_queue_size)),
+      queue_slow_size_(std::max(10UL, std::min(worker_num * max_queue_size / 100, max_queue_size))),
       max_queue_size_(max_queue_size),
       max_yield_usec_(100),
       slow_yield_usec_(3),