diff --git a/java/openmldb-batch/src/main/scala/com/_4paradigm/openmldb/batch/OpenmldbBatchConfig.scala b/java/openmldb-batch/src/main/scala/com/_4paradigm/openmldb/batch/OpenmldbBatchConfig.scala index febe53c580e..3d7c2c6adc7 100755 --- a/java/openmldb-batch/src/main/scala/com/_4paradigm/openmldb/batch/OpenmldbBatchConfig.scala +++ b/java/openmldb-batch/src/main/scala/com/_4paradigm/openmldb/batch/OpenmldbBatchConfig.scala @@ -159,7 +159,7 @@ class OpenmldbBatchConfig extends Serializable { var openmldbUser = "root" @ConfigOption(name = "openmldb.password", doc = "The password of OpenMLDB") - var openmldbPassword = "root" + var openmldbPassword = "" @ConfigOption(name = "openmldb.default.db", doc = "The default database for OpenMLDB SQL") var defaultDb = "default_db" diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index dfd9d589c9f..abf7ecdf727 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -60,7 +60,7 @@ endfunction(compile_lib) set(TEST_LIBS openmldb_test_base apiserver nameserver tablet query_response_time openmldb_sdk - openmldb_catalog client zk_client storage schema replica openmldb_codec base openmldb_proto log + openmldb_catalog client zk_client storage schema replica openmldb_codec base auth openmldb_proto log common zookeeper_mt tcmalloc_minimal ${RocksDB_LIB} ${VM_LIBS} ${LLVM_LIBS} ${ZETASQL_LIBS} ${BRPC_LIBS}) if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.1") # GNU implementation prior to 9.1 requires linking with -lstdc++fs @@ -123,7 +123,7 @@ set_property( ) add_library(openmldb_flags flags.cc) - +compile_lib(auth auth "") compile_lib(openmldb_codec codec "") compile_lib(openmldb_catalog catalog "") compile_lib(schema schema "") @@ -141,7 +141,7 @@ compile_lib(apiserver apiserver "") find_package(yaml-cpp REQUIRED) set(yaml_libs yaml-cpp) -set(BUILTIN_LIBS apiserver nameserver tablet query_response_time openmldb_sdk openmldb_catalog client zk_client replica base storage openmldb_codec schema openmldb_proto log ${RocksDB_LIB}) +set(BUILTIN_LIBS apiserver nameserver tablet query_response_time openmldb_sdk openmldb_catalog client zk_client replica base storage openmldb_codec schema openmldb_proto log auth ${RocksDB_LIB}) set(BIN_LIBS ${BUILTIN_LIBS} common zookeeper_mt tcmalloc_minimal ${VM_LIBS} @@ -152,6 +152,7 @@ ${BRPC_LIBS}) if(TESTING_ENABLE) add_subdirectory(test) compile_test(cmd) + compile_test(auth) compile_test(base) compile_test(codec) compile_test(zk) diff --git a/src/auth/auth_utils.cc b/src/auth/auth_utils.cc new file mode 100644 index 00000000000..0f79a3bf2c9 --- /dev/null +++ b/src/auth/auth_utils.cc @@ -0,0 +1,21 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "auth_utils.h"
+
+namespace openmldb::auth {
+std::string FormUserHost(const std::string& username, const std::string& host) { return username + "@" + host; }
+}  // namespace openmldb::auth
diff --git a/src/auth/auth_utils.h b/src/auth/auth_utils.h
new file mode 100644
index 00000000000..5cbf5751c58
--- /dev/null
+++ b/src/auth/auth_utils.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2021 4Paradigm
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_AUTH_AUTH_UTILS_H_
+#define SRC_AUTH_AUTH_UTILS_H_
+
+#include <string>
+
+namespace openmldb::auth {
+std::string FormUserHost(const std::string& username, const std::string& host);
+}  // namespace openmldb::auth
+
+#endif  // SRC_AUTH_AUTH_UTILS_H_
diff --git a/src/auth/brpc_authenticator.cc b/src/auth/brpc_authenticator.cc
new file mode 100644
index 00000000000..f1964334c3f
--- /dev/null
+++ b/src/auth/brpc_authenticator.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2021 4Paradigm
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "brpc_authenticator.h"
+
+#include "auth_utils.h"
+#include "butil/endpoint.h"
+
+namespace openmldb::authn {
+
+int BRPCAuthenticator::GenerateCredential(std::string* auth_str) const {
+    std::visit(
+        [auth_str](const auto& s) {
+            using T = std::decay_t<decltype(s)>;
+            if constexpr (std::is_same_v<T, UserToken>) {
+                *auth_str = "u" + s.user + ":" + s.password;
+            } else if constexpr (std::is_same_v<T, ServiceToken>) {
+                *auth_str = "s" + s.token;
+            }
+        },
+        auth_token_);
+    return 0;
+}
+
+int BRPCAuthenticator::VerifyCredential(const std::string& auth_str, const butil::EndPoint& client_addr,
+                                        brpc::AuthContext* out_ctx) const {
+    if (auth_str.length() < 2) {
+        return -1;
+    }
+
+    char auth_type = auth_str[0];
+    std::string credential = auth_str.substr(1);
+    if (auth_type == 'u') {
+        size_t pos = credential.find(':');
+        if (pos == std::string::npos) {
+            return -1;
+        }
+        std::string host = butil::ip2str(client_addr.ip).c_str();
+        std::string username = credential.substr(0, pos);
+        std::string password = credential.substr(pos + 1);
+        if (is_authenticated_(host, username, password)) {
+            out_ctx->set_user(auth::FormUserHost(username, host));
+            out_ctx->set_is_service(false);
+            return 0;
+        }
+    } else if (auth_type == 's') {
+        if (VerifyToken(credential)) {
+            out_ctx->set_is_service(true);
+            return 0;
+        }
+    }
+    return -1;
+}
+
+bool BRPCAuthenticator::VerifyToken(const std::string& token) const { return token == "default"; }
+
+}  // namespace openmldb::authn
diff --git a/src/auth/brpc_authenticator.h b/src/auth/brpc_authenticator.h
new file mode 100644
index 00000000000..2a15d13589d
--- /dev/null
+++ b/src/auth/brpc_authenticator.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2021 4Paradigm
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_AUTH_BRPC_AUTHENTICATOR_H_
+#define SRC_AUTH_BRPC_AUTHENTICATOR_H_
+#include <functional>
+#include <string>
+#include <utility>
+#include <variant>
+
+#include "brpc/authenticator.h"
+
+namespace openmldb::authn {
+
+struct ServiceToken {
+    std::string token;
+};
+
+struct UserToken {
+    std::string user, password;
+};
+
+using AuthToken = std::variant<UserToken, ServiceToken>;
+
+class BRPCAuthenticator : public brpc::Authenticator {
+ public:
+    using IsAuthenticatedFunc =
+        std::function<bool(const std::string& host, const std::string& username, const std::string& password)>;
+
+    BRPCAuthenticator() {
+        is_authenticated_ = [](const std::string& host, const std::string& username, const std::string& password) {
+            return true;
+        };
+    }
+
+    explicit BRPCAuthenticator(const AuthToken auth_token) : auth_token_(auth_token) {}
+
+    explicit BRPCAuthenticator(IsAuthenticatedFunc is_authenticated) : is_authenticated_(std::move(is_authenticated)) {}
+
+    int GenerateCredential(std::string* auth_str) const override;
+    int VerifyCredential(const std::string& auth_str, const butil::EndPoint& client_addr,
+                         brpc::AuthContext* out_ctx) const override;
+
+ private:
+    AuthToken auth_token_ = openmldb::authn::ServiceToken{"default"};
+    IsAuthenticatedFunc is_authenticated_;
+    bool VerifyToken(const std::string& token) const;
+};
+
+}  // namespace openmldb::authn
+#endif  // SRC_AUTH_BRPC_AUTHENTICATOR_H_
diff --git a/src/auth/refreshable_map.h b/src/auth/refreshable_map.h
new file mode 100644
index 00000000000..189360efd73
--- /dev/null
+++ b/src/auth/refreshable_map.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 4Paradigm
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_AUTH_REFRESHABLE_MAP_H_
+#define SRC_AUTH_REFRESHABLE_MAP_H_
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <shared_mutex>
+#include <utility>
+
+namespace openmldb::auth {
+
+template <typename Key, typename Value>
+class RefreshableMap {
+ public:
+    std::optional<Value> Get(const Key& key) const {
+        std::shared_lock lock(mutex_);
+        if (auto it = map_->find(key); it != map_->end()) {
+            return it->second;
+        }
+        return std::nullopt;
+    }
+
+    void Refresh(std::unique_ptr<std::map<Key, Value>> new_map) {
+        std::unique_lock lock(mutex_);
+        map_ = std::move(new_map);
+    }
+
+ private:
+    mutable std::shared_mutex mutex_;
+    std::shared_ptr<std::map<Key, Value>> map_;
+};
+
+}  // namespace openmldb::auth
+
+#endif  // SRC_AUTH_REFRESHABLE_MAP_H_
diff --git a/src/auth/refreshable_map_test.cc b/src/auth/refreshable_map_test.cc
new file mode 100644
index 00000000000..e57390b6b06
--- /dev/null
+++ b/src/auth/refreshable_map_test.cc
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2021 4Paradigm
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "refreshable_map.h"
+
+#include <gtest/gtest.h>
+
+#include <chrono>
+#include <map>
+#include <memory>
+#include <random>
+#include <string>
+#include <thread>
+#include <vector>
+
+namespace openmldb::auth {
+
+class RefreshableMapTest : public ::testing::Test {
+ protected:
+    virtual void SetUp() {}
+    virtual void TearDown() {}
+};
+
+TEST_F(RefreshableMapTest, GetExistingKey) {
+    auto initialMap = std::make_unique<std::map<std::string, int>>();
+    (*initialMap)["key1"] = 100;
+    RefreshableMap<std::string, int> map;
+    map.Refresh(std::move(initialMap));
+
+    auto value = map.Get("key1");
+    ASSERT_TRUE(value.has_value());
+    EXPECT_EQ(value.value(), 100);
+}
+
+TEST_F(RefreshableMapTest, GetNonExistingKey) {
+    auto initialMap = std::make_unique<std::map<std::string, int>>();
+    (*initialMap)["key1"] = 100;
+    RefreshableMap<std::string, int> map;
+    map.Refresh(std::move(initialMap));
+
+    auto value = map.Get("non_existing_key");
+    ASSERT_FALSE(value.has_value());
+}
+
+TEST_F(RefreshableMapTest, RefreshMap) {
+    auto initialMap = std::make_unique<std::map<std::string, int>>();
+    (*initialMap)["key1"] = 100;
+    RefreshableMap<std::string, int> map;
+    map.Refresh(std::move(initialMap));
+
+    auto newMap = std::make_unique<std::map<std::string, int>>();
+    (*newMap)["key2"] = 200;
+    map.Refresh(std::move(newMap));
+
+    auto oldKeyValue = map.Get("key1");
+    ASSERT_FALSE(oldKeyValue.has_value());
+
+    auto newKeyValue = map.Get("key2");
+    ASSERT_TRUE(newKeyValue.has_value());
+    EXPECT_EQ(newKeyValue.value(), 200);
+}
+
+TEST_F(RefreshableMapTest, ConcurrencySafety) {
+    auto initialMap = std::make_unique<std::map<int, int>>();
+    for (int i = 0; i < 100; ++i) {
+        (*initialMap)[i] = i;
+    }
+    RefreshableMap<int, int> map;
+    map.Refresh(std::move(initialMap));
+
+    constexpr int numReaders = 10;
+    constexpr int numWrites = 5;
+    std::vector<std::thread> threads;
+
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<> distrib(0, 99);
+
+    threads.reserve(numReaders);
+    for (int i = 0; i < numReaders; ++i) {
+        threads.emplace_back([&map, &gen, &distrib]() {
+            for (int j = 0; j < 1000; ++j) {
+                auto value = map.Get(distrib(gen));
+            }
+        });
+    }
+
+    threads.emplace_back([&map]() {
+        for (int i = 0; i < numWrites; ++i) {
+            auto newMap = std::make_unique<std::map<int, int>>();
+            for (int j = 0; j < 100; ++j) {
+                (*newMap)[j] = j + i + 1;
+            }
+            map.Refresh(std::move(newMap));
+            std::this_thread::sleep_for(std::chrono::milliseconds(10));
+        }
+    });
+
+    for (auto& thread : threads) {
+        thread.join();
+    }
+}
+
+}  // namespace openmldb::auth
+
+int main(int argc, char** argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
diff --git a/src/auth/user_access_manager.cc b/src/auth/user_access_manager.cc
new file mode 100644
index 00000000000..1a882f21d5d
--- /dev/null
+++ b/src/auth/user_access_manager.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2021 4Paradigm
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "user_access_manager.h"
+
+#include <memory>
+#include <utility>
+
+#include "auth_utils.h"
+#include "nameserver/system_table.h"
+
+namespace openmldb::auth {
+UserAccessManager::UserAccessManager(IteratorFactory iterator_factory,
+                                     std::shared_ptr<nameserver::TableInfo> user_table_info)
+    : user_table_iterator_factory_(std::move(iterator_factory)), user_table_info_(user_table_info) {
+    StartSyncTask();
+}
+
+UserAccessManager::~UserAccessManager() { StopSyncTask(); }
+
+void UserAccessManager::StartSyncTask() {
+    sync_task_running_ = true;
+    sync_task_thread_ = std::thread([this] {
+        while (sync_task_running_) {
+            SyncWithDB();
+            std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        }
+    });
+}
+
+void UserAccessManager::StopSyncTask() {
+    sync_task_running_ = false;
+    if (sync_task_thread_.joinable()) {
+        sync_task_thread_.join();
+    }
+}
+
+void UserAccessManager::SyncWithDB() {
+    auto new_user_map = std::make_unique<std::map<std::string, std::string>>();
+    auto it = user_table_iterator_factory_(::openmldb::nameserver::USER_INFO_NAME);
+    it->SeekToFirst();
+    while (it->Valid()) {
+        auto row = it->GetValue();
+        auto buf = it->GetValue().buf();
+        auto size = it->GetValue().size();
+        codec::RowView row_view(user_table_info_->column_desc(), buf, size);
+        std::string host, user, password;
+        row_view.GetStrValue(0, &host);
+        row_view.GetStrValue(1, &user);
+        row_view.GetStrValue(2, &password);
+        if (host == "%") {
+            new_user_map->emplace(user, password);
+        } else {
+            new_user_map->emplace(FormUserHost(user, host), password);
+        }
+        it->Next();
+    }
+    user_map_.Refresh(std::move(new_user_map));
+}
+
+bool UserAccessManager::IsAuthenticated(const std::string& host, const std::string& user, const std::string& password) {
+    if (auto stored_password = user_map_.Get(FormUserHost(user, host)); stored_password.has_value()) {
+        return stored_password.value() == password;
+    } else if (auto stored_password = user_map_.Get(user); stored_password.has_value()) {
+        return stored_password.value() == password;
+    }
+    return false;
+}
+}  // namespace openmldb::auth
diff --git a/src/auth/user_access_manager.h b/src/auth/user_access_manager.h
new file mode 100644
index 00000000000..c49ec6ced81
--- /dev/null
+++ b/src/auth/user_access_manager.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 4Paradigm
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_AUTH_USER_ACCESS_MANAGER_H_
+#define SRC_AUTH_USER_ACCESS_MANAGER_H_
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <string>
+#include <thread>
+
+#include "catalog/distribute_iterator.h"
+#include "refreshable_map.h"
+
+namespace openmldb::auth {
+class UserAccessManager {
+ public:
+    using IteratorFactory =
+        std::function<std::unique_ptr<::openmldb::catalog::FullTableIterator>(const std::string& table_name)>;
+
+    UserAccessManager(IteratorFactory iterator_factory, std::shared_ptr<nameserver::TableInfo> user_table_info);
+    ~UserAccessManager();
+    bool IsAuthenticated(const std::string& host, const std::string& username, const std::string& password);
+
+ private:
+    IteratorFactory user_table_iterator_factory_;
+    std::shared_ptr<nameserver::TableInfo> user_table_info_;
+    RefreshableMap<std::string, std::string> user_map_;
+    std::atomic<bool> sync_task_running_{false};
+    std::thread sync_task_thread_;
+
+    void SyncWithDB();
+    void StartSyncTask();
+    void StopSyncTask();
+};
+}  // namespace openmldb::auth
+
+#endif  // SRC_AUTH_USER_ACCESS_MANAGER_H_
diff --git a/src/client/ns_client.cc b/src/client/ns_client.cc
index 336d30ca0e8..9a4baa549bc 100644
--- a/src/client/ns_client.cc
+++ b/src/client/ns_client.cc
@@ -24,8 +24,9 @@ DECLARE_int32(request_timeout_ms);
 namespace openmldb {
 namespace client {
 
-NsClient::NsClient(const std::string& endpoint, const std::string& real_endpoint)
-    : Client(endpoint, real_endpoint), client_(real_endpoint.empty() ? endpoint : real_endpoint) {}
+NsClient::NsClient(const std::string& endpoint, const std::string& real_endpoint,
+                   const openmldb::authn::AuthToken auth_token)
+    : Client(endpoint, real_endpoint), client_(real_endpoint.empty() ? endpoint : real_endpoint, auth_token) {}
 
 int NsClient::Init() { return client_.Init(); }
 
@@ -484,7 +485,7 @@ base::Status NsClient::ChangeLeader(const std::string& name, uint32_t pid, std::
     }
     request.set_db(GetDb());
     auto st = client_.SendRequestSt(&::openmldb::nameserver::NameServer_Stub::ChangeLeader, &request, &response,
-                                     FLAGS_request_timeout_ms, 1);
+                                    FLAGS_request_timeout_ms, 1);
     if (st.OK()) {
         return {response.code(), response.msg()};
     }
@@ -519,7 +520,7 @@ base::Status NsClient::Migrate(const std::string& src_endpoint, const std::strin
         request.add_pid(pid);
     }
     auto st = client_.SendRequestSt(&::openmldb::nameserver::NameServer_Stub::Migrate, &request, &response,
-                                     FLAGS_request_timeout_ms, 1);
+                                    FLAGS_request_timeout_ms, 1);
     if (st.OK()) {
         return {response.code(), response.msg()};
     }
@@ -551,7 +552,7 @@ base::Status NsClient::RecoverTable(const std::string& name, uint32_t pid, const
     request.set_endpoint(endpoint);
     request.set_db(GetDb());
     auto st = client_.SendRequestSt(&::openmldb::nameserver::NameServer_Stub::RecoverTable, &request, &response,
-                                     FLAGS_request_timeout_ms, 1);
+                                    FLAGS_request_timeout_ms, 1);
     if (st.OK()) {
         return {response.code(), response.msg()};
     }
@@ -641,7 +642,7 @@ bool NsClient::UpdateTTL(const std::string& name, const ::openmldb::type::TTLTyp
 }
 
 bool NsClient::UpdateTTL(const std::string& db, const std::string& name, const ::openmldb::type::TTLType& type,
-                        uint64_t abs_ttl, uint64_t lat_ttl, const std::string& index_name, std::string& msg) {
+                         uint64_t abs_ttl, uint64_t lat_ttl, const std::string& index_name, std::string& msg) {
     ::openmldb::nameserver::UpdateTTLRequest request;
     ::openmldb::nameserver::UpdateTTLResponse response;
     request.set_name(name);
diff --git a/src/client/ns_client.h b/src/client/ns_client.h
index 189b0d6ee64..15a19f48ae7 100644
--- a/src/client/ns_client.h
+++ b/src/client/ns_client.h
@@ -46,7 +46,8 @@ struct TabletInfo {
 
 class NsClient : public Client {
  public:
-    explicit NsClient(const std::string&
endpoint, const std::string& real_endpoint); + explicit NsClient(const std::string& endpoint, const std::string& real_endpoint, + const openmldb::authn::AuthToken auth_token = openmldb::authn::ServiceToken{"default"}); ~NsClient() override = default; int Init() override; @@ -191,7 +192,7 @@ class NsClient : public Client { const std::string& ts_name, std::string& msg); // NOLINT bool UpdateTTL(const std::string& db, const std::string& name, const ::openmldb::type::TTLType& type, - uint64_t abs_ttl, uint64_t lat_ttl, const std::string& ts_name, std::string& msg); // NOLINT + uint64_t abs_ttl, uint64_t lat_ttl, const std::string& ts_name, std::string& msg); // NOLINT bool AddReplicaClusterByNs(const std::string& alias, const std::string& name, uint64_t term, std::string& msg); // NOLINT diff --git a/src/cmd/openmldb.cc b/src/cmd/openmldb.cc index cf2f6371491..7dfc9f410ff 100644 --- a/src/cmd/openmldb.cc +++ b/src/cmd/openmldb.cc @@ -37,6 +37,8 @@ #include "tablet/tablet_impl.h" #endif #include "apiserver/api_server_impl.h" +#include "auth/brpc_authenticator.h" +#include "auth/user_access_manager.h" #include "boost/algorithm/string.hpp" #include "boost/lexical_cast.hpp" #include "brpc/server.h" @@ -143,7 +145,20 @@ void StartNameServer() { PDLOG(WARNING, "Fail to register name"); exit(1); } + std::shared_ptr<::openmldb::nameserver::TableInfo> table_info; + if (!name_server->GetTableInfo(::openmldb::nameserver::USER_INFO_NAME, ::openmldb::nameserver::INTERNAL_DB, + &table_info)) { + PDLOG(WARNING, "Failed to get table info for user table"); + exit(1); + } + openmldb::auth::UserAccessManager user_access_manager(name_server->GetSystemTableIterator(), table_info); brpc::ServerOptions options; + openmldb::authn::BRPCAuthenticator server_authenticator( + [&user_access_manager](const std::string& host, const std::string& username, const std::string& password) { + return user_access_manager.IsAuthenticated(host, username, password); + }); + options.auth = &server_authenticator; + options.num_threads = FLAGS_thread_pool_size; brpc::Server server; if (server.AddService(name_server, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { @@ -241,6 +256,8 @@ void StartTablet() { exit(1); } brpc::ServerOptions options; + openmldb::authn::BRPCAuthenticator server_authenticator; + options.auth = &server_authenticator; options.num_threads = FLAGS_thread_pool_size; brpc::Server server; if (server.AddService(tablet, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { @@ -1592,8 +1609,7 @@ void HandleNSScan(const std::vector& parts, ::openmldb::client::NsC } it = tb_client->Scan(tid, pid, key, "", st, et, limit, msg); } catch (std::exception const& e) { - std::cout << "Invalid args. st and et should be uint64_t, limit should" - << "be uint32_t" << std::endl; + std::cout << "Invalid args. 
st and et should be uint64_t, limit should" << "be uint32_t" << std::endl; return; } } @@ -3692,8 +3708,8 @@ void StartNsClient() { } std::shared_ptr<::openmldb::zk::ZkClient> zk_client; if (!FLAGS_zk_cluster.empty()) { - zk_client = std::make_shared<::openmldb::zk::ZkClient>(FLAGS_zk_cluster, "", - FLAGS_zk_session_timeout, "", FLAGS_zk_root_path, FLAGS_zk_auth_schema, FLAGS_zk_cert); + zk_client = std::make_shared<::openmldb::zk::ZkClient>(FLAGS_zk_cluster, "", FLAGS_zk_session_timeout, "", + FLAGS_zk_root_path, FLAGS_zk_auth_schema, FLAGS_zk_cert); if (!zk_client->Init()) { std::cout << "zk client init failed" << std::endl; return; diff --git a/src/cmd/sql_cmd_test.cc b/src/cmd/sql_cmd_test.cc index fe8faa21504..cedda42a6cd 100644 --- a/src/cmd/sql_cmd_test.cc +++ b/src/cmd/sql_cmd_test.cc @@ -245,6 +245,7 @@ TEST_P(DBSDKTest, TestUser) { ASSERT_TRUE(status.IsOK()); ASSERT_TRUE(true); auto opt = sr->GetRouterOptions(); + std::this_thread::sleep_for(std::chrono::seconds(1)); // TODO(oh2024): Remove when CREATE USER becomes strongly if (cs->IsClusterMode()) { auto real_opt = std::dynamic_pointer_cast(opt); sdk::SQLRouterOptions opt1; @@ -256,6 +257,7 @@ TEST_P(DBSDKTest, TestUser) { ASSERT_TRUE(router != nullptr); sr->ExecuteSQL(absl::StrCat("ALTER USER user1 SET OPTIONS(password='abc')"), &status); ASSERT_TRUE(status.IsOK()); + std::this_thread::sleep_for(std::chrono::seconds(1)); // TODO(oh2024): Remove when CREATE USER becomes strongly router = NewClusterSQLRouter(opt1); ASSERT_FALSE(router != nullptr); } else { @@ -269,6 +271,7 @@ TEST_P(DBSDKTest, TestUser) { ASSERT_TRUE(router != nullptr); sr->ExecuteSQL(absl::StrCat("ALTER USER user1 SET OPTIONS(password='abc')"), &status); ASSERT_TRUE(status.IsOK()); + std::this_thread::sleep_for(std::chrono::seconds(1)); // TODO(oh2024): Remove when CREATE USER becomes strongly router = NewStandaloneSQLRouter(opt1); ASSERT_FALSE(router != nullptr); } diff --git a/src/nameserver/name_server_create_remote_test.cc b/src/nameserver/name_server_create_remote_test.cc index 5b6d1a7a05d..4560f9dade6 100644 --- a/src/nameserver/name_server_create_remote_test.cc +++ b/src/nameserver/name_server_create_remote_test.cc @@ -268,38 +268,38 @@ TEST_F(NameServerImplRemoteTest, CreateTableRemoteBeforeAddRepCluster) { // local ns and tablet // ns FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9631"; ::openmldb::test::TempPath tmp_path; FLAGS_db_root_path = tmp_path.GetTempPath(); + // tablet + FLAGS_endpoint = "127.0.0.1:9931"; + brpc::Server server1; + StartTablet(&server1); + + FLAGS_endpoint = "127.0.0.1:9631"; NameServerImpl* nameserver_1 = new NameServerImpl(); brpc::Server server; StartNameServer(server, nameserver_1); ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_1(FLAGS_endpoint, ""); name_server_client_1.Init(); - // tablet - FLAGS_endpoint = "127.0.0.1:9931"; - brpc::Server server1; - StartTablet(&server1); - // remote ns and tablet - // ns FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9632"; FLAGS_db_root_path = tmp_path.GetTempPath(); + // tablet + FLAGS_endpoint = "127.0.0.1:9932"; + brpc::Server server3; + StartTablet(&server3); + + // ns + FLAGS_endpoint = "127.0.0.1:9632"; NameServerImpl* nameserver_2 = new NameServerImpl(); brpc::Server server2; StartNameServer(server2, nameserver_2); ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_2(FLAGS_endpoint, ""); name_server_client_2.Init(); - // 
tablet - FLAGS_endpoint = "127.0.0.1:9932"; - brpc::Server server3; - StartTablet(&server3); - // test remote without db CreateTableRemoteBeforeAddRepClusterFunc(nameserver_1, nameserver_2, name_server_client_1, name_server_client_2, ""); @@ -307,40 +307,40 @@ TEST_F(NameServerImplRemoteTest, CreateTableRemoteBeforeAddRepCluster) { TEST_F(NameServerImplRemoteTest, CreateTableRemoteBeforeAddRepClusterWithDb) { // local ns and tablet - // ns FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9631"; ::openmldb::test::TempPath tmp_path; FLAGS_db_root_path = tmp_path.GetTempPath(); + // tablet + FLAGS_endpoint = "127.0.0.1:9931"; + brpc::Server server1; + StartTablet(&server1); + + // ns + FLAGS_endpoint = "127.0.0.1:9631"; NameServerImpl* nameserver_1 = new NameServerImpl(); brpc::Server server; StartNameServer(server, nameserver_1); ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_1(FLAGS_endpoint, ""); name_server_client_1.Init(); - // tablet - FLAGS_endpoint = "127.0.0.1:9931"; - brpc::Server server1; - StartTablet(&server1); - // remote ns and tablet - // ns FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9632"; FLAGS_db_root_path = tmp_path.GetTempPath(); + // tablet + FLAGS_endpoint = "127.0.0.1:9932"; + brpc::Server server3; + StartTablet(&server3); + + // ns + FLAGS_endpoint = "127.0.0.1:9632"; NameServerImpl* nameserver_2 = new NameServerImpl(); brpc::Server server2; StartNameServer(server2, nameserver_2); ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_2(FLAGS_endpoint, ""); name_server_client_2.Init(); - // tablet - FLAGS_endpoint = "127.0.0.1:9932"; - brpc::Server server3; - StartTablet(&server3); - // create db std::string db = "db" + ::openmldb::test::GenRand(); { @@ -503,95 +503,86 @@ void NameServerImplRemoteTest::CreateAndDropTableRemoteFunc( TEST_F(NameServerImplRemoteTest, CreateAndDropTableRemoteWithDb) { // local ns and tablet - // ns FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9631"; ::openmldb::test::TempPath tmp_path; FLAGS_db_root_path = tmp_path.GetTempPath(); + // tablet + FLAGS_endpoint = "127.0.0.1:9931"; + brpc::Server server1; + StartTablet(&server1); + + // ns + FLAGS_endpoint = "127.0.0.1:9631"; NameServerImpl* nameserver_1 = new NameServerImpl(); brpc::Server server; StartNameServer(server, nameserver_1); ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_1(FLAGS_endpoint, ""); name_server_client_1.Init(); - // tablet - FLAGS_endpoint = "127.0.0.1:9931"; - brpc::Server server1; - StartTablet(&server1); - // remote ns and tablet - // ns FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9632"; FLAGS_db_root_path = tmp_path.GetTempPath(); + // tablet + FLAGS_endpoint = "127.0.0.1:9932"; + brpc::Server server3; + StartTablet(&server3); + + // ns + FLAGS_endpoint = "127.0.0.1:9632"; NameServerImpl* nameserver_2 = new NameServerImpl(); brpc::Server server2; StartNameServer(server2, nameserver_2); ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_2(FLAGS_endpoint, ""); name_server_client_2.Init(); - // tablet - FLAGS_endpoint = "127.0.0.1:9932"; - brpc::Server server3; - StartTablet(&server3); - std::string db = "db" + ::openmldb::test::GenRand(); CreateAndDropTableRemoteFunc(nameserver_1, nameserver_2, name_server_client_1, name_server_client_2, db); } 
TEST_F(NameServerImplRemoteTest, CreateAndDropTableRemote) { // local ns and tablet - // ns FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9631"; ::openmldb::test::TempPath tmp_path; FLAGS_db_root_path = tmp_path.GetTempPath(); + // tablet + FLAGS_endpoint = "127.0.0.1:9931"; + brpc::Server server1; + StartTablet(&server1); + + // ns + FLAGS_endpoint = "127.0.0.1:9631"; NameServerImpl* nameserver_1 = new NameServerImpl(); brpc::Server server; StartNameServer(server, nameserver_1); ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_1(FLAGS_endpoint, ""); name_server_client_1.Init(); - // tablet - FLAGS_endpoint = "127.0.0.1:9931"; - brpc::Server server1; - StartTablet(&server1); - // remote ns and tablet - // ns FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9632"; FLAGS_db_root_path = tmp_path.GetTempPath(); + // tablet + FLAGS_endpoint = "127.0.0.1:9932"; + brpc::Server server3; + StartTablet(&server3); + + // ns + FLAGS_endpoint = "127.0.0.1:9632"; NameServerImpl* nameserver_2 = new NameServerImpl(); brpc::Server server2; StartNameServer(server2, nameserver_2); ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_2(FLAGS_endpoint, ""); name_server_client_2.Init(); - // tablet - FLAGS_endpoint = "127.0.0.1:9932"; - brpc::Server server3; - StartTablet(&server3); - CreateAndDropTableRemoteFunc(nameserver_1, nameserver_2, name_server_client_1, name_server_client_2, ""); } TEST_F(NameServerImplRemoteTest, CreateTableInfo) { // local ns and tablet - // ns FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9631"; - - brpc::Server server; - NameServerImpl* nameserver_1 = new NameServerImpl(); - StartNameServer(server, nameserver_1); - - ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_1(FLAGS_endpoint, ""); - name_server_client_1.Init(); // tablet FLAGS_endpoint = "127.0.0.1:9931"; @@ -610,15 +601,16 @@ TEST_F(NameServerImplRemoteTest, CreateTableInfo) { brpc::Server server3; StartTablet(&server3); - // remote ns and tablet // ns - FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9632"; + FLAGS_endpoint = "127.0.0.1:9631"; + brpc::Server server; + NameServerImpl* nameserver_1 = new NameServerImpl(); + StartNameServer(server, nameserver_1); + ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_1(FLAGS_endpoint, ""); + name_server_client_1.Init(); - brpc::Server server4; - StartNameServer(server4); - ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_2(FLAGS_endpoint, ""); - name_server_client_2.Init(); + // remote ns and tablet + FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); // tablet FLAGS_endpoint = "127.0.0.1:9932"; @@ -631,6 +623,13 @@ TEST_F(NameServerImplRemoteTest, CreateTableInfo) { brpc::Server server6; StartTablet(&server6); + // ns + FLAGS_endpoint = "127.0.0.1:9632"; + brpc::Server server4; + StartNameServer(server4); + ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_2(FLAGS_endpoint, ""); + name_server_client_2.Init(); + bool ok = false; { ::openmldb::nameserver::SwitchModeRequest request; @@ -1011,15 +1010,7 @@ TEST_F(NameServerImplRemoteTest, CreateTableInfo) { TEST_F(NameServerImplRemoteTest, CreateTableInfoSimply) { // local ns and tablet - // ns FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - 
FLAGS_endpoint = "127.0.0.1:9631"; - - NameServerImpl* nameserver_1 = new NameServerImpl(); - brpc::Server server; - StartNameServer(server, nameserver_1); - ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_1(FLAGS_endpoint, ""); - name_server_client_1.Init(); // tablet FLAGS_endpoint = "127.0.0.1:9931"; @@ -1038,15 +1029,16 @@ TEST_F(NameServerImplRemoteTest, CreateTableInfoSimply) { brpc::Server server3; StartTablet(&server3); - // remote ns and tablet // ns - FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9632"; + FLAGS_endpoint = "127.0.0.1:9631"; + NameServerImpl* nameserver_1 = new NameServerImpl(); + brpc::Server server; + StartNameServer(server, nameserver_1); + ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_1(FLAGS_endpoint, ""); + name_server_client_1.Init(); - brpc::Server server4; - StartNameServer(server4); - ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_2(FLAGS_endpoint, ""); - name_server_client_2.Init(); + // remote ns and tablet + FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); // tablet FLAGS_endpoint = "127.0.0.1:9932"; @@ -1059,6 +1051,13 @@ TEST_F(NameServerImplRemoteTest, CreateTableInfoSimply) { brpc::Server server6; StartTablet(&server6); + // ns + FLAGS_endpoint = "127.0.0.1:9632"; + brpc::Server server4; + StartNameServer(server4); + ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client_2(FLAGS_endpoint, ""); + name_server_client_2.Init(); + bool ok = false; { ::openmldb::nameserver::SwitchModeRequest request; diff --git a/src/nameserver/name_server_impl.cc b/src/nameserver/name_server_impl.cc index 540e4b13ff8..07f4b12d942 100644 --- a/src/nameserver/name_server_impl.cc +++ b/src/nameserver/name_server_impl.cc @@ -1029,8 +1029,8 @@ int NameServerImpl::CreateMakeSnapshotOPTask(std::shared_ptr op_data) { if (request.has_offset() && request.offset() > 0) { end_offset = request.offset(); } - auto task = CreateTask(op_data->op_info_.op_id(), - ::openmldb::api::OPType::kMakeSnapshotOP, endpoint, tid, pid, end_offset); + auto task = CreateTask(op_data->op_info_.op_id(), ::openmldb::api::OPType::kMakeSnapshotOP, + endpoint, tid, pid, end_offset); if (!task) { PDLOG(WARNING, "create makesnapshot task failed. 
tid[%u] pid[%u]", tid, pid);
         return -1;
     }
@@ -1377,6 +1377,47 @@ void NameServerImpl::ShowTablet(RpcController* controller, const ShowTabletReque
     response->set_msg("ok");
 }
 
+base::Status NameServerImpl::InsertUserRecord(const std::string& host, const std::string& user,
+                                              const std::string& password) {
+    std::shared_ptr<TableInfo> table_info;
+    if (!GetTableInfo(USER_INFO_NAME, INTERNAL_DB, &table_info)) {
+        return {ReturnCode::kTableIsNotExist, "user table does not exist"};
+    }
+
+    std::vector<std::string> row_values;
+    row_values.push_back(host);
+    row_values.push_back(user);
+    row_values.push_back(password);
+    row_values.push_back("");  // password_last_changed
+    row_values.push_back("");  // password_expired_time
+    row_values.push_back("");  // create_time
+    row_values.push_back("");  // update_time
+    row_values.push_back("");  // account_type
+    row_values.push_back("");  // privileges
+    row_values.push_back("");  // extra_info
+
+    std::string encoded_row;
+    codec::RowCodec::EncodeRow(row_values, table_info->column_desc(), 1, encoded_row);
+    std::vector<std::pair<std::string, uint32_t>> dimensions;
+    dimensions.push_back({host + "|" + user, 0});
+
+    uint32_t tid = table_info->tid();
+    auto table_partition = table_info->table_partition(0);  // only one partition for system table
+    for (int meta_idx = 0; meta_idx < table_partition.partition_meta_size(); meta_idx++) {
+        if (table_partition.partition_meta(meta_idx).is_leader() &&
+            table_partition.partition_meta(meta_idx).is_alive()) {
+            uint64_t cur_ts = ::baidu::common::timer::get_micros() / 1000;
+            std::string endpoint = table_partition.partition_meta(meta_idx).endpoint();
+            auto table_ptr = GetTablet(endpoint);
+            if (!table_ptr->client_->Put(tid, 0, cur_ts, encoded_row, dimensions).OK()) {
+                return {ReturnCode::kPutFailed, "failed to create initial user entry"};
+            }
+            break;
+        }
+    }
+    return {};
+}
+
 bool NameServerImpl::Init(const std::string& zk_cluster, const std::string& zk_path, const std::string& endpoint,
                           const std::string& real_endpoint) {
     if (zk_cluster.empty() && FLAGS_tablet.empty()) {
@@ -1413,7 +1454,7 @@ bool NameServerImpl::Init(const std::string& zk_cluster, const std::string& zk_p
         zone_info_.set_zone_term(1);
         LOG(INFO) << "zone name " << zone_info_.zone_name();
         zk_client_ = new ZkClient(zk_cluster, real_endpoint, FLAGS_zk_session_timeout, endpoint, zk_path,
-                                   FLAGS_zk_auth_schema, FLAGS_zk_cert);
+                                  FLAGS_zk_auth_schema, FLAGS_zk_cert);
         if (!zk_client_->Init()) {
             PDLOG(WARNING, "fail to init zookeeper with cluster[%s]", zk_cluster.c_str());
             return false;
@@ -1443,7 +1484,14 @@ bool NameServerImpl::Init(const std::string& zk_cluster, const std::string& zk_p
         dist_lock_ = new DistLock(zk_path + "/leader", zk_client_, boost::bind(&NameServerImpl::OnLocked, this),
                                   boost::bind(&NameServerImpl::OnLostLock, this), endpoint);
         dist_lock_->Lock();
-
+        if (!RecoverDb()) {
+            PDLOG(WARNING, "recover db failed!");
+            exit(1);
+        }
+        if (!RecoverTableInfo()) {
+            PDLOG(WARNING, "recover table info failed!");
+            exit(1);
+        }
     } else {
         const std::string& tablet_endpoint = FLAGS_tablet;
         startup_mode_ = ::openmldb::type::StartupMode::kStandalone;
@@ -1472,6 +1520,10 @@ bool NameServerImpl::Init(const std::string& zk_cluster, const std::string& zk_p
     task_vec_.resize(FLAGS_name_server_task_max_concurrency + FLAGS_name_server_task_concurrency_for_replica_cluster);
     task_thread_pool_.DelayTask(FLAGS_make_snapshot_check_interval,
                                 boost::bind(&NameServerImpl::SchedMakeSnapshot, this));
+    std::shared_ptr<::openmldb::nameserver::TableInfo> table_info;
+    while (!GetTableInfo(::openmldb::nameserver::USER_INFO_NAME,
::openmldb::nameserver::INTERNAL_DB, &table_info)) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } return true; } @@ -1524,7 +1576,8 @@ int NameServerImpl::UpdateTaskStatus(bool is_recover_op) { continue; } if (task->task_info_->has_endpoint() && task->task_info_->endpoint() == iter->first) { - PDLOG(WARNING, "tablet is offline. update task status from[kDoing] to[kFailed]. " + PDLOG(WARNING, + "tablet is offline. update task status from[kDoing] to[kFailed]. " "op_id[%lu], task_type[%s] endpoint[%s]", op_data->op_info_.op_id(), ::openmldb::api::TaskType_Name(task->task_info_->task_type()).c_str(), @@ -1577,7 +1630,7 @@ int NameServerImpl::UpdateTaskStatusRemote(bool is_recover_op) { continue; } client_map.emplace(iter->first, - std::atomic_load_explicit(&iter->second->client_, std::memory_order_relaxed)); + std::atomic_load_explicit(&iter->second->client_, std::memory_order_relaxed)); } } uint64_t last_task_rpc_version = task_rpc_version_.load(std::memory_order_acquire); @@ -1650,9 +1703,8 @@ int NameServerImpl::UpdateZKTaskStatus() { } // revert task index op_data->op_info_.set_task_index(cur_task_index); - PDLOG(WARNING, "set zk status value failed! node[%s] op_id[%lu] op_type[%s] task_index[%u]", - node.c_str(), op_data->GetOpId(), op_data->GetReadableType().c_str(), - op_data->op_info_.task_index()); + PDLOG(WARNING, "set zk status value failed! node[%s] op_id[%lu] op_type[%s] task_index[%u]", node.c_str(), + op_data->GetOpId(), op_data->GetReadableType().c_str(), op_data->op_info_.task_index()); } } return 0; @@ -1673,10 +1725,10 @@ void NameServerImpl::UpdateTaskMapStatus(uint64_t remote_op_id, uint64_t op_id, task_info->set_status(status); if (status == ::openmldb::api::kFailed) { DEBUGLOG("update task status from[kDoing] to[kFailed]. op_id[%lu], task_type[%s]", - task_info->op_id(), ::openmldb::api::TaskType_Name(task_info->task_type()).c_str()); + task_info->op_id(), ::openmldb::api::TaskType_Name(task_info->task_type()).c_str()); } else { DEBUGLOG("update task status from[kDoing] to[kCanceled]. op_id[%lu], task_type[%s]", - task_info->op_id(), ::openmldb::api::TaskType_Name(task_info->task_type()).c_str()); + task_info->op_id(), ::openmldb::api::TaskType_Name(task_info->task_type()).c_str()); } } if (idx == task_info->rep_cluster_op_id_size() - 1) { @@ -1684,7 +1736,7 @@ void NameServerImpl::UpdateTaskMapStatus(uint64_t remote_op_id, uint64_t op_id, task_info->status() != ::openmldb::api::kCanceled) { task_info->set_status(status); DEBUGLOG("update task status from[kDoing] to[kDone]. op_id[%lu], task_type[%s]", - task_info->op_id(), ::openmldb::api::TaskType_Name(task_info->task_type()).c_str()); + task_info->op_id(), ::openmldb::api::TaskType_Name(task_info->task_type()).c_str()); } } } @@ -1815,8 +1867,7 @@ void NameServerImpl::DeleteTask(const std::vector& done_task_vec) { continue; } std::string node = absl::StrCat(zk_path_.op_data_path_, "/", op_id); - if (!op_data->task_list_.empty() && - op_data->task_list_.front()->GetStatus() == ::openmldb::api::kFailed) { + if (!op_data->task_list_.empty() && op_data->task_list_.front()->GetStatus() == ::openmldb::api::kFailed) { op_data->SetTaskStatus(::openmldb::api::kFailed); op_data->op_info_.set_end_time(::baidu::common::timer::now_time()); PDLOG(WARNING, "set op[%s] status failed. 
op_id[%lu]", op_data->GetReadableType().c_str(), op_id); @@ -1879,27 +1930,28 @@ void NameServerImpl::ProcessTask() { op_data->SetTaskStatus(::openmldb::api::kDoing); std::string value; op_data->op_info_.SerializeToString(&value); - std::string node = absl::StrCat(zk_path_.op_data_path_ , "/", op_data->GetOpId()); + std::string node = absl::StrCat(zk_path_.op_data_path_, "/", op_data->GetOpId()); if (!zk_client_->SetNodeValue(node, value)) { - PDLOG(WARNING, "set zk op status value failed. node[%s] value[%s]", - node.c_str(), value.c_str()); + PDLOG(WARNING, "set zk op status value failed. node[%s] value[%s]", node.c_str(), + value.c_str()); op_data->SetTaskStatus(::openmldb::api::kInited); continue; } } std::shared_ptr task = op_data->task_list_.front(); if (task->GetStatus() == ::openmldb::api::kFailed) { - PDLOG(WARNING, "task[%s] run failed, terminate op[%s]. op_id[%lu]", - task->GetReadableType().c_str(), task->GetReadableOpType().c_str(), task->GetOpId()); + PDLOG(WARNING, "task[%s] run failed, terminate op[%s]. op_id[%lu]", task->GetReadableType().c_str(), + task->GetReadableOpType().c_str(), task->GetOpId()); } else if (task->task_info_->status() == ::openmldb::api::kInited) { - DEBUGLOG("run task. opid[%lu] op_type[%s] task_type[%s]", - task->GetOpId(), task->GetReadableOpType().c_str(), task->GetReadableType().c_str()); + DEBUGLOG("run task. opid[%lu] op_type[%s] task_type[%s]", task->GetOpId(), + task->GetReadableOpType().c_str(), task->GetReadableType().c_str()); task_thread_pool_.AddTask(task->fun_); task->SetStatus(::openmldb::api::kDoing); } else if (task->GetStatus() == ::openmldb::api::kDoing) { uint64_t cur_ts = ::baidu::common::timer::now_time(); if (cur_ts - op_data->op_info_.start_time() > FLAGS_name_server_op_execute_timeout / 1000) { - PDLOG(INFO, "The execution time of op is too long. opid[%lu] op_type[%s] cur task_type[%s] " + PDLOG(INFO, + "The execution time of op is too long. opid[%lu] op_type[%s] cur task_type[%s] " "start_time[%lu] cur_time[%lu]", task->GetOpId(), task->GetReadableOpType().c_str(), task->GetReadableType().c_str(), op_data->op_info_.start_time(), cur_ts); @@ -2049,7 +2101,7 @@ void NameServerImpl::MakeSnapshotNS(RpcController* controller, const MakeSnapsho return; } else { thread_pool_.AddTask(boost::bind(&NameServerImpl::MakeTablePartitionSnapshot, this, request->pid(), - request->offset(), table_info)); + request->offset(), table_info)); response->set_code(::openmldb::base::ReturnCode::kOk); return; } @@ -2183,8 +2235,8 @@ int NameServerImpl::SetPartitionInfo(TableInfo& table_info) { } base::Status NameServerImpl::CreateTableOnTablet(const std::shared_ptr<::openmldb::nameserver::TableInfo>& table_info, - bool is_leader, uint64_t term, - std::map>* endpoint_map) { + bool is_leader, uint64_t term, + std::map>* endpoint_map) { ::openmldb::type::CompressType compress_type = ::openmldb::type::CompressType::kNoCompress; if (table_info->compress_type() == ::openmldb::type::kSnappy) { compress_type = ::openmldb::type::CompressType::kSnappy; @@ -2249,8 +2301,8 @@ base::Status NameServerImpl::CreateTableOnTablet(const std::shared_ptr<::openmld table_meta.set_mode(::openmldb::api::TableMode::kTableFollower); } if (auto status = tablet_ptr->client_->CreateTable(table_meta); !status.OK()) { - PDLOG(WARNING, "create table failed. tid[%u] pid[%u] endpoint[%s] msg[%s]", - table_info->tid(), pid, endpoint.c_str(), status.GetMsg().c_str()); + PDLOG(WARNING, "create table failed. 
tid[%u] pid[%u] endpoint[%s] msg[%s]", table_info->tid(), pid, + endpoint.c_str(), status.GetMsg().c_str()); return status; } PDLOG(INFO, "create table success. tid[%u] pid[%u] endpoint[%s] idx[%d]", table_info->tid(), pid, @@ -2661,7 +2713,7 @@ void NameServerImpl::DeleteOP(RpcController* controller, const DeleteOPRequest* return; } if (!request->has_op_id() && (request->status() == ::openmldb::api::TaskStatus::kInited || - request->status() == ::openmldb::api::TaskStatus::kDoing)) { + request->status() == ::openmldb::api::TaskStatus::kDoing)) { response->set_code(::openmldb::base::ReturnCode::kInvalidParameter); response->set_msg("cannot delete the Inited OP"); PDLOG(WARNING, "cannot delete the Inited OP"); @@ -2669,13 +2721,12 @@ void NameServerImpl::DeleteOP(RpcController* controller, const DeleteOPRequest* } response->set_code(::openmldb::base::ReturnCode::kOk); response->set_msg("ok"); - auto need_delete = [] (const DeleteOPRequest* request, const ::openmldb::api::OPInfo& op_info) -> bool { + auto need_delete = [](const DeleteOPRequest* request, const ::openmldb::api::OPInfo& op_info) -> bool { if (request->has_op_id()) { if (op_info.op_id() != request->op_id()) { return false; } - } else if (op_info.task_status() != request->status() || - (request->has_db() && request->db() != op_info.db())) { + } else if (op_info.task_status() != request->status() || (request->has_db() && request->db() != op_info.db())) { return false; } return true; @@ -2695,7 +2746,7 @@ void NameServerImpl::DeleteOP(RpcController* controller, const DeleteOPRequest* const auto& op_info = (*iter)->op_info_; if (need_delete(request, op_info)) { if (op_info.task_status() != api::TaskStatus::kDone && - !delete_zk_op(zk_client_, zk_path_.op_data_path_, op_info.op_id())) { + !delete_zk_op(zk_client_, zk_path_.op_data_path_, op_info.op_id())) { response->set_code(base::ReturnCode::kDelZkFailed); response->set_msg("delete zk op_node failed"); return; @@ -2716,7 +2767,7 @@ void NameServerImpl::DeleteOP(RpcController* controller, const DeleteOPRequest* const auto& op_info = (*iter)->op_info_; if (need_delete(request, op_info)) { if (op_info.task_status() != api::TaskStatus::kDone && - !delete_zk_op(zk_client_, zk_path_.op_data_path_, op_info.op_id())) { + !delete_zk_op(zk_client_, zk_path_.op_data_path_, op_info.op_id())) { response->set_code(base::ReturnCode::kDelZkFailed); response->set_msg("delete zk op_node failed"); return; @@ -2763,7 +2814,7 @@ void NameServerImpl::CancelOP(RpcController* controller, const CancelOPRequest* for (auto& op_data : op_list) { if (op_data->op_info_.op_id() == request->op_id()) { if (op_data->op_info_.task_status() == ::openmldb::api::kInited || - (op_data->op_info_.task_status() == ::openmldb::api::kDoing)) { + (op_data->op_info_.task_status() == ::openmldb::api::kDoing)) { op_data->op_info_.set_task_status(::openmldb::api::kCanceled); for (auto& task : op_data->task_list_) { task->task_info_->set_status(::openmldb::api::kCanceled); @@ -2956,10 +3007,11 @@ void NameServerImpl::DropTableFun(const DropTableRequest* request, GeneralRespon ::openmldb::base::Status NameServerImpl::CheckZoneInfo(const ::openmldb::nameserver::ZoneInfo& zone_info) { std::lock_guard lock(mu_); if (zone_info.zone_name() != zone_info_.zone_name() || zone_info.zone_term() != zone_info_.zone_term()) { - PDLOG(WARNING, "zone_info mismathch, expect zone name[%s], zone term [%lu], " + PDLOG(WARNING, + "zone_info mismathch, expect zone name[%s], zone term [%lu], " "but zone name [%s], zone term [%u]", - 
zone_info_.zone_name().c_str(), zone_info_.zone_term(), - zone_info.zone_name().c_str(), zone_info.zone_term()); + zone_info_.zone_name().c_str(), zone_info_.zone_term(), zone_info.zone_name().c_str(), + zone_info.zone_term()); return {::openmldb::base::ReturnCode::kZoneInfoMismathch, "zone_info mismathch"}; } return {}; @@ -3114,12 +3166,12 @@ void NameServerImpl::DropTableInternel(const DropTableRequest& request, GeneralR } for (auto& op_data : op_list) { if (op_data->op_info_.for_replica_cluster() == 1 || - (task_ptr && task_ptr->op_id() == op_data->op_info_.op_id())) { + (task_ptr && task_ptr->op_id() == op_data->op_info_.op_id())) { continue; } if (op_data->op_info_.db() == db && op_data->op_info_.name() == name) { if (op_data->op_info_.task_status() == ::openmldb::api::kInited || - (op_data->op_info_.task_status() == ::openmldb::api::kDoing)) { + (op_data->op_info_.task_status() == ::openmldb::api::kDoing)) { op_data->op_info_.set_task_status(::openmldb::api::kCanceled); for (auto& task : op_data->task_list_) { task->task_info_->set_status(::openmldb::api::kCanceled); @@ -3725,8 +3777,7 @@ void NameServerImpl::CreateTable(RpcController* controller, const CreateTableReq auto status = schema::SchemaAdapter::CheckTableMeta(*table_info); if (!status.OK()) { PDLOG(WARNING, status.msg.c_str()); - base::SetResponseStatus(base::ReturnCode::kInvalidParameter, "check TableMeta failed! " + status.msg, - response); + base::SetResponseStatus(base::ReturnCode::kInvalidParameter, "check TableMeta failed! " + status.msg, response); return; } if (!request->has_zone_info()) { @@ -3800,7 +3851,7 @@ void NameServerImpl::CreateTable(RpcController* controller, const CreateTableReq } void NameServerImpl::TruncateTable(RpcController* controller, const TruncateTableRequest* request, - TruncateTableResponse* response, Closure* done) { + TruncateTableResponse* response, Closure* done) { brpc::ClosureGuard done_guard(done); const std::string& db = request->db(); const std::string& name = request->name(); @@ -3849,8 +3900,8 @@ void NameServerImpl::TruncateTable(RpcController* controller, const TruncateTabl } auto status = tablet_ptr->client_->TruncateTable(tid, pid); if (!status.OK()) { - PDLOG(WARNING, "truncate failed, tid[%u] pid[%u] endpoint[%s] msg [%s]", - tid, pid, endpoint.c_str(), status.GetMsg().c_str()); + PDLOG(WARNING, "truncate failed, tid[%u] pid[%u] endpoint[%s] msg [%s]", tid, pid, endpoint.c_str(), + status.GetMsg().c_str()); response->set_code(::openmldb::base::ReturnCode::kTruncateTableFailed); response->set_msg(status.GetMsg()); return; @@ -3909,15 +3960,15 @@ void NameServerImpl::CreateTableInternel(GeneralResponse& response, auto status = CreateTableOnTablet(table_info, false, cur_term, &endpoint_map); if (!status.OK()) { base::SetResponseStatus(status, &response); - PDLOG(WARNING, "create table failed. name[%s] tid[%u] msg[%s]", - table_info->name().c_str(), tid, status.GetMsg().c_str()); + PDLOG(WARNING, "create table failed. name[%s] tid[%u] msg[%s]", table_info->name().c_str(), tid, + status.GetMsg().c_str()); break; } status = CreateTableOnTablet(table_info, true, cur_term, &endpoint_map); if (!status.OK()) { base::SetResponseStatus(status, &response); - PDLOG(WARNING, "create table failed. name[%s] tid[%u] msg[%s]", - table_info->name().c_str(), tid, status.GetMsg().c_str()); + PDLOG(WARNING, "create table failed. 
name[%s] tid[%u] msg[%s]", table_info->name().c_str(), tid, + status.GetMsg().c_str()); break; } if (!IsClusterMode()) { @@ -4053,15 +4104,15 @@ int NameServerImpl::CreateAddReplicaSimplyRemoteOPTask(std::shared_ptr o uint64_t op_index = op_data->op_info_.op_id(); auto op_type = ::openmldb::api::OPType::kAddReplicaSimplyRemoteOP; auto task = CreateTask(op_index, op_type, leader_endpoint, tid, pid, - add_replica_data.endpoint(), add_replica_data.remote_tid()); + add_replica_data.endpoint(), add_replica_data.remote_tid()); if (!task) { - PDLOG(WARNING, "create addreplica task failed. leader cluster tid[%u] replica cluster tid[%u] pid[%u]", - tid, add_replica_data.remote_tid(), pid); + PDLOG(WARNING, "create addreplica task failed. leader cluster tid[%u] replica cluster tid[%u] pid[%u]", tid, + add_replica_data.remote_tid(), pid); return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, add_replica_data.name(), - add_replica_data.db(), pid, add_replica_data.endpoint(), alias, add_replica_data.remote_tid()); + task = CreateTask(op_index, op_type, add_replica_data.name(), add_replica_data.db(), pid, + add_replica_data.endpoint(), alias, add_replica_data.remote_tid()); if (!task) { PDLOG(WARNING, "create addtableinfo task failed. tid[%u] pid[%u]", tid, pid); return -1; @@ -4148,8 +4199,7 @@ int NameServerImpl::CreateAddReplicaRemoteOPTask(std::shared_ptr op_data return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, leader_endpoint, - tid, remote_tid, pid, endpoint); + task = CreateTask(op_index, op_type, leader_endpoint, tid, remote_tid, pid, endpoint); if (!task) { PDLOG(WARNING, "create sendsnapshot task failed. leader cluster tid[%u] replica " @@ -4169,8 +4219,8 @@ int NameServerImpl::CreateAddReplicaRemoteOPTask(std::shared_ptr op_data task = CreateTask(op_index, op_type, leader_endpoint, tid, pid, endpoint, remote_tid); if (!task) { - PDLOG(WARNING, "create addreplica task failed. leader cluster tid[%u] replica cluster tid[%u] pid[%u]", - tid, remote_tid, pid); + PDLOG(WARNING, "create addreplica task failed. leader cluster tid[%u] replica cluster tid[%u] pid[%u]", tid, + remote_tid, pid); return -1; } op_data->task_list_.push_back(task); @@ -4193,8 +4243,8 @@ int NameServerImpl::CreateAddReplicaRemoteOPTask(std::shared_ptr op_data task = CreateTask(op_index, op_type, name, alias, endpoint_vec, pid); if (!task) { PDLOG(WARNING, - "create addreplicaNS remote task failed. leader cluster tid[%u] replica cluster tid[%u] pid[%u]", - tid, remote_tid, pid); + "create addreplicaNS remote task failed. leader cluster tid[%u] replica cluster tid[%u] pid[%u]", tid, + remote_tid, pid); return -1; } op_data->task_list_.push_back(task); @@ -4448,15 +4498,14 @@ int NameServerImpl::CreateAddReplicaOPTask(std::shared_ptr op_data) { return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, leader_endpoint, - tid, tid, pid, request.endpoint()); + task = CreateTask(op_index, op_type, leader_endpoint, tid, tid, pid, request.endpoint()); if (!task) { PDLOG(WARNING, "create sendsnapshot task failed. tid[%u] pid[%u]", tid, pid); return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, request.endpoint(), - request.name(), tid, pid, seg_cnt, false, table_info->storage_mode()); + task = CreateTask(op_index, op_type, request.endpoint(), request.name(), tid, pid, seg_cnt, + false, table_info->storage_mode()); if (!task) { PDLOG(WARNING, "create loadtable task failed. 
tid[%u] pid[%u]", tid, pid); return -1; @@ -4475,22 +4524,21 @@ int NameServerImpl::CreateAddReplicaOPTask(std::shared_ptr op_data) { return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, - request.name(), request.db(), pid, request.endpoint()); + task = CreateTask(op_index, op_type, request.name(), request.db(), pid, request.endpoint()); if (!task) { PDLOG(WARNING, "create addtableinfo task failed. tid[%u] pid[%u]", tid, pid); return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, - request.name(), request.db(), pid, request.endpoint(), FLAGS_check_binlog_sync_progress_delta); + task = CreateTask(op_index, op_type, request.name(), request.db(), pid, + request.endpoint(), FLAGS_check_binlog_sync_progress_delta); if (!task) { PDLOG(WARNING, "create checkbinlogsyncprogress task failed. tid[%u] pid[%u]", tid, pid); return -1; } op_data->task_list_.push_back(task); - task = CreateTask( - op_index, op_type, request.name(), request.db(), pid, request.endpoint(), false, true); + task = CreateTask(op_index, op_type, request.name(), request.db(), pid, + request.endpoint(), false, true); if (!task) { PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", request.name().c_str(), pid, request.endpoint().c_str()); @@ -4686,20 +4734,20 @@ int NameServerImpl::CreateMigrateTask(std::shared_ptr op_data) { op_data->task_list_.push_back(task); task = CreateTask(op_index, op_type, leader_endpoint, tid, tid, pid, des_endpoint); if (!task) { - PDLOG(WARNING, "create sendsnapshot task failed. tid[%u] pid[%u] endpoint[%s] des_endpoint[%s]", - tid, pid, leader_endpoint.c_str(), des_endpoint.c_str()); + PDLOG(WARNING, "create sendsnapshot task failed. tid[%u] pid[%u] endpoint[%s] des_endpoint[%s]", tid, pid, + leader_endpoint.c_str(), des_endpoint.c_str()); return -1; } op_data->task_list_.push_back(task); task = CreateTask(op_index, op_type, leader_endpoint, tid, pid); if (!task) { - PDLOG(WARNING, "create recoversnapshot task failed. tid[%u] pid[%u] endpoint[%s] des_endpoint[%s]", - tid, pid, leader_endpoint.c_str(), des_endpoint.c_str()); + PDLOG(WARNING, "create recoversnapshot task failed. tid[%u] pid[%u] endpoint[%s] des_endpoint[%s]", tid, pid, + leader_endpoint.c_str(), des_endpoint.c_str()); return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, des_endpoint, - name, tid, pid, table_info->seg_cnt(), false, table_info->storage_mode()); + task = CreateTask(op_index, op_type, des_endpoint, name, tid, pid, table_info->seg_cnt(), false, + table_info->storage_mode()); if (!task) { PDLOG(WARNING, "create loadtable task failed. tid[%u] pid[%u] endpoint[%s]", tid, pid, des_endpoint.c_str()); return -1; @@ -4716,13 +4764,13 @@ int NameServerImpl::CreateMigrateTask(std::shared_ptr op_data) { op_data->task_list_.push_back(task); task = CreateTask(op_index, op_type, name, db, pid, des_endpoint); if (!task) { - PDLOG(WARNING, "create addtableinfo task failed. tid[%u] pid[%u] endpoint[%s] des_endpoint[%s]", - tid, pid, leader_endpoint.c_str(), des_endpoint.c_str()); + PDLOG(WARNING, "create addtableinfo task failed. 
tid[%u] pid[%u] endpoint[%s] des_endpoint[%s]", tid, pid, + leader_endpoint.c_str(), des_endpoint.c_str()); return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, - name, db, pid, des_endpoint, FLAGS_check_binlog_sync_progress_delta); + task = CreateTask(op_index, op_type, name, db, pid, des_endpoint, + FLAGS_check_binlog_sync_progress_delta); if (!task) { PDLOG(WARNING, "create CheckBinlogSyncProgressTask failed. name[%s] pid[%u]", name.c_str(), pid); return -1; @@ -4730,15 +4778,15 @@ int NameServerImpl::CreateMigrateTask(std::shared_ptr op_data) { op_data->task_list_.push_back(task); task = CreateTask(op_index, op_type, leader_endpoint, tid, pid, src_endpoint); if (!task) { - PDLOG(WARNING, "create delreplica task failed. tid[%u] pid[%u] leader[%s] follower[%s]", - tid, pid, leader_endpoint.c_str(), src_endpoint.c_str()); + PDLOG(WARNING, "create delreplica task failed. tid[%u] pid[%u] leader[%s] follower[%s]", tid, pid, + leader_endpoint.c_str(), src_endpoint.c_str()); return -1; } op_data->task_list_.push_back(task); task = CreateTask(op_index, op_type, name, db, pid, src_endpoint, des_endpoint); if (!task) { - PDLOG(WARNING, "create update table info task failed. tid[%u] pid[%u] endpoint[%s] des_endpoint[%s]", - tid, pid, src_endpoint.c_str(), des_endpoint.c_str()); + PDLOG(WARNING, "create update table info task failed. tid[%u] pid[%u] endpoint[%s] des_endpoint[%s]", tid, pid, + src_endpoint.c_str(), des_endpoint.c_str()); return -1; } op_data->task_list_.push_back(task); @@ -4748,8 +4796,8 @@ int NameServerImpl::CreateMigrateTask(std::shared_ptr op_data) { return -1; } op_data->task_list_.push_back(task); - PDLOG(INFO, "create migrate op task ok. src_endpoint[%s] name[%s] pid[%u] des_endpoint[%s]", - src_endpoint.c_str(), name.c_str(), pid, des_endpoint.c_str()); + PDLOG(INFO, "create migrate op task ok. src_endpoint[%s] name[%s] pid[%u] des_endpoint[%s]", src_endpoint.c_str(), + name.c_str(), pid, des_endpoint.c_str()); return 0; } @@ -4887,7 +4935,7 @@ int NameServerImpl::AddOPTask(const ::openmldb::api::TaskInfo& task_info, ::open } std::shared_ptr<::openmldb::api::TaskInfo> NameServerImpl::FindTask(uint64_t op_id, - ::openmldb::api::TaskType task_type) { + ::openmldb::api::TaskType task_type) { auto iter = task_map_.find(op_id); if (iter == task_map_.end()) { return std::shared_ptr<::openmldb::api::TaskInfo>(); @@ -4952,8 +5000,8 @@ int NameServerImpl::AddOPData(const std::shared_ptr& op_data, uint32_t c op_data->op_info_.SerializeToString(&value); std::string node = absl::StrCat(zk_path_.op_data_path_, "/", op_data->GetOpId()); if (!zk_client_->CreateNode(node, value)) { - PDLOG(WARNING, "create op node[%s] failed. op_index[%lu] op_type[%s]", - node.c_str(), op_data->GetOpId(), op_data->GetReadableType().c_str()); + PDLOG(WARNING, "create op node[%s] failed. op_index[%lu] op_type[%s]", node.c_str(), op_data->GetOpId(), + op_data->GetReadableType().c_str()); return -1; } uint64_t parent_id = op_data->op_info_.parent_id(); @@ -4968,8 +5016,8 @@ int NameServerImpl::AddOPData(const std::shared_ptr& op_data, uint32_t c iter++; task_vec_[idx].insert(iter, op_data); } else { - PDLOG(WARNING, "not found parent_id[%lu] with index[%u]. add op[%lu] failed, op_type[%s]", - parent_id, idx, op_data->GetOpId(), op_data->GetReadableType().c_str()); + PDLOG(WARNING, "not found parent_id[%lu] with index[%u]. 
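// [Annotation, not part of the patch] The CreateMigrateTask hunks above only reflow log
// arguments, but they do show the order of the queued tasks: sendsnapshot (leader to
// des_endpoint), recoversnapshot, loadtable on des_endpoint, addtableinfo, a
// CheckBinlogSyncProgress task bounded by FLAGS_check_binlog_sync_progress_delta, delreplica
// for src_endpoint, an update-table-info step from src_endpoint to des_endpoint, and one
// final task that is not named in this hunk before the "create migrate op task ok" log.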
add op[%lu] failed, op_type[%s]", parent_id, idx, + op_data->GetOpId(), op_data->GetReadableType().c_str()); return -1; } } else { @@ -4996,8 +5044,8 @@ void NameServerImpl::DeleteDoneOP() { break; } } - PDLOG(INFO, "done_op_list size[%u] is greater than the max_op_num[%u], delete op[%lu]", - done_op_list_.size(), (uint32_t)FLAGS_max_op_num, op_data->GetOpId()); + PDLOG(INFO, "done_op_list size[%u] is greater than the max_op_num[%u], delete op[%lu]", done_op_list_.size(), + (uint32_t)FLAGS_max_op_num, op_data->GetOpId()); done_op_list_.pop_front(); } } @@ -5164,7 +5212,7 @@ void NameServerImpl::UpdateTableStatus() { } for (int pos = 0; pos < tablet_status_response.all_table_status_size(); pos++) { std::string key = absl::StrCat(tablet_status_response.all_table_status(pos).tid(), "_", - tablet_status_response.all_table_status(pos).pid(), "_", kv.first); + tablet_status_response.all_table_status(pos).pid(), "_", kv.first); pos_response.emplace(key, tablet_status_response.all_table_status(pos)); } } @@ -5350,8 +5398,8 @@ int NameServerImpl::CreateOfflineReplicaOP(const std::string& name, const std::s return -1; } if (CreateOfflineReplicaTask(op_data) < 0) { - PDLOG(WARNING, "create offline replica task failed. table[%s] pid[%u] endpoint[%s]", - name.c_str(), pid, endpoint.c_str()); + PDLOG(WARNING, "create offline replica task failed. table[%s] pid[%u] endpoint[%s]", name.c_str(), pid, + endpoint.c_str()); return -1; } if (AddOPData(op_data, concurrency) < 0) { @@ -5392,11 +5440,10 @@ int NameServerImpl::CreateOfflineReplicaTask(std::shared_ptr op_data) { return -1; } op_data->task_list_.push_back(task); - task = CreateTask( - op_index, op_type, name, db, pid, endpoint, false, false); + task = CreateTask(op_index, op_type, name, db, pid, endpoint, false, false); if (!task) { - PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", - name.c_str(), pid, endpoint.c_str()); + PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", name.c_str(), + pid, endpoint.c_str()); return -1; } op_data->task_list_.push_back(task); @@ -5539,11 +5586,17 @@ int NameServerImpl::CreateChangeLeaderOPTask(std::shared_ptr op_data) { } void NameServerImpl::OnLocked() { - PDLOG(INFO, "become the leader name server"); if (!Recover()) { PDLOG(WARNING, "recover failed"); } CreateDatabaseOrExit(INTERNAL_DB); + if (db_table_info_[INTERNAL_DB].count(USER_INFO_NAME) == 0) { + auto temp = FLAGS_system_table_replica_num; + FLAGS_system_table_replica_num = temp == 0 ? 
1 : temp; + CreateSystemTableOrExit(SystemTableType::kUser); + FLAGS_system_table_replica_num = temp; + InsertUserRecord("%", "root", "1e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); + } if (IsClusterMode()) { if (tablets_.size() < FLAGS_system_table_replica_num) { LOG(ERROR) << "tablet num " << tablets_.size() << " is less then system table replica num " @@ -5556,10 +5609,6 @@ void NameServerImpl::OnLocked() { } } - if (FLAGS_system_table_replica_num > 0 && db_table_info_[INTERNAL_DB].count(USER_INFO_NAME) == 0) { - CreateSystemTableOrExit(SystemTableType::kUser); - } - if (FLAGS_system_table_replica_num > 0 && db_table_info_[INTERNAL_DB].count(PRE_AGG_META_NAME) == 0) { CreateSystemTableOrExit(SystemTableType::kPreAggMetaInfo); } @@ -5669,8 +5718,7 @@ int NameServerImpl::CreateRecoverTableOPTask(std::shared_ptr op_data) { } op_data->task_list_.push_back(task); } - auto task = CreateTask( - op_index, op_type, name, db, pid, endpoint, offset_delta, concurrency); + auto task = CreateTask(op_index, op_type, name, db, pid, endpoint, offset_delta, concurrency); if (!task) { PDLOG(WARNING, "create RecoverTable task failed. table[%s] pid[%u] endpoint[%s]", name.c_str(), pid, endpoint.c_str()); @@ -5734,15 +5782,15 @@ void NameServerImpl::RecoverEndpointTable(const std::string& name, const std::st } if (partition_meta.endpoint() == endpoint) { if (partition_meta.is_alive()) { - PDLOG(INFO, "endpoint[%s] is alive, need not recover. name[%s] pid[%u]", - endpoint.c_str(), name.c_str(), pid); + PDLOG(INFO, "endpoint[%s] is alive, need not recover. name[%s] pid[%u]", endpoint.c_str(), + name.c_str(), pid); task_info->set_status(::openmldb::api::TaskStatus::kDone); return; } auto tablet_iter = tablets_.find(endpoint); if (tablet_iter == tablets_.end()) { - PDLOG(WARNING, "can not find the endpoint[%s]'s client. op_id[%lu]", - endpoint.c_str(), task_info->op_id()); + PDLOG(WARNING, "can not find the endpoint[%s]'s client. op_id[%lu]", endpoint.c_str(), + task_info->op_id()); task_info->set_status(::openmldb::api::TaskStatus::kFailed); return; } @@ -5772,8 +5820,8 @@ void NameServerImpl::RecoverEndpointTable(const std::string& name, const std::st uint64_t term = 0; uint64_t offset = 0; if (!tablet_ptr->client_->GetTermPair(tid, pid, storage_mode, term, offset, has_table, is_leader)) { - PDLOG(WARNING, "GetTermPair failed. name[%s] tid[%u] pid[%u] endpoint[%s] op_id[%lu]", - name.c_str(), tid, pid, endpoint.c_str(), task_info->op_id()); + PDLOG(WARNING, "GetTermPair failed. name[%s] tid[%u] pid[%u] endpoint[%s] op_id[%lu]", name.c_str(), tid, pid, + endpoint.c_str(), task_info->op_id()); task_info->set_status(::openmldb::api::TaskStatus::kFailed); return; } @@ -5785,14 +5833,14 @@ void NameServerImpl::RecoverEndpointTable(const std::string& name, const std::st CreateReLoadTableOP(name, db, pid, endpoint, task_info->op_id(), concurrency); } task_info->set_status(::openmldb::api::TaskStatus::kDone); - PDLOG(INFO, "update task status from[kDoing] to[kDone]. op_id[%lu], task_type[%s]", - task_info->op_id(), ::openmldb::api::TaskType_Name(task_info->task_type()).c_str()); + PDLOG(INFO, "update task status from[kDoing] to[kDone]. op_id[%lu], task_type[%s]", task_info->op_id(), + ::openmldb::api::TaskType_Name(task_info->task_type()).c_str()); return; } if (has_table && is_leader) { if (!tablet_ptr->client_->ChangeRole(tid, pid, false, 0)) { - PDLOG(WARNING, "change role failed. 
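// [Illustrative sketch, not part of the patch] The new OnLocked() block above creates the
// user system table (SystemTableType::kUser) on the first leader election and seeds a
// default root record with host "%" and a hex password digest. The hashing helper that
// produces that digest is not shown in this diff; purely as an assumption, the snippet
// below shows how a plain SHA-256 hex digest could be computed with OpenSSL for comparison
// against such a stored value.
#include <openssl/sha.h>  // link with -lcrypto

#include <cstdio>
#include <string>

static std::string Sha256Hex(const std::string& input) {
    unsigned char digest[SHA256_DIGEST_LENGTH];
    SHA256(reinterpret_cast<const unsigned char*>(input.data()), input.size(), digest);
    char buf[2 * SHA256_DIGEST_LENGTH + 1];
    for (int i = 0; i < SHA256_DIGEST_LENGTH; ++i) {
        std::snprintf(buf + 2 * i, 3, "%02x", digest[i]);
    }
    return std::string(buf, 2 * SHA256_DIGEST_LENGTH);
}
// For reference, Sha256Hex("") evaluates to
// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", the digest of an
// empty password string.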
name[%s] tid[%u] pid[%u] endpoint[%s] op_id[%lu]", - name.c_str(), tid, pid, endpoint.c_str(), task_info->op_id()); + PDLOG(WARNING, "change role failed. name[%s] tid[%u] pid[%u] endpoint[%s] op_id[%lu]", name.c_str(), tid, + pid, endpoint.c_str(), task_info->op_id()); task_info->set_status(::openmldb::api::TaskStatus::kFailed); return; } @@ -5801,8 +5849,8 @@ void NameServerImpl::RecoverEndpointTable(const std::string& name, const std::st } if (!has_table) { if (!tablet_ptr->client_->DeleteBinlog(tid, pid, storage_mode)) { - PDLOG(WARNING, "delete binlog failed. name[%s] tid[%u] pid[%u] endpoint[%s] op_id[%lu]", - name.c_str(), tid, pid, endpoint.c_str(), task_info->op_id()); + PDLOG(WARNING, "delete binlog failed. name[%s] tid[%u] pid[%u] endpoint[%s] op_id[%lu]", name.c_str(), tid, + pid, endpoint.c_str(), task_info->op_id()); task_info->set_status(::openmldb::api::TaskStatus::kFailed); return; } @@ -5916,8 +5964,8 @@ int NameServerImpl::CreateReAddReplicaTask(std::shared_ptr op_data) { return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, endpoint, - name, tid, pid, seg_cnt, false, table_info->storage_mode()); + task = CreateTask(op_index, op_type, endpoint, name, tid, pid, seg_cnt, false, + table_info->storage_mode()); if (!task) { PDLOG(WARNING, "create loadtable task failed. tid[%u] pid[%u]", tid, pid); return -1; @@ -5935,8 +5983,7 @@ int NameServerImpl::CreateReAddReplicaTask(std::shared_ptr op_data) { return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, - name, db, pid, endpoint, offset_delta); + task = CreateTask(op_index, op_type, name, db, pid, endpoint, offset_delta); if (!task) { PDLOG(WARNING, "create CheckBinlogSyncProgressTask failed. name[%s] pid[%u]", name.c_str(), pid); return -1; @@ -5944,8 +5991,8 @@ int NameServerImpl::CreateReAddReplicaTask(std::shared_ptr op_data) { op_data->task_list_.push_back(task); task = CreateTask(op_index, op_type, name, db, pid, endpoint, false, true); if (!task) { - PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", - name.c_str(), pid, endpoint.c_str()); + PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", name.c_str(), + pid, endpoint.c_str()); return -1; } op_data->task_list_.push_back(task); @@ -6035,8 +6082,8 @@ int NameServerImpl::CreateReAddReplicaWithDropTask(std::shared_ptr op_da return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, endpoint, - name, tid, pid, seg_cnt, false, table_info->storage_mode()); + task = CreateTask(op_index, op_type, endpoint, name, tid, pid, seg_cnt, false, + table_info->storage_mode()); if (!task) { PDLOG(WARNING, "create loadtable task failed. tid[%u] pid[%u]", tid, pid); return -1; @@ -6054,8 +6101,7 @@ int NameServerImpl::CreateReAddReplicaWithDropTask(std::shared_ptr op_da return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, - name, db, pid, endpoint, offset_delta); + task = CreateTask(op_index, op_type, name, db, pid, endpoint, offset_delta); if (!task) { PDLOG(WARNING, "create CheckBinlogSyncProgressTask failed. name[%s] pid[%u]", name.c_str(), pid); return -1; @@ -6063,8 +6109,8 @@ int NameServerImpl::CreateReAddReplicaWithDropTask(std::shared_ptr op_da op_data->task_list_.push_back(task); task = CreateTask(op_index, op_type, name, db, pid, endpoint, false, true); if (!task) { - PDLOG(WARNING, "create update table alive status task failed. 
table[%s] pid[%u] endpoint[%s]", - name.c_str(), pid, endpoint.c_str()); + PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", name.c_str(), + pid, endpoint.c_str()); return -1; } op_data->task_list_.push_back(task); @@ -6145,8 +6191,8 @@ int NameServerImpl::CreateReAddReplicaNoSendTask(std::shared_ptr op_data return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, endpoint, - name, tid, pid, seg_cnt, false, table_info->storage_mode()); + task = CreateTask(op_index, op_type, endpoint, name, tid, pid, seg_cnt, false, + table_info->storage_mode()); if (!task) { PDLOG(WARNING, "create loadtable task failed. tid[%u] pid[%u]", tid, pid); return -1; @@ -6164,8 +6210,7 @@ int NameServerImpl::CreateReAddReplicaNoSendTask(std::shared_ptr op_data return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, - name, db, pid, endpoint, offset_delta); + task = CreateTask(op_index, op_type, name, db, pid, endpoint, offset_delta); if (!task) { PDLOG(WARNING, "create CheckBinlogSyncProgressTask failed. name[%s] pid[%u]", name.c_str(), pid); return -1; @@ -6173,8 +6218,8 @@ int NameServerImpl::CreateReAddReplicaNoSendTask(std::shared_ptr op_data op_data->task_list_.push_back(task); task = CreateTask(op_index, op_type, name, db, pid, endpoint, false, true); if (!task) { - PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", - name.c_str(), pid, endpoint.c_str()); + PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", name.c_str(), + pid, endpoint.c_str()); return -1; } op_data->task_list_.push_back(task); @@ -6270,8 +6315,7 @@ int NameServerImpl::CreateReAddReplicaSimplifyTask(std::shared_ptr op_da return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, - name, db, pid, endpoint, offset_delta); + task = CreateTask(op_index, op_type, name, db, pid, endpoint, offset_delta); if (!task) { PDLOG(WARNING, "create CheckBinlogSyncProgressTask failed. name[%s] pid[%u]", name.c_str(), pid); return -1; @@ -6279,8 +6323,8 @@ int NameServerImpl::CreateReAddReplicaSimplifyTask(std::shared_ptr op_da op_data->task_list_.push_back(task); task = CreateTask(op_index, op_type, name, db, pid, endpoint, false, true); if (!task) { - PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", - name.c_str(), pid, endpoint.c_str()); + PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", name.c_str(), + pid, endpoint.c_str()); return -1; } op_data->task_list_.push_back(task); @@ -6350,13 +6394,13 @@ int NameServerImpl::CreateTableRemoteOP(const ::openmldb::nameserver::TableInfo& uint32_t pid = INVALID_PID; std::shared_ptr op_data; if (CreateOPData(::openmldb::api::OPType::kCreateTableRemoteOP, value, op_data, name, db, pid, parent_id) < 0) { - PDLOG(WARNING, "create CreateTableRemoteOP data error. table[%s] pid[%u] alias[%s]", - name.c_str(), pid, alias.c_str()); + PDLOG(WARNING, "create CreateTableRemoteOP data error. table[%s] pid[%u] alias[%s]", name.c_str(), pid, + alias.c_str()); return -1; } if (CreateTableRemoteTask(op_data) < 0) { - PDLOG(WARNING, "create CreateTableRemote task failed. table[%s] pid[%u] alias[%s]", - table_info.name().c_str(), pid, alias.c_str()); + PDLOG(WARNING, "create CreateTableRemote task failed. 
table[%s] pid[%u] alias[%s]", table_info.name().c_str(), + pid, alias.c_str()); return -1; } op_data->op_info_.set_for_replica_cluster(1); @@ -6406,17 +6450,16 @@ int NameServerImpl::CreateTableRemoteTask(std::shared_ptr op_data) { PDLOG(WARNING, "get leader failed. table[%s] pid[%u]", name.c_str(), pid); return -1; } - task = CreateTask( - op_index, op_type, leader_endpoint, tid, pid, endpoint, remote_tid, idx); + task = CreateTask(op_index, op_type, leader_endpoint, tid, pid, endpoint, + remote_tid, idx); if (!task) { PDLOG(WARNING, - "create addreplica task failed. leader cluster tid[%u] replica cluster tid[%u] pid[%u]", - tid, remote_tid, pid); + "create addreplica task failed. leader cluster tid[%u] replica cluster tid[%u] pid[%u]", tid, + remote_tid, pid); return -1; } op_data->task_list_.push_back(task); - task = CreateTask(op_index, op_type, - name, db, pid, endpoint, alias, remote_tid); + task = CreateTask(op_index, op_type, name, db, pid, endpoint, alias, remote_tid); if (!task) { PDLOG(WARNING, "create addtableinfo task failed. tid[%u] pid[%u]", tid, pid); return -1; @@ -6500,8 +6543,8 @@ int NameServerImpl::CreateReLoadTableTask(std::shared_ptr op_data) { uint32_t seg_cnt = table_info->seg_cnt(); auto op_type = ::openmldb::api::OPType::kReLoadTableOP; uint64_t op_index = op_data->op_info_.op_id(); - auto task = CreateTask(op_index, op_type, endpoint, - name, tid, pid, seg_cnt, true, table_info->storage_mode()); + auto task = CreateTask(op_index, op_type, endpoint, name, tid, pid, seg_cnt, true, + table_info->storage_mode()); if (!task) { PDLOG(WARNING, "create loadtable task failed. tid[%u] pid[%u]", tid, pid); return -1; @@ -6509,8 +6552,8 @@ int NameServerImpl::CreateReLoadTableTask(std::shared_ptr op_data) { op_data->task_list_.push_back(task); task = CreateTask(op_index, op_type, name, db, pid, endpoint, true, true); if (!task) { - PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", - name.c_str(), pid, endpoint.c_str()); + PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", name.c_str(), + pid, endpoint.c_str()); return -1; } op_data->task_list_.push_back(task); @@ -6579,11 +6622,11 @@ int NameServerImpl::CreateUpdatePartitionStatusOPTask(std::shared_ptr op } uint64_t op_index = op_data->op_info_.op_id(); ::openmldb::api::OPType op_type = ::openmldb::api::OPType::kUpdatePartitionStatusOP; - auto task = CreateTask( - op_index, op_type, name, db, pid, endpoint, is_leader, is_alive); + auto task = + CreateTask(op_index, op_type, name, db, pid, endpoint, is_leader, is_alive); if (!task) { - PDLOG(WARNING, "create update table alive status task failed. table[%s] pid[%u] endpoint[%s]", - name.c_str(), pid, endpoint.c_str()); + PDLOG(WARNING, "create update table alive status task failed. 
table[%s] pid[%u] endpoint[%s]", name.c_str(), + pid, endpoint.c_str()); return -1; } op_data->task_list_.push_back(task); @@ -6649,8 +6692,9 @@ int NameServerImpl::MatchTermOffset(const std::string& name, const std::string& void NameServerImpl::WrapTaskFun(const boost::function& fun, std::shared_ptr<::openmldb::api::TaskInfo> task_info) { - std::string msg = absl::StrCat("op_id ", task_info->op_id(), " type ", - ::openmldb::api::TaskType_Name(task_info->task_type()), " ", Task::GetAdditionalMsg(*task_info)); + std::string msg = + absl::StrCat("op_id ", task_info->op_id(), " type ", ::openmldb::api::TaskType_Name(task_info->task_type()), + " ", Task::GetAdditionalMsg(*task_info)); if (!fun()) { task_info->set_status(::openmldb::api::TaskStatus::kFailed); PDLOG(WARNING, "task run failed. %s", msg.c_str()); @@ -6661,9 +6705,10 @@ void NameServerImpl::WrapTaskFun(const boost::function& fun, } void NameServerImpl::WrapNormalTaskFun(const boost::function& fun, - std::shared_ptr<::openmldb::api::TaskInfo> task_info) { - std::string msg = absl::StrCat("op_id ", task_info->op_id(), " type ", - ::openmldb::api::TaskType_Name(task_info->task_type()), " ", Task::GetAdditionalMsg(*task_info)); + std::shared_ptr<::openmldb::api::TaskInfo> task_info) { + std::string msg = + absl::StrCat("op_id ", task_info->op_id(), " type ", ::openmldb::api::TaskType_Name(task_info->task_type()), + " ", Task::GetAdditionalMsg(*task_info)); auto status = fun(); if (!status.OK()) { task_info->set_status(::openmldb::api::TaskStatus::kFailed); @@ -6970,8 +7015,8 @@ void NameServerImpl::DelTableInfo(const std::string& name, const std::string& db } if (!has_found) { task_info->set_status(::openmldb::api::TaskStatus::kFailed); - PDLOG(INFO, "not found endpoint[%s] in partition_meta. name[%s] pid[%u] op_id[%lu]", - endpoint.c_str(), name.c_str(), pid, task_info->op_id()); + PDLOG(INFO, "not found endpoint[%s] in partition_meta. name[%s] pid[%u] op_id[%lu]", endpoint.c_str(), + name.c_str(), pid, task_info->op_id()); return; } break; @@ -7184,14 +7229,14 @@ void NameServerImpl::SelectLeader(const std::string& name, const std::string& db } ChangeLeaderData change_leader_data; if (!change_leader_data.ParseFromString(op_data->op_info_.data())) { - PDLOG(WARNING, "parse change leader data failed. name[%s] pid[%u] data[%s] op_id[%lu]", - name.c_str(), pid, op_data->op_info_.data().c_str(), task_info->op_id()); + PDLOG(WARNING, "parse change leader data failed. name[%s] pid[%u] data[%s] op_id[%lu]", name.c_str(), pid, + op_data->op_info_.data().c_str(), task_info->op_id()); task_info->set_status(::openmldb::api::TaskStatus::kFailed); return; } if (change_leader_data.has_candidate_leader()) { - if (std::find(follower_endpoint.begin(), follower_endpoint.end(), change_leader_data.candidate_leader()) - == follower_endpoint.end()) { + if (std::find(follower_endpoint.begin(), follower_endpoint.end(), change_leader_data.candidate_leader()) == + follower_endpoint.end()) { PDLOG(WARNING, "candidate_leader[%s] is not follower. 
name[%s] pid[%u] op_id[%lu]", change_leader_data.candidate_leader().c_str(), name.c_str(), pid, task_info->op_id()); task_info->set_status(::openmldb::api::TaskStatus::kFailed); @@ -7215,7 +7260,7 @@ void NameServerImpl::SelectLeader(const std::string& name, const std::string& db } for (int meta_idx = 0; meta_idx < partition.partition_meta_size(); meta_idx++) { if (partition.partition_meta(meta_idx).is_alive() && - partition.partition_meta(meta_idx).is_leader()) { + partition.partition_meta(meta_idx).is_leader()) { PDLOG(WARNING, "leader is alive, need not changeleader. table name[%s] pid[%u] op_id[%lu]", name.c_str(), pid, task_info->op_id()); task_info->set_status(::openmldb::api::TaskStatus::kFailed); @@ -7226,8 +7271,8 @@ void NameServerImpl::SelectLeader(const std::string& name, const std::string& db } } if (!zk_client_->SetNodeValue(zk_path_.term_node_, std::to_string(term_ + 2))) { - PDLOG(WARNING, "update leader id node failed. table name[%s] pid[%u] op_id[%lu]", - name.c_str(), pid, task_info->op_id()); + PDLOG(WARNING, "update leader id node failed. table name[%s] pid[%u] op_id[%lu]", name.c_str(), pid, + task_info->op_id()); task_info->set_status(::openmldb::api::TaskStatus::kFailed); return; } @@ -7257,8 +7302,8 @@ void NameServerImpl::SelectLeader(const std::string& name, const std::string& db task_info->set_status(::openmldb::api::TaskStatus::kFailed); return; } - PDLOG(INFO, "FollowOfNoOne ok. term[%lu] offset[%lu] name[%s] tid[%u] pid[%u] endpoint[%s]", - cur_term, offset, name.c_str(), tid, pid, endpoint.c_str()); + PDLOG(INFO, "FollowOfNoOne ok. term[%lu] offset[%lu] name[%s] tid[%u] pid[%u] endpoint[%s]", cur_term, offset, + name.c_str(), tid, pid, endpoint.c_str()); if (offset > max_offset || leader_endpoint_vec.empty()) { max_offset = offset; leader_endpoint_vec.clear(); @@ -7470,8 +7515,8 @@ void NameServerImpl::UpdateLeaderInfo(std::shared_ptr<::openmldb::api::TaskInfo> leader_endpoint.c_str()); task_info->set_status(::openmldb::api::TaskStatus::kDone); // notify client to update table partition information - PDLOG(INFO, "update task status from[kDoing] to[kDone]. op_id[%lu], task_type[%s]", - task_info->op_id(), ::openmldb::api::TaskType_Name(task_info->task_type()).c_str()); + PDLOG(INFO, "update task status from[kDoing] to[kDone]. op_id[%lu], task_type[%s]", task_info->op_id(), + ::openmldb::api::TaskType_Name(task_info->task_type()).c_str()); return; } PDLOG(WARNING, "partition[%u] does not exist. 
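// [Illustrative sketch, not part of the patch] In the SelectLeader hunk above, every follower
// reports its offset via FollowOfNoOne and the new leader is chosen among the followers that
// share the highest offset (a caller-supplied candidate_leader must already be one of the
// followers). A standalone model of that comparison; the equal-offset branch is assumed,
// since only the reset branch is visible in this hunk:
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

std::vector<std::string> PickLeaderCandidates(
    const std::vector<std::pair<std::string, uint64_t>>& follower_offsets) {
    uint64_t max_offset = 0;
    std::vector<std::string> candidates;
    for (const auto& [endpoint, offset] : follower_offsets) {
        if (offset > max_offset || candidates.empty()) {
            // a strictly larger offset (or the very first follower) resets the candidate set
            max_offset = offset;
            candidates.assign(1, endpoint);
        } else if (offset == max_offset) {
            // equal offsets keep every matching follower as a candidate
            candidates.push_back(endpoint);
        }
    }
    return candidates;
}
// The real task also bumps the term node in ZooKeeper before issuing FollowOfNoOne; only the
// offset comparison is modeled here.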
name[%s] op_id[%lu]", pid, name.c_str(), task_info->op_id()); @@ -7565,11 +7610,11 @@ bool NameServerImpl::UpdateTTLOnTablet(const std::string& endpoint, int32_t tid, } bool ok = tablet->client_->UpdateTTL(tid, pid, ttl.ttl_type(), ttl.abs_ttl(), ttl.lat_ttl(), index_name); if (!ok) { - PDLOG(WARNING, "fail to update ttl with tid %d, pid %d, abs_ttl %lu, lat_ttl %lu, endpoint %s", - tid, pid, ttl.abs_ttl(), ttl.lat_ttl(), endpoint.c_str()); + PDLOG(WARNING, "fail to update ttl with tid %d, pid %d, abs_ttl %lu, lat_ttl %lu, endpoint %s", tid, pid, + ttl.abs_ttl(), ttl.lat_ttl(), endpoint.c_str()); } else { - PDLOG(INFO, "update ttl with tid %d pid %d abs_ttl %lu, lat_ttl %lu endpoint %s ok", - tid, pid, ttl.abs_ttl(), ttl.lat_ttl(), endpoint.c_str()); + PDLOG(INFO, "update ttl with tid %d pid %d abs_ttl %lu, lat_ttl %lu endpoint %s ok", tid, pid, ttl.abs_ttl(), + ttl.lat_ttl(), endpoint.c_str()); } return ok; } @@ -8447,10 +8492,10 @@ void NameServerImpl::DeleteIndex(RpcController* controller, const DeleteIndexReq for (const auto& partition_meta : table_partition.partition_meta()) { const std::string& endpoint = partition_meta.endpoint(); std::string msg; - if (!tablet_client_map[endpoint]->DeleteIndex(table_info->tid(), table_partition.pid(), - request->idx_name(), &msg)) { - PDLOG(WARNING, "delete index failed. name %s pid %u endpoint %s msg %s", - request->table_name().c_str(), table_partition.pid(), endpoint.c_str(), msg.c_str()); + if (!tablet_client_map[endpoint]->DeleteIndex(table_info->tid(), table_partition.pid(), request->idx_name(), + &msg)) { + PDLOG(WARNING, "delete index failed. name %s pid %u endpoint %s msg %s", request->table_name().c_str(), + table_partition.pid(), endpoint.c_str(), msg.c_str()); delete_failed = true; } } @@ -8498,9 +8543,9 @@ bool NameServerImpl::UpdateZkTableNodeWithoutNotify(const TableInfo* table_info) return true; } -base::Status NameServerImpl::AddMultiIndexs(const std::string& db, const std::string& name, - std::shared_ptr table_info, - const ::google::protobuf::RepeatedPtrField& column_keys) { +base::Status NameServerImpl::AddMultiIndexs( + const std::string& db, const std::string& name, std::shared_ptr table_info, + const ::google::protobuf::RepeatedPtrField& column_keys) { auto status = schema::IndexUtil::CheckUnique(column_keys); if (!status.OK()) { return status; @@ -8535,8 +8580,7 @@ base::Status NameServerImpl::AddMultiIndexs(const std::string& db, const std::st return {base::ReturnCode::kError, "endpoint" + meta.endpoint() + ""}; } if (!tablet->client_->AddMultiIndex(tid, pid, indexs, nullptr)) { - LOG(WARNING) << "add index failed. tid " << tid << " pid " << pid << - " endpoint " << meta.endpoint(); + LOG(WARNING) << "add index failed. 
tid " << tid << " pid " << pid << " endpoint " << meta.endpoint(); return {base::ReturnCode::kError, "add index failed"}; } endpoint_set.insert(meta.endpoint()); @@ -8671,8 +8715,8 @@ void NameServerImpl::AddIndex(RpcController* controller, const AddIndexRequest* openmldb::common::VersionPair* pair = table_info->add_schema_versions(); pair->CopyFrom(new_pair); } - if (auto status = schema::IndexUtil::CheckIndex(col_map, - schema::IndexUtil::Convert2PB(column_key_vec)); !status.OK()) { + if (auto status = schema::IndexUtil::CheckIndex(col_map, schema::IndexUtil::Convert2PB(column_key_vec)); + !status.OK()) { base::SetResponseStatus(ReturnCode::kCheckIndexFailed, status.msg, response); LOG(WARNING) << status.msg; return; @@ -8680,10 +8724,8 @@ void NameServerImpl::AddIndex(RpcController* controller, const AddIndexRequest* if (IsClusterMode() && !request->skip_load_data()) { std::lock_guard lock(mu_); if (IsExistActiveOp(db, name, api::kAddIndexOP)) { - LOG(WARNING) << "create AddIndexOP failed. there is already a task running. db " - << db << " table " << name; - base::SetResponseStatus(ReturnCode::kOPAlreadyExists, - "there is already a task running", response); + LOG(WARNING) << "create AddIndexOP failed. there is already a task running. db " << db << " table " << name; + base::SetResponseStatus(ReturnCode::kOPAlreadyExists, "there is already a task running", response); return; } auto status = CreateAddIndexOP(name, db, column_key_vec); @@ -8708,7 +8750,8 @@ void NameServerImpl::AddIndex(RpcController* controller, const AddIndexRequest* } if (!request->skip_load_data()) { auto ret = tablet_ptr->client_->ExtractIndexData(table_info->tid(), pid, - (uint32_t)table_info->table_partition_size(), column_key_vec, 0, false, nullptr); + (uint32_t)table_info->table_partition_size(), + column_key_vec, 0, false, nullptr); if (!ret) { base::SetResponseStatus(ReturnCode::kAddIndexFailed, "extract multi index failed", response); return; @@ -8726,8 +8769,8 @@ void NameServerImpl::AddIndex(RpcController* controller, const AddIndexRequest* } bool NameServerImpl::AddIndexToTableInfo(const std::string& name, const std::string& db, - const std::vector<::openmldb::common::ColumnKey>& column_key, - std::shared_ptr<::openmldb::api::TaskInfo> task_info) { + const std::vector<::openmldb::common::ColumnKey>& column_key, + std::shared_ptr<::openmldb::api::TaskInfo> task_info) { std::shared_ptr<::openmldb::nameserver::TableInfo> table_info; std::lock_guard lock(mu_); if (!GetTableInfoUnlock(name, db, &table_info)) { @@ -8772,7 +8815,7 @@ bool NameServerImpl::AddIndexToTableInfo(const std::string& name, const std::str } base::Status NameServerImpl::CreateAddIndexOP(const std::string& name, const std::string& db, - const std::vector<::openmldb::common::ColumnKey>& column_key) { + const std::vector<::openmldb::common::ColumnKey>& column_key) { std::shared_ptr<::openmldb::nameserver::TableInfo> table_info; if (!GetTableInfoUnlock(name, db, &table_info)) { return {-1, "table does not exist"}; @@ -8814,10 +8857,10 @@ base::Status NameServerImpl::CreateAddIndexOPTask(std::shared_ptr op_dat return FillAddIndexTask(op_index, op_type, name, db, column_key_vec, &op_data->task_list_); } -base::Status NameServerImpl::FillAddIndexTask(uint64_t op_index, api::OPType op_type, - const std::string& name, const std::string& db, - const std::vector<::openmldb::common::ColumnKey>& column_key, - std::list>* task_list) { +base::Status NameServerImpl::FillAddIndexTask(uint64_t op_index, api::OPType op_type, const std::string& name, + const 
std::string& db, + const std::vector<::openmldb::common::ColumnKey>& column_key, + std::list>* task_list) { std::shared_ptr<::openmldb::nameserver::TableInfo> table_info; if (!GetTableInfoUnlock(name, db, &table_info)) { return {-1, absl::StrCat("get table info failed, db ", db, " name ", name)}; @@ -8857,8 +8900,8 @@ base::Status NameServerImpl::FillAddIndexTask(uint64_t op_index, api::OPType op_ return {-1, "create add index to table info task failed"}; } task_list->push_back(task); - task = CreateTask(op_index, op_type, tid, part_size, - column_key, pid_offset_map, pid_endpoint_map); + task = CreateTask(op_index, op_type, tid, part_size, column_key, pid_offset_map, + pid_endpoint_map); if (!task) { return {-1, "create extract index task failed"}; } @@ -8878,8 +8921,8 @@ base::Status NameServerImpl::FillAddIndexTask(uint64_t op_index, api::OPType op_ void NameServerImpl::RunSubTask(std::shared_ptr task) { for (const auto& cur_task : task->sub_task_) { - PDLOG(INFO, "task starts running. op_id %lu task type %s %s", - cur_task->GetOpId(), cur_task->GetReadableType().c_str(), cur_task->GetAdditionalMsg().c_str()); + PDLOG(INFO, "task starts running. op_id %lu task type %s %s", cur_task->GetOpId(), + cur_task->GetReadableType().c_str(), cur_task->GetAdditionalMsg().c_str()); cur_task->SetStatus(::openmldb::api::TaskStatus::kDoing); cur_task->fun_(); } @@ -8887,24 +8930,23 @@ void NameServerImpl::RunSubTask(std::shared_ptr task) { void NameServerImpl::RunSeqTask(std::shared_ptr task) { if (task->seq_task_.empty()) { - PDLOG(INFO, "update task status from %s to kDone. op_id %lu task_type %s %s", - task->GetReadableStatus().c_str(), task->GetOpId(), - task->GetReadableType().c_str(), task->GetAdditionalMsg().c_str()); + PDLOG(INFO, "update task status from %s to kDone. op_id %lu task_type %s %s", task->GetReadableStatus().c_str(), + task->GetOpId(), task->GetReadableType().c_str(), task->GetAdditionalMsg().c_str()); task->SetStatus(::openmldb::api::TaskStatus::kDone); return; } auto cur_task = task->seq_task_.front(); auto task_status = cur_task->GetStatus(); if (task_status == ::openmldb::api::TaskStatus::kInited) { - PDLOG(INFO, "seq task starts running. op_id %lu task type %s %s", - cur_task->GetOpId(), cur_task->GetReadableType().c_str(), cur_task->GetAdditionalMsg().c_str()); + PDLOG(INFO, "seq task starts running. op_id %lu task type %s %s", cur_task->GetOpId(), + cur_task->GetReadableType().c_str(), cur_task->GetAdditionalMsg().c_str()); cur_task->SetStatus(::openmldb::api::TaskStatus::kDoing); cur_task->fun_(); } else if (task_status == ::openmldb::api::TaskStatus::kFailed || - task_status == ::openmldb::api::TaskStatus::kCanceled) { - PDLOG(INFO, "update task status from %s to %s. op_id %lu task_type %s %s", - task->GetReadableStatus().c_str(), cur_task->GetReadableStatus().c_str(), - task->GetOpId(), task->GetReadableType().c_str(), task->GetAdditionalMsg().c_str()); + task_status == ::openmldb::api::TaskStatus::kCanceled) { + PDLOG(INFO, "update task status from %s to %s. 
op_id %lu task_type %s %s", task->GetReadableStatus().c_str(), + cur_task->GetReadableStatus().c_str(), task->GetOpId(), task->GetReadableType().c_str(), + task->GetAdditionalMsg().c_str()); task->SetStatus(task_status); return; } else if (task_status == ::openmldb::api::TaskStatus::kDone) { @@ -8971,7 +9013,7 @@ base::Status NameServerImpl::CreateDatabase(const std::string& db_name, bool if_ continue; } auto status = std::atomic_load_explicit(&kv.second->client_, std::memory_order_relaxed) - ->CreateDatabaseRemote(db_name, zone_info_); + ->CreateDatabaseRemote(db_name, zone_info_); if (!status.OK()) { PDLOG(WARNING, "create remote database failed, msg is [%s]", status.msg.c_str()); return status; @@ -9016,7 +9058,7 @@ void NameServerImpl::ShowDatabase(RpcController* controller, const GeneralReques { std::lock_guard lock(mu_); for (const auto& db : databases_) { - if (db != INTERNAL_DB && db != INFORMATION_SCHEMA_DB && db!= PRE_AGG_DB) { + if (db != INTERNAL_DB && db != INFORMATION_SCHEMA_DB && db != PRE_AGG_DB) { response->add_db(db); } } @@ -9087,7 +9129,7 @@ void NameServerImpl::DropDatabase(RpcController* controller, const DropDatabaseR continue; } auto status = std::atomic_load_explicit(&kv.second->client_, std::memory_order_relaxed) - ->DropDatabaseRemote(request->db(), zone_info_); + ->DropDatabaseRemote(request->db(), zone_info_); if (!status.OK()) { PDLOG(WARNING, "drop remote database failed, msg is [%s]", status.msg.c_str()); ::openmldb::base::SetResponseStatus(status, response); @@ -9386,7 +9428,7 @@ base::Status NameServerImpl::CreateProcedureInternal(const api::CreateProcedureR auto sp_info = std::make_shared(sp_request.sp_info()); const std::string& sp_db_name = sp_info->db_name(); const std::string& sp_name = sp_info->sp_name(); - const std::string sp_data_path = absl::StrCat(zk_path_.db_sp_data_path_ , "/", sp_db_name, ".", sp_name); + const std::string sp_data_path = absl::StrCat(zk_path_.db_sp_data_path_, "/", sp_db_name, ".", sp_name); auto status = CreateProcedureOnTablet(sp_request); do { if (!status.OK()) { @@ -9446,8 +9488,8 @@ base::Status NameServerImpl::CreateProcedureOnTablet(const ::openmldb::api::Crea ", endpoint: ", tb_client->GetEndpoint(), ", msg: ", status.GetMsg())}; } DLOG(INFO) << "create procedure on tablet success. 
db_name: " << sp_info.db_name() << ", " - << "sp_name: " << sp_info.sp_name() << ", " - << "sql: " << sp_info.sql() << "endpoint: " << tb_client->GetEndpoint(); + << "sp_name: " << sp_info.sp_name() << ", " << "sql: " << sp_info.sql() + << "endpoint: " << tb_client->GetEndpoint(); } return {}; } @@ -9543,6 +9585,29 @@ void NameServerImpl::DropProcedure(RpcController* controller, const api::DropPro response->set_msg("ok"); } +std::function(const std::string& table_name)> +NameServerImpl::GetSystemTableIterator() { + return [this](const std::string& table_name) -> std::unique_ptr<::openmldb::catalog::FullTableIterator> { + std::shared_ptr table_info; + if (!GetTableInfo(table_name, INTERNAL_DB, &table_info)) { + return nullptr; + } + auto tid = table_info->tid(); + auto table_partition = table_info->table_partition(0); // only one partition for system table + for (int meta_idx = 0; meta_idx < table_partition.partition_meta_size(); meta_idx++) { + if (table_partition.partition_meta(meta_idx).is_leader() && + table_partition.partition_meta(meta_idx).is_alive()) { + auto endpoint = table_partition.partition_meta(meta_idx).endpoint(); + auto table_ptr = GetTablet(endpoint); + std::map> tablet_clients = { + {0, table_ptr->client_}}; + return std::make_unique(tid, nullptr, tablet_clients); + } + } + return nullptr; + }; +} + bool NameServerImpl::RecoverProcedureInfo() { db_table_sp_map_.clear(); db_sp_table_map_.clear(); @@ -10009,8 +10074,7 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me case ::openmldb::api::TaskType::kMakeSnapshot: { auto meta = dynamic_cast(task_meta); boost::function fun = - boost::bind(&TabletClient::MakeSnapshot, client, - meta->tid, meta->pid, meta->end_offset, task_info); + boost::bind(&TabletClient::MakeSnapshot, client, meta->tid, meta->pid, meta->end_offset, task_info); task->fun_ = boost::bind(&NameServerImpl::WrapTaskFun, this, fun, task_info); break; } @@ -10030,9 +10094,8 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me } case ::openmldb::api::TaskType::kSendSnapshot: { auto meta = dynamic_cast(task_meta); - boost::function fun = - boost::bind(&TabletClient::SendSnapshot, client, meta->tid, meta->remote_tid, - meta->pid, meta->des_endpoint, task_info); + boost::function fun = boost::bind(&TabletClient::SendSnapshot, client, meta->tid, meta->remote_tid, + meta->pid, meta->des_endpoint, task_info); task->fun_ = boost::bind(&NameServerImpl::WrapTaskFun, this, fun, task_info); break; } @@ -10049,8 +10112,7 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me } else { table_meta.set_mode(::openmldb::api::TableMode::kTableFollower); } - boost::function fun = - boost::bind(&TabletClient::LoadTable, client, table_meta, task_info); + boost::function fun = boost::bind(&TabletClient::LoadTable, client, table_meta, task_info); task->fun_ = boost::bind(&NameServerImpl::WrapTaskFun, this, fun, task_info); break; } @@ -10061,11 +10123,11 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me if (meta->task_id != INVALID_PARENT_ID) { task_info->set_task_id(meta->task_id); } - fun = boost::bind(&TabletClient::AddReplica, client, - meta->tid, meta->pid, meta->des_endpoint, meta->remote_tid, task_info); + fun = boost::bind(&TabletClient::AddReplica, client, meta->tid, meta->pid, meta->des_endpoint, + meta->remote_tid, task_info); } else { - fun = boost::bind(&TabletClient::AddReplica, client, - meta->tid, meta->pid, meta->des_endpoint, task_info); + fun = + 
boost::bind(&TabletClient::AddReplica, client, meta->tid, meta->pid, meta->des_endpoint, task_info); } task->fun_ = boost::bind(&NameServerImpl::WrapTaskFun, this, fun, task_info); break; @@ -10087,46 +10149,44 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me case ::openmldb::api::TaskType::kAddTableInfo: { auto meta = dynamic_cast(task_meta); if (meta->is_remote) { - task->fun_ = boost::bind(&NameServerImpl::AddTableInfo, this, - meta->alias, meta->endpoint, meta->name, meta->db, - meta->remote_tid, meta->pid, task_info); + task->fun_ = boost::bind(&NameServerImpl::AddTableInfo, this, meta->alias, meta->endpoint, meta->name, + meta->db, meta->remote_tid, meta->pid, task_info); } else { - task->fun_ = boost::bind(&NameServerImpl::AddTableInfo, this, - meta->name, meta->db, meta->endpoint, meta->pid, task_info); + task->fun_ = boost::bind(&NameServerImpl::AddTableInfo, this, meta->name, meta->db, meta->endpoint, + meta->pid, task_info); } break; } case ::openmldb::api::TaskType::kDelTableInfo: { auto meta = dynamic_cast(task_meta); if (meta->has_flag) { - task->fun_ = boost::bind(&NameServerImpl::DelTableInfo, this, - meta->name, meta->db, meta->endpoint, meta->pid, task_info, meta->flag); + task->fun_ = boost::bind(&NameServerImpl::DelTableInfo, this, meta->name, meta->db, meta->endpoint, + meta->pid, task_info, meta->flag); } else { - task->fun_ = boost::bind(&NameServerImpl::DelTableInfo, this, - meta->name, meta->db, meta->endpoint, meta->pid, task_info); + task->fun_ = boost::bind(&NameServerImpl::DelTableInfo, this, meta->name, meta->db, meta->endpoint, + meta->pid, task_info); } break; } case ::openmldb::api::TaskType::kUpdateTableInfo: { auto meta = dynamic_cast(task_meta); - task->fun_ = boost::bind(&NameServerImpl::UpdateTableInfo, this, - meta->src_endpoint, meta->name, meta->db, meta->pid, meta->des_endpoint, task_info); + task->fun_ = boost::bind(&NameServerImpl::UpdateTableInfo, this, meta->src_endpoint, meta->name, meta->db, + meta->pid, meta->des_endpoint, task_info); break; } case ::openmldb::api::TaskType::kSendIndexRequest: { auto meta = dynamic_cast(task_meta); - boost::function fun = - boost::bind(&TabletClient::SendIndexData, client, meta->tid, meta->pid, - meta->pid_endpoint_map, task_info); + boost::function fun = boost::bind(&TabletClient::SendIndexData, client, meta->tid, meta->pid, + meta->pid_endpoint_map, task_info); task->fun_ = boost::bind(&NameServerImpl::WrapTaskFun, this, fun, task_info); break; } case ::openmldb::api::TaskType::kSendIndexData: { auto meta = dynamic_cast(task_meta); for (const auto& kv : meta->pid_endpoint_map) { - auto sub_task = CreateTask( - meta->task_info->op_id(), meta->task_info->op_type(), kv.second, - meta->tid, kv.first, meta->pid_endpoint_map); + auto sub_task = + CreateTask(meta->task_info->op_id(), meta->task_info->op_type(), + kv.second, meta->tid, kv.first, meta->pid_endpoint_map); task->sub_task_.push_back(sub_task); PDLOG(INFO, "add subtask kSendIndexData. 
op_id[%lu] tid[%u] pid[%u] endpoint[%s]", meta->task_info->op_id(), meta->tid, kv.first, kv.second.c_str()); @@ -10137,17 +10197,16 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me case ::openmldb::api::TaskType::kLoadIndexRequest: { auto meta = dynamic_cast(task_meta); boost::function fun = - boost::bind(&TabletClient::LoadIndexData, client, meta->tid, meta->pid, - meta->partition_num, task_info); + boost::bind(&TabletClient::LoadIndexData, client, meta->tid, meta->pid, meta->partition_num, task_info); task->fun_ = boost::bind(&NameServerImpl::WrapTaskFun, this, fun, task_info); break; } case ::openmldb::api::TaskType::kLoadIndexData: { auto meta = dynamic_cast(task_meta); for (const auto& kv : meta->pid_endpoint_map) { - auto sub_task = CreateTask( - meta->task_info->op_id(), meta->task_info->op_type(), kv.second, - meta->tid, kv.first, meta->pid_endpoint_map.size()); + auto sub_task = + CreateTask(meta->task_info->op_id(), meta->task_info->op_type(), + kv.second, meta->tid, kv.first, meta->pid_endpoint_map.size()); task->sub_task_.push_back(sub_task); PDLOG(INFO, "add subtask kLoadIndexData. op_id[%lu] tid[%u] pid[%u] endpoint[%s]", meta->task_info->op_id(), meta->tid, kv.first, kv.second.c_str()); @@ -10158,8 +10217,8 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me case ::openmldb::api::TaskType::kExtractIndexRequest: { auto meta = dynamic_cast(task_meta); boost::function fun = - boost::bind(&TabletClient::ExtractIndexData, client, meta->tid, meta->pid, - meta->partition_num, meta->column_key, meta->offset, true, task_info); + boost::bind(&TabletClient::ExtractIndexData, client, meta->tid, meta->pid, meta->partition_num, + meta->column_key, meta->offset, true, task_info); task->fun_ = boost::bind(&NameServerImpl::WrapTaskFun, this, fun, task_info); break; } @@ -10168,9 +10227,8 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me for (const auto& kv : meta->pid_endpoint_map) { auto iter = meta->pid_offset_map.find(kv.first); auto sub_task = CreateTask( - meta->task_info->op_id(), meta->task_info->op_type(), kv.second, - meta->tid, kv.first, meta->partition_num, meta->column_key, - iter->second); + meta->task_info->op_id(), meta->task_info->op_type(), kv.second, meta->tid, kv.first, + meta->partition_num, meta->column_key, iter->second); task->sub_task_.push_back(sub_task); PDLOG(INFO, "add subtask kExtractIndexData. 
op_id[%lu] tid[%u] pid[%u] endpoint[%s]", meta->task_info->op_id(), meta->tid, kv.first, kv.second.c_str()); @@ -10181,8 +10239,7 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me case ::openmldb::api::TaskType::kAddIndexToTabletRequest: { auto meta = dynamic_cast(task_meta); boost::function fun = - boost::bind(&TabletClient::AddMultiIndex, client, meta->tid, meta->pid, - meta->column_key, task_info); + boost::bind(&TabletClient::AddMultiIndex, client, meta->tid, meta->pid, meta->column_key, task_info); task->fun_ = boost::bind(&NameServerImpl::WrapTaskFun, this, fun, task_info); break; } @@ -10192,8 +10249,8 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me for (const auto& part_meta : part.partition_meta()) { const std::string& ep = part_meta.endpoint(); auto sub_task = CreateTask( - meta->task_info->op_id(), meta->task_info->op_type(), ep, - meta->table_info.tid(), part.pid(), meta->column_key); + meta->task_info->op_id(), meta->task_info->op_type(), ep, meta->table_info.tid(), part.pid(), + meta->column_key); task->sub_task_.push_back(sub_task); PDLOG(INFO, "add subtask AddIndexToTablet. op_id[%lu] tid[%u] pid[%u] endpoint[%s]", meta->task_info->op_id(), meta->table_info.tid(), part.pid(), ep.c_str()); @@ -10204,14 +10261,14 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me } case ::openmldb::api::TaskType::kAddIndexToTableInfo: { auto meta = dynamic_cast(task_meta); - task->fun_ = boost::bind(&NameServerImpl::AddIndexToTableInfo, this, - meta->name, meta->db, meta->column_key, task_info); + task->fun_ = boost::bind(&NameServerImpl::AddIndexToTableInfo, this, meta->name, meta->db, meta->column_key, + task_info); break; } case ::openmldb::api::TaskType::kCheckBinlogSyncProgress: { auto meta = dynamic_cast(task_meta); - task->fun_ = boost::bind(&NameServerImpl::CheckBinlogSyncProgress, this, - meta->name, meta->db, meta->pid, meta->follower, meta->offset_delta, task_info); + task->fun_ = boost::bind(&NameServerImpl::CheckBinlogSyncProgress, this, meta->name, meta->db, meta->pid, + meta->follower, meta->offset_delta, task_info); break; } case ::openmldb::api::TaskType::kChangeLeader: { @@ -10220,8 +10277,8 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me } case ::openmldb::api::TaskType::kSelectLeader: { auto meta = dynamic_cast(task_meta); - task->fun_ = boost::bind(&NameServerImpl::SelectLeader, this, meta->name, meta->db, - meta->tid, meta->pid, meta->follower_endpoint, task_info); + task->fun_ = boost::bind(&NameServerImpl::SelectLeader, this, meta->name, meta->db, meta->tid, meta->pid, + meta->follower_endpoint, task_info); break; } case ::openmldb::api::TaskType::kUpdateLeaderInfo: { @@ -10230,22 +10287,22 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me } case ::openmldb::api::TaskType::kRecoverTable: { auto meta = dynamic_cast(task_meta); - task->fun_ = boost::bind(&NameServerImpl::RecoverEndpointTable, this, meta->name, meta->db, - meta->pid, meta->endpoint, meta->offset_delta, meta->concurrency, task_info); + task->fun_ = boost::bind(&NameServerImpl::RecoverEndpointTable, this, meta->name, meta->db, meta->pid, + meta->endpoint, meta->offset_delta, meta->concurrency, task_info); break; } case ::openmldb::api::TaskType::kUpdatePartitionStatus: { auto meta = dynamic_cast(task_meta); - task->fun_ = boost::bind(&NameServerImpl::UpdatePartitionStatus, this, meta->name, meta->db, - meta->endpoint, meta->pid, meta->is_leader, meta->is_alive, 
task_info); + task->fun_ = boost::bind(&NameServerImpl::UpdatePartitionStatus, this, meta->name, meta->db, meta->endpoint, + meta->pid, meta->is_leader, meta->is_alive, task_info); break; } case ::openmldb::api::TaskType::kCreateTableRemote: { auto meta = dynamic_cast(task_meta); auto cluster = GetHealthCluster(meta->alias); if (!cluster) { - PDLOG(WARNING, "replica[%s] not available op_index[%lu]", - meta->alias.c_str(), meta->task_info->op_id()); + PDLOG(WARNING, "replica[%s] not available op_index[%lu]", meta->alias.c_str(), + meta->task_info->op_id()); return {}; } std::string cluster_endpoint = @@ -10260,8 +10317,8 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me auto meta = dynamic_cast(task_meta); auto cluster = GetHealthCluster(meta->alias); if (!cluster) { - PDLOG(WARNING, "replica[%s] not available op_index[%lu]", - meta->alias.c_str(), meta->task_info->op_id()); + PDLOG(WARNING, "replica[%s] not available op_index[%lu]", meta->alias.c_str(), + meta->task_info->op_id()); return {}; } std::string cluster_endpoint = @@ -10276,26 +10333,26 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me auto meta = dynamic_cast(task_meta); auto cluster = GetHealthCluster(meta->alias); if (!cluster) { - PDLOG(WARNING, "replica[%s] not available op_index[%lu]", - meta->alias.c_str(), meta->task_info->op_id()); + PDLOG(WARNING, "replica[%s] not available op_index[%lu]", meta->alias.c_str(), + meta->task_info->op_id()); return {}; } std::string cluster_endpoint = std::atomic_load_explicit(&cluster->client_, std::memory_order_relaxed)->GetEndpoint(); task->task_info_->set_endpoint(cluster_endpoint); - boost::function fun = boost::bind(&NsClient::AddReplicaNS, - std::atomic_load_explicit(&cluster->client_, std::memory_order_relaxed), - meta->name, meta->endpoint_vec, meta->pid, zone_info_, *task_info); + boost::function fun = boost::bind( + &NsClient::AddReplicaNS, std::atomic_load_explicit(&cluster->client_, std::memory_order_relaxed), + meta->name, meta->endpoint_vec, meta->pid, zone_info_, *task_info); task->fun_ = boost::bind(&NameServerImpl::WrapTaskFun, this, fun, task_info); break; } case ::openmldb::api::TaskType::kAddTableIndex: { auto meta = dynamic_cast(task_meta); - auto status = FillAddIndexTask(meta->task_info->op_id(), meta->task_info->op_type(), - meta->name, meta->db, meta->column_key, &task->seq_task_); + auto status = FillAddIndexTask(meta->task_info->op_id(), meta->task_info->op_type(), meta->name, meta->db, + meta->column_key, &task->seq_task_); if (!status.OK()) { - PDLOG(WARNING, "FillAddIndexTask failed. op_id %lu msg %s", - meta->task_info->op_id(), status.GetMsg().c_str()); + PDLOG(WARNING, "FillAddIndexTask failed. 
op_id %lu msg %s", meta->task_info->op_id(), + status.GetMsg().c_str()); return {}; } task->fun_ = boost::bind(&NameServerImpl::RunSeqTask, this, task); @@ -10305,15 +10362,14 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me auto meta = dynamic_cast(task_meta); for (const auto& cur_table_index : meta->table_index) { auto sub_task = CreateTask( - meta->task_info->op_id(), meta->task_info->op_type(), - cur_table_index.name(), cur_table_index.db(), - schema::IndexUtil::Convert2Vector(cur_table_index.column_key())); + meta->task_info->op_id(), meta->task_info->op_type(), cur_table_index.name(), cur_table_index.db(), + schema::IndexUtil::Convert2Vector(cur_table_index.column_key())); if (!sub_task) { return {}; } task->sub_task_.push_back(sub_task); - PDLOG(INFO, "add subtask kAddTableIndex. op_id[%lu] table name %s db %s", - meta->task_info->op_id(), cur_table_index.name().c_str(), cur_table_index.db().c_str()); + PDLOG(INFO, "add subtask kAddTableIndex. op_id[%lu] table name %s db %s", meta->task_info->op_id(), + cur_table_index.name().c_str(), cur_table_index.db().c_str()); } task->fun_ = boost::bind(&NameServerImpl::RunSubTask, this, task); break; @@ -10322,14 +10378,14 @@ std::shared_ptr NameServerImpl::CreateTaskInternal(const TaskMeta* task_me auto meta = dynamic_cast(task_meta); api::CreateProcedureRequest request; request.mutable_sp_info()->CopyFrom(meta->sp_info); - boost::function wrap_fun = boost::bind(&NameServerImpl::CreateProcedureInternal, - this, request); + boost::function wrap_fun = + boost::bind(&NameServerImpl::CreateProcedureInternal, this, request); task->fun_ = boost::bind(&NameServerImpl::WrapNormalTaskFun, this, wrap_fun, task_info); break; } - case ::openmldb::api::TaskType::kDumpIndexData: // deprecated + case ::openmldb::api::TaskType::kDumpIndexData: // deprecated case ::openmldb::api::TaskType::kUpdateTableAlive: // deprecated - case ::openmldb::api::TaskType::kTableSyncTask: // deprecated + case ::openmldb::api::TaskType::kTableSyncTask: // deprecated break; } return task; @@ -10352,8 +10408,8 @@ bool NameServerImpl::IsExistDataBase(const std::string& db) { return databases_.find(db) != databases_.end(); } -void NameServerImpl::DeploySQL(RpcController* controller, const DeploySQLRequest* request, - DeploySQLResponse* response, Closure* done) { +void NameServerImpl::DeploySQL(RpcController* controller, const DeploySQLRequest* request, DeploySQLResponse* response, + Closure* done) { brpc::ClosureGuard done_guard(done); if (!running_.load(std::memory_order_acquire)) { response->set_code(::openmldb::base::ReturnCode::kNameserverIsNotLeader); @@ -10370,7 +10426,7 @@ void NameServerImpl::DeploySQL(RpcController* controller, const DeploySQLRequest return; } if (auto procedure = GetProcedure(db, deploy_name); - procedure && procedure->type() == ::openmldb::type::ProcedureType::kReqDeployment) { + procedure && procedure->type() == ::openmldb::type::ProcedureType::kReqDeployment) { base::SetResponseStatus(ReturnCode::kProcedureAlreadyExists, "deployment already exists", response); PDLOG(WARNING, "deployment[%s] already exists in db[%s]", deploy_name.c_str(), db.c_str()); return; @@ -10395,8 +10451,7 @@ void NameServerImpl::DeploySQL(RpcController* controller, const DeploySQLRequest std::lock_guard lock(mu_); if (IsExistActiveOp(db, "", api::OPType::kDeployOP)) { LOG(WARNING) << "create DeployOP failed. 
there is already a task running in db " << db; - base::SetResponseStatus(ReturnCode::kOPAlreadyExists, - "there is already a task running", response); + base::SetResponseStatus(ReturnCode::kOPAlreadyExists, "there is already a task running", response); return; } uint64_t op_id = 0; @@ -10451,7 +10506,7 @@ bool NameServerImpl::IsExistActiveOp(const std::string& db, const std::string& n continue; } if (op_data->op_info_.task_status() == api::TaskStatus::kInited || - op_data->op_info_.task_status() == api::TaskStatus::kDoing) { + op_data->op_info_.task_status() == api::TaskStatus::kDoing) { return true; } } @@ -10472,7 +10527,7 @@ bool NameServerImpl::IsExistActiveOp(const std::string& db, const std::string& n continue; } if (op_data->op_info_.task_status() == api::TaskStatus::kInited || - op_data->op_info_.task_status() == api::TaskStatus::kDoing) { + op_data->op_info_.task_status() == api::TaskStatus::kDoing) { return true; } } diff --git a/src/nameserver/name_server_impl.h b/src/nameserver/name_server_impl.h index 07848347ed4..4b5356d9bb5 100644 --- a/src/nameserver/name_server_impl.h +++ b/src/nameserver/name_server_impl.h @@ -31,6 +31,7 @@ #include "base/hash.h" #include "base/random.h" +#include "catalog/distribute_iterator.h" #include "client/ns_client.h" #include "client/tablet_client.h" #include "codec/schema_codec.h" @@ -78,7 +79,6 @@ struct TabletInfo { bool Health() const { return state_ == ::openmldb::type::EndpointState::kHealthy; } }; - // the container of tablet typedef std::map> Tablets; typedef std::map> TableInfos; @@ -140,7 +140,6 @@ class NameServerImpl : public NameServer { std::shared_ptr<::openmldb::nameserver::TableInfo> table_info, uint64_t cur_term, uint32_t tid, std::shared_ptr<::openmldb::api::TaskInfo> task_ptr); - void RefreshTablet(uint32_t tid); void CreateTableInfoSimply(RpcController* controller, const CreateTableInfoRequest* request, @@ -155,8 +154,8 @@ class NameServerImpl : public NameServer { void CreateProcedure(RpcController* controller, const api::CreateProcedureRequest* request, GeneralResponse* response, Closure* done); - void DeploySQL(RpcController* controller, const DeploySQLRequest* request, - DeploySQLResponse* response, Closure* done); + void DeploySQL(RpcController* controller, const DeploySQLRequest* request, DeploySQLResponse* response, + Closure* done); void DropTableInternel(const DropTableRequest& request, GeneralResponse& response, // NOLINT std::shared_ptr<::openmldb::nameserver::TableInfo> table_info, @@ -165,8 +164,8 @@ class NameServerImpl : public NameServer { void DropTable(RpcController* controller, const DropTableRequest* request, GeneralResponse* response, Closure* done); - void TruncateTable(RpcController* controller, const TruncateTableRequest* request, - TruncateTableResponse* response, Closure* done); + void TruncateTable(RpcController* controller, const TruncateTableRequest* request, TruncateTableResponse* response, + Closure* done); void AddTableField(RpcController* controller, const AddTableFieldRequest* request, GeneralResponse* response, Closure* done); @@ -182,11 +181,11 @@ class NameServerImpl : public NameServer { void CreateFunction(RpcController* controller, const CreateFunctionRequest* request, CreateFunctionResponse* response, Closure* done); - void DropFunction(RpcController* controller, const DropFunctionRequest* request, - DropFunctionResponse* response, Closure* done); + void DropFunction(RpcController* controller, const DropFunctionRequest* request, DropFunctionResponse* response, + Closure* done); - void 
ShowFunction(RpcController* controller, const ShowFunctionRequest* request, - ShowFunctionResponse* response, Closure* done); + void ShowFunction(RpcController* controller, const ShowFunctionRequest* request, ShowFunctionResponse* response, + Closure* done); void ShowProcedure(RpcController* controller, const api::ShowProcedureRequest* request, api::ShowProcedureResponse* response, Closure* done); @@ -309,7 +308,7 @@ class NameServerImpl : public NameServer { void UpdateOfflineTableInfo(::google::protobuf::RpcController* controller, const ::openmldb::nameserver::TableInfo* request, - ::openmldb::nameserver::GeneralResponse* response, ::google::protobuf::Closure* done); + ::openmldb::nameserver::GeneralResponse* response, ::google::protobuf::Closure* done); int SyncExistTable(const std::string& alias, const std::string& name, const std::string& db, const std::vector<::openmldb::nameserver::TableInfo> tables_remote, @@ -317,14 +316,15 @@ class NameServerImpl : public NameServer { std::string& msg); // NOLINT base::Status CreateTableOnTablet(const std::shared_ptr<::openmldb::nameserver::TableInfo>& table_info, - bool is_leader, uint64_t term, std::map>* endpoint_map); + bool is_leader, uint64_t term, + std::map>* endpoint_map); void CheckZkClient(); int UpdateTaskStatusRemote(bool is_recover_op); - int UpdateTask(const std::list>& op_list, const std::string& endpoint, - bool is_recover_op, const ::openmldb::api::TaskStatusResponse& response); + int UpdateTask(const std::list>& op_list, const std::string& endpoint, bool is_recover_op, + const ::openmldb::api::TaskStatusResponse& response); int UpdateTaskStatus(bool is_recover_op); @@ -358,7 +358,15 @@ class NameServerImpl : public NameServer { void DropProcedure(RpcController* controller, const api::DropProcedureRequest* request, GeneralResponse* response, Closure* done); + std::function(const std::string& table_name)> + GetSystemTableIterator(); + + bool GetTableInfo(const std::string& table_name, const std::string& db_name, + std::shared_ptr* table_info); + private: + base::Status InsertUserRecord(const std::string& host, const std::string& user, const std::string& password); + base::Status InitGlobalVarTable(); // create the database if not exists, exit on fail @@ -463,7 +471,7 @@ class NameServerImpl : public NameServer { int UpdateEndpointTableAlive(const std::string& endpoint, bool is_alive); template - std::shared_ptr CreateTask(Arg &&...arg) { + std::shared_ptr CreateTask(Arg&&... 
arg) { T meta(std::forward(arg)...); return CreateTaskInternal(&meta); } @@ -488,11 +496,8 @@ class NameServerImpl : public NameServer { uint32_t pid, const std::vector& endpoints, const ::openmldb::common::ColumnKey& column_key); - bool GetTableInfo(const std::string& table_name, const std::string& db_name, - std::shared_ptr* table_info); - bool GetTableInfoUnlock(const std::string& table_name, const std::string& db_name, - std::shared_ptr* table_info); + std::shared_ptr* table_info); int AddOPTask(const ::openmldb::api::TaskInfo& task_info, ::openmldb::api::TaskType task_type, std::shared_ptr<::openmldb::api::TaskInfo>& task_ptr, // NOLINT @@ -560,14 +565,13 @@ class NameServerImpl : public NameServer { base::Status CreateDeployOP(const DeploySQLRequest& request, uint64_t* op_id); base::Status CreateAddIndexOP(const std::string& name, const std::string& db, - const std::vector<::openmldb::common::ColumnKey>& column_key); + const std::vector<::openmldb::common::ColumnKey>& column_key); base::Status CreateAddIndexOPTask(std::shared_ptr op_data); - base::Status FillAddIndexTask(uint64_t op_index, api::OPType op_type, - const std::string& name, const std::string& db, - const std::vector<::openmldb::common::ColumnKey>& column_key, - std::list>* task_list); + base::Status FillAddIndexTask(uint64_t op_index, api::OPType op_type, const std::string& name, + const std::string& db, const std::vector<::openmldb::common::ColumnKey>& column_key, + std::list>* task_list); int DropTableRemoteOP(const std::string& name, const std::string& db, const std::string& alias, uint64_t parent_id = INVALID_PARENT_ID, @@ -583,13 +587,13 @@ class NameServerImpl : public NameServer { std::shared_ptr<::openmldb::api::TaskInfo> task_info); bool AddIndexToTableInfo(const std::string& name, const std::string& db, - const std::vector<::openmldb::common::ColumnKey>& column_key, - std::shared_ptr<::openmldb::api::TaskInfo> task_info); + const std::vector<::openmldb::common::ColumnKey>& column_key, + std::shared_ptr<::openmldb::api::TaskInfo> task_info); void WrapTaskFun(const boost::function& fun, std::shared_ptr<::openmldb::api::TaskInfo> task_info); void WrapNormalTaskFun(const boost::function& fun, - std::shared_ptr<::openmldb::api::TaskInfo> task_info); + std::shared_ptr<::openmldb::api::TaskInfo> task_info); void RunSubTask(std::shared_ptr task); void RunSeqTask(std::shared_ptr task); @@ -658,9 +662,8 @@ class NameServerImpl : public NameServer { bool AddFieldToTablet(const std::vector& cols, std::shared_ptr table_info, openmldb::common::VersionPair* new_pair); - base::Status AddMultiIndexs(const std::string& db, const std::string& name, - std::shared_ptr table_info, - const ::google::protobuf::RepeatedPtrField& column_keys); + base::Status AddMultiIndexs(const std::string& db, const std::string& name, std::shared_ptr table_info, + const ::google::protobuf::RepeatedPtrField& column_keys); void DropProcedureOnTablet(const std::string& db_name, const std::string& sp_name); diff --git a/src/nameserver/name_server_test.cc b/src/nameserver/name_server_test.cc index 3799873d597..471f7356059 100644 --- a/src/nameserver/name_server_test.cc +++ b/src/nameserver/name_server_test.cc @@ -136,16 +136,16 @@ TEST_P(NameServerImplTest, MakesnapshotTask) { FLAGS_make_snapshot_threshold_offset = 0; FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); + brpc::ServerOptions options1; + brpc::Server server1; + ASSERT_TRUE(StartTablet("127.0.0.1:9530", &server1, &options1)); + brpc::ServerOptions options; brpc::Server server; 
ASSERT_TRUE(StartNS("127.0.0.1:9631", &server, &options)); ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client("127.0.0.1:9631", ""); name_server_client.Init(); - brpc::ServerOptions options1; - brpc::Server server1; - ASSERT_TRUE(StartTablet("127.0.0.1:9530", &server1, &options1)); - CreateTableRequest request; GeneralResponse response; TableInfo* table_info = request.mutable_table_info(); @@ -296,46 +296,32 @@ TEST_P(NameServerImplTest, MakesnapshotTask) { FLAGS_make_snapshot_threshold_offset = old_offset; ::openmldb::base::RemoveDirRecursive(FLAGS_hdd_root_path + "/2_0"); ::openmldb::base::RemoveDirRecursive(FLAGS_ssd_root_path + "/2_0"); + server1.Stop(1); + server1.Join(); + server.Stop(1); + server.Join(); } TEST_F(NameServerImplTest, ConfigGetAndSet) { FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - std::string endpoint = "127.0.0.1:9631"; - FLAGS_endpoint = endpoint; - NameServerImpl* nameserver = new NameServerImpl(); - bool ok = nameserver->Init(""); - ASSERT_TRUE(ok); - sleep(4); - brpc::ServerOptions options; - brpc::Server server; - if (server.AddService(nameserver, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { - PDLOG(WARNING, "Fail to add service"); - exit(1); - } - if (server.Start(FLAGS_endpoint.c_str(), &options) != 0) { - PDLOG(WARNING, "Fail to start server"); - exit(1); - } + brpc::ServerOptions options_t; + brpc::Server server_t; + ASSERT_TRUE(StartTablet("127.0.0.1:9530", &server_t, &options_t)); - std::string endpoint1 = "127.0.0.1:9632"; - FLAGS_endpoint = endpoint1; - NameServerImpl* nameserver1 = new NameServerImpl(); - ok = nameserver1->Init(""); - ASSERT_TRUE(ok); - sleep(4); - brpc::ServerOptions options1; - brpc::Server server1; - if (server1.AddService(nameserver1, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { - PDLOG(WARNING, "Fail to add service"); - exit(1); - } - if (server1.Start(FLAGS_endpoint.c_str(), &options1) != 0) { - PDLOG(WARNING, "Fail to start server"); - exit(1); - } - ::openmldb::client::NsClient name_server_client(endpoint, ""); + brpc::ServerOptions options_n1; + brpc::Server server_n1; + auto server_n1_endpoint = "127.0.0.1:9631"; + ASSERT_TRUE(StartNS(server_n1_endpoint, &server_n1, &options_n1)); + + brpc::ServerOptions options_n2; + brpc::Server server_n2; + auto server_n2_endpoint = "127.0.0.1:9632"; + ASSERT_TRUE(StartNS(server_n2_endpoint, &server_n2, &options_n2)); + + ::openmldb::client::NsClient name_server_client(server_n1_endpoint, ""); name_server_client.Init(); + std::string key = "auto_failover"; std::string msg; std::map conf_map; @@ -350,13 +336,17 @@ TEST_F(NameServerImplTest, ConfigGetAndSet) { ASSERT_STREQ(conf_map[key].c_str(), "true"); ret = name_server_client.DisConnectZK(msg); sleep(5); - ::openmldb::client::NsClient name_server_client1(endpoint1, ""); + ::openmldb::client::NsClient name_server_client1(server_n2_endpoint, ""); name_server_client1.Init(); ret = name_server_client1.ConfGet(key, conf_map, msg); ASSERT_TRUE(ret); ASSERT_STREQ(conf_map[key].c_str(), "true"); - delete nameserver; - delete nameserver1; + server_t.Stop(1); + server_t.Join(); + server_n1.Stop(1); + server_n1.Join(); + server_n2.Stop(1); + server_n2.Join(); } TEST_P(NameServerImplTest, CreateTable) { @@ -364,27 +354,9 @@ TEST_P(NameServerImplTest, CreateTable) { FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9632"; - NameServerImpl* nameserver = new NameServerImpl(); - bool ok = nameserver->Init(""); - ASSERT_TRUE(ok); - sleep(4); - brpc::ServerOptions options; - 
brpc::Server server; - if (server.AddService(nameserver, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { - PDLOG(WARNING, "Fail to add service"); - exit(1); - } - if (server.Start(FLAGS_endpoint.c_str(), &options) != 0) { - PDLOG(WARNING, "Fail to start server"); - exit(1); - } - ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(FLAGS_endpoint, ""); - name_server_client.Init(); - FLAGS_endpoint = "127.0.0.1:9531"; ::openmldb::tablet::TabletImpl* tablet = new ::openmldb::tablet::TabletImpl(); - ok = tablet->Init(""); + bool ok = tablet->Init(""); ASSERT_TRUE(ok); sleep(2); @@ -403,6 +375,24 @@ TEST_P(NameServerImplTest, CreateTable) { sleep(2); + FLAGS_endpoint = "127.0.0.1:9632"; + NameServerImpl* nameserver = new NameServerImpl(); + ok = nameserver->Init(""); + ASSERT_TRUE(ok); + sleep(4); + brpc::ServerOptions options; + brpc::Server server; + if (server.AddService(nameserver, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + PDLOG(WARNING, "Fail to add service"); + exit(1); + } + if (server.Start(FLAGS_endpoint.c_str(), &options) != 0) { + PDLOG(WARNING, "Fail to start server"); + exit(1); + } + ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(FLAGS_endpoint, ""); + name_server_client.Init(); + CreateTableRequest request; GeneralResponse response; TableInfo* table_info = request.mutable_table_info(); @@ -434,6 +424,10 @@ TEST_P(NameServerImplTest, CreateTable) { FLAGS_request_timeout_ms, 1); ASSERT_TRUE(ok); ASSERT_EQ(0, response.code()); + server1.Stop(1); + server1.Join(); + server.Stop(1); + server.Join(); delete nameserver; delete tablet; @@ -451,23 +445,6 @@ TEST_P(NameServerImplTest, Offline) { FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); FLAGS_auto_failover = true; - FLAGS_endpoint = "127.0.0.1:9633"; - NameServerImpl* nameserver = new NameServerImpl(); - bool ok = nameserver->Init(""); - ASSERT_TRUE(ok); - sleep(4); - brpc::ServerOptions options; - brpc::Server server; - if (server.AddService(nameserver, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { - PDLOG(WARNING, "Fail to add service"); - exit(1); - } - if (server.Start(FLAGS_endpoint.c_str(), &options) != 0) { - PDLOG(WARNING, "Fail to start server"); - exit(1); - } - ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(FLAGS_endpoint, ""); - name_server_client.Init(); FLAGS_endpoint = "127.0.0.1:9533"; std::string old_db_root_path = FLAGS_db_root_path; @@ -478,7 +455,7 @@ TEST_P(NameServerImplTest, Offline) { FLAGS_ssd_root_path = temp_path.GetTempPath(); FLAGS_hdd_root_path = temp_path.GetTempPath(); ::openmldb::tablet::TabletImpl* tablet = new ::openmldb::tablet::TabletImpl(); - ok = tablet->Init(""); + bool ok = tablet->Init(""); ASSERT_TRUE(ok); sleep(2); @@ -518,6 +495,25 @@ TEST_P(NameServerImplTest, Offline) { ASSERT_TRUE(ok); sleep(2); + + FLAGS_endpoint = "127.0.0.1:9633"; + NameServerImpl* nameserver = new NameServerImpl(); + ok = nameserver->Init(""); + ASSERT_TRUE(ok); + sleep(4); + brpc::ServerOptions options; + brpc::Server server; + if (server.AddService(nameserver, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + PDLOG(WARNING, "Fail to add service"); + exit(1); + } + if (server.Start(FLAGS_endpoint.c_str(), &options) != 0) { + PDLOG(WARNING, "Fail to start server"); + exit(1); + } + ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(FLAGS_endpoint, ""); + name_server_client.Init(); + CreateTableRequest request; GeneralResponse response; TableInfo* table_info = request.mutable_table_info(); @@ 
-569,6 +565,12 @@ TEST_P(NameServerImplTest, Offline) { ASSERT_TRUE(ok); ASSERT_EQ(0, response.code()); } + server2.Stop(1); + server2.Join(); + server1.Stop(1); + server1.Join(); + server.Stop(1); + server.Join(); delete nameserver; delete tablet; delete tablet2; @@ -580,27 +582,9 @@ TEST_P(NameServerImplTest, Offline) { TEST_F(NameServerImplTest, SetTablePartition) { FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); - FLAGS_endpoint = "127.0.0.1:9632"; - NameServerImpl* nameserver = new NameServerImpl(); - bool ok = nameserver->Init(""); - ASSERT_TRUE(ok); - sleep(4); - brpc::ServerOptions options; - brpc::Server server; - if (server.AddService(nameserver, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { - PDLOG(WARNING, "Fail to add service"); - exit(1); - } - if (server.Start(FLAGS_endpoint.c_str(), &options) != 0) { - PDLOG(WARNING, "Fail to start server"); - exit(1); - } - ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(FLAGS_endpoint, ""); - name_server_client.Init(); - FLAGS_endpoint = "127.0.0.1:9531"; ::openmldb::tablet::TabletImpl* tablet = new ::openmldb::tablet::TabletImpl(); - ok = tablet->Init(""); + bool ok = tablet->Init(""); ASSERT_TRUE(ok); sleep(2); @@ -617,6 +601,24 @@ TEST_F(NameServerImplTest, SetTablePartition) { ok = tablet->RegisterZK(); ASSERT_TRUE(ok); + FLAGS_endpoint = "127.0.0.1:9632"; + NameServerImpl* nameserver = new NameServerImpl(); + ok = nameserver->Init(""); + ASSERT_TRUE(ok); + sleep(4); + brpc::ServerOptions options; + brpc::Server server; + if (server.AddService(nameserver, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + PDLOG(WARNING, "Fail to add service"); + exit(1); + } + if (server.Start(FLAGS_endpoint.c_str(), &options) != 0) { + PDLOG(WARNING, "Fail to start server"); + exit(1); + } + ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(FLAGS_endpoint, ""); + name_server_client.Init(); + sleep(2); std::string msg; ConfSetRequest conf_request; @@ -689,13 +691,19 @@ TEST_F(NameServerImplTest, SetTablePartition) { ASSERT_TRUE(ok); ASSERT_EQ(0, get_response.code()); ASSERT_FALSE(get_response.table_partition().partition_meta(0).is_leader()); - + server1.Stop(1); + server1.Join(); + server.Stop(1); + server.Join(); delete nameserver; delete tablet; } TEST_F(NameServerImplTest, CancelOP) { FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); + brpc::ServerOptions options_t; + brpc::Server server_t; + ASSERT_TRUE(StartTablet("127.0.0.1:9530", &server_t, &options_t)); FLAGS_endpoint = "127.0.0.1:9632"; NameServerImpl* nameserver = new NameServerImpl(); @@ -747,6 +755,10 @@ TEST_F(NameServerImplTest, CancelOP) { nameserver->CancelOP(NULL, &request, &response, &closure); ASSERT_EQ(0, response.code()); ASSERT_TRUE(op_data->op_info_.task_status() == ::openmldb::api::kCanceled); + server_t.Stop(1); + server_t.Join(); + server.Stop(1); + server.Join(); delete nameserver; } @@ -772,6 +784,7 @@ void InitTablet(int port, vector services, vector services, vector ns = std::make_shared(); @@ -840,21 +852,29 @@ TEST_F(NameServerImplTest, AddAndRemoveReplicaCluster) { string f2_ns1_ep, f2_ns2_ep, f2_t1_ep, f2_t2_ep; string m1_zkpath, f1_zkpath, f2_zkpath; - vector svrs = {&m1_ns1_svr, &m1_ns2_svr}; + vector svrs = {&m1_t1_svr, &m1_t2_svr}; vector*> ns_vector = {&m1_ns1, &m1_ns2}; vector*> tb_vector = {&m1_t1, &m1_t2}; - vector endpoints = {&m1_ns1_ep, &m1_ns2_ep}; + vector endpoints = {&m1_t1_ep, &m1_t2_ep}; + ; int port = 9632; + + InitTablet(port, svrs, tb_vector, endpoints); + + svrs = 
{&m1_ns1_svr, &m1_ns2_svr}; + endpoints = {&m1_ns1_ep, &m1_ns2_ep}; + InitNs(port, svrs, ns_vector, endpoints); m1_zkpath = FLAGS_zk_root_path; - svrs = {&m1_t1_svr, &m1_t2_svr}; - endpoints = {&m1_t1_ep, &m1_t2_ep}; + port++; - InitTablet(port, svrs, tb_vector, endpoints); + svrs = {&f1_t1_svr, &f1_t2_svr}; + endpoints = {&f1_t1_ep, &f1_t2_ep}; + tb_vector = {&f1_t1, &f1_t2}; - port++; + InitTablet(port, svrs, tb_vector, endpoints); svrs = {&f1_ns1_svr, &f1_ns2_svr}; ns_vector = {&f1_ns1, &f1_ns2}; @@ -863,13 +883,13 @@ TEST_F(NameServerImplTest, AddAndRemoveReplicaCluster) { InitNs(port, svrs, ns_vector, endpoints); f1_zkpath = FLAGS_zk_root_path; - svrs = {&f1_t1_svr, &f1_t2_svr}; - endpoints = {&f1_t1_ep, &f1_t2_ep}; - tb_vector = {&f1_t1, &f1_t2}; + port++; - InitTablet(port, svrs, tb_vector, endpoints); + svrs = {&f2_t1_svr, &f2_t2_svr}; + endpoints = {&f2_t1_ep, &f2_t2_ep}; + tb_vector = {&f2_t1, &f2_t2}; - port++; + InitTablet(port, svrs, tb_vector, endpoints); svrs = {&f2_ns1_svr, &f2_ns2_svr}; ns_vector = {&f2_ns1, &f2_ns2}; @@ -878,12 +898,6 @@ TEST_F(NameServerImplTest, AddAndRemoveReplicaCluster) { InitNs(port, svrs, ns_vector, endpoints); f2_zkpath = FLAGS_zk_root_path; - svrs = {&f2_t1_svr, &f2_t2_svr}; - endpoints = {&f2_t1_ep, &f2_t2_ep}; - tb_vector = {&f2_t1, &f2_t2}; - - InitTablet(port, svrs, tb_vector, endpoints); - // disable autoconf ConfSetRequest conf_set_request; GeneralResponse general_response; @@ -992,6 +1006,11 @@ TEST_F(NameServerImplTest, AddAndRemoveReplicaCluster) { ASSERT_EQ(2, show_replica_cluster_response.replicas_size()); show_replica_cluster_response.Clear(); } + + for (auto svc : svrs) { + svc->Stop(1); + svc->Join(); + } } TEST_F(NameServerImplTest, SyncTableReplicaCluster) { @@ -1005,22 +1024,30 @@ TEST_F(NameServerImplTest, SyncTableReplicaCluster) { string f2_ns1_ep, f2_ns2_ep, f2_t1_ep, f2_t2_ep; string m1_zkpath, f1_zkpath, f2_zkpath; - vector svrs = {&m1_ns1_svr, &m1_ns2_svr}; + vector svrs = {&m1_t1_svr, &m1_t2_svr}; vector*> ns_vector = {&m1_ns1, &m1_ns2}; vector*> tb_vector = {&m1_t1, &m1_t2}; - vector endpoints = {&m1_ns1_ep, &m1_ns2_ep}; + vector endpoints = {&m1_t1_ep, &m1_t2_ep}; + ; + + int port = 9632; + + InitTablet(port, svrs, tb_vector, endpoints); + + svrs = {&m1_ns1_svr, &m1_ns2_svr}; + endpoints = {&m1_ns1_ep, &m1_ns2_ep}; - int port = 9642; InitNs(port, svrs, ns_vector, endpoints); m1_zkpath = FLAGS_zk_root_path; - svrs = {&m1_t1_svr, &m1_t2_svr}; - endpoints = {&m1_t1_ep, &m1_t2_ep}; + port++; + + svrs = {&f1_t1_svr, &f1_t2_svr}; + endpoints = {&f1_t1_ep, &f1_t2_ep}; + tb_vector = {&f1_t1, &f1_t2}; InitTablet(port, svrs, tb_vector, endpoints); - port++; - svrs = {&f1_ns1_svr, &f1_ns2_svr}; ns_vector = {&f1_ns1, &f1_ns2}; endpoints = {&f1_ns1_ep, &f1_ns2_ep}; @@ -1028,13 +1055,13 @@ TEST_F(NameServerImplTest, SyncTableReplicaCluster) { InitNs(port, svrs, ns_vector, endpoints); f1_zkpath = FLAGS_zk_root_path; - svrs = {&f1_t1_svr, &f1_t2_svr}; - endpoints = {&f1_t1_ep, &f1_t2_ep}; - tb_vector = {&f1_t1, &f1_t2}; + port++; - InitTablet(port, svrs, tb_vector, endpoints); + svrs = {&f2_t1_svr, &f2_t2_svr}; + endpoints = {&f2_t1_ep, &f2_t2_ep}; + tb_vector = {&f2_t1, &f2_t2}; - port++; + InitTablet(port, svrs, tb_vector, endpoints); svrs = {&f2_ns1_svr, &f2_ns2_svr}; ns_vector = {&f2_ns1, &f2_ns2}; @@ -1043,12 +1070,6 @@ TEST_F(NameServerImplTest, SyncTableReplicaCluster) { InitNs(port, svrs, ns_vector, endpoints); f2_zkpath = FLAGS_zk_root_path; - svrs = {&f2_t1_svr, &f2_t2_svr}; - endpoints = {&f2_t1_ep, &f2_t2_ep}; - tb_vector = 
{&f2_t1, &f2_t2}; - - InitTablet(port, svrs, tb_vector, endpoints); - // disable autoconf ConfSetRequest conf_set_request; GeneralResponse general_response; @@ -1147,21 +1168,26 @@ TEST_F(NameServerImplTest, SyncTableReplicaCluster) { ASSERT_EQ(name, show_table_response.table_info(0).name()); show_table_response.Clear(); } + + for (auto svc : svrs) { + svc->Stop(1); + svc->Join(); + } } TEST_F(NameServerImplTest, ShowCatalogVersion) { FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); + brpc::ServerOptions options1; + brpc::Server server1; + ASSERT_TRUE(StartTablet("127.0.0.1:9535", &server1, &options1)); + brpc::ServerOptions options; brpc::Server server; ASSERT_TRUE(StartNS("127.0.0.1:9634", &server, &options)); ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client("127.0.0.1:9634", ""); name_server_client.Init(); - brpc::ServerOptions options1; - brpc::Server server1; - ASSERT_TRUE(StartTablet("127.0.0.1:9535", &server1, &options1)); - brpc::ServerOptions options2; brpc::Server server2; ASSERT_TRUE(StartTablet("127.0.0.1:9536", &server2, &options2)); @@ -1239,6 +1265,13 @@ TEST_F(NameServerImplTest, ShowCatalogVersion) { ASSERT_EQ(cur_catalog.version(), version_map[cur_catalog.endpoint()] + 1); PDLOG(INFO, "endpoint %s version %lu", cur_catalog.endpoint().c_str(), cur_catalog.version()); } + + server2.Stop(1); + server2.Join(); + server1.Stop(1); + server1.Join(); + server.Stop(1); + server.Join(); } INSTANTIATE_TEST_CASE_P(TabletMemAndHDD, NameServerImplTest, @@ -1247,16 +1280,16 @@ INSTANTIATE_TEST_CASE_P(TabletMemAndHDD, NameServerImplTest, TEST_F(NameServerImplTest, AddField) { FLAGS_zk_root_path = "/rtidb3" + ::openmldb::test::GenRand(); + brpc::ServerOptions options1; + brpc::Server server1; + ASSERT_TRUE(StartTablet("127.0.0.1:9535", &server1, &options1)); + brpc::ServerOptions options; brpc::Server server; ASSERT_TRUE(StartNS("127.0.0.1:9634", &server, &options)); auto ns_client = std::make_shared("127.0.0.1:9634", "127.0.0.1:9634"); ns_client->Init(); - brpc::ServerOptions options1; - brpc::Server server1; - ASSERT_TRUE(StartTablet("127.0.0.1:9535", &server1, &options1)); - std::string db_name = "db1"; std::string msg; ASSERT_TRUE(ns_client->CreateDatabase(db_name, msg, true)); @@ -1279,6 +1312,10 @@ TEST_F(NameServerImplTest, AddField) { ASSERT_EQ(table_info1.schema_versions_size(), 1); ASSERT_EQ(table_info1.schema_versions(0).id(), 2); ASSERT_EQ(table_info1.schema_versions(0).field_count(), 3); + server1.Stop(1); + server1.Join(); + server.Stop(1); + server.Join(); } } // namespace nameserver diff --git a/src/nameserver/new_server_env_test.cc b/src/nameserver/new_server_env_test.cc index f24a29810ee..1bb364a0de9 100644 --- a/src/nameserver/new_server_env_test.cc +++ b/src/nameserver/new_server_env_test.cc @@ -162,15 +162,6 @@ TEST_F(NewServerEnvTest, ShowRealEndpoint) { FLAGS_zk_cluster = "127.0.0.1:6181"; FLAGS_zk_root_path = "/rtidb4" + ::openmldb::test::GenRand(); - // ns1 - FLAGS_use_name = true; - FLAGS_endpoint = "ns1"; - std::string ns_real_ep = "127.0.0.1:9631"; - brpc::Server ns_server; - StartNameServer(ns_server, ns_real_ep); - ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(ns_real_ep); - name_server_client.Init(); - // tablet1 FLAGS_use_name = true; FLAGS_endpoint = "tb1"; @@ -188,6 +179,15 @@ TEST_F(NewServerEnvTest, ShowRealEndpoint) { brpc::Server tb_server2; StartTablet(tb_server2, tb_real_ep_2); + // ns1 + FLAGS_use_name = true; + FLAGS_endpoint = "ns1"; + std::string ns_real_ep = 
"127.0.0.1:9631"; + brpc::Server ns_server; + StartNameServer(ns_server, ns_real_ep); + ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(ns_real_ep); + name_server_client.Init(); + { std::map map; ShowNameServer(&map); @@ -252,15 +252,6 @@ TEST_F(NewServerEnvTest, ShowRealEndpointDelayNameserverStart) { FLAGS_zk_cluster = "127.0.0.1:6181"; FLAGS_zk_root_path = "/rtidb4" + ::openmldb::test::GenRand(); - // ns1 - FLAGS_use_name = true; - FLAGS_endpoint = "ns1"; - std::string ns_real_ep = "127.0.0.1:9631"; - brpc::Server ns_server; - StartNameServerWithDelay(ns_server, ns_real_ep); - ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(ns_real_ep); - name_server_client.Init(); - // tablet1 FLAGS_use_name = true; FLAGS_endpoint = "tb1"; @@ -278,6 +269,15 @@ TEST_F(NewServerEnvTest, ShowRealEndpointDelayNameserverStart) { brpc::Server tb_server2; StartTablet(tb_server2, tb_real_ep_2); + // ns1 + FLAGS_use_name = true; + FLAGS_endpoint = "ns1"; + std::string ns_real_ep = "127.0.0.1:9631"; + brpc::Server ns_server; + StartNameServerWithDelay(ns_server, ns_real_ep); + ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(ns_real_ep); + name_server_client.Init(); + { std::map map; ShowNameServer(&map); diff --git a/src/rpc/rpc_client.h b/src/rpc/rpc_client.h index 375240dff50..77026b4d6d2 100644 --- a/src/rpc/rpc_client.h +++ b/src/rpc/rpc_client.h @@ -42,6 +42,7 @@ #include // NOLINT #include +#include "auth/brpc_authenticator.h" #include "base/glog_wrapper.h" #include "base/status.h" #include "proto/tablet.pb.h" @@ -75,10 +76,22 @@ static SleepRetryPolicy sleep_retry_policy; template class RpcClient { public: - explicit RpcClient(const std::string& endpoint) - : endpoint_(endpoint), use_sleep_policy_(false), log_id_(0), stub_(NULL), channel_(NULL) {} - RpcClient(const std::string& endpoint, bool use_sleep_policy) - : endpoint_(endpoint), use_sleep_policy_(use_sleep_policy), log_id_(0), stub_(NULL), channel_(NULL) {} + explicit RpcClient(const std::string& endpoint, + const openmldb::authn::AuthToken auth_token = openmldb::authn::ServiceToken{"default"}) + : endpoint_(endpoint), + use_sleep_policy_(false), + log_id_(0), + stub_(NULL), + channel_(NULL), + client_authenticator_(auth_token) {} + RpcClient(const std::string& endpoint, bool use_sleep_policy, + const openmldb::authn::AuthToken auth_token = openmldb::authn::ServiceToken{"default"}) + : endpoint_(endpoint), + use_sleep_policy_(use_sleep_policy), + log_id_(0), + stub_(NULL), + channel_(NULL), + client_authenticator_(auth_token) {} ~RpcClient() { delete channel_; delete stub_; @@ -90,6 +103,8 @@ class RpcClient { if (use_sleep_policy_) { options.retry_policy = &sleep_retry_policy; } + options.auth = &client_authenticator_; + if (channel_->Init(endpoint_.c_str(), "", &options) != 0) { return -1; } @@ -148,7 +163,8 @@ class RpcClient { template base::Status SendRequestSt(void (T::*func)(google::protobuf::RpcController*, const Request*, Response*, Callback*), const Request* request, Response* response, uint64_t rpc_timeout, int retry_times) { - return SendRequestSt(func, [](brpc::Controller* cntl) {}, request, response, rpc_timeout, retry_times); + return SendRequestSt( + func, [](brpc::Controller* cntl) {}, request, response, rpc_timeout, retry_times); } template @@ -218,10 +234,12 @@ class RpcClient { private: std::string endpoint_; + std::string auth_str_; bool use_sleep_policy_; uint64_t log_id_; T* stub_; brpc::Channel* channel_; + 
authn::BRPCAuthenticator client_authenticator_; }; template diff --git a/src/sdk/CMakeLists.txt b/src/sdk/CMakeLists.txt index 698cf6ea631..db53a69f226 100644 --- a/src/sdk/CMakeLists.txt +++ b/src/sdk/CMakeLists.txt @@ -73,7 +73,7 @@ if(TESTING_ENABLE) target_link_libraries(options_map_parser_test ${BIN_LIBS}) endif() -set(SDK_LIBS openmldb_sdk openmldb_catalog client zk_client schema openmldb_flags openmldb_codec openmldb_proto base hybridse_sdk zookeeper_mt) +set(SDK_LIBS openmldb_sdk openmldb_catalog client zk_client schema openmldb_flags openmldb_codec openmldb_proto base auth hybridse_sdk zookeeper_mt) if(SQL_PYSDK_ENABLE) find_package(Python3 COMPONENTS Interpreter Development) @@ -266,6 +266,7 @@ target_link_libraries(openmldb_api openmldb_sdk client zk_client base + auth openmldb_flags hybridse_sdk hybridse_core diff --git a/src/sdk/db_sdk.cc b/src/sdk/db_sdk.cc index 8a4f951cee1..9275a5c29aa 100644 --- a/src/sdk/db_sdk.cc +++ b/src/sdk/db_sdk.cc @@ -45,7 +45,12 @@ std::shared_ptr<::openmldb::client::NsClient> DBSDK::GetNsClient() { DLOG(ERROR) << "fail to get ns address"; return {}; } - ns_client = std::make_shared<::openmldb::client::NsClient>(endpoint, real_endpoint); + if (auto options = GetOptions(); !options->user.empty()) { + ns_client = std::make_shared<::openmldb::client::NsClient>( + endpoint, real_endpoint, authn::UserToken{options->user, codec::Encrypt(options->password)}); + } else { + ns_client = std::make_shared<::openmldb::client::NsClient>(endpoint, real_endpoint); + } int ret = ns_client->Init(); if (ret != 0) { // We GetNsClient and use it without checking not null. It's intolerable. @@ -142,8 +147,10 @@ bool DBSDK::RegisterExternalFun(const std::shared_ptrarg_type(i), &data_type); arg_types.emplace_back(data_type); } - if (engine_->RegisterExternalFunction(fun->name(), return_type, fun->return_nullable(), - arg_types, fun->arg_nullable(), fun->is_aggregate(), "").isOK()) { + if (engine_ + ->RegisterExternalFunction(fun->name(), return_type, fun->return_nullable(), arg_types, fun->arg_nullable(), + fun->is_aggregate(), "") + .isOK()) { std::lock_guard<::openmldb::base::SpinMutex> lock(mu_); external_fun_.emplace(fun->name(), fun); return true; @@ -212,11 +219,8 @@ void ClusterSDK::CheckZk() { } bool ClusterSDK::Init() { - zk_client_ = new ::openmldb::zk::ZkClient(options_->zk_cluster, "", - options_->zk_session_timeout, "", - options_->zk_path, - options_->zk_auth_schema, - options_->zk_cert); + zk_client_ = new ::openmldb::zk::ZkClient(options_->zk_cluster, "", options_->zk_session_timeout, "", + options_->zk_path, options_->zk_auth_schema, options_->zk_cert); bool ok = zk_client_->Init(options_->zk_log_level, options_->zk_log_file); if (!ok) { diff --git a/src/sdk/db_sdk.h b/src/sdk/db_sdk.h index 982bdd5a40f..38dc098f37d 100644 --- a/src/sdk/db_sdk.h +++ b/src/sdk/db_sdk.h @@ -28,6 +28,7 @@ #include "client/ns_client.h" #include "client/tablet_client.h" #include "client/taskmanager_client.h" +#include "codec/encrypt.h" #include "common/thread_pool.h" #include "sdk/options.h" #include "vm/catalog.h" @@ -49,9 +50,8 @@ struct ClusterOptions { std::string to_string() { std::stringstream ss; ss << "zk options [cluster:" << zk_cluster << ", path:" << zk_path - << ", zk_session_timeout:" << zk_session_timeout - << ", log_level:" << zk_log_level << ", log_file:" << zk_log_file - << ", zk_auth_schema:" << zk_auth_schema << ", zk_cert:" << zk_cert << "]"; + << ", zk_session_timeout:" << zk_session_timeout << ", log_level:" << zk_log_level + << ", log_file:" << 
zk_log_file << ", zk_auth_schema:" << zk_auth_schema << ", zk_cert:" << zk_cert << "]"; return ss.str(); } }; diff --git a/src/sdk/mini_cluster.h b/src/sdk/mini_cluster.h index f6d0bbc950f..ab4b7d5c8c2 100644 --- a/src/sdk/mini_cluster.h +++ b/src/sdk/mini_cluster.h @@ -21,9 +21,12 @@ #include #include +#include #include #include +#include "auth/brpc_authenticator.h" +#include "auth/user_access_manager.h" #include "base/file_util.h" #include "base/glog_wrapper.h" #include "brpc/server.h" @@ -69,6 +72,16 @@ class MiniCluster { : zk_port_(zk_port), ns_(), tablet_num_(2), zk_cluster_(), zk_path_(), ns_client_(NULL) {} ~MiniCluster() { + if (user_access_manager_) { + delete user_access_manager_; + user_access_manager_ = nullptr; + } + + if (ns_authenticator_) { + delete ns_authenticator_; + ns_authenticator_ = nullptr; + } + for (const auto& kv : tb_clients_) { delete kv.second; } @@ -108,7 +121,19 @@ class MiniCluster { if (!ok) { return false; } + if (!nameserver->GetTableInfo(::openmldb::nameserver::USER_INFO_NAME, ::openmldb::nameserver::INTERNAL_DB, + &user_table_info_)) { + PDLOG(WARNING, "Failed to get table info for user table"); + return false; + } + user_access_manager_ = + new openmldb::auth::UserAccessManager(nameserver->GetSystemTableIterator(), user_table_info_); + ns_authenticator_ = new openmldb::authn::BRPCAuthenticator( + [this](const std::string& host, const std::string& username, const std::string& password) { + return user_access_manager_->IsAuthenticated(host, username, password); + }); brpc::ServerOptions options; + options.auth = ns_authenticator_; if (ns_.AddService(nameserver, brpc::SERVER_OWNS_SERVICE) != 0) { LOG(WARNING) << "fail to add ns"; return false; @@ -133,6 +158,15 @@ class MiniCluster { } void Close() { + if (user_access_manager_) { + delete user_access_manager_; + user_access_manager_ = nullptr; + } + + if (ns_authenticator_) { + delete ns_authenticator_; + ns_authenticator_ = nullptr; + } nameserver->CloseThreadpool(); ns_.Stop(10); ns_.Join(); @@ -183,6 +217,8 @@ class MiniCluster { return false; } brpc::ServerOptions ts_opt; + ts_opt.auth = &tablet_authenticator_; + if (tb_server->AddService(tablet, brpc::SERVER_OWNS_SERVICE) != 0) { LOG(WARNING) << "fail to add tablet"; return false; @@ -218,12 +254,25 @@ class MiniCluster { ::openmldb::client::NsClient* ns_client_; std::map tablets_; std::map tb_clients_; + openmldb::authn::BRPCAuthenticator tablet_authenticator_; + openmldb::authn::BRPCAuthenticator* ns_authenticator_; + openmldb::auth::UserAccessManager* user_access_manager_; + std::shared_ptr<::openmldb::nameserver::TableInfo> user_table_info_; }; class StandaloneEnv { public: StandaloneEnv() : ns_(), ns_client_(nullptr), tb_client_(nullptr) {} ~StandaloneEnv() { + if (user_access_manager_) { + delete user_access_manager_; + user_access_manager_ = nullptr; + } + + if (ns_authenticator_) { + delete ns_authenticator_; + ns_authenticator_ = nullptr; + } if (tb_client_) { delete tb_client_; } @@ -253,7 +302,19 @@ class StandaloneEnv { if (!ok) { return false; } + if (!nameserver->GetTableInfo(::openmldb::nameserver::USER_INFO_NAME, ::openmldb::nameserver::INTERNAL_DB, + &user_table_info_)) { + PDLOG(WARNING, "Failed to get table info for user table"); + return false; + } + user_access_manager_ = + new openmldb::auth::UserAccessManager(nameserver->GetSystemTableIterator(), user_table_info_); + ns_authenticator_ = new openmldb::authn::BRPCAuthenticator( + [this](const std::string& host, const std::string& username, const std::string& password) { + 
return user_access_manager_->IsAuthenticated(host, username, password); + }); brpc::ServerOptions options; + options.auth = ns_authenticator_; if (ns_.AddService(nameserver, brpc::SERVER_OWNS_SERVICE) != 0) { LOG(WARNING) << "fail to add ns"; return false; @@ -276,6 +337,15 @@ class StandaloneEnv { } void Close() { + if (user_access_manager_) { + delete user_access_manager_; + user_access_manager_ = nullptr; + } + + if (ns_authenticator_) { + delete ns_authenticator_; + ns_authenticator_ = nullptr; + } nameserver->CloseThreadpool(); ns_.Stop(10); ns_.Join(); @@ -305,6 +375,7 @@ class StandaloneEnv { return false; } brpc::ServerOptions ts_opt; + ts_opt.auth = &tablet_authenticator_; if (tb_server->AddService(tablet, brpc::SERVER_OWNS_SERVICE) != 0) { LOG(WARNING) << "fail to add tablet"; return false; @@ -330,6 +401,10 @@ class StandaloneEnv { uint64_t ns_port_ = 0; ::openmldb::client::NsClient* ns_client_; ::openmldb::client::TabletClient* tb_client_; + openmldb::authn::BRPCAuthenticator tablet_authenticator_; + openmldb::authn::BRPCAuthenticator* ns_authenticator_; + openmldb::auth::UserAccessManager* user_access_manager_; + std::shared_ptr<::openmldb::nameserver::TableInfo> user_table_info_; }; } // namespace sdk diff --git a/src/sdk/sql_cluster_router.cc b/src/sdk/sql_cluster_router.cc index 7076804ff49..8be810f1559 100644 --- a/src/sdk/sql_cluster_router.cc +++ b/src/sdk/sql_cluster_router.cc @@ -198,7 +198,7 @@ bool SQLClusterRouter::Init() { session_variables_.emplace("insert_memory_usage_limit", "0"); session_variables_.emplace("spark_config", ""); } - return Auth(); + return true; } bool SQLClusterRouter::Auth() { diff --git a/src/tablet/file_sender.cc b/src/tablet/file_sender.cc index 47f3e535833..9420d1cf88f 100644 --- a/src/tablet/file_sender.cc +++ b/src/tablet/file_sender.cc @@ -63,6 +63,7 @@ bool FileSender::Init() { } channel_ = new brpc::Channel(); brpc::ChannelOptions options; + options.auth = &client_authenticator_; options.timeout_ms = FLAGS_request_timeout_ms; options.connect_timeout_ms = FLAGS_request_timeout_ms; options.max_retry = FLAGS_request_max_retry; diff --git a/src/tablet/file_sender.h b/src/tablet/file_sender.h index fb5f4be5c56..221850fd08e 100644 --- a/src/tablet/file_sender.h +++ b/src/tablet/file_sender.h @@ -22,6 +22,7 @@ #include #include "proto/tablet.pb.h" +#include "auth/brpc_authenticator.h" namespace openmldb { namespace tablet { @@ -50,6 +51,7 @@ class FileSender { uint64_t limit_time_; brpc::Channel* channel_; ::openmldb::api::TabletServer_Stub* stub_; + openmldb::authn::BRPCAuthenticator client_authenticator_; }; } // namespace tablet diff --git a/src/tablet/sql_cluster_availability_test.cc b/src/tablet/procedure_drop_test.cc similarity index 67% rename from src/tablet/sql_cluster_availability_test.cc rename to src/tablet/procedure_drop_test.cc index aef00e4c851..de43a2e02dc 100644 --- a/src/tablet/sql_cluster_availability_test.cc +++ b/src/tablet/procedure_drop_test.cc @@ -43,11 +43,11 @@ using ::openmldb::nameserver::NameServerImpl; namespace openmldb { namespace tablet { -class SqlClusterTest : public ::testing::Test { +class ProcedureDropTest : public ::testing::Test { public: - SqlClusterTest() {} + ProcedureDropTest() {} - ~SqlClusterTest() {} + ~ProcedureDropTest() {} }; std::shared_ptr GetNewSQLRouter() { @@ -125,144 +125,26 @@ void ShowTable(::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub>& n ::openmldb::nameserver::ShowTableRequest request; ::openmldb::nameserver::ShowTableResponse response; request.set_db(db); - 
request.set_show_all(true); + request.set_show_all(false); bool ok = name_server_client.SendRequest(&::openmldb::nameserver::NameServer_Stub::ShowTable, &request, &response, FLAGS_request_timeout_ms, 1); ASSERT_TRUE(ok); ASSERT_EQ(response.table_info_size(), size); } -TEST_F(SqlClusterTest, RecoverProcedure) { +TEST_F(ProcedureDropTest, DropProcedureBeforeDropTable) { FLAGS_auto_failover = true; FLAGS_zk_cluster = "127.0.0.1:6181"; FLAGS_zk_root_path = "/rtidb4" + ::openmldb::test::GenRand(); - // ns1 - FLAGS_endpoint = "127.0.0.1:9631"; - brpc::Server ns_server; - StartNameServer(ns_server); - ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(FLAGS_endpoint, ""); - name_server_client.Init(); - // tablet1 - FLAGS_endpoint = "127.0.0.1:9831"; + FLAGS_endpoint = "127.0.0.1:9832"; ::openmldb::test::TempPath tmp_path; FLAGS_db_root_path = tmp_path.GetTempPath(); brpc::Server tb_server1; ::openmldb::tablet::TabletImpl* tablet1 = new ::openmldb::tablet::TabletImpl(); StartTablet(&tb_server1, tablet1); - { - // showtablet - ::openmldb::nameserver::ShowTabletRequest request; - ::openmldb::nameserver::ShowTabletResponse response; - bool ok = name_server_client.SendRequest(&::openmldb::nameserver::NameServer_Stub::ShowTablet, &request, - &response, FLAGS_request_timeout_ms, 1); - ASSERT_TRUE(ok); - ASSERT_EQ(response.tablets_size(), 1); - ::openmldb::nameserver::TabletStatus status = response.tablets(0); - ASSERT_EQ(FLAGS_endpoint, status.endpoint()); - ASSERT_EQ("kHealthy", status.state()); - } - - // create table - std::string ddl = - "create table trans(c1 string,\n" - " c3 int,\n" - " c4 bigint,\n" - " c5 float,\n" - " c6 double,\n" - " c7 timestamp,\n" - " c8 date,\n" - " index(key=c1, ts=c7));"; - auto router = GetNewSQLRouter(); - if (!router) { - FAIL() << "Fail new cluster sql router"; - } - std::string db = "test"; - hybridse::sdk::Status status; - ASSERT_TRUE(router->CreateDB(db, &status)); - router->ExecuteDDL(db, "drop table trans;", &status); - ASSERT_TRUE(router->RefreshCatalog()); - if (!router->ExecuteDDL(db, ddl, &status)) { - FAIL() << "fail to create table"; - } - ASSERT_TRUE(router->RefreshCatalog()); - // insert - std::string insert_sql = "insert into trans values(\"bb\",24,34,1.5,2.5,1590738994000,\"2020-05-05\");"; - ASSERT_TRUE(router->ExecuteInsert(db, insert_sql, &status)); - // create procedure - std::string sp_name = "sp"; - std::string sql = - "SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM trans WINDOW w1 AS" - " (PARTITION BY trans.c1 ORDER BY trans.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);"; - std::string sp_ddl = "create procedure " + sp_name + - " (const c1 string, const c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date" + - ")" + " begin " + sql + " end;"; - if (!router->ExecuteDDL(db, sp_ddl, &status)) { - FAIL() << "fail to create procedure"; - } - // call procedure - ASSERT_TRUE(router->RefreshCatalog()); - auto request_row = router->GetRequestRow(db, sql, &status); - ASSERT_TRUE(request_row); - request_row->Init(2); - ASSERT_TRUE(request_row->AppendString("bb")); - ASSERT_TRUE(request_row->AppendInt32(23)); - ASSERT_TRUE(request_row->AppendInt64(33)); - ASSERT_TRUE(request_row->AppendFloat(1.5f)); - ASSERT_TRUE(request_row->AppendDouble(2.5)); - ASSERT_TRUE(request_row->AppendTimestamp(1590738994000)); - ASSERT_TRUE(request_row->AppendDate(1234)); - ASSERT_TRUE(request_row->Build()); - auto rs = router->CallProcedure(db, sp_name, request_row, &status); - if (!rs) FAIL() << "call procedure failed"; - auto schema 
= rs->GetSchema(); - ASSERT_EQ(schema->GetColumnCnt(), 3); - ASSERT_TRUE(rs->Next()); - ASSERT_EQ(rs->GetStringUnsafe(0), "bb"); - ASSERT_EQ(rs->GetInt32Unsafe(1), 23); - ASSERT_EQ(rs->GetInt64Unsafe(2), 67); - ASSERT_FALSE(rs->Next()); - // stop - tb_server1.Stop(10); - delete tablet1; - sleep(3); - rs = router->CallProcedure(db, sp_name, request_row, &status); - ASSERT_FALSE(rs); - // restart - brpc::Server tb_server2; - ::openmldb::tablet::TabletImpl* tablet2 = new ::openmldb::tablet::TabletImpl(); - StartTablet(&tb_server2, tablet2); - sleep(3); - rs = router->CallProcedure(db, sp_name, request_row, &status); - if (!rs) FAIL() << "call procedure failed"; - schema = rs->GetSchema(); - ASSERT_EQ(schema->GetColumnCnt(), 3); - ASSERT_TRUE(rs->Next()); - ASSERT_EQ(rs->GetStringUnsafe(0), "bb"); - ASSERT_EQ(rs->GetInt32Unsafe(1), 23); - ASSERT_EQ(rs->GetInt64Unsafe(2), 67); - ASSERT_FALSE(rs->Next()); - - ShowTable(name_server_client, db, 1); - // drop table fail - DropTable(name_server_client, db, "trans", false); - // drop procedure sp - DropProcedure(name_server_client, db, sp_name); - // drop table success - DropTable(name_server_client, db, "trans", true); - ShowTable(name_server_client, db, 0); - - tb_server2.Stop(10); - delete tablet2; -} - -TEST_F(SqlClusterTest, DropProcedureBeforeDropTable) { - FLAGS_auto_failover = true; - FLAGS_zk_cluster = "127.0.0.1:6181"; - FLAGS_zk_root_path = "/rtidb4" + ::openmldb::test::GenRand(); - // ns1 FLAGS_endpoint = "127.0.0.1:9632"; brpc::Server ns_server; @@ -270,15 +152,8 @@ TEST_F(SqlClusterTest, DropProcedureBeforeDropTable) { ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(FLAGS_endpoint, ""); name_server_client.Init(); - // tablet1 - FLAGS_endpoint = "127.0.0.1:9832"; - ::openmldb::test::TempPath tmp_path; - FLAGS_db_root_path = tmp_path.GetTempPath(); - brpc::Server tb_server1; - ::openmldb::tablet::TabletImpl* tablet1 = new ::openmldb::tablet::TabletImpl(); - StartTablet(&tb_server1, tablet1); - { + FLAGS_endpoint = "127.0.0.1:9832"; // showtablet ::openmldb::nameserver::ShowTabletRequest request; ::openmldb::nameserver::ShowTabletResponse response; @@ -421,7 +296,7 @@ int main(int argc, char** argv) { srand(time(NULL)); ::openmldb::base::SetLogLevel(INFO); ::google::ParseCommandLineFlags(&argc, &argv, true); - ::openmldb::test::InitRandomDiskFlags("sql_cluster_availability_test"); + ::openmldb::test::InitRandomDiskFlags("procedure_recover_test"); FLAGS_system_table_replica_num = 0; return RUN_ALL_TESTS(); } diff --git a/src/tablet/procedure_recover_test.cc b/src/tablet/procedure_recover_test.cc new file mode 100644 index 00000000000..eef6d87669b --- /dev/null +++ b/src/tablet/procedure_recover_test.cc @@ -0,0 +1,275 @@ +/* + * Copyright 2021 4Paradigm + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include "client/ns_client.h" +#include "common/timer.h" +#include "gtest/gtest.h" +#include "nameserver/name_server_impl.h" +#include "proto/name_server.pb.h" +#include "proto/tablet.pb.h" +#include "rpc/rpc_client.h" +#include "sdk/sql_router.h" +#include "tablet/tablet_impl.h" +#include "test/util.h" + +DECLARE_string(endpoint); +DECLARE_string(db_root_path); +DECLARE_string(zk_cluster); +DECLARE_string(zk_root_path); +DECLARE_int32(zk_session_timeout); +DECLARE_int32(request_timeout_ms); +DECLARE_bool(auto_failover); +DECLARE_uint32(system_table_replica_num); + +using ::openmldb::nameserver::NameServerImpl; + +namespace openmldb { +namespace tablet { + +class ProcedureRecoverTest : public ::testing::Test { + public: + ProcedureRecoverTest() {} + + ~ProcedureRecoverTest() {} +}; + +std::shared_ptr GetNewSQLRouter() { + ::hybridse::vm::Engine::InitializeGlobalLLVM(); + openmldb::sdk::SQLRouterOptions sql_opt; + sql_opt.zk_cluster = FLAGS_zk_cluster; + sql_opt.zk_path = FLAGS_zk_root_path; + sql_opt.enable_debug = true; + return openmldb::sdk::NewClusterSQLRouter(sql_opt); +} + +void StartNameServer(brpc::Server& server) { // NOLINT + NameServerImpl* nameserver = new NameServerImpl(); + bool ok = nameserver->Init(""); + ASSERT_TRUE(ok); + brpc::ServerOptions options; + if (server.AddService(nameserver, brpc::SERVER_OWNS_SERVICE) != 0) { + PDLOG(WARNING, "Fail to add service"); + exit(1); + } + if (server.Start(FLAGS_endpoint.c_str(), &options) != 0) { + PDLOG(WARNING, "Fail to start server"); + exit(1); + } + sleep(2); +} + +void StartTablet(brpc::Server* server, ::openmldb::tablet::TabletImpl* tablet) { // NOLINT + bool ok = tablet->Init(""); + ASSERT_TRUE(ok); + brpc::ServerOptions options; + if (server->AddService(tablet, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + PDLOG(WARNING, "Fail to add service"); + exit(1); + } + if (server->Start(FLAGS_endpoint.c_str(), &options) != 0) { + PDLOG(WARNING, "Fail to start server"); + exit(1); + } + ASSERT_TRUE(tablet->RegisterZK()); + sleep(2); +} + +void DropTable(::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub>& name_server_client, // NOLINT + const std::string& db, const std::string& table_name, bool success) { + ::openmldb::nameserver::DropTableRequest request; + ::openmldb::nameserver::GeneralResponse response; + request.set_db(db); + request.set_name(table_name); + bool ok = name_server_client.SendRequest(&::openmldb::nameserver::NameServer_Stub::DropTable, &request, &response, + FLAGS_request_timeout_ms, 1); + ASSERT_TRUE(ok); + if (success) { + ASSERT_EQ(response.code(), 0); + } else { + ASSERT_NE(response.code(), 0); + } +} + +void DropProcedure(::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub>& name_server_client, // NOLINT + const std::string& db, const std::string& sp_name) { + api::DropProcedureRequest request; + nameserver::GeneralResponse response; + request.set_db_name(db); + request.set_sp_name(sp_name); + bool ok = name_server_client.SendRequest(&::openmldb::nameserver::NameServer_Stub::DropProcedure, &request, + &response, FLAGS_request_timeout_ms, 1); + ASSERT_TRUE(ok); + ASSERT_EQ(response.code(), 0); +} + +void ShowTable(::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub>& name_server_client, // NOLINT + + const std::string& db, int32_t size) { + ::openmldb::nameserver::ShowTableRequest request; + ::openmldb::nameserver::ShowTableResponse response; + request.set_db(db); + request.set_show_all(false); + bool ok = 
name_server_client.SendRequest(&::openmldb::nameserver::NameServer_Stub::ShowTable, &request, &response, + FLAGS_request_timeout_ms, 1); + ASSERT_TRUE(ok); + ASSERT_EQ(response.table_info_size(), size); +} + +TEST_F(ProcedureRecoverTest, RecoverProcedure) { + FLAGS_auto_failover = true; + FLAGS_zk_cluster = "127.0.0.1:6181"; + FLAGS_zk_root_path = "/rtidb4" + ::openmldb::test::GenRand(); + + // tablet1 + FLAGS_endpoint = "127.0.0.1:9831"; + ::openmldb::test::TempPath tmp_path; + FLAGS_db_root_path = tmp_path.GetTempPath(); + brpc::Server tb_server1; + ::openmldb::tablet::TabletImpl* tablet1 = new ::openmldb::tablet::TabletImpl(); + StartTablet(&tb_server1, tablet1); + + // ns1 + FLAGS_endpoint = "127.0.0.1:9631"; + brpc::Server ns_server; + StartNameServer(ns_server); + ::openmldb::RpcClient<::openmldb::nameserver::NameServer_Stub> name_server_client(FLAGS_endpoint, ""); + name_server_client.Init(); + + FLAGS_endpoint = "127.0.0.1:9831"; + + { + // showtablet + ::openmldb::nameserver::ShowTabletRequest request; + ::openmldb::nameserver::ShowTabletResponse response; + bool ok = name_server_client.SendRequest(&::openmldb::nameserver::NameServer_Stub::ShowTablet, &request, + &response, FLAGS_request_timeout_ms, 1); + ASSERT_TRUE(ok); + ASSERT_EQ(response.tablets_size(), 1); + ::openmldb::nameserver::TabletStatus status = response.tablets(0); + ASSERT_EQ(FLAGS_endpoint, status.endpoint()); + ASSERT_EQ("kHealthy", status.state()); + } + + // create table + std::string ddl = + "create table trans(c1 string,\n" + " c3 int,\n" + " c4 bigint,\n" + " c5 float,\n" + " c6 double,\n" + " c7 timestamp,\n" + " c8 date,\n" + " index(key=c1, ts=c7));"; + auto router = GetNewSQLRouter(); + if (!router) { + FAIL() << "Fail new cluster sql router"; + } + std::string db = "test"; + hybridse::sdk::Status status; + ASSERT_TRUE(router->CreateDB(db, &status)); + router->ExecuteDDL(db, "drop table trans;", &status); + ASSERT_TRUE(router->RefreshCatalog()); + if (!router->ExecuteDDL(db, ddl, &status)) { + FAIL() << "fail to create table"; + } + ASSERT_TRUE(router->RefreshCatalog()); + // insert + std::string insert_sql = "insert into trans values(\"bb\",24,34,1.5,2.5,1590738994000,\"2020-05-05\");"; + ASSERT_TRUE(router->ExecuteInsert(db, insert_sql, &status)); + // create procedure + std::string sp_name = "sp"; + std::string sql = + "SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM trans WINDOW w1 AS" + " (PARTITION BY trans.c1 ORDER BY trans.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);"; + std::string sp_ddl = "create procedure " + sp_name + + " (const c1 string, const c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date" + + ")" + " begin " + sql + " end;"; + if (!router->ExecuteDDL(db, sp_ddl, &status)) { + FAIL() << "fail to create procedure"; + } + // call procedure + ASSERT_TRUE(router->RefreshCatalog()); + auto request_row = router->GetRequestRow(db, sql, &status); + ASSERT_TRUE(request_row); + request_row->Init(2); + ASSERT_TRUE(request_row->AppendString("bb")); + ASSERT_TRUE(request_row->AppendInt32(23)); + ASSERT_TRUE(request_row->AppendInt64(33)); + ASSERT_TRUE(request_row->AppendFloat(1.5f)); + ASSERT_TRUE(request_row->AppendDouble(2.5)); + ASSERT_TRUE(request_row->AppendTimestamp(1590738994000)); + ASSERT_TRUE(request_row->AppendDate(1234)); + ASSERT_TRUE(request_row->Build()); + auto rs = router->CallProcedure(db, sp_name, request_row, &status); + if (!rs) FAIL() << "call procedure failed"; + auto schema = rs->GetSchema(); + ASSERT_EQ(schema->GetColumnCnt(), 3); + ASSERT_TRUE(rs->Next()); + 
ASSERT_EQ(rs->GetStringUnsafe(0), "bb"); + ASSERT_EQ(rs->GetInt32Unsafe(1), 23); + ASSERT_EQ(rs->GetInt64Unsafe(2), 67); + ASSERT_FALSE(rs->Next()); + // stop + tb_server1.Stop(10); + delete tablet1; + sleep(3); + rs = router->CallProcedure(db, sp_name, request_row, &status); + ASSERT_FALSE(rs); + // restart + brpc::Server tb_server2; + ::openmldb::tablet::TabletImpl* tablet2 = new ::openmldb::tablet::TabletImpl(); + StartTablet(&tb_server2, tablet2); + sleep(3); + rs = router->CallProcedure(db, sp_name, request_row, &status); + if (!rs) FAIL() << "call procedure failed"; + schema = rs->GetSchema(); + ASSERT_EQ(schema->GetColumnCnt(), 3); + ASSERT_TRUE(rs->Next()); + ASSERT_EQ(rs->GetStringUnsafe(0), "bb"); + ASSERT_EQ(rs->GetInt32Unsafe(1), 23); + ASSERT_EQ(rs->GetInt64Unsafe(2), 67); + ASSERT_FALSE(rs->Next()); + + ShowTable(name_server_client, db, 1); + // drop table fail + DropTable(name_server_client, db, "trans", false); + // drop procedure sp + DropProcedure(name_server_client, db, sp_name); + // drop table success + DropTable(name_server_client, db, "trans", true); + ShowTable(name_server_client, db, 0); + + tb_server2.Stop(10); + delete tablet2; +} + +} // namespace tablet +} // namespace openmldb + +int main(int argc, char** argv) { + FLAGS_zk_session_timeout = 2000; + ::testing::InitGoogleTest(&argc, argv); + srand(time(NULL)); + ::openmldb::base::SetLogLevel(INFO); + ::google::ParseCommandLineFlags(&argc, &argv, true); + ::openmldb::test::InitRandomDiskFlags("recover_procedure_test"); + FLAGS_system_table_replica_num = 0; + return RUN_ALL_TESTS(); +}
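
Reviewer note (not part of the patch): the sketch below pulls together the authentication wiring introduced in the hunks above, purely for orientation. It uses only calls visible in this diff (openmldb::auth::UserAccessManager, openmldb::authn::BRPCAuthenticator, the new auth-token RpcClient/NsClient constructors, codec::Encrypt); the helper names BuildAuthenticatedServerOptions and MakeAuthedNsClient are hypothetical, exist only for this sketch, and the snippet compiles only inside the OpenMLDB source tree.

// Sketch, not part of this diff: how the new auth pieces are expected to fit together,
// based on mini_cluster.h, rpc_client.h and db_sdk.cc above. Signatures are taken from
// those hunks; anything not shown there is an assumption.
#include <memory>
#include <string>

#include "auth/brpc_authenticator.h"
#include "auth/user_access_manager.h"
#include "brpc/server.h"
#include "client/ns_client.h"
#include "codec/encrypt.h"

// Server side (mirrors MiniCluster/StandaloneEnv): credentials on incoming RPCs are checked
// against the user system table through UserAccessManager, and the authenticator is installed
// via brpc::ServerOptions::auth. "BuildAuthenticatedServerOptions" is a hypothetical helper.
brpc::ServerOptions BuildAuthenticatedServerOptions(openmldb::auth::UserAccessManager* mgr,
                                                    openmldb::authn::BRPCAuthenticator** out_auth) {
    *out_auth = new openmldb::authn::BRPCAuthenticator(
        [mgr](const std::string& host, const std::string& username, const std::string& password) {
            // Delegate the credential check, exactly as the test environments above do.
            return mgr->IsAuthenticated(host, username, password);
        });
    brpc::ServerOptions options;
    options.auth = *out_auth;  // brpc consults this authenticator for every connection.
    return options;
}

// Client side (mirrors DBSDK::GetNsClient): when the SDK options carry a user, the client sends
// a UserToken with the encrypted password; otherwise RpcClient falls back to its default
// ServiceToken{"default"}. "MakeAuthedNsClient" is likewise a hypothetical helper name.
std::shared_ptr<openmldb::client::NsClient> MakeAuthedNsClient(const std::string& endpoint,
                                                               const std::string& real_endpoint,
                                                               const std::string& user,
                                                               const std::string& password) {
    if (!user.empty()) {
        return std::make_shared<openmldb::client::NsClient>(
            endpoint, real_endpoint,
            openmldb::authn::UserToken{user, openmldb::codec::Encrypt(password)});
    }
    return std::make_shared<openmldb::client::NsClient>(endpoint, real_endpoint);
}

This is also presumably why the tests in this diff now start tablets before nameservers and explicitly Stop()/Join() every server: the nameserver appears to need a healthy tablet hosting the user system table before GetTableInfo(USER_INFO_NAME, INTERNAL_DB) and the authenticator wiring can succeed.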