diff --git a/docs/en/framework_config_full.md b/docs/en/framework_config_full.md
index 7dfa9be5..d77cd8d7 100644
--- a/docs/en/framework_config_full.md
+++ b/docs/en/framework_config_full.md
@@ -145,6 +145,7 @@ client:
     disable_servicerouter: false #Whether to disable service rule-route
     support_pipeline: false #Whether support connection pipeline.Connection pipeline means that you can multi-send and multi-recv in ordered on one connection
     fiber_pipeline_connector_queue_size: #The queue size of FiberPipelineConnector
+    fiber_connpool_shards: 4 #The number of shard groups for the idle queue of the fiber connection pool. A larger value spreads connections over more shards, which improves parallelism and therefore performance, but it also creates more connections. If you are sensitive to the number of connections created, consider lowering this value, e.g. to 1
     connect_timeout: 0 #The timeout(ms) of check connection establishment
     filter: #only effective for the current service.
       - xxx
diff --git a/docs/zh/framework_config_full.md b/docs/zh/framework_config_full.md
index aaaf522b..c1989ffa 100644
--- a/docs/zh/framework_config_full.md
+++ b/docs/zh/framework_config_full.md
@@ -149,6 +149,7 @@ client:
     disable_servicerouter: false #是否禁用服务规则路由,默认不禁用
     support_pipeline: false #是否启用pipeline,默认关闭,当前仅针对redis协议有效。调用redis-server时建议开启,可以获得更好的性能。
     fiber_pipeline_connector_queue_size: #FiberPipelineConnector队列大小,如果内存占用加大可以减小此配置
+    fiber_connpool_shards: 4 #Fiber连接池下空闲队列分片组个数。值越大,连接会分散到更多分片上,并行度更好、性能更高,但也会创建更多连接;如果对创建连接数较为敏感,可以考虑调小此值,如设为1
     connect_timeout: 0 #是否开启connect连接超时检测,默认不开启(为0表示不启用)。当前仅支持IO/Handle分离及合并模式
     filter: #service级别的filter列表,只针对当前service生效
       - xxx #具体的filter名称
diff --git a/trpc/client/service_proxy.cc b/trpc/client/service_proxy.cc
index ce2281f1..65be8118 100644
--- a/trpc/client/service_proxy.cc
+++ b/trpc/client/service_proxy.cc
@@ -470,6 +470,7 @@ TransInfo ServiceProxy::ProxyOptionToTransInfo() {
   trans_info.support_pipeline = option_->support_pipeline;
   trans_info.fiber_pipeline_connector_queue_size = option_->fiber_pipeline_connector_queue_size;
   trans_info.protocol = option_->codec_name;
+  trans_info.fiber_connpool_shards = option_->fiber_connpool_shards;

   // set the callback function
   trans_info.conn_close_function = option_->proxy_callback.conn_close_function;
diff --git a/trpc/client/service_proxy_manager.cc b/trpc/client/service_proxy_manager.cc
index 6a51e460..b2f62e8a 100644
--- a/trpc/client/service_proxy_manager.cc
+++ b/trpc/client/service_proxy_manager.cc
@@ -51,6 +51,7 @@ void ServiceProxyManager::SetOptionFromConfig(const ServiceProxyConfig& proxy_co
   option->ssl_config = proxy_conf.ssl_config;
   option->support_pipeline = proxy_conf.support_pipeline;
   option->fiber_pipeline_connector_queue_size = proxy_conf.fiber_pipeline_connector_queue_size;
+  option->fiber_connpool_shards = proxy_conf.fiber_connpool_shards;

   option->service_filter_configs = proxy_conf.service_filter_configs;
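Note: the plumbing above carries the YAML value through ServiceProxyConfig into ServiceProxyOption and finally into TransInfo. For readers who configure proxies in code rather than via YAML, here is a minimal sketch; the service name and codec value are illustrative, and only the fiber_connpool_shards field is introduced by this patch:

```cpp
#include "trpc/client/service_proxy_option.h"

// Sketch: fill in the option that ServiceProxyManager::SetOptionFromConfig()
// would otherwise populate from YAML.
trpc::ServiceProxyOption MakeGreeterOption() {
  trpc::ServiceProxyOption option;
  option.name = "trpc.test.helloworld.Greeter";  // hypothetical service name
  option.codec_name = "trpc";  // copied into TransInfo::protocol (see service_proxy.cc above)
  option.fiber_connpool_shards = 1;  // lower the default of 4 when connection count matters more than parallelism
  return option;
}
```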
diff --git a/trpc/client/service_proxy_option.h b/trpc/client/service_proxy_option.h
index ce472a3f..f08feb58 100644
--- a/trpc/client/service_proxy_option.h
+++ b/trpc/client/service_proxy_option.h
@@ -197,6 +197,13 @@ struct ServiceProxyOption {
   /// The size of the sliding window for flow control, in bytes. The default value is 65535, and a value of 0 means that
   /// flow control is disabled. Currently, flow control is effective for tRPC streaming.
   uint32_t stream_max_window_size{kDefaultStreamMaxWindowSize};
+
+  /// The number of FiberConnectionPool shard groups for the idle queue.
+  /// A larger value spreads connections over more shards, which improves parallelism and therefore
+  /// performance, but it also results in more connections being created.
+  /// If you are sensitive to the number of connections created, you may consider lowering this
+  /// value, e.g. to 1.
+  uint32_t fiber_connpool_shards = 4;
 };

 }  // namespace trpc
diff --git a/trpc/common/config/client_conf.h b/trpc/common/config/client_conf.h
index 45483a52..ebf751b6 100644
--- a/trpc/common/config/client_conf.h
+++ b/trpc/common/config/client_conf.h
@@ -159,6 +159,13 @@ struct ServiceProxyConfig {
   /// Redis auth config
   RedisClientConf redis_conf;

+  /// The number of FiberConnectionPool shard groups for the idle queue.
+  /// A larger value spreads connections over more shards, which improves parallelism and therefore
+  /// performance, but it also results in more connections being created.
+  /// If you are sensitive to the number of connections created, you may consider lowering this
+  /// value, e.g. to 1.
+  uint32_t fiber_connpool_shards = 4;
+
   void Display() const;
 };
diff --git a/trpc/common/config/client_conf_parser.h b/trpc/common/config/client_conf_parser.h
index ac83de02..e1f2bbc8 100644
--- a/trpc/common/config/client_conf_parser.h
+++ b/trpc/common/config/client_conf_parser.h
@@ -69,6 +69,8 @@ struct convert<trpc::ServiceProxyConfig> {
       node["redis"] = proxy_config.redis_conf;
     }

+    node["fiber_connpool_shards"] = proxy_config.fiber_connpool_shards;
+
    return node;
  }

@@ -130,6 +132,10 @@ struct convert<trpc::ServiceProxyConfig> {
       proxy_config.redis_conf.enable = true;
     }

+    if (node["fiber_connpool_shards"]) {
+      proxy_config.fiber_connpool_shards = node["fiber_connpool_shards"].as<uint32_t>();
+    }
+
     return true;
   }
 };
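Note: the decode path above is backward compatible: an absent fiber_connpool_shards key keeps the compiled-in default of 4. A self-contained sketch of that optional-key pattern with yaml-cpp (the flat node layout is simplified; real configs nest this under client/service):

```cpp
#include <cstdint>
#include <iostream>

#include <yaml-cpp/yaml.h>

int main() {
  uint32_t shards = 4;  // mirrors the default of ServiceProxyConfig::fiber_connpool_shards

  // A present key overrides the default; an absent key keeps it.
  YAML::Node node = YAML::Load("fiber_connpool_shards: 2");
  if (node["fiber_connpool_shards"]) {
    shards = node["fiber_connpool_shards"].as<uint32_t>();
  }
  std::cout << "shards = " << shards << std::endl;  // prints "shards = 2"
  return 0;
}
```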
diff --git a/trpc/transport/client/fiber/conn_pool/fiber_tcp_conn_pool_connector_group.cc b/trpc/transport/client/fiber/conn_pool/fiber_tcp_conn_pool_connector_group.cc
index 0e497bcd..592a7bb0 100644
--- a/trpc/transport/client/fiber/conn_pool/fiber_tcp_conn_pool_connector_group.cc
+++ b/trpc/transport/client/fiber/conn_pool/fiber_tcp_conn_pool_connector_group.cc
@@ -22,19 +22,16 @@

 namespace trpc {

-constexpr static int kConnPoolShards = 4;
-
 FiberTcpConnPoolConnectorGroup::FiberTcpConnPoolConnectorGroup(const FiberConnectorGroup::Options& options)
     : options_(options) {
-  conn_shards_ = std::make_unique<ConnShard[]>(kConnPoolShards);
-  max_conn_per_shard_ = options_.trans_info->max_conn_num / kConnPoolShards;
-  max_conn_per_shard_ = std::ceil(options_.trans_info->max_conn_num / kConnPoolShards);
+  conn_shards_ = std::make_unique<ConnShard[]>(options_.trans_info->fiber_connpool_shards);
+  max_conn_per_shard_ = std::ceil(static_cast<double>(options_.trans_info->max_conn_num) / options_.trans_info->fiber_connpool_shards);
 }

 FiberTcpConnPoolConnectorGroup::~FiberTcpConnPoolConnectorGroup() {}

 void FiberTcpConnPoolConnectorGroup::Stop() {
-  for (int i = 0; i != kConnPoolShards; ++i) {
+  for (uint32_t i = 0; i != options_.trans_info->fiber_connpool_shards; ++i) {
     auto&& shard = conn_shards_[i];

     std::list<RefPtr<FiberTcpConnPoolConnector>> tcp_conns;
@@ -51,7 +48,7 @@ void FiberTcpConnPoolConnectorGroup::Stop() {
 }

 void FiberTcpConnPoolConnectorGroup::Destroy() {
-  for (int i = 0; i != kConnPoolShards; ++i) {
+  for (uint32_t i = 0; i != options_.trans_info->fiber_connpool_shards; ++i) {
     auto&& shard = conn_shards_[i];

     std::list<RefPtr<FiberTcpConnPoolConnector>> tcp_conns;
@@ -184,7 +181,7 @@ stream::StreamReaderWriterProviderPtr FiberTcpConnPoolConnectorGroup::CreateStre
   if (reason == 0 && connector->IsHealthy()) {
     uint32_t shard_id = (connector->GetConnId() >> 32);

-    auto& shard = conn_shards_[shard_id % kConnPoolShards];
+    auto& shard = conn_shards_[shard_id % options_.trans_info->fiber_connpool_shards];
     std::scoped_lock _(shard.lock);

     if ((shard.tcp_conns.size() <= max_conn_per_shard_) &&
@@ -221,7 +218,7 @@ RefPtr<FiberTcpConnPoolConnector> FiberTcpConnPoolConnectorGroup::GetOrCreate()
   int retry_num = 3;
   while (retry_num > 0) {
-    auto& shard = conn_shards_[shard_id % kConnPoolShards];
+    auto& shard = conn_shards_[shard_id % options_.trans_info->fiber_connpool_shards];
     {
       std::scoped_lock _(shard.lock);
@@ -268,7 +265,7 @@ void FiberTcpConnPoolConnectorGroup::Reclaim(int ret, RefPtr<FiberTcpConnPoolConnector>
   uint32_t shard_id = (connector->GetConnId() >> 32);

-  auto& shard = conn_shards_[shard_id % kConnPoolShards];
+  auto& shard = conn_shards_[shard_id % options_.trans_info->fiber_connpool_shards];
   std::scoped_lock _(shard.lock);

   if ((shard.tcp_conns.size() <= max_conn_per_shard_) &&
diff --git a/trpc/transport/client/trans_info.h b/trpc/transport/client/trans_info.h
index c39ed786..6f069b06 100644
--- a/trpc/transport/client/trans_info.h
+++ b/trpc/transport/client/trans_info.h
@@ -136,6 +136,13 @@ struct TransInfo {
   /// User-defined transInfo data,such as RedisClientConf
   std::any user_data;
+
+  /// The number of FiberConnectionPool shard groups for the idle queue.
+  /// A larger value spreads connections over more shards, which improves parallelism and therefore
+  /// performance, but it also results in more connections being created.
+  /// If you are sensitive to the number of connections created, you may consider lowering this
+  /// value, e.g. to 1.
+  uint32_t fiber_connpool_shards = 4;
 };

 }  // namespace trpc
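Note: to make the sizing and routing arithmetic above concrete: per-shard capacity is the ceiling of max_conn_num over fiber_connpool_shards (hence the cast to double; std::ceil over plain integer division would silently floor), and a connector is routed back to its shard via the high 32 bits of its 64-bit connection id. A standalone sketch with example values:

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  uint32_t max_conn_num = 10;          // example; the framework takes this from TransInfo
  uint32_t fiber_connpool_shards = 4;  // the new option's default

  // Ceiling division, as in the FiberTcpConnPoolConnectorGroup constructor.
  // Without the cast, 10 / 4 truncates to 2 before std::ceil ever runs.
  auto max_conn_per_shard = static_cast<uint32_t>(
      std::ceil(static_cast<double>(max_conn_num) / fiber_connpool_shards));
  std::cout << "max_conn_per_shard = " << max_conn_per_shard << "\n";  // 3

  // Shard routing as used by GetOrCreate()/Reclaim(): the shard id sits in
  // the high 32 bits of the 64-bit connection id.
  uint64_t conn_id = (uint64_t{7} << 32) | 12345;
  auto shard_id = static_cast<uint32_t>(conn_id >> 32);
  std::cout << "shard index = " << (shard_id % fiber_connpool_shards) << "\n";  // 7 % 4 == 3
  return 0;
}
```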