Skip to content

Commit

Permalink
Feature: add fiber_connpool_shards config
Browse files Browse the repository at this point in the history
  • Loading branch information
kiviyu committed Oct 13, 2023
1 parent 536cb9e commit e7fb546
Show file tree
Hide file tree
Showing 9 changed files with 38 additions and 10 deletions.
1 change: 1 addition & 0 deletions docs/en/framework_config_full.md
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,7 @@ client:
disable_servicerouter: false #Whether to disable service rule-route
support_pipeline: false #Whether to support connection pipelining. Connection pipelining means that you can multi-send and multi-recv in order on one connection
fiber_pipeline_connector_queue_size: #The queue size of FiberPipelineConnector
fiber_connpool_shards: 1 #The number of shard groups for the idle queue under the fiber connection pool. A larger value allocates more connections, giving better parallelism and improved performance, but it also creates more connections. If you are sensitive to the number of created connections, consider reducing this value, e.g. setting it to 1
connect_timeout: 0 #The timeout(ms) of check connection establishment
filter: #only effective for the current service.
- xxx
Expand Down
1 change: 1 addition & 0 deletions docs/zh/framework_config_full.md
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,7 @@ client:
disable_servicerouter: false #是否禁用服务规则路由,默认不禁用
support_pipeline: false #是否启用pipeline,默认关闭,当前仅针对redis协议有效。调用redis-server时建议开启,可以获得更好的性能。
fiber_pipeline_connector_queue_size: #FiberPipelineConnector队列大小,如果内存占用加大可以减小此配置
fiber_connpool_shards: 1 #Fiber连接池下空闲队列分片组个数,值越大分配的连接会偏多,带来更好的并行度并提升性能,但是会带来更多的连接;如果对创建连接数较为敏感可以考虑调小此值,如设为1
connect_timeout: 0 #是否开启connect连接超时检测,默认不开启(为0表示不启用)。当前仅支持IO/Handle分离及合并模式
filter: #service级别的filter列表,只针对当前service生效
- xxx #具体的filter名称
Expand Down
1 change: 1 addition & 0 deletions trpc/client/service_proxy.cc
Original file line number Diff line number Diff line change
Expand Up @@ -470,6 +470,7 @@ TransInfo ServiceProxy::ProxyOptionToTransInfo() {
trans_info.support_pipeline = option_->support_pipeline;
trans_info.fiber_pipeline_connector_queue_size = option_->fiber_pipeline_connector_queue_size;
trans_info.protocol = option_->codec_name;
trans_info.fiber_connpool_shards = option_->fiber_connpool_shards;

// set the callback function
trans_info.conn_close_function = option_->proxy_callback.conn_close_function;
Expand Down
1 change: 1 addition & 0 deletions trpc/client/service_proxy_manager.cc
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ void ServiceProxyManager::SetOptionFromConfig(const ServiceProxyConfig& proxy_co
option->ssl_config = proxy_conf.ssl_config;
option->support_pipeline = proxy_conf.support_pipeline;
option->fiber_pipeline_connector_queue_size = proxy_conf.fiber_pipeline_connector_queue_size;
option->fiber_connpool_shards = proxy_conf.fiber_connpool_shards;

option->service_filter_configs = proxy_conf.service_filter_configs;

Expand Down
7 changes: 7 additions & 0 deletions trpc/client/service_proxy_option.h
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,13 @@ struct ServiceProxyOption {
/// The size of the sliding window for flow control, in bytes. The default value is 65535, and a value of 0 means that
/// flow control is disabled. Currently, flow control is effective for tRPC streaming.
uint32_t stream_max_window_size{kDefaultStreamMaxWindowSize};

/// The number of FiberConnectionPool shard groups for the idle queue.
/// A larger value results in more allocated connections, giving better parallelism
/// and improved performance, but it also creates more connections.
/// If you are sensitive to the number of created connections, consider reducing
/// this value, e.g. to 1.
uint32_t fiber_connpool_shards = 4;
};

} // namespace trpc
7 changes: 7 additions & 0 deletions trpc/common/config/client_conf.h
Original file line number Diff line number Diff line change
Expand Up @@ -159,6 +159,13 @@ struct ServiceProxyConfig {
/// Redis auth config
RedisClientConf redis_conf;

/// The number of FiberConnectionPool shard groups for the idle queue.
/// A larger value results in more allocated connections, giving better parallelism
/// and improved performance, but it also creates more connections.
/// If you are sensitive to the number of created connections, consider reducing
/// this value, e.g. to 1.
uint32_t fiber_connpool_shards = 4;

void Display() const;
};

Expand Down
6 changes: 6 additions & 0 deletions trpc/common/config/client_conf_parser.h
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,8 @@ struct convert<trpc::ServiceProxyConfig> {
node["redis"] = proxy_config.redis_conf;
}

node["fiber_connpool_shards"] = proxy_config.fiber_connpool_shards;

return node;
}

Expand Down Expand Up @@ -130,6 +132,10 @@ struct convert<trpc::ServiceProxyConfig> {
proxy_config.redis_conf.enable = true;
}

if (node["fiber_connpool_shards"]) {
proxy_config.fiber_connpool_shards = node["fiber_connpool_shards"].as<uint32_t>();
}

return true;
}
};
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,19 +22,16 @@

namespace trpc {

constexpr static int kConnPoolShards = 4;

/// Builds one idle-queue shard per configured `fiber_connpool_shards` and caps
/// each shard so that, together, the shards can hold `max_conn_num` connections.
FiberTcpConnPoolConnectorGroup::FiberTcpConnPoolConnectorGroup(const FiberConnectorGroup::Options& options)
    : options_(options) {
  const uint32_t shards = options_.trans_info->fiber_connpool_shards;
  // NOTE(review): assumes shards >= 1 (config default); a value of 0 would divide by zero — confirm
  // that config validation enforces this.
  conn_shards_ = std::make_unique<Shard[]>(shards);
  // Integer ceiling division. The previous `std::ceil(a / b)` was a no-op: the integer
  // division truncates before std::ceil runs, so the per-shard cap was floored and the
  // shards could collectively hold fewer than max_conn_num connections.
  max_conn_per_shard_ = (options_.trans_info->max_conn_num + shards - 1) / shards;
}

// Trivial destructor: the shard array is owned by a std::unique_ptr member, and
// connector teardown is done explicitly via Stop()/Destroy() before destruction.
FiberTcpConnPoolConnectorGroup::~FiberTcpConnPoolConnectorGroup() {}

void FiberTcpConnPoolConnectorGroup::Stop() {
for (int i = 0; i != kConnPoolShards; ++i) {
for (uint32_t i = 0; i != options_.trans_info->fiber_connpool_shards; ++i) {
auto&& shard = conn_shards_[i];

std::list<RefPtr<FiberTcpConnPoolConnector>> tcp_conns;
Expand All @@ -51,7 +48,7 @@ void FiberTcpConnPoolConnectorGroup::Stop() {
}

void FiberTcpConnPoolConnectorGroup::Destroy() {
for (int i = 0; i != kConnPoolShards; ++i) {
for (uint32_t i = 0; i != options_.trans_info->fiber_connpool_shards; ++i) {
auto&& shard = conn_shards_[i];

std::list<RefPtr<FiberTcpConnPoolConnector>> tcp_conns;
Expand Down Expand Up @@ -184,7 +181,7 @@ stream::StreamReaderWriterProviderPtr FiberTcpConnPoolConnectorGroup::CreateStre

if (reason == 0 && connector->IsHealthy()) {
uint32_t shard_id = (connector->GetConnId() >> 32);
auto& shard = conn_shards_[shard_id % kConnPoolShards];
auto& shard = conn_shards_[shard_id % options_.trans_info->fiber_connpool_shards];

std::scoped_lock _(shard.lock);
if ((shard.tcp_conns.size() <= max_conn_per_shard_) &&
Expand Down Expand Up @@ -221,7 +218,7 @@ RefPtr<FiberTcpConnPoolConnector> FiberTcpConnPoolConnectorGroup::GetOrCreate()
int retry_num = 3;

while (retry_num > 0) {
auto& shard = conn_shards_[shard_id % kConnPoolShards];
auto& shard = conn_shards_[shard_id % options_.trans_info->fiber_connpool_shards];

{
std::scoped_lock _(shard.lock);
Expand Down Expand Up @@ -268,7 +265,7 @@ void FiberTcpConnPoolConnectorGroup::Reclaim(int ret, RefPtr<FiberTcpConnPoolCon

if (ret == 0) {
uint32_t shard_id = (connector->GetConnId() >> 32);
auto& shard = conn_shards_[shard_id % kConnPoolShards];
auto& shard = conn_shards_[shard_id % options_.trans_info->fiber_connpool_shards];

std::scoped_lock _(shard.lock);
if ((shard.tcp_conns.size() <= max_conn_per_shard_) &&
Expand Down
7 changes: 7 additions & 0 deletions trpc/transport/client/trans_info.h
Original file line number Diff line number Diff line change
Expand Up @@ -136,6 +136,13 @@ struct TransInfo {

/// User-defined transInfo data,such as RedisClientConf
std::any user_data;

/// The number of FiberConnectionPool shard groups for the idle queue.
/// A larger value results in more allocated connections, giving better parallelism
/// and improved performance, but it also creates more connections.
/// If you are sensitive to the number of created connections, consider reducing
/// this value, e.g. to 1.
uint32_t fiber_connpool_shards = 4;
};

} // namespace trpc

0 comments on commit e7fb546

Please sign in to comment.