domain: change groupSize in splitForConcurrentFetch (#55518)
ref #50959
joechenrh committed Aug 20, 2024
1 parent 6d95459 commit 85235b3
Showing 2 changed files with 15 additions and 11 deletions.
1 change: 1 addition & 0 deletions pkg/domain/BUILD.bazel
@@ -84,6 +84,7 @@ go_library(
"//pkg/util/globalconn",
"//pkg/util/intest",
"//pkg/util/logutil",
"//pkg/util/mathutil",
"//pkg/util/memory",
"//pkg/util/memoryusagealarm",
"//pkg/util/printer",
25 changes: 14 additions & 11 deletions pkg/domain/domain.go
@@ -87,6 +87,7 @@ import (
"github.com/pingcap/tidb/pkg/util/globalconn"
"github.com/pingcap/tidb/pkg/util/intest"
"github.com/pingcap/tidb/pkg/util/logutil"
"github.com/pingcap/tidb/pkg/util/mathutil"
"github.com/pingcap/tidb/pkg/util/memory"
"github.com/pingcap/tidb/pkg/util/memoryusagealarm"
"github.com/pingcap/tidb/pkg/util/replayer"
@@ -462,20 +463,22 @@ func (do *Domain) fetchAllSchemasWithTables(m *meta.Meta) ([]*model.DBInfo, erro
 const fetchSchemaConcurrency = 1
 
 func (*Domain) splitForConcurrentFetch(schemas []*model.DBInfo) [][]*model.DBInfo {
-	groupSize := (len(schemas) + fetchSchemaConcurrency - 1) / fetchSchemaConcurrency
-	if variable.SchemaCacheSize.Load() > 0 && len(schemas) > 1000 {
+	groupCnt := fetchSchemaConcurrency
+	schemaCnt := len(schemas)
+	if variable.SchemaCacheSize.Load() > 0 && schemaCnt > 1000 {
 		// TODO: Temporary solution to speed up when too many databases, will refactor it later.
-		groupSize = 8
+		groupCnt = 8
 	}
-	splitted := make([][]*model.DBInfo, 0, fetchSchemaConcurrency)
-	schemaCnt := len(schemas)
-	for i := 0; i < schemaCnt; i += groupSize {
-		end := i + groupSize
-		if end > schemaCnt {
-			end = schemaCnt
-		}
-		splitted = append(splitted, schemas[i:end])
+
+	splitted := make([][]*model.DBInfo, 0, groupCnt)
+	groupSizes := mathutil.Divide2Batches(schemaCnt, groupCnt)
+
+	start := 0
+	for _, groupSize := range groupSizes {
+		splitted = append(splitted, schemas[start:start+groupSize])
+		start += groupSize
 	}
+
 	return splitted
 }

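Editor's note: for reference, a minimal, self-contained Go sketch (not TiDB code) of the batching scheme the new loop relies on. The local divide2Batches helper below only mirrors what mathutil.Divide2Batches is assumed to do — split a total count into a given number of near-equal group sizes — and is not the library implementation; the slicing loop copies the pattern added by this commit. The effect visible in the diff: the old groupSize = 8 produced groups of 8 schemas each (thousands of groups when there are thousands of databases), while the new groupCnt = 8 caps the split at 8 near-equal groups.

// batching_sketch.go — illustrative only; divide2Batches is an assumed
// stand-in for mathutil.Divide2Batches, not the TiDB implementation.
package main

import "fmt"

// divide2Batches splits totalSize items into at most batchCnt group sizes
// that sum to totalSize and differ from each other by at most one.
func divide2Batches(totalSize, batchCnt int) []int {
	if batchCnt <= 0 {
		return nil
	}
	sizes := make([]int, 0, batchCnt)
	quotient, remainder := totalSize/batchCnt, totalSize%batchCnt
	for i := 0; i < batchCnt; i++ {
		size := quotient
		if i < remainder {
			size++
		}
		if size == 0 {
			break
		}
		sizes = append(sizes, size)
	}
	return sizes
}

func main() {
	// Stand-in for []*model.DBInfo: ten "databases" split across three groups.
	schemas := make([]string, 10)
	for i := range schemas {
		schemas[i] = fmt.Sprintf("db%d", i)
	}

	// Same slicing pattern as the new splitForConcurrentFetch loop.
	groupSizes := divide2Batches(len(schemas), 3)
	splitted := make([][]string, 0, len(groupSizes))
	start := 0
	for _, groupSize := range groupSizes {
		splitted = append(splitted, schemas[start:start+groupSize])
		start += groupSize
	}

	fmt.Println(groupSizes)    // [4 3 3]
	fmt.Println(len(splitted)) // 3
}

With this scheme every schema lands in exactly one group, and the number of groups handed to the concurrent fetcher is bounded by groupCnt rather than growing with the number of databases.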
