restore: use token bucket to balance download SSTs
3pointer committed Jan 4, 2024
1 parent 354c186 commit d7d32b7
Showing 4 changed files with 200 additions and 178 deletions.
44 changes: 30 additions & 14 deletions br/pkg/restore/client.go
@@ -98,8 +98,9 @@ type Client struct {
 	tlsConf       *tls.Config
 	keepaliveConf keepalive.ClientParameters

-	databases map[string]*utils.Database
-	ddlJobs   []*model.Job
+	concurrencyPerStore uint
+	databases           map[string]*utils.Database
+	ddlJobs             []*model.Job

 	// store tables need to rebase info like auto id and random id and so on after create table
 	rebasedTablesMap map[UniqueTableName]bool
@@ -532,23 +533,22 @@ func (rc *Client) SetStorage(ctx context.Context, backend *backuppb.StorageBacke

 func (rc *Client) InitClients(ctx context.Context, backend *backuppb.StorageBackend, isRawKvMode bool, isTxnKvMode bool) {
 	storeWorkerPoolMap := make(map[uint64]chan struct{})
-	storeStatisticMap := make(map[uint64]*int64)
 	stores, err := conn.GetAllTiKVStoresWithRetry(ctx, rc.pdClient, util.SkipTiFlash)
 	if err != nil {
 		log.Fatal("failed to get stores", zap.Error(err))
 	}
-	concurrencyPerStore := 512
+	concurrencyPerStore := rc.GetConcurrencyPerStore()
 	for _, store := range stores {
 		ch := make(chan struct{}, concurrencyPerStore)
-		for i := 0; i < concurrencyPerStore; i += 1 {
+		for i := 0; i < int(concurrencyPerStore); i += 1 {
 			ch <- struct{}{}
 		}
 		storeWorkerPoolMap[store.Id] = ch
-		storeStatisticMap[store.Id] = new(int64)
 	}

 	metaClient := split.NewSplitClient(rc.pdClient, rc.pdHTTPClient, rc.tlsConf, isRawKvMode)
 	importCli := NewImportClient(metaClient, rc.tlsConf, rc.keepaliveConf)
-	rc.fileImporter = NewFileImporter(metaClient, importCli, backend, isRawKvMode, isTxnKvMode, storeWorkerPoolMap, storeStatisticMap, rc.rewriteMode)
+	rc.fileImporter = NewFileImporter(metaClient, importCli, backend, isRawKvMode, isTxnKvMode, storeWorkerPoolMap, rc.rewriteMode)
 }

 func (rc *Client) SetRawKVClient(c *RawKVBatchClient) {
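
The per-store channels built in InitClients above are the token bucket named in the commit title: a buffered channel per store, pre-filled with one token per allowed concurrent download. A minimal, self-contained sketch of the pattern (names here are illustrative, not taken from the commit):

    package main

    import "fmt"

    // tokenBucket is a buffered channel pre-filled with n tokens, mirroring
    // the per-store channels InitClients builds. Receiving takes a download
    // slot (blocking when the store is saturated); sending returns it.
    type tokenBucket chan struct{}

    func newTokenBucket(n int) tokenBucket {
    	tb := make(tokenBucket, n)
    	for i := 0; i < n; i++ {
    		tb <- struct{}{}
    	}
    	return tb
    }

    func (tb tokenBucket) acquire() { <-tb }
    func (tb tokenBucket) release() { tb <- struct{}{} }

    func main() {
    	// One bucket per store ID, sized by the per-store concurrency,
    	// as in storeWorkerPoolMap (sizes here are hypothetical).
    	buckets := map[uint64]tokenBucket{1: newTokenBucket(2), 2: newTokenBucket(2)}

    	buckets[1].acquire() // take a slot before downloading an SST to store 1
    	fmt.Println("downloading SST to store 1")
    	buckets[1].release() // return the slot when the download completes
    }

Because each store has its own bucket, a slow or busy store only throttles downloads destined for itself instead of stalling the whole restore.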
@@ -656,8 +656,24 @@ func (rc *Client) GetFilesInRawRange(startKey []byte, endKey []byte, cf string)

 // SetConcurrency sets the concurrency of dbs, tables, and files.
 func (rc *Client) SetConcurrency(c uint) {
-	log.Info("new worker pool", zap.Uint("currency-count", c))
-	rc.workerPool = utils.NewWorkerPool(c, "file")
+	if rc.storeCount <= 0 {
+		log.Fatal("uninitialized store count")
+	}
+	totalCount := c * uint(rc.storeCount)
+	log.Info("new worker pool", zap.Uint("concurrency-per-store", c), zap.Uint("total", totalCount))
+	rc.workerPool = utils.NewWorkerPool(totalCount, "file")
+	rc.concurrencyPerStore = c
+}
+
+func (rc *Client) GetConcurrency() uint {
+	if rc.storeCount <= 0 {
+		log.Fatal("uninitialized store count", zap.Int("storeCount", rc.storeCount))
+	}
+	return rc.concurrencyPerStore * uint(rc.storeCount)
+}
+
+func (rc *Client) GetConcurrencyPerStore() uint {
+	return rc.concurrencyPerStore
 }

 // EnableOnline sets the mode of restore to online.
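
With the new semantics, the value passed to SetConcurrency is interpreted per store, and the shared "file" worker pool is sized to cover every store. A quick worked example of the arithmetic (numbers hypothetical, not from the commit):

    package main

    import "fmt"

    func main() {
    	// SetConcurrency(c) now treats c as per-store concurrency and sizes
    	// the shared "file" pool at c * storeCount; GetConcurrency reports
    	// the same product.
    	concurrencyPerStore := uint(128)
    	storeCount := uint(3)
    	fmt.Println(concurrencyPerStore * storeCount) // 384 workers in total
    }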
@@ -1256,7 +1272,7 @@ func MockCallSetSpeedLimit(ctx context.Context, fakeImportClient ImporterClient,
 	rc.SetRateLimit(42)
 	rc.SetConcurrency(concurrency)
 	rc.hasSpeedLimited = false
-	rc.fileImporter = NewFileImporter(nil, fakeImportClient, nil, false, false, nil, nil, rc.rewriteMode)
+	rc.fileImporter = NewFileImporter(nil, fakeImportClient, nil, false, false, nil, rc.rewriteMode)
 	return rc.setSpeedLimit(ctx, rc.rateLimit)
 }

@@ -1283,7 +1299,7 @@ func (rc *Client) setSpeedLimit(ctx context.Context, rateLimit uint64) error {
 	}

 	finalStore := store
-	rc.workerPool.ApplyOnErrorGroup(eg,
+	eg.Go(
 		func() error {
 			err := rc.fileImporter.setDownloadSpeedLimit(ectx, finalStore.GetId(), rateLimit)
 			if err != nil {
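
This hunk and the ones below repeat one mechanical change: tasks that used to queue behind the shared worker pool via rc.workerPool.ApplyOnErrorGroup(eg, f) are now spawned directly with eg.Go(f), since download concurrency is bounded per store by the token bucket rather than by the pool. A minimal errgroup sketch of the new shape (store IDs hypothetical):

    package main

    import (
    	"context"
    	"fmt"

    	"golang.org/x/sync/errgroup"
    )

    func main() {
    	eg, ectx := errgroup.WithContext(context.Background())
    	for _, store := range []uint64{1, 2, 3} {
    		finalStore := store // capture the loop variable, as the code above does
    		eg.Go(func() error {
    			// Each store's task runs on its own goroutine; a returned
    			// error cancels ectx and is surfaced by eg.Wait.
    			fmt.Println("configuring store", finalStore, "ctx err:", ectx.Err())
    			return nil
    		})
    	}
    	if err := eg.Wait(); err != nil {
    		fmt.Println("failed:", err)
    	}
    }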
@@ -1459,7 +1475,7 @@ LOOPFORTABLE:
 			// breaking here directly is also a reasonable behavior.
 			break LOOPFORTABLE
 		}
-		rc.workerPool.ApplyOnErrorGroup(eg, func() error {
+		eg.Go(func() error {
 			filesGroups := getGroupFiles(filesReplica, rc.fileImporter.supportMultiIngest)
 			for _, filesGroup := range filesGroups {
 				if importErr := func(fs []*backuppb.File) error {
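
Spawning one goroutine per table with eg.Go is safe here because each SST download must still win a token from the destination store's bucket; the bucket, not the goroutine count, is the concurrency limit. A sketch of that interaction under stated assumptions (downloadSST, the file names, and the bucket size are illustrative stand-ins for the importer's internals):

    package main

    import (
    	"context"
    	"fmt"

    	"golang.org/x/sync/errgroup"
    )

    // downloadSST stands in for the real per-file download; hypothetical.
    func downloadSST(ctx context.Context, file string) error {
    	fmt.Println("downloaded", file)
    	return nil
    }

    func main() {
    	eg, ectx := errgroup.WithContext(context.Background())
    	bucket := make(chan struct{}, 2) // one store's token bucket, capacity 2
    	for i := 0; i < 2; i++ {
    		bucket <- struct{}{}
    	}
    	for _, f := range []string{"a.sst", "b.sst", "c.sst"} {
    		file := f
    		eg.Go(func() error {
    			select {
    			case <-bucket: // at most cap(bucket) downloads in flight per store
    			case <-ectx.Done():
    				return ectx.Err()
    			}
    			defer func() { bucket <- struct{}{} }() // return the token when done
    			return downloadSST(ectx, file)
    		})
    	}
    	if err := eg.Wait(); err != nil {
    		fmt.Println("restore failed:", err)
    	}
    }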
@@ -1510,7 +1526,7 @@ func (rc *Client) WaitForFilesRestored(ctx context.Context, files []*backuppb.Fi

 	for _, file := range files {
 		fileReplica := file
-		rc.workerPool.ApplyOnErrorGroup(eg,
+		eg.Go(
 			func() error {
 				defer updateCh.Inc()
 				return rc.fileImporter.ImportSSTFiles(ectx, []*backuppb.File{fileReplica}, EmptyRewriteRule(), rc.cipher, rc.backupMeta.ApiVersion)
@@ -1605,7 +1621,7 @@ func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMo
 	}

 	finalStore := store
-	rc.workerPool.ApplyOnErrorGroup(eg,
+	eg.Go(
 		func() error {
 			opt := grpc.WithTransportCredentials(insecure.NewCredentials())
 			if rc.tlsConf != nil {