[to tikv#67] remove unused code related to restore
Signed-off-by: Jian Zhang <[email protected]>
zz-jason committed Mar 28, 2022
1 parent 57ff45e commit 1e6f30f
Showing 16 changed files with 8 additions and 1,713 deletions.
br/cmd/br/debug.go (174 changes: 3 additions & 171 deletions)
@@ -3,31 +3,18 @@
package main

import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"path"
"reflect"

"github.com/gogo/protobuf/proto"
"github.com/pingcap/errors"
backuppb "github.com/pingcap/kvproto/pkg/brpb"
"github.com/pingcap/kvproto/pkg/import_sstpb"
"github.com/pingcap/log"
"github.com/pingcap/tidb/parser/model"
"github.com/spf13/cobra"
berrors "github.com/tikv/migration/br/pkg/errors"
"github.com/tikv/migration/br/pkg/logutil"
"github.com/tikv/migration/br/pkg/metautil"
"github.com/tikv/migration/br/pkg/mock/mockid"
"github.com/tikv/migration/br/pkg/restore"
"github.com/tikv/migration/br/pkg/rtree"
"github.com/tikv/migration/br/pkg/task"
"github.com/tikv/migration/br/pkg/utils"
"github.com/tikv/migration/br/pkg/version/build"
"go.uber.org/zap"
)

// NewDebugCommand return a debug subcommand.
@@ -64,80 +51,7 @@ func newCheckSumCommand() *cobra.Command {
Short: "check the backup data",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
ctx, cancel := context.WithCancel(GetDefaultContext())
defer cancel()

var cfg task.Config
if err := cfg.ParseFromFlags(cmd.Flags()); err != nil {
return errors.Trace(err)
}

_, s, backupMeta, err := task.ReadBackupMeta(ctx, metautil.MetaFile, &cfg)
if err != nil {
return errors.Trace(err)
}

reader := metautil.NewMetaReader(backupMeta, s, &cfg.CipherInfo)
dbs, err := utils.LoadBackupTables(ctx, reader)
if err != nil {
return errors.Trace(err)
}

for _, schema := range backupMeta.Schemas {
dbInfo := &model.DBInfo{}
err = json.Unmarshal(schema.Db, dbInfo)
if err != nil {
return errors.Trace(err)
}
tblInfo := &model.TableInfo{}
err = json.Unmarshal(schema.Table, tblInfo)
if err != nil {
return errors.Trace(err)
}
tbl := dbs[dbInfo.Name.String()].GetTable(tblInfo.Name.String())

var calCRC64 uint64
var totalKVs uint64
var totalBytes uint64
for _, file := range tbl.Files {
calCRC64 ^= file.Crc64Xor
totalKVs += file.GetTotalKvs()
totalBytes += file.GetTotalBytes()
log.Info("file info", zap.Stringer("table", tblInfo.Name),
zap.String("file", file.GetName()),
zap.Uint64("crc64xor", file.GetCrc64Xor()),
zap.Uint64("totalKvs", file.GetTotalKvs()),
zap.Uint64("totalBytes", file.GetTotalBytes()),
zap.Uint64("startVersion", file.GetStartVersion()),
zap.Uint64("endVersion", file.GetEndVersion()),
logutil.Key("startKey", file.GetStartKey()),
logutil.Key("endKey", file.GetEndKey()),
)

var data []byte
data, err = s.ReadFile(ctx, file.Name)
if err != nil {
return errors.Trace(err)
}
s := sha256.Sum256(data)
if !bytes.Equal(s[:], file.Sha256) {
return errors.Annotatef(berrors.ErrBackupChecksumMismatch, `
backup data checksum failed: %s may be changed
calculated sha256 is %s,
origin sha256 is %s`,
file.Name, hex.EncodeToString(s[:]), hex.EncodeToString(file.Sha256))
}
}
log.Info("table info", zap.Stringer("table", tblInfo.Name),
zap.Uint64("CRC64", calCRC64),
zap.Uint64("totalKvs", totalKVs),
zap.Uint64("totalBytes", totalBytes),
zap.Uint64("schemaTotalKvs", schema.TotalKvs),
zap.Uint64("schemaTotalBytes", schema.TotalBytes),
zap.Uint64("schemaCRC64", schema.Crc64Xor))
}
cmd.Println("backup data checksum succeed!")
return nil
return errors.Errorf("checksum is unsupported")
},
}
command.Hidden = true
@@ -159,89 +73,7 @@ func newBackupMetaValidateCommand() *cobra.Command {
Use: "validate",
Short: "validate key range and rewrite rules of backupmeta",
RunE: func(cmd *cobra.Command, _ []string) error {
ctx, cancel := context.WithCancel(GetDefaultContext())
defer cancel()

tableIDOffset, err := cmd.Flags().GetUint64("offset")
if err != nil {
return errors.Trace(err)
}

var cfg task.Config
if err = cfg.ParseFromFlags(cmd.Flags()); err != nil {
return errors.Trace(err)
}
_, s, backupMeta, err := task.ReadBackupMeta(ctx, metautil.MetaFile, &cfg)
if err != nil {
log.Error("read backupmeta failed", zap.Error(err))
return errors.Trace(err)
}
reader := metautil.NewMetaReader(backupMeta, s, &cfg.CipherInfo)
dbs, err := utils.LoadBackupTables(ctx, reader)
if err != nil {
log.Error("load tables failed", zap.Error(err))
return errors.Trace(err)
}
files := make([]*backuppb.File, 0)
tables := make([]*metautil.Table, 0)
for _, db := range dbs {
for _, table := range db.Tables {
files = append(files, table.Files...)
}
tables = append(tables, db.Tables...)
}
// Check if the ranges of files overlapped
rangeTree := rtree.NewRangeTree()
for _, file := range files {
if out := rangeTree.InsertRange(rtree.Range{
StartKey: file.GetStartKey(),
EndKey: file.GetEndKey(),
}); out != nil {
log.Error(
"file ranges overlapped",
zap.Stringer("out", out),
logutil.File(file),
)
}
}

tableIDAllocator := mockid.NewIDAllocator()
// Advance table ID allocator to the offset.
for offset := uint64(0); offset < tableIDOffset; offset++ {
_, _ = tableIDAllocator.Alloc() // Ignore error
}
rewriteRules := &restore.RewriteRules{
Data: make([]*import_sstpb.RewriteRule, 0),
}
tableIDMap := make(map[int64]int64)
// Simulate to create table
for _, table := range tables {
indexIDAllocator := mockid.NewIDAllocator()
newTable := new(model.TableInfo)
tableID, _ := tableIDAllocator.Alloc()
newTable.ID = int64(tableID)
newTable.Name = table.Info.Name
newTable.Indices = make([]*model.IndexInfo, len(table.Info.Indices))
for i, indexInfo := range table.Info.Indices {
indexID, _ := indexIDAllocator.Alloc()
newTable.Indices[i] = &model.IndexInfo{
ID: int64(indexID),
Name: indexInfo.Name,
}
}
rules := restore.GetRewriteRules(newTable, table.Info, 0)
rewriteRules.Data = append(rewriteRules.Data, rules.Data...)
tableIDMap[table.Info.ID] = int64(tableID)
}
// Validate rewrite rules
for _, file := range files {
err = restore.ValidateFileRewriteRule(file, rewriteRules)
if err != nil {
return errors.Trace(err)
}
}
cmd.Println("Check backupmeta done")
return nil
return errors.Errorf("validate is unsupported")
},
}
command.Flags().Uint64("offset", 0, "the offset of table id alloctor")
@@ -376,7 +208,7 @@ func setPDConfigCommand() *cobra.Command {
return errors.Trace(err)
}

mgr, err := task.NewMgr(ctx, tidbGlue, cfg.PD, cfg.TLS, task.GetKeepalive(&cfg), cfg.CheckRequirements, false)
mgr, err := task.NewMgr(ctx, tidbGlue, cfg.PD, cfg.TLS, task.GetKeepalive(&cfg), cfg.CheckRequirements)
if err != nil {
return errors.Trace(err)
}
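Both debug subcommands are now stubs: their original bodies (backup-meta reading, per-file SHA-256 verification, rewrite-rule validation) are deleted and replaced with a single error return. Below is a minimal sketch of how the resulting newCheckSumCommand likely reads after this commit; the Use value and the trailing return statement are assumptions, since they sit in the elided parts of the hunk:

func newCheckSumCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "checksum", // assumed name; not visible in the shown hunk
		Short: "check the backup data",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, _ []string) error {
			// The checksum logic depended on the removed restore/metautil code,
			// so the subcommand now only reports that it is unsupported.
			return errors.Errorf("checksum is unsupported")
		},
	}
	command.Hidden = true
	return command
}

newBackupMetaValidateCommand follows the same pattern, with RunE returning errors.Errorf("validate is unsupported").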
br/pkg/conn/conn.go (21 changes: 0 additions & 21 deletions)
@@ -16,7 +16,6 @@ import (
backuppb "github.com/pingcap/kvproto/pkg/brpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/log"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/tikv/client-go/v2/tikv"
"github.com/tikv/client-go/v2/txnkv/txnlock"
@@ -104,7 +103,6 @@ func NewConnPool(cap int, newConn func(ctx context.Context) (*grpc.ClientConn, e
type Mgr struct {
*pdutil.PdController
tlsConf *tls.Config
dom *domain.Domain
storage kv.Storage // Used to access SQL related interfaces.
tikvStore tikv.Storage // Used to access TiKV specific interfaces.
grpcClis struct {
@@ -222,7 +220,6 @@ func checkStoresAlive(ctx context.Context,

// NewMgr creates a new Mgr.
//
// Domain is optional for Backup, set `needDomain` to false to disable
// initializing Domain.
func NewMgr(
ctx context.Context,
@@ -233,7 +230,6 @@ func NewMgr(
keepalive keepalive.ClientParameters,
storeBehavior StoreBehavior,
checkRequirements bool,
needDomain bool,
) (*Mgr, error) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("conn.NewMgr", opentracing.ChildOf(span.Context()))
@@ -272,19 +268,10 @@ func NewMgr(
return nil, berrors.ErrKVNotTiKV
}

var dom *domain.Domain
if needDomain {
dom, err = g.GetDomain(storage)
if err != nil {
return nil, errors.Trace(err)
}
}

mgr := &Mgr{
PdController: controller,
storage: storage,
tikvStore: tikvStorage,
dom: dom,
tlsConf: tlsConf,
ownsStorage: g.OwnsStorage(),
grpcClis: struct {
@@ -418,11 +405,6 @@ func (mgr *Mgr) GetLockResolver() *txnlock.LockResolver {
return mgr.tikvStore.GetLockResolver()
}

// GetDomain returns a tikv storage.
func (mgr *Mgr) GetDomain() *domain.Domain {
return mgr.dom
}

// Close closes all client in Mgr.
func (mgr *Mgr) Close() {
mgr.grpcClis.mu.Lock()
@@ -437,9 +419,6 @@ func (mgr *Mgr) Close() {
// Gracefully shutdown domain so it does not affect other TiDB DDL.
// Must close domain before closing storage, otherwise it gets stuck forever.
if mgr.ownsStorage {
if mgr.dom != nil {
mgr.dom.Close()
}
tikv.StoreShuttingDown(1)
mgr.storage.Close()
}
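Since the Domain is no longer created, NewMgr loses its needDomain parameter and Close no longer shuts down a Domain before closing the storage. The corresponding call-site change is visible in br/cmd/br/debug.go above; restated as a small before/after sketch (variable names come from that file):

// Before this commit the caller had to pass needDomain explicitly:
//   mgr, err := task.NewMgr(ctx, tidbGlue, cfg.PD, cfg.TLS, task.GetKeepalive(&cfg), cfg.CheckRequirements, false)
// After this commit the trailing argument is gone:
mgr, err := task.NewMgr(ctx, tidbGlue, cfg.PD, cfg.TLS, task.GetKeepalive(&cfg), cfg.CheckRequirements)
if err != nil {
	return errors.Trace(err)
}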
br/pkg/glue/glue.go (8 changes: 0 additions & 8 deletions)
@@ -5,16 +5,12 @@ package glue
import (
"context"

"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
pd "github.com/tikv/pd/client"
)

// Glue is an abstraction of TiDB function calls used in BR.
type Glue interface {
GetDomain(store kv.Storage) (*domain.Domain, error)
CreateSession(store kv.Storage) (Session, error)
Open(path string, option pd.SecurityOption) (kv.Storage, error)

// OwnsStorage returns whether the storage returned by Open() is owned
@@ -32,10 +28,6 @@ type Glue interface {

// Session is an abstraction of the session.Session interface.
type Session interface {
Execute(ctx context.Context, sql string) error
ExecuteInternal(ctx context.Context, sql string, args ...interface{}) error
CreateDatabase(ctx context.Context, schema *model.DBInfo) error
CreateTable(ctx context.Context, dbName model.CIStr, table *model.TableInfo) error
Close()
}

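With GetDomain and the SQL/DDL session methods removed, the glue abstraction shrinks to storage access and session lifecycle. A sketch of how the trimmed interfaces likely read after this commit, assuming the methods shown as context here (CreateSession, Open, OwnsStorage) are untouched; anything in the elided hunks is omitted:

// Glue is an abstraction of TiDB function calls used in BR.
type Glue interface {
	CreateSession(store kv.Storage) (Session, error)
	Open(path string, option pd.SecurityOption) (kv.Storage, error)

	// OwnsStorage returns whether the storage returned by Open() is owned
	// ... (remaining methods sit in the elided hunk)
}

// Session is an abstraction of the session.Session interface.
// Only lifecycle management remains; Execute, ExecuteInternal,
// CreateDatabase, and CreateTable are gone.
type Session interface {
	Close()
}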
(The remaining 13 of the 16 changed files are not shown here.)
