From 8c97452f3c27c641f4e0be5b980cbe491740056b Mon Sep 17 00:00:00 2001 From: 3pointer Date: Thu, 13 Feb 2020 10:24:23 +0800 Subject: [PATCH 1/5] restore: fix restore summary log (#150) Co-authored-by: kennytm --- pkg/restore/client.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 3030ba857..5e8df4418 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -242,8 +242,6 @@ func (rc *Client) RestoreTable( key := fmt.Sprintf("%s.%s", table.Db.Name.String(), table.Schema.Name.String()) if err != nil { summary.CollectFailureUnit(key, err) - } else { - summary.CollectSuccessUnit(key, elapsed) } }() @@ -342,6 +340,7 @@ func (rc *Client) RestoreAll( defer func() { elapsed := time.Since(start) log.Info("Restore All", zap.Duration("take", elapsed)) + summary.CollectSuccessUnit("restore all", elapsed) }() errCh := make(chan error, len(rc.databases)) wg := new(sync.WaitGroup) From 008ec45182220d6d7cc7e8b13f940bfea1e24fd6 Mon Sep 17 00:00:00 2001 From: 5kbpers <20279863+5kbpers@users.noreply.github.com> Date: Thu, 13 Feb 2020 15:43:18 +0800 Subject: [PATCH 2/5] restore: enhance error handling (#152) * restore: enhance error handling Signed-off-by: 5kbpers * unit test Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * fix region epoch error Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * remove `Restore*` Signed-off-by: 5kbpers * address lint Signed-off-by: 5kbpers * add debug log Signed-off-by: 5kbpers * Apply suggestions from code review Co-Authored-By: kennytm * Update pkg/restore/import.go Co-Authored-By: kennytm * fix retry error Signed-off-by: 5kbpers * handle RegionNotFound error Signed-off-by: 5kbpers Co-authored-by: Neil Shen Co-authored-by: kennytm --- go.mod | 3 + go.sum | 11 +++ pkg/restore/backoff.go | 117 +++++++++++++++++++++++++ pkg/restore/backoff_test.go | 58 +++++++++++++ pkg/restore/client.go | 133 +++++----------------------- pkg/restore/import.go | 168 ++++++++++++++++++------------------ pkg/restore/split.go | 3 +- pkg/restore/util.go | 52 +---------- pkg/task/restore.go | 2 +- pkg/utils/retry.go | 40 +++++++++ tests/br_full_ddl/run.sh | 2 +- 11 files changed, 338 insertions(+), 251 deletions(-) create mode 100644 pkg/restore/backoff.go create mode 100644 pkg/restore/backoff_test.go create mode 100644 pkg/utils/retry.go diff --git a/go.mod b/go.mod index 850750f09..180c58f93 100644 --- a/go.mod +++ b/go.mod @@ -8,12 +8,14 @@ require ( github.com/cheggaaa/pb/v3 v3.0.1 github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect + github.com/fatih/color v1.9.0 // indirect github.com/fsouza/fake-gcs-server v1.15.0 github.com/go-sql-driver/mysql v1.4.1 github.com/gogo/protobuf v1.3.1 github.com/golang/snappy v0.0.1 // indirect github.com/google/btree v1.0.0 github.com/google/uuid v1.1.1 + github.com/mattn/go-runewidth v0.0.7 // indirect github.com/onsi/ginkgo v1.10.3 // indirect github.com/onsi/gomega v1.7.1 // indirect github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 @@ -33,6 +35,7 @@ require ( go.uber.org/zap v1.13.0 golang.org/x/net v0.0.0-20191011234655-491137f69257 // indirect golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 // indirect google.golang.org/api v0.14.0 google.golang.org/grpc v1.25.1 ) diff --git a/go.sum b/go.sum index d5e9c891d..0fe4a3024 100644 --- a/go.sum +++ b/go.sum @@ -97,6 +97,8 @@ 
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsouza/fake-gcs-server v1.15.0 h1:ss/ztlt10Y64A5qslmxZKsiqW/i28t5DkRtv6qSFaLQ= @@ -224,12 +226,18 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -518,6 +526,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190909082730-f460065e899a h1:mIzbOulag9/gXacgxKlFVwpCOWSfBT3/pDyyCwGA9as= golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= @@ -555,6 +564,8 @@ golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools 
v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 h1:BKiPVwWbEdmAh+5CBwk13CYeVJQRDJpDnKgDyMOGz9M=
+golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4 h1:Toz2IK7k8rbltAXwNAxKcn9OzqyNfMUhUNjz3sL0NMk=
 golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/pkg/restore/backoff.go b/pkg/restore/backoff.go
new file mode 100644
index 000000000..dae14e109
--- /dev/null
+++ b/pkg/restore/backoff.go
@@ -0,0 +1,117 @@
+package restore
+
+import (
+	"time"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/log"
+	"go.uber.org/zap"
+
+	"github.com/pingcap/br/pkg/utils"
+)
+
+var (
+	errNotLeader           = errors.NewNoStackError("not leader")
+	errEpochNotMatch       = errors.NewNoStackError("epoch not match")
+	errKeyNotInRegion      = errors.NewNoStackError("key not in region")
+	errRegionNotFound      = errors.NewNoStackError("region not found")
+	errResp                = errors.NewNoStackError("response error")
+	errRewriteRuleNotFound = errors.NewNoStackError("rewrite rule not found")
+	errRangeIsEmpty        = errors.NewNoStackError("range is empty")
+	errGrpc                = errors.NewNoStackError("gRPC error")
+
+	// TODO: add `error` field to `DownloadResponse` to distinguish the errors of gRPC
+	// from the errors of the request
+	errBadFormat      = errors.NewNoStackError("bad format")
+	errWrongKeyPrefix = errors.NewNoStackError("wrong key prefix")
+	errFileCorrupted  = errors.NewNoStackError("file corrupted")
+	errCannotRead     = errors.NewNoStackError("cannot read external storage")
+)
+
+const (
+	importSSTRetryTimes      = 16
+	importSSTWaitInterval    = 10 * time.Millisecond
+	importSSTMaxWaitInterval = 1 * time.Second
+
+	downloadSSTRetryTimes      = 8
+	downloadSSTWaitInterval    = 10 * time.Millisecond
+	downloadSSTMaxWaitInterval = 1 * time.Second
+
+	resetTsRetryTime       = 16
+	resetTSWaitInterval    = 50 * time.Millisecond
+	resetTSMaxWaitInterval = 500 * time.Millisecond
+)
+
+type importerBackoffer struct {
+	attempt      int
+	delayTime    time.Duration
+	maxDelayTime time.Duration
+}
+
+func newImportSSTBackoffer() utils.Backoffer {
+	return &importerBackoffer{
+		attempt:      importSSTRetryTimes,
+		delayTime:    importSSTWaitInterval,
+		maxDelayTime: importSSTMaxWaitInterval,
+	}
+}
+
+func newDownloadSSTBackoffer() utils.Backoffer {
+	return &importerBackoffer{
+		attempt:      downloadSSTRetryTimes,
+		delayTime:    downloadSSTWaitInterval,
+		maxDelayTime: downloadSSTMaxWaitInterval,
+	}
+}
+
+func (bo *importerBackoffer) NextBackoff(err error) time.Duration {
+	switch errors.Cause(err) {
+	case errResp, errGrpc, errEpochNotMatch, errNotLeader:
+		bo.delayTime = 2 * bo.delayTime
+		bo.attempt--
+	case errRangeIsEmpty, errRewriteRuleNotFound:
+		// Expected error, finish the operation
+		bo.delayTime = 0
+		bo.attempt = 0
+	default:
+		// Unexpected error
+		bo.delayTime = 0
+		bo.attempt = 0
+		log.Warn("unexpected error, stop retrying", zap.Error(err))
+	}
+	if bo.delayTime > bo.maxDelayTime {
+		return bo.maxDelayTime
+	}
+	return bo.delayTime
+}
+
+func (bo *importerBackoffer) Attempt() int {
+	return bo.attempt
+}
+
+type resetTSBackoffer struct {
+	attempt      int
+	delayTime    time.Duration
+	maxDelayTime 
time.Duration +} + +func newResetTSBackoffer() utils.Backoffer { + return &resetTSBackoffer{ + attempt: resetTsRetryTime, + delayTime: resetTSWaitInterval, + maxDelayTime: resetTSMaxWaitInterval, + } +} + +func (bo *resetTSBackoffer) NextBackoff(err error) time.Duration { + bo.delayTime = 2 * bo.delayTime + bo.attempt-- + if bo.delayTime > bo.maxDelayTime { + return bo.maxDelayTime + } + return bo.delayTime +} + +func (bo *resetTSBackoffer) Attempt() int { + return bo.attempt +} diff --git a/pkg/restore/backoff_test.go b/pkg/restore/backoff_test.go new file mode 100644 index 000000000..537f0980c --- /dev/null +++ b/pkg/restore/backoff_test.go @@ -0,0 +1,58 @@ +package restore + +import ( + "context" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" + + "github.com/pingcap/br/pkg/utils" +) + +var _ = Suite(&testBackofferSuite{}) + +type testBackofferSuite struct { + mock *utils.MockCluster +} + +func (s *testBackofferSuite) SetUpSuite(c *C) { + var err error + s.mock, err = utils.NewMockCluster() + c.Assert(err, IsNil) +} + +func (s *testBackofferSuite) TearDownSuite(c *C) { + testleak.AfterTest(c)() +} + +func (s *testBackofferSuite) TestImporterBackoffer(c *C) { + var counter int + err := utils.WithRetry(context.Background(), func() error { + defer func() { counter++ }() + switch counter { + case 0: + return errGrpc + case 1: + return errResp + case 2: + return errRangeIsEmpty + } + return nil + }, newImportSSTBackoffer()) + c.Assert(counter, Equals, 3) + c.Assert(err, Equals, errRangeIsEmpty) + + counter = 0 + backoffer := importerBackoffer{ + attempt: 10, + delayTime: time.Nanosecond, + maxDelayTime: time.Nanosecond, + } + err = utils.WithRetry(context.Background(), func() error { + defer func() { counter++ }() + return errResp + }, &backoffer) + c.Assert(counter, Equals, 10) + c.Assert(err, Equals, errResp) +} diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 5e8df4418..5402d78bc 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -2,7 +2,6 @@ package restore import ( "context" - "fmt" "math" "sync" "time" @@ -26,15 +25,9 @@ import ( "github.com/pingcap/br/pkg/utils" ) -const ( - resetTsRetryTime = 16 - resetTSWaitInterval = 50 * time.Millisecond - resetTSMaxWaitInterval = 500 * time.Millisecond - - // defaultChecksumConcurrency is the default number of the concurrent - // checksum tasks. - defaultChecksumConcurrency = 64 -) +// defaultChecksumConcurrency is the default number of the concurrent +// checksum tasks. +const defaultChecksumConcurrency = 64 // Client sends requests to restore files type Client struct { @@ -138,13 +131,10 @@ func (rc *Client) ResetTS(pdAddrs []string) error { restoreTS := rc.backupMeta.GetEndVersion() log.Info("reset pd timestamp", zap.Uint64("ts", restoreTS)) i := 0 - return withRetry(func() error { + return utils.WithRetry(rc.ctx, func() error { idx := i % len(pdAddrs) return utils.ResetTS(pdAddrs[idx], restoreTS) - }, func(e error) bool { - i++ - return true - }, resetTsRetryTime, resetTSWaitInterval, resetTSMaxWaitInterval) + }, newResetTSBackoffer()) } // GetDatabases returns all databases. @@ -228,29 +218,28 @@ func (rc *Client) setSpeedLimit() error { return nil } -// RestoreTable tries to restore the data of a table. -func (rc *Client) RestoreTable( - table *utils.Table, +// RestoreFiles tries to restore the files. 
+func (rc *Client) RestoreFiles( + files []*backup.File, rewriteRules *RewriteRules, updateCh chan<- struct{}, ) (err error) { start := time.Now() defer func() { elapsed := time.Since(start) - log.Info("restore table", - zap.Stringer("table", table.Schema.Name), zap.Duration("take", elapsed)) - key := fmt.Sprintf("%s.%s", table.Db.Name.String(), table.Schema.Name.String()) - if err != nil { - summary.CollectFailureUnit(key, err) + if err == nil { + log.Info("Restore Files", + zap.Int("files", len(files)), zap.Duration("take", elapsed)) + summary.CollectSuccessUnit("files", elapsed) + } else { + summary.CollectFailureUnit("files", err) } }() - log.Debug("start to restore table", - zap.Stringer("table", table.Schema.Name), - zap.Stringer("db", table.Db.Name), - zap.Array("files", files(table.Files)), + log.Debug("start to restore files", + zap.Int("files", len(files)), ) - errCh := make(chan error, len(table.Files)) + errCh := make(chan error, len(files)) wg := new(sync.WaitGroup) defer close(errCh) err = rc.setSpeedLimit() @@ -258,7 +247,7 @@ func (rc *Client) RestoreTable( return err } - for _, file := range table.Files { + for _, file := range files { wg.Add(1) fileReplica := file rc.workerPool.Apply( @@ -272,100 +261,18 @@ func (rc *Client) RestoreTable( } }) } - for range table.Files { + for range files { err := <-errCh if err != nil { rc.cancel() wg.Wait() log.Error( - "restore table failed", - zap.Stringer("table", table.Schema.Name), - zap.Stringer("db", table.Db.Name), + "restore files failed", zap.Error(err), ) return err } } - log.Info( - "finish to restore table", - zap.Stringer("table", table.Schema.Name), - zap.Stringer("db", table.Db.Name), - ) - return nil -} - -// RestoreDatabase tries to restore the data of a database -func (rc *Client) RestoreDatabase( - db *utils.Database, - rewriteRules *RewriteRules, - updateCh chan<- struct{}, -) (err error) { - start := time.Now() - defer func() { - elapsed := time.Since(start) - log.Info("Restore Database", zap.Stringer("db", db.Schema.Name), zap.Duration("take", elapsed)) - }() - errCh := make(chan error, len(db.Tables)) - wg := new(sync.WaitGroup) - defer close(errCh) - for _, table := range db.Tables { - wg.Add(1) - tblReplica := table - rc.tableWorkerPool.Apply(func() { - defer wg.Done() - select { - case <-rc.ctx.Done(): - errCh <- nil - case errCh <- rc.RestoreTable( - tblReplica, rewriteRules, updateCh): - } - }) - } - for range db.Tables { - err = <-errCh - if err != nil { - wg.Wait() - return err - } - } - return nil -} - -// RestoreAll tries to restore all the data of backup files. 
-func (rc *Client) RestoreAll( - rewriteRules *RewriteRules, - updateCh chan<- struct{}, -) (err error) { - start := time.Now() - defer func() { - elapsed := time.Since(start) - log.Info("Restore All", zap.Duration("take", elapsed)) - summary.CollectSuccessUnit("restore all", elapsed) - }() - errCh := make(chan error, len(rc.databases)) - wg := new(sync.WaitGroup) - defer close(errCh) - for _, db := range rc.databases { - wg.Add(1) - dbReplica := db - rc.tableWorkerPool.Apply(func() { - defer wg.Done() - select { - case <-rc.ctx.Done(): - errCh <- nil - case errCh <- rc.RestoreDatabase( - dbReplica, rewriteRules, updateCh): - } - }) - } - - for range rc.databases { - err = <-errCh - if err != nil { - wg.Wait() - return err - } - } return nil } diff --git a/pkg/restore/import.go b/pkg/restore/import.go index 77273ebab..01f8456ef 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -2,6 +2,7 @@ package restore import ( "context" + "strings" "sync" "time" @@ -16,25 +17,10 @@ import ( "google.golang.org/grpc" "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" ) -var ( - errNotLeader = errors.New("not leader") - errEpochNotMatch = errors.New("epoch not match") - errRewriteRuleNotFound = errors.New("rewrite rule not found") - errRangeIsEmpty = errors.New("range is empty") -) - -const ( - importScanResgionTime = 10 * time.Second - importFileRetryTimes = 16 - importFileWaitInterval = 10 * time.Millisecond - importFileMaxWaitInterval = 1 * time.Second - - downloadSSTRetryTimes = 8 - downloadSSTWaitInterval = 10 * time.Millisecond - downloadSSTMaxWaitInterval = 1 * time.Second -) +const importScanRegionTime = 10 * time.Second // ImporterClient is used to import a file to TiKV type ImporterClient interface { @@ -172,10 +158,9 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul log.Debug("rewrite file keys", zap.Stringer("file", file), zap.Binary("startKey", startKey), - zap.Binary("endKey", endKey), - ) - err = withRetry(func() error { - ctx, cancel := context.WithTimeout(importer.ctx, importScanResgionTime) + zap.Binary("endKey", endKey)) + err = utils.WithRetry(importer.ctx, func() error { + ctx, cancel := context.WithTimeout(importer.ctx, importScanRegionTime) defer cancel() // Scan regions covered by the file range regionInfos, err1 := importer.metaClient.ScanRegions(ctx, startKey, endKey, 0) @@ -185,63 +170,56 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul log.Debug("scan regions", zap.Stringer("file", file), zap.Int("count", len(regionInfos))) // Try to download and ingest the file in every region for _, regionInfo := range regionInfos { - var downloadMeta *import_sstpb.SSTMeta info := regionInfo // Try to download file. 
- err = withRetry(func() error { - var err2 error - var isEmpty bool - downloadMeta, isEmpty, err2 = importer.downloadSST(info, file, rewriteRules) - if err2 != nil { - if err != errRewriteRuleNotFound { - log.Warn("download file failed", - zap.Stringer("file", file), - zap.Stringer("region", info.Region), - zap.Binary("startKey", startKey), - zap.Binary("endKey", endKey), - zap.Error(err2), - ) - } - return err2 - } - if isEmpty { - log.Info( - "file don't have any key in this region, skip it", - zap.Stringer("file", file), - zap.Stringer("region", info.Region), - ) - return errRangeIsEmpty - } - return nil - }, func(e error) bool { - // Scan regions may return some regions which cannot match any rewrite rule, - // like [t{tableID}, t{tableID}_r), those regions should be skipped - return e != errRewriteRuleNotFound && e != errRangeIsEmpty - }, downloadSSTRetryTimes, downloadSSTWaitInterval, downloadSSTMaxWaitInterval) - if err != nil { - if err == errRewriteRuleNotFound || err == errRangeIsEmpty { + var downloadMeta *import_sstpb.SSTMeta + err1 = utils.WithRetry(importer.ctx, func() error { + var e error + downloadMeta, e = importer.downloadSST(info, file, rewriteRules) + return e + }, newDownloadSSTBackoffer()) + if err1 != nil { + if err1 == errRewriteRuleNotFound || err1 == errRangeIsEmpty { // Skip this region continue } - return err + log.Error("download file failed", + zap.Stringer("file", file), + zap.Stringer("region", info.Region), + zap.Binary("startKey", startKey), + zap.Binary("endKey", endKey), + zap.Error(err1)) + return err1 } - err = importer.ingestSST(downloadMeta, info) - if err != nil { - log.Warn("ingest file failed", + err1 = importer.ingestSST(downloadMeta, info) + // If error is `NotLeader`, update the region info and retry + for errors.Cause(err1) == errNotLeader { + log.Debug("ingest sst returns not leader error, retry it", + zap.Stringer("region", info.Region)) + var newInfo *RegionInfo + newInfo, err1 = importer.metaClient.GetRegion(importer.ctx, info.Region.GetStartKey()) + if err1 != nil { + break + } + if !checkRegionEpoch(newInfo, info) { + err1 = errEpochNotMatch + break + } + err1 = importer.ingestSST(downloadMeta, newInfo) + } + if err1 != nil { + log.Error("ingest file failed", zap.Stringer("file", file), zap.Stringer("range", downloadMeta.GetRange()), zap.Stringer("region", info.Region), - zap.Error(err), - ) - return err + zap.Error(err1)) + return err1 } summary.CollectSuccessUnit(summary.TotalKV, file.TotalKvs) summary.CollectSuccessUnit(summary.TotalBytes, file.TotalBytes) } return nil - }, func(e error) bool { - return true - }, importFileRetryTimes, importFileWaitInterval, importFileMaxWaitInterval) + }, newImportSSTBackoffer()) return err } @@ -257,33 +235,25 @@ func (importer *FileImporter) downloadSST( regionInfo *RegionInfo, file *backup.File, rewriteRules *RewriteRules, -) (*import_sstpb.SSTMeta, bool, error) { +) (*import_sstpb.SSTMeta, error) { id, err := uuid.New().MarshalBinary() if err != nil { - return nil, true, errors.Trace(err) + return nil, errors.Trace(err) } // Assume one region reflects to one rewrite rule _, key, err := codec.DecodeBytes(regionInfo.Region.GetStartKey()) if err != nil { - return nil, true, err + return nil, err } regionRule := matchNewPrefix(key, rewriteRules) if regionRule == nil { - log.Debug("cannot find rewrite rule, skip region", - zap.Stringer("region", regionInfo.Region), - zap.Array("tableRule", rules(rewriteRules.Table)), - zap.Array("dataRule", rules(rewriteRules.Data)), - zap.Binary("key", key), - ) - 
return nil, true, errRewriteRuleNotFound + return nil, errors.Trace(errRewriteRuleNotFound) } rule := import_sstpb.RewriteRule{ OldKeyPrefix: encodeKeyPrefix(regionRule.GetOldKeyPrefix()), NewKeyPrefix: encodeKeyPrefix(regionRule.GetNewKeyPrefix()), } sstMeta := getSSTMetaFromFile(id, file, regionInfo.Region, &rule) - sstMeta.RegionId = regionInfo.Region.GetId() - sstMeta.RegionEpoch = regionInfo.Region.GetRegionEpoch() req := &import_sstpb.DownloadRequest{ Sst: sstMeta, StorageBackend: importer.backend, @@ -298,15 +268,15 @@ func (importer *FileImporter) downloadSST( for _, peer := range regionInfo.Region.GetPeers() { resp, err = importer.importClient.DownloadSST(importer.ctx, peer.GetStoreId(), req) if err != nil { - return nil, true, err + return nil, extractDownloadSSTError(err) } if resp.GetIsEmpty() { - return &sstMeta, true, nil + return nil, errors.Trace(errRangeIsEmpty) } } sstMeta.Range.Start = truncateTS(resp.Range.GetStart()) sstMeta.Range.End = truncateTS(resp.Range.GetEnd()) - return &sstMeta, false, nil + return &sstMeta, nil } func (importer *FileImporter) ingestSST( @@ -329,17 +299,45 @@ func (importer *FileImporter) ingestSST( log.Debug("download SST", zap.Stringer("sstMeta", sstMeta)) resp, err := importer.importClient.IngestSST(importer.ctx, leader.GetStoreId(), req) if err != nil { - return err + if strings.Contains(err.Error(), "RegionNotFound") { + return errors.Trace(errRegionNotFound) + } + return errors.Trace(err) } respErr := resp.GetError() if respErr != nil { - if respErr.EpochNotMatch != nil { - return errEpochNotMatch + log.Debug("ingest sst resp error", zap.Stringer("error", respErr)) + if respErr.GetKeyNotInRegion() != nil { + return errors.Trace(errKeyNotInRegion) } - if respErr.NotLeader != nil { - return errNotLeader + if respErr.GetNotLeader() != nil { + return errors.Trace(errNotLeader) } - return errors.Errorf("ingest failed: %v", respErr) + return errors.Wrap(errResp, respErr.String()) } return nil } + +func checkRegionEpoch(new, old *RegionInfo) bool { + if new.Region.GetId() == old.Region.GetId() && + new.Region.GetRegionEpoch().GetVersion() == old.Region.GetRegionEpoch().GetVersion() && + new.Region.GetRegionEpoch().GetConfVer() == old.Region.GetRegionEpoch().GetConfVer() { + return true + } + return false +} + +func extractDownloadSSTError(e error) error { + err := errGrpc + switch { + case strings.Contains(e.Error(), "bad format"): + err = errBadFormat + case strings.Contains(e.Error(), "wrong prefix"): + err = errWrongKeyPrefix + case strings.Contains(e.Error(), "corrupted"): + err = errFileCorrupted + case strings.Contains(e.Error(), "Cannot read"): + err = errCannotRead + } + return errors.Trace(err) +} diff --git a/pkg/restore/split.go b/pkg/restore/split.go index 31b23a60f..3248fdd0d 100644 --- a/pkg/restore/split.go +++ b/pkg/restore/split.go @@ -111,7 +111,7 @@ SplitRegions: } time.Sleep(interval) if i > 3 { - log.Warn("splitting regions failed, retry it", zap.Error(err)) + log.Warn("splitting regions failed, retry it", zap.Error(err), zap.ByteStrings("keys", keys)) } continue SplitRegions } @@ -259,6 +259,7 @@ func getSplitKeys(rewriteRules *RewriteRules, ranges []Range, regions []*RegionI splitKeys = make([][]byte, 0, 1) } splitKeyMap[region.Region.GetId()] = append(splitKeys, key) + log.Debug("get key for split region", zap.Binary("key", key), zap.Stringer("region", region.Region)) } } return splitKeyMap diff --git a/pkg/restore/util.go b/pkg/restore/util.go index ea8629470..63ee92969 100644 --- a/pkg/restore/util.go +++ 
b/pkg/restore/util.go @@ -17,31 +17,12 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" - "go.uber.org/zap/zapcore" "github.com/pingcap/br/pkg/summary" ) var recordPrefixSep = []byte("_r") -type files []*backup.File - -func (fs files) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range fs { - arr.AppendString(fs[i].String()) - } - return nil -} - -type rules []*import_sstpb.RewriteRule - -func (rs rules) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range rs { - arr.AppendString(rs[i].String()) - } - return nil -} - // idAllocator always returns a specified ID type idAllocator struct { id int64 @@ -163,40 +144,11 @@ func getSSTMetaFromFile( Start: rangeStart, End: rangeEnd, }, + RegionId: region.GetId(), + RegionEpoch: region.GetRegionEpoch(), } } -type retryableFunc func() error -type continueFunc func(error) bool - -func withRetry( - retryableFunc retryableFunc, - continueFunc continueFunc, - attempts uint, - delayTime time.Duration, - maxDelayTime time.Duration, -) error { - var lastErr error - for i := uint(0); i < attempts; i++ { - err := retryableFunc() - if err != nil { - lastErr = err - // If this is the last attempt, do not wait - if !continueFunc(err) || i == attempts-1 { - break - } - delayTime = 2 * delayTime - if delayTime > maxDelayTime { - delayTime = maxDelayTime - } - time.Sleep(delayTime) - } else { - return nil - } - } - return lastErr -} - // ValidateFileRanges checks and returns the ranges of the files. func ValidateFileRanges( files []*backup.File, diff --git a/pkg/task/restore.go b/pkg/task/restore.go index f2f3caf43..a56a1d6da 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -139,7 +139,7 @@ func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error { if err != nil { return err } - err = client.RestoreAll(rewriteRules, updateCh) + err = client.RestoreFiles(files, rewriteRules, updateCh) // always run the post-work even on error, so we don't stuck in the import mode or emptied schedulers postErr := restorePostWork(ctx, client, mgr, removedSchedulers) diff --git a/pkg/utils/retry.go b/pkg/utils/retry.go new file mode 100644 index 000000000..a8f446764 --- /dev/null +++ b/pkg/utils/retry.go @@ -0,0 +1,40 @@ +package utils + +import ( + "context" + "time" +) + +// RetryableFunc presents a retryable opreation +type RetryableFunc func() error + +// Backoffer implements a backoff policy for retrying operations +type Backoffer interface { + // NextBackoff returns a duration to wait before retrying again + NextBackoff(err error) time.Duration + // Attempt returns the remain attempt times + Attempt() int +} + +// WithRetry retrys a given operation with a backoff policy +func WithRetry( + ctx context.Context, + retryableFunc RetryableFunc, + backoffer Backoffer, +) error { + var lastErr error + for backoffer.Attempt() > 0 { + err := retryableFunc() + if err != nil { + lastErr = err + select { + case <-ctx.Done(): + return lastErr + case <-time.After(backoffer.NextBackoff(err)): + } + } else { + return nil + } + } + return lastErr +} diff --git a/tests/br_full_ddl/run.sh b/tests/br_full_ddl/run.sh index 3db1ecd60..1e40415d7 100755 --- a/tests/br_full_ddl/run.sh +++ b/tests/br_full_ddl/run.sh @@ -28,7 +28,7 @@ for i in $(seq $DDL_COUNT); do run_sql "USE $DB; ALTER TABLE $TABLE ADD INDEX (FIELD$i);" done -for i in $(sql $DDL_COUNT); do +for i in $(seq $DDL_COUNT); do if (( RANDOM % 2 )); then run_sql "USE $DB; ALTER TABLE $TABLE DROP INDEX FIELD$i;" fi From 
1e1fc97bc5771f93bb7ded23bec73830802f4eef Mon Sep 17 00:00:00 2001 From: 5kbpers <20279863+5kbpers@users.noreply.github.com> Date: Thu, 20 Feb 2020 20:04:45 +0800 Subject: [PATCH 3/5] Incremental BR: support DDL (#155) * support backup&restore ddl Signed-off-by: 5kbpers * integration tests Signed-off-by: 5kbpers * update kvproto Signed-off-by: 5kbpers * fix integration tests Signed-off-by: 5kbpers * reduce cyclomatic complexity of `runRestore` Signed-off-by: 5kbpers * fix test Signed-off-by: 5kbpers * add unit test Signed-off-by: 5kbpers * fix tests Signed-off-by: 5kbpers * disable fast checksum in incremental br Signed-off-by: 5kbpers * fix no valid key error Signed-off-by: 5kbpers * address lint Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers --- cmd/validate.go | 10 +-- go.mod | 2 +- go.sum | 11 ++++ pkg/backup/client.go | 55 +++++++++++++++- pkg/checksum/executor.go | 8 +-- pkg/checksum/executor_test.go | 2 +- pkg/restore/client.go | 41 +++++++++++- pkg/restore/client_test.go | 2 +- pkg/restore/db.go | 103 +++++++++++++++++++++++++++--- pkg/restore/db_test.go | 50 +++++++++++++-- pkg/restore/split.go | 37 ++++++++++- pkg/restore/split_test.go | 25 +++++++- pkg/task/backup.go | 37 +++++++++-- pkg/task/restore.go | 12 +++- pkg/utils/schema.go | 10 +-- tests/br_full_ddl/run.sh | 4 +- tests/br_incremental/run.sh | 57 ++++++----------- tests/br_incremental_ddl/run.sh | 74 +++++++++++++++++++++ tests/br_incremental_index/run.sh | 74 +++++++++++++++++++++ tests/config/tikv.toml | 1 + tests/run.sh | 2 + 21 files changed, 527 insertions(+), 90 deletions(-) create mode 100755 tests/br_incremental_ddl/run.sh create mode 100755 tests/br_incremental_index/run.sh diff --git a/cmd/validate.go b/cmd/validate.go index 559cb9983..8bca7e553 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -196,19 +196,19 @@ func newBackupMetaCommand() *cobra.Command { newTable := new(model.TableInfo) tableID, _ := tableIDAllocator.Alloc() newTable.ID = int64(tableID) - newTable.Name = table.Schema.Name - newTable.Indices = make([]*model.IndexInfo, len(table.Schema.Indices)) - for i, indexInfo := range table.Schema.Indices { + newTable.Name = table.Info.Name + newTable.Indices = make([]*model.IndexInfo, len(table.Info.Indices)) + for i, indexInfo := range table.Info.Indices { indexID, _ := indexIDAllocator.Alloc() newTable.Indices[i] = &model.IndexInfo{ ID: int64(indexID), Name: indexInfo.Name, } } - rules := restore.GetRewriteRules(newTable, table.Schema, 0) + rules := restore.GetRewriteRules(newTable, table.Info, 0) rewriteRules.Table = append(rewriteRules.Table, rules.Table...) rewriteRules.Data = append(rewriteRules.Data, rules.Data...) 
- tableIDMap[table.Schema.ID] = int64(tableID) + tableIDMap[table.Info.ID] = int64(tableID) } // Validate rewrite rules for _, file := range files { diff --git a/go.mod b/go.mod index 180c58f93..7d9e6b77e 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/onsi/gomega v1.7.1 // indirect github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 - github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5 + github.com/pingcap/kvproto v0.0.0-20200210234432-a965739f8162 github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d github.com/pingcap/pd v1.1.0-beta.0.20191219054547-4d65bbefbc6d diff --git a/go.sum b/go.sum index 0fe4a3024..24f73f5c9 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,7 @@ github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdc github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.26.1 h1:JGQggXhOiNJIqsmbYUl3cYtJZUffeOWlHtxfzGK7WPI= github.com/aws/aws-sdk-go v1.26.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -182,6 +183,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -300,6 +303,8 @@ github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO github.com/pingcap/kvproto v0.0.0-20191213111810-93cb7c623c8b/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5 h1:RUxQExD5yubAjWGnw8kcxfO9abbiVHIE1rbuCyQCWDE= github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= +github.com/pingcap/kvproto v0.0.0-20200210234432-a965739f8162 h1:lsoIoCoXMpcHvW6jHcqP/prA4I6duAp1DVyG2ULz4bM= +github.com/pingcap/kvproto v0.0.0-20200210234432-a965739f8162/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d h1:4QwSJRxmBjTB9ssJNWg2f2bDm5rqnHCUUjMh4N1QOOY= @@ -342,6 
+347,7 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -496,6 +502,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191011234655-491137f69257 h1:ry8e2D+cwaV6hk7lb3aRTjjZo24shrbK0e11QEOkTIg= golang.org/x/net v0.0.0-20191011234655-491137f69257/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -563,6 +570,7 @@ golang.org/x/tools v0.0.0-20191107010934-f79515f33823 h1:akkRBeitX2EZP59KdtKw310 golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 h1:BKiPVwWbEdmAh+5CBwk13CYeVJQRDJpDnKgDyMOGz9M= golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -596,6 +604,7 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs= google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 h1:6XzpBoANz1NqMNfDXzc2QmHmbb1vyMsvRfoP5rM+K1I= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -605,6 +614,7 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq 
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= @@ -629,6 +639,7 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 49e48638d..6d6eff033 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/store/tikv/oracle" @@ -119,7 +120,12 @@ func (bc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend } // SaveBackupMeta saves the current backup meta at the given path. -func (bc *Client) SaveBackupMeta(ctx context.Context) error { +func (bc *Client) SaveBackupMeta(ctx context.Context, ddlJobs []*model.Job) error { + ddlJobsData, err := json.Marshal(ddlJobs) + if err != nil { + return errors.Trace(err) + } + bc.backupMeta.Ddls = ddlJobsData backupMetaData, err := proto.Marshal(&bc.backupMeta) if err != nil { return errors.Trace(err) @@ -127,7 +133,7 @@ func (bc *Client) SaveBackupMeta(ctx context.Context) error { log.Debug("backup meta", zap.Reflect("meta", bc.backupMeta)) backendURL := storage.FormatBackendURL(bc.backend) - log.Info("save backup meta", zap.Stringer("path", &backendURL)) + log.Info("save backup meta", zap.Stringer("path", &backendURL), zap.Int("jobs", len(ddlJobs))) return bc.storage.Write(ctx, utils.MetaFile, backupMetaData) } @@ -241,6 +247,51 @@ func BuildBackupRangeAndSchema( return ranges, backupSchemas, nil } +// GetBackupDDLJobs returns the ddl jobs are done in (lastBackupTS, backupTS] +func GetBackupDDLJobs(dom *domain.Domain, lastBackupTS, backupTS uint64) ([]*model.Job, error) { + snapMeta, err := dom.GetSnapshotMeta(backupTS) + if err != nil { + return nil, errors.Trace(err) + } + lastSnapMeta, err := dom.GetSnapshotMeta(lastBackupTS) + if err != nil { + return nil, errors.Trace(err) + } + lastSchemaVersion, err := lastSnapMeta.GetSchemaVersion() + if err != nil { + return nil, errors.Trace(err) + } + allJobs := make([]*model.Job, 0) + defaultJobs, err := snapMeta.GetAllDDLJobsInQueue(meta.DefaultJobListKey) + if err != nil { + return nil, errors.Trace(err) + } + log.Debug("get default jobs", zap.Int("jobs", len(defaultJobs))) + allJobs = append(allJobs, defaultJobs...) 
+ addIndexJobs, err := snapMeta.GetAllDDLJobsInQueue(meta.AddIndexJobListKey) + if err != nil { + return nil, errors.Trace(err) + } + log.Debug("get add index jobs", zap.Int("jobs", len(addIndexJobs))) + allJobs = append(allJobs, addIndexJobs...) + historyJobs, err := snapMeta.GetAllHistoryDDLJobs() + if err != nil { + return nil, errors.Trace(err) + } + log.Debug("get history jobs", zap.Int("jobs", len(historyJobs))) + allJobs = append(allJobs, historyJobs...) + + completedJobs := make([]*model.Job, 0) + for _, job := range allJobs { + if (job.State == model.JobStateDone || job.State == model.JobStateSynced) && + (job.BinlogInfo != nil && job.BinlogInfo.SchemaVersion > lastSchemaVersion) { + completedJobs = append(completedJobs, job) + } + } + log.Debug("get completed jobs", zap.Int("jobs", len(completedJobs))) + return completedJobs, nil +} + // BackupRanges make a backup of the given key ranges. func (bc *Client) BackupRanges( ctx context.Context, diff --git a/pkg/checksum/executor.go b/pkg/checksum/executor.go index 2ca5cf66d..30e8f11c8 100644 --- a/pkg/checksum/executor.go +++ b/pkg/checksum/executor.go @@ -61,7 +61,7 @@ func buildChecksumRequest( reqs := make([]*kv.Request, 0, (len(newTable.Indices)+1)*(len(partDefs)+1)) var oldTableID int64 if oldTable != nil { - oldTableID = oldTable.Schema.ID + oldTableID = oldTable.Info.ID } rs, err := buildRequest(newTable, newTable.ID, oldTable, oldTableID, startTS) if err != nil { @@ -72,7 +72,7 @@ func buildChecksumRequest( for _, partDef := range partDefs { var oldPartID int64 if oldTable != nil { - for _, oldPartDef := range oldTable.Schema.Partition.Definitions { + for _, oldPartDef := range oldTable.Info.Partition.Definitions { if oldPartDef.Name == partDef.Name { oldPartID = oldPartDef.ID } @@ -108,7 +108,7 @@ func buildRequest( } var oldIndexInfo *model.IndexInfo if oldTable != nil { - for _, oldIndex := range oldTable.Schema.Indices { + for _, oldIndex := range oldTable.Info.Indices { if oldIndex.Name == indexInfo.Name { oldIndexInfo = oldIndex break @@ -117,7 +117,7 @@ func buildRequest( if oldIndexInfo == nil { log.Panic("index not found", zap.Reflect("table", tableInfo), - zap.Reflect("oldTable", oldTable.Schema), + zap.Reflect("oldTable", oldTable.Info), zap.Stringer("index", indexInfo.Name)) } } diff --git a/pkg/checksum/executor_test.go b/pkg/checksum/executor_test.go index ca68628e2..3e6d8078c 100644 --- a/pkg/checksum/executor_test.go +++ b/pkg/checksum/executor_test.go @@ -83,7 +83,7 @@ func (s *testChecksumSuite) TestChecksum(c *C) { // Test rewrite rules tk.MustExec("alter table t1 add index i2(a);") tableInfo1 = s.getTableInfo(c, "test", "t1") - oldTable := utils.Table{Schema: tableInfo1} + oldTable := utils.Table{Info: tableInfo1} exe2, err = NewExecutorBuilder(tableInfo2, math.MaxUint64). SetOldTable(&oldTable).Build() c.Assert(err, IsNil) diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 5402d78bc..f45b3d510 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -2,7 +2,9 @@ package restore import ( "context" + "encoding/json" "math" + "sort" "sync" "time" @@ -40,6 +42,7 @@ type Client struct { tableWorkerPool *utils.WorkerPool databases map[string]*utils.Database + ddlJobs []*model.Job backupMeta *backup.BackupMeta db *DB rateLimit uint64 @@ -97,8 +100,15 @@ func (rc *Client) InitBackupMeta(backupMeta *backup.BackupMeta, backend *backup. 
if err != nil { return errors.Trace(err) } + var ddlJobs []*model.Job + err = json.Unmarshal(backupMeta.GetDdls(), &ddlJobs) + if err != nil { + return errors.Trace(err) + } rc.databases = databases + rc.ddlJobs = ddlJobs rc.backupMeta = backupMeta + log.Info("load backupmeta", zap.Int("databases", len(rc.databases)), zap.Int("jobs", len(rc.ddlJobs))) metaClient := NewSplitClient(rc.pdClient) importClient := NewImportClient(metaClient) @@ -151,6 +161,11 @@ func (rc *Client) GetDatabase(name string) *utils.Database { return rc.databases[name] } +// GetDDLJobs returns ddl jobs +func (rc *Client) GetDDLJobs() []*model.Job { + return rc.ddlJobs +} + // GetTableSchema returns the schema of a table from TiDB. func (rc *Client) GetTableSchema( dom *domain.Domain, @@ -189,11 +204,11 @@ func (rc *Client) CreateTables( if err != nil { return nil, nil, err } - newTableInfo, err := rc.GetTableSchema(dom, table.Db.Name, table.Schema.Name) + newTableInfo, err := rc.GetTableSchema(dom, table.Db.Name, table.Info.Name) if err != nil { return nil, nil, err } - rules := GetRewriteRules(newTableInfo, table.Schema, newTS) + rules := GetRewriteRules(newTableInfo, table.Info, newTS) rewriteRules.Table = append(rewriteRules.Table, rules.Table...) rewriteRules.Data = append(rewriteRules.Data, rules.Data...) newTables = append(newTables, newTableInfo) @@ -201,6 +216,26 @@ func (rc *Client) CreateTables( return rewriteRules, newTables, nil } +// ExecDDLs executes the queries of the ddl jobs. +func (rc *Client) ExecDDLs(ddlJobs []*model.Job) error { + // Sort the ddl jobs by schema version in ascending order. + sort.Slice(ddlJobs, func(i, j int) bool { + return ddlJobs[i].BinlogInfo.SchemaVersion < ddlJobs[j].BinlogInfo.SchemaVersion + }) + + for _, job := range ddlJobs { + err := rc.db.ExecDDL(rc.ctx, job) + if err != nil { + return errors.Trace(err) + } + log.Info("execute ddl query", + zap.String("db", job.SchemaName), + zap.String("query", job.Query), + zap.Int64("historySchemaVersion", job.BinlogInfo.SchemaVersion)) + } + return nil +} + func (rc *Client) setSpeedLimit() error { if !rc.hasSpeedLimited && rc.rateLimit != 0 { stores, err := rc.pdClient.GetAllStores(rc.ctx, pd.WithExcludeTombstone()) @@ -380,7 +415,7 @@ func (rc *Client) ValidateChecksum( checksumResp.TotalBytes != table.TotalBytes { log.Error("failed in validate checksum", zap.String("database", table.Db.Name.L), - zap.String("table", table.Schema.Name.L), + zap.String("table", table.Info.Name.L), zap.Uint64("origin tidb crc64", table.Crc64Xor), zap.Uint64("calculated crc64", checksumResp.Checksum), zap.Uint64("origin tidb total kvs", table.TotalKvs), diff --git a/pkg/restore/client_test.go b/pkg/restore/client_test.go index 5007f1281..3d608b3b9 100644 --- a/pkg/restore/client_test.go +++ b/pkg/restore/client_test.go @@ -52,7 +52,7 @@ func (s *testRestoreClientSuite) TestCreateTables(c *C) { for i := len(tables) - 1; i >= 0; i-- { tables[i] = &utils.Table{ Db: dbSchema, - Schema: &model.TableInfo{ + Info: &model.TableInfo{ ID: int64(i), Name: model.NewCIStr("test" + strconv.Itoa(i)), Columns: []*model.ColumnInfo{{ diff --git a/pkg/restore/db.go b/pkg/restore/db.go index b114b7629..8c09af16f 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "sort" "strings" "github.com/pingcap/errors" @@ -38,6 +39,31 @@ func NewDB(store kv.Storage) (*DB, error) { }, nil } +// ExecDDL executes the query of a ddl job. 
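A minimal usage sketch follows, assuming the restore task wires the new DDL APIs together roughly as below; the pkg/task/restore.go hunk is only summarized in the diffstat of this patch, so the function name and plumbing here are illustrative assumptions, not part of the change itself. Only GetDDLJobs, FilterDDLJobs and ExecDDLs come from this patch.

// Illustrative sketch (assumed imports: "github.com/pingcap/br/pkg/restore",
// "github.com/pingcap/br/pkg/utils"); not part of the patch.
func restoreIncrementalDDLs(client *restore.Client, tables []*utils.Table) error {
	// DDL jobs deserialized from backupmeta in InitBackupMeta above.
	allDDLJobs := client.GetDDLJobs()
	// Keep only the jobs that touch the restored databases/tables.
	ddlJobs := restore.FilterDDLJobs(allDDLJobs, tables)
	// Replay them; ExecDDLs sorts by schema version in ascending order.
	return client.ExecDDLs(ddlJobs)
}
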
+func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error {
+	var err error
+	if ddlJob.BinlogInfo.TableInfo != nil {
+		switchDbSQL := fmt.Sprintf("use %s;", ddlJob.SchemaName)
+		_, err = db.se.Execute(ctx, switchDbSQL)
+		if err != nil {
+			log.Error("switch db failed",
+				zap.String("query", switchDbSQL),
+				zap.String("db", ddlJob.SchemaName),
+				zap.Error(err))
+			return errors.Trace(err)
+		}
+	}
+	_, err = db.se.Execute(ctx, ddlJob.Query)
+	if err != nil {
+		log.Error("execute ddl query failed",
+			zap.String("query", ddlJob.Query),
+			zap.String("db", ddlJob.SchemaName),
+			zap.Int64("historySchemaVersion", ddlJob.BinlogInfo.SchemaVersion),
+			zap.Error(err))
+	}
+	return errors.Trace(err)
+}
+
 // CreateDatabase executes a CREATE DATABASE SQL.
 func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error {
 	var buf bytes.Buffer
@@ -49,16 +75,15 @@ func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error {
 	createSQL := buf.String()
 	_, err = db.se.Execute(ctx, createSQL)
 	if err != nil {
-		log.Error("create database failed", zap.String("SQL", createSQL), zap.Error(err))
-		return errors.Trace(err)
+		log.Error("create database failed", zap.String("query", createSQL), zap.Error(err))
 	}
-	return nil
+	return errors.Trace(err)
 }
 
 // CreateTable executes a CREATE TABLE SQL.
 func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error {
 	var buf bytes.Buffer
-	schema := table.Schema
+	schema := table.Info
 	err := executor.ConstructResultOfShowCreateTable(db.se, schema, newIDAllocator(schema.AutoIncID), &buf)
 	if err != nil {
 		log.Error(
@@ -88,7 +113,7 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error {
 		log.Error("create table failed",
 			zap.String("SQL", createSQL),
 			zap.Stringer("db", table.Db.Name),
-			zap.Stringer("table", table.Schema.Name),
+			zap.Stringer("table", table.Info.Name),
 			zap.Error(err))
 		return errors.Trace(err)
 	}
@@ -99,16 +124,76 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error {
 	_, err = db.se.Execute(ctx, alterAutoIncIDSQL)
 	if err != nil {
 		log.Error("alter AutoIncID failed",
-			zap.String("SQL", alterAutoIncIDSQL),
+			zap.String("query", alterAutoIncIDSQL),
 			zap.Stringer("db", table.Db.Name),
-			zap.Stringer("table", table.Schema.Name),
+			zap.Stringer("table", table.Info.Name),
 			zap.Error(err))
-		return errors.Trace(err)
 	}
-	return nil
+	return errors.Trace(err)
 }
 
 // Close closes the connection
 func (db *DB) Close() {
 	db.se.Close()
 }
+
+// FilterDDLJobs filters ddl jobs
+func FilterDDLJobs(allDDLJobs []*model.Job, tables []*utils.Table) (ddlJobs []*model.Job) {
+	// Sort the ddl jobs by schema version in descending order.
+	sort.Slice(allDDLJobs, func(i, j int) bool {
+		return allDDLJobs[i].BinlogInfo.SchemaVersion > allDDLJobs[j].BinlogInfo.SchemaVersion
+	})
+	dbs := getDatabases(tables)
+	for _, db := range dbs {
+		// These maps are for handling some corner cases.
+		// e.g. let "t=2" denote that the id of database "t" is 2. If the ddl execution sequence is:
+		// rename "a" to "b"(a=1) -> drop "b"(b=1) -> create "b"(b=2) -> rename "b" to "a"(a=2)
+		// then we cannot find the "create" DDL by name and id directly.
+		// To cover this case, we must find all names and ids the database/table ever had.
+ dbIDs := make(map[int64]bool) + dbIDs[db.ID] = true + dbNames := make(map[string]bool) + dbNames[db.Name.String()] = true + for _, job := range allDDLJobs { + if job.BinlogInfo.DBInfo != nil { + if dbIDs[job.SchemaID] || dbNames[job.BinlogInfo.DBInfo.Name.String()] { + ddlJobs = append(ddlJobs, job) + // The the jobs executed with the old id, like the step 2 in the example above. + dbIDs[job.SchemaID] = true + // For the jobs executed after rename, like the step 3 in the example above. + dbNames[job.BinlogInfo.DBInfo.Name.String()] = true + } + } + } + } + + for _, table := range tables { + tableIDs := make(map[int64]bool) + tableIDs[table.Info.ID] = true + tableNames := make(map[string]bool) + tableNames[table.Info.Name.String()] = true + for _, job := range allDDLJobs { + if job.BinlogInfo.TableInfo != nil { + if tableIDs[job.TableID] || tableNames[job.BinlogInfo.TableInfo.Name.String()] { + ddlJobs = append(ddlJobs, job) + tableIDs[job.TableID] = true + // For truncate table, the id may be changed + tableIDs[job.BinlogInfo.TableInfo.ID] = true + tableNames[job.BinlogInfo.TableInfo.Name.String()] = true + } + } + } + } + return ddlJobs +} + +func getDatabases(tables []*utils.Table) (dbs []*model.DBInfo) { + dbIDs := make(map[int64]bool) + for _, table := range tables { + if !dbIDs[table.Db.ID] { + dbs = append(dbs, table.Db) + dbIDs[table.Db.ID] = true + } + } + return +} diff --git a/pkg/restore/db_test.go b/pkg/restore/db_test.go index 98341f510..0151b4da6 100644 --- a/pkg/restore/db_test.go +++ b/pkg/restore/db_test.go @@ -12,6 +12,7 @@ import ( "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/br/pkg/backup" "github.com/pingcap/br/pkg/utils" ) @@ -25,19 +26,18 @@ func (s *testRestoreSchemaSuite) SetUpSuite(c *C) { var err error s.mock, err = utils.NewMockCluster() c.Assert(err, IsNil) + c.Assert(s.mock.Start(), IsNil) } func TestT(t *testing.T) { TestingT(t) } func (s *testRestoreSchemaSuite) TearDownSuite(c *C) { + s.mock.Stop() testleak.AfterTest(c)() } func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { - c.Assert(s.mock.Start(), IsNil) - defer s.mock.Stop() - tk := testkit.NewTestKit(c, s.mock.Storage) tk.MustExec("use test") tk.MustExec("set @@sql_mode=''") @@ -60,16 +60,16 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { tableInfo, err := info.TableByName(model.NewCIStr("test"), model.NewCIStr("\"t\"")) c.Assert(err, IsNil, Commentf("Error get table info: %s", err)) table := utils.Table{ - Schema: tableInfo.Meta(), - Db: dbInfo, + Info: tableInfo.Meta(), + Db: dbInfo, } // Get the next AutoIncID idAlloc := autoid.NewAllocator(s.mock.Storage, dbInfo.ID, false, autoid.RowIDAllocType) - globalAutoID, err := idAlloc.NextGlobalAutoID(table.Schema.ID) + globalAutoID, err := idAlloc.NextGlobalAutoID(table.Info.ID) c.Assert(err, IsNil, Commentf("Error allocate next auto id")) c.Assert(autoIncID, Equals, uint64(globalAutoID)) // Alter AutoIncID to the next AutoIncID + 100 - table.Schema.AutoIncID = globalAutoID + 100 + table.Info.AutoIncID = globalAutoID + 100 db, err := NewDB(s.mock.Storage) c.Assert(err, IsNil, Commentf("Error create DB")) tk.MustExec("drop database if exists test;") @@ -92,3 +92,39 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { c.Assert(err, IsNil, Commentf("Error query auto inc id: %s", err)) c.Assert(autoIncID, Equals, uint64(globalAutoID+100)) } + +func (s *testRestoreSchemaSuite) TestFilterDDLJobs(c *C) { + tk := testkit.NewTestKit(c, s.mock.Storage) + 
tk.MustExec("CREATE DATABASE IF NOT EXISTS test_db;") + tk.MustExec("CREATE TABLE IF NOT EXISTS test_db.test_table (c1 INT);") + lastTs, err := s.mock.GetOracle().GetTimestamp(context.Background()) + c.Assert(err, IsNil, Commentf("Error get last ts: %s", err)) + tk.MustExec("RENAME TABLE test_db.test_table to test_db.test_table1;") + tk.MustExec("DROP TABLE test_db.test_table1;") + tk.MustExec("DROP DATABASE test_db;") + tk.MustExec("CREATE DATABASE test_db;") + tk.MustExec("USE test_db;") + tk.MustExec("CREATE TABLE test_table1 (c2 CHAR(255));") + tk.MustExec("RENAME TABLE test_table1 to test_table;") + tk.MustExec("TRUNCATE TABLE test_table;") + + ts, err := s.mock.GetOracle().GetTimestamp(context.Background()) + c.Assert(err, IsNil, Commentf("Error get ts: %s", err)) + allDDLJobs, err := backup.GetBackupDDLJobs(s.mock.Domain, lastTs, ts) + c.Assert(err, IsNil, Commentf("Error get ddl jobs: %s", err)) + infoSchema, err := s.mock.Domain.GetSnapshotInfoSchema(ts) + c.Assert(err, IsNil, Commentf("Error get snapshot info schema: %s", err)) + dbInfo, ok := infoSchema.SchemaByName(model.NewCIStr("test_db")) + c.Assert(ok, IsTrue, Commentf("DB info not exist")) + tableInfo, err := infoSchema.TableByName(model.NewCIStr("test_db"), model.NewCIStr("test_table")) + c.Assert(err, IsNil, Commentf("Error get table info: %s", err)) + tables := []*utils.Table{{ + Db: dbInfo, + Info: tableInfo.Meta(), + }} + ddlJobs := FilterDDLJobs(allDDLJobs, tables) + for _, job := range ddlJobs { + c.Logf("get ddl job: %s", job.Query) + } + c.Assert(len(ddlJobs), Equals, 7) +} diff --git a/pkg/restore/split.go b/pkg/restore/split.go index 3248fdd0d..378e256c6 100644 --- a/pkg/restore/split.go +++ b/pkg/restore/split.go @@ -3,12 +3,14 @@ package restore import ( "bytes" "context" + "strings" "time" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/log" + "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" ) @@ -103,8 +105,18 @@ SplitRegions: } for regionID, keys := range splitKeyMap { var newRegions []*RegionInfo - newRegions, err = rs.splitAndScatterRegions(ctx, regionMap[regionID], keys) + region := regionMap[regionID] + newRegions, err = rs.splitAndScatterRegions(ctx, region, keys) if err != nil { + if strings.Contains(err.Error(), "no valid key") { + for _, key := range keys { + log.Error("no valid key", + zap.Binary("startKey", region.Region.StartKey), + zap.Binary("endKey", region.Region.EndKey), + zap.Binary("key", codec.EncodeBytes([]byte{}, key))) + } + return errors.Trace(err) + } interval = 2 * interval if interval > SplitMaxRetryInterval { interval = SplitMaxRetryInterval @@ -115,6 +127,7 @@ SplitRegions: } continue SplitRegions } + log.Debug("split regions", zap.Stringer("region", region.Region), zap.ByteStrings("keys", keys)) scatterRegions = append(scatterRegions, newRegions...) 
onSplit(keys) } @@ -250,7 +263,7 @@ func getSplitKeys(rewriteRules *RewriteRules, ranges []Range, regions []*RegionI checkKeys = append(checkKeys, rule.GetNewKeyPrefix()) } for _, rg := range ranges { - checkKeys = append(checkKeys, rg.EndKey) + checkKeys = append(checkKeys, truncateRowKey(rg.EndKey)) } for _, key := range checkKeys { if region := needSplit(key, regions); region != nil { @@ -259,7 +272,10 @@ func getSplitKeys(rewriteRules *RewriteRules, ranges []Range, regions []*RegionI splitKeys = make([][]byte, 0, 1) } splitKeyMap[region.Region.GetId()] = append(splitKeys, key) - log.Debug("get key for split region", zap.Binary("key", key), zap.Stringer("region", region.Region)) + log.Debug("get key for split region", + zap.Binary("key", key), + zap.Binary("startKey", region.Region.StartKey), + zap.Binary("endKey", region.Region.EndKey)) } } return splitKeyMap @@ -285,6 +301,21 @@ func needSplit(splitKey []byte, regions []*RegionInfo) *RegionInfo { return nil } +var ( + tablePrefix = []byte{'t'} + idLen = 8 + recordPrefix = []byte("_r") +) + +func truncateRowKey(key []byte) []byte { + if bytes.HasPrefix(key, tablePrefix) && + len(key) > tablecodec.RecordRowKeyLen && + bytes.HasPrefix(key[len(tablePrefix)+idLen:], recordPrefix) { + return key[:tablecodec.RecordRowKeyLen] + } + return key +} + func beforeEnd(key []byte, end []byte) bool { return bytes.Compare(key, end) < 0 || len(end) == 0 } diff --git a/pkg/restore/split_test.go b/pkg/restore/split_test.go index 509c4cfa0..3ace5b8c8 100644 --- a/pkg/restore/split_test.go +++ b/pkg/restore/split_test.go @@ -280,7 +280,7 @@ func validateRegions(regions map[uint64]*RegionInfo) bool { return false } FindRegion: - for i := 1; i < 12; i++ { + for i := 1; i < len(keys); i++ { for _, region := range regions { startKey := []byte(keys[i-1]) if len(startKey) != 0 { @@ -299,3 +299,26 @@ FindRegion: } return true } + +func (s *testRestoreUtilSuite) TestNeedSplit(c *C) { + regions := []*RegionInfo{ + { + Region: &metapb.Region{ + StartKey: codec.EncodeBytes([]byte{}, []byte("b")), + EndKey: codec.EncodeBytes([]byte{}, []byte("d")), + }, + }, + } + // Out of region + c.Assert(needSplit([]byte("a"), regions), IsNil) + // Region start key + c.Assert(needSplit([]byte("b"), regions), IsNil) + // In region + region := needSplit([]byte("c"), regions) + c.Assert(bytes.Compare(region.Region.GetStartKey(), codec.EncodeBytes([]byte{}, []byte("b"))), Equals, 0) + c.Assert(bytes.Compare(region.Region.GetEndKey(), codec.EncodeBytes([]byte{}, []byte("d"))), Equals, 0) + // Region end key + c.Assert(needSplit([]byte("d"), regions), IsNil) + // Out of region + c.Assert(needSplit([]byte("e"), regions), IsNil) +} diff --git a/pkg/task/backup.go b/pkg/task/backup.go index b9613cd56..240754517 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -6,8 +6,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" + "github.com/pingcap/parser/model" "github.com/pingcap/tidb-tools/pkg/filter" "github.com/spf13/pflag" + "go.uber.org/zap" "github.com/pingcap/br/pkg/backup" "github.com/pingcap/br/pkg/storage" @@ -98,6 +100,19 @@ func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { return err } + ddlJobs := make([]*model.Job, 0) + if cfg.LastBackupTS > 0 { + err = backup.CheckGCSafepoint(ctx, mgr.GetPDClient(), cfg.LastBackupTS) + if err != nil { + log.Error("Check gc safepoint for last backup ts failed", zap.Error(err)) + return err + } + ddlJobs, err = backup.GetBackupDDLJobs(mgr.GetDomain(), cfg.LastBackupTS, backupTS) + if err != nil { 
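
A small, self-contained illustration of what the truncateRowKey helper above does to an over-long range end key. The key layout and the 19-byte record-key length mirror tablecodec ('t' + 8-byte table id + "_r" + 8-byte handle), but everything below is built by hand for the example and is not the production code path.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// recordRowKeyLen mirrors tablecodec.RecordRowKeyLen:
// 't' (1) + table id (8) + "_r" (2) + row handle (8) = 19 bytes.
const recordRowKeyLen = 1 + 8 + 2 + 8

var (
	tablePrefix  = []byte{'t'}
	recordPrefix = []byte("_r")
)

// truncateRowKey trims anything after the row handle, so split keys always
// fall on row boundaries (same shape as the helper added in split.go).
func truncateRowKey(key []byte) []byte {
	if bytes.HasPrefix(key, tablePrefix) &&
		len(key) > recordRowKeyLen &&
		bytes.HasPrefix(key[1+8:], recordPrefix) {
		return key[:recordRowKeyLen]
	}
	return key
}

func main() {
	// Build a toy row key for table 42, row 7, with one trailing byte such as
	// a range end key might carry.
	key := append([]byte{'t'}, make([]byte, 8)...)
	binary.BigEndian.PutUint64(key[1:], 42)
	key = append(key, recordPrefix...)
	handle := make([]byte, 8)
	binary.BigEndian.PutUint64(handle, 7)
	key = append(key, handle...)
	key = append(key, 0x00)

	fmt.Println(len(key), "->", len(truncateRowKey(key))) // 20 -> 19
}
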
+ return err + } + } + // The number of regions need to backup approximateRegions := 0 for _, r := range ranges { @@ -139,17 +154,25 @@ func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { return err } - valid, err := client.FastChecksum() - if err != nil { - return err - } - if !valid { - log.Error("backup FastChecksum mismatch!") + if cfg.LastBackupTS == 0 { + var valid bool + valid, err = client.FastChecksum() + if err != nil { + return err + } + if !valid { + log.Error("backup FastChecksum mismatch!") + return errors.Errorf("mismatched checksum") + } + + } else { + // Since we don't support checksum for incremental data, fast checksum should be skipped. + log.Info("Skip fast checksum in incremental backup") } // Checksum has finished close(updateCh) - err = client.SaveBackupMeta(ctx) + err = client.SaveBackupMeta(ctx, ddlJobs) if err != nil { return err } diff --git a/pkg/task/restore.go b/pkg/task/restore.go index a56a1d6da..599dcb478 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -103,6 +103,14 @@ func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error { return err } } + ddlJobs := restore.FilterDDLJobs(client.GetDDLJobs(), tables) + if err != nil { + return err + } + err = client.ExecDDLs(ddlJobs) + if err != nil { + return errors.Trace(err) + } rewriteRules, newTables, err := client.CreateTables(mgr.GetDomain(), tables, newTS) if err != nil { return err @@ -178,12 +186,12 @@ func filterRestoreFiles( for _, db := range client.GetDatabases() { createdDatabase := false for _, table := range db.Tables { - if !tableFilter.Match(&filter.Table{Schema: db.Schema.Name.O, Name: table.Schema.Name.O}) { + if !tableFilter.Match(&filter.Table{Schema: db.Info.Name.O, Name: table.Info.Name.O}) { continue } if !createdDatabase { - if err = client.CreateDatabase(db.Schema); err != nil { + if err = client.CreateDatabase(db.Info); err != nil { return nil, nil, err } createdDatabase = true diff --git a/pkg/utils/schema.go b/pkg/utils/schema.go index 67d28132f..0afe98e5b 100644 --- a/pkg/utils/schema.go +++ b/pkg/utils/schema.go @@ -24,7 +24,7 @@ const ( // Table wraps the schema and files of a table. type Table struct { Db *model.DBInfo - Schema *model.TableInfo + Info *model.TableInfo Crc64Xor uint64 TotalKvs uint64 TotalBytes uint64 @@ -33,14 +33,14 @@ type Table struct { // Database wraps the schema and tables of a database. type Database struct { - Schema *model.DBInfo + Info *model.DBInfo Tables []*Table } // GetTable returns a table of the database by name. func (db *Database) GetTable(name string) *Table { for _, table := range db.Tables { - if table.Schema.Name.String() == name { + if table.Info.Name.String() == name { return table } } @@ -61,7 +61,7 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { db, ok := databases[dbInfo.Name.String()] if !ok { db = &Database{ - Schema: dbInfo, + Info: dbInfo, Tables: make([]*Table, 0), } databases[dbInfo.Name.String()] = db @@ -94,7 +94,7 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { } table := &Table{ Db: dbInfo, - Schema: tableInfo, + Info: tableInfo, Crc64Xor: schema.Crc64Xor, TotalKvs: schema.TotalKvs, TotalBytes: schema.TotalBytes, diff --git a/tests/br_full_ddl/run.sh b/tests/br_full_ddl/run.sh index 1e40415d7..e50ef1ecf 100755 --- a/tests/br_full_ddl/run.sh +++ b/tests/br_full_ddl/run.sh @@ -36,7 +36,7 @@ done # backup full echo "backup start..." 
-br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG checksum_count=$(cat $LOG | grep "fast checksum success" | wc -l | xargs) @@ -50,7 +50,7 @@ run_sql "DROP DATABASE $DB;" # restore full echo "restore start..." -br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR +run_br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') diff --git a/tests/br_incremental/run.sh b/tests/br_incremental/run.sh index bb6a42efb..b6a6061de 100755 --- a/tests/br_incremental/run.sh +++ b/tests/br_incremental/run.sh @@ -20,55 +20,38 @@ TABLE="usertable" run_sql "CREATE DATABASE $DB;" go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB - -row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +row_count_ori_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') # full backup echo "full backup start..." -run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 - -run_sql "DROP TABLE $DB.$TABLE;" - -# full restore -echo "full restore start..." -run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB" --pd $PD_ADDR - -row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') - -if [ "$row_count_ori" -ne "$row_count_new" ];then - echo "TEST: [$TEST_NAME] full br failed!" - exit 1 -fi +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 go-ycsb run mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB -row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') -last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB" | tail -n1) - -# clean up data -rm -rf $TEST_DIR/$DB - # incremental backup echo "incremental backup start..." -run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts - -start_ts=$(br validate decode --field="start-version" -s "local://$TEST_DIR/$DB" | tail -n1) -end_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB" | tail -n1) +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts +row_count_ori_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') -echo "start version: $start_ts, end version: $end_ts" +run_sql "DROP DATABASE $DB;" +# full restore +echo "full restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${row_count_ori_full}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi # incremental restore echo "incremental restore start..." 
-run_br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR - -row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') - -echo "[original] row count: $row_count_ori, [after br] row count: $row_count_new" - -if [ "$row_count_ori" -eq "$row_count_new" ];then - echo "TEST: [$TEST_NAME] successed!" -else - echo "TEST: [$TEST_NAME] failed!" +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR +row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_inc}" != "${row_count_ori_inc}" ];then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" exit 1 fi diff --git a/tests/br_incremental_ddl/run.sh b/tests/br_incremental_ddl/run.sh new file mode 100755 index 000000000..d9a88709b --- /dev/null +++ b/tests/br_incremental_ddl/run.sh @@ -0,0 +1,74 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" +TABLE="usertable" +ROW_COUNT=100 +PATH="tests/$TEST_NAME:bin:$PATH" + +echo "load data..." +# create database +run_sql "CREATE DATABASE IF NOT EXISTS $DB;" +# create table +run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE}(c1) VALUES ($i);" +done + +# full backup +echo "full backup start..." +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 +# run ddls +echo "run ddls..." +run_sql "RENAME TABLE ${DB}.${TABLE} to ${DB}.${TABLE}1;" +run_sql "DROP TABLE ${DB}.${TABLE}1;" +run_sql "DROP DATABASE ${DB};" +run_sql "CREATE DATABASE ${DB};" +run_sql "CREATE TABLE ${DB}.${TABLE}1 (c2 CHAR(255));" +run_sql "RENAME TABLE ${DB}.${TABLE}1 to ${DB}.${TABLE};" +run_sql "TRUNCATE TABLE ${DB}.${TABLE};" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE}(c2) VALUES ('$i');" +done +# incremental backup +echo "incremental backup start..." +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts + +run_sql "DROP DATABASE $DB;" +# full restore +echo "full restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi +# incremental restore +echo "incremental restore start..." 
+run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR +row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_inc}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" + exit 1 +fi +run_sql "INSERT INTO ${DB}.${TABLE}(c2) VALUES ('1');" + +run_sql "DROP DATABASE $DB;" diff --git a/tests/br_incremental_index/run.sh b/tests/br_incremental_index/run.sh new file mode 100755 index 000000000..f4b4b9de7 --- /dev/null +++ b/tests/br_incremental_index/run.sh @@ -0,0 +1,74 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" +TABLE="usertable" +ROW_COUNT=100 +PATH="tests/$TEST_NAME:bin:$PATH" + +echo "load data..." +# create database +run_sql "CREATE DATABASE IF NOT EXISTS $DB;" +# create table +run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE} VALUES ($i);" +done + +# full backup +echo "backup full start..." +run_sql "CREATE INDEX idx_c1 ON ${DB}.${TABLE}(c1)" & +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4 +wait +# run ddls +echo "run ddls..." +run_sql "ALTER TABLE ${DB}.${TABLE} ADD COLUMN c2 INT NOT NULL;"; +run_sql "ALTER TABLE ${DB}.${TABLE} ADD COLUMN c3 INT NOT NULL;"; +run_sql "ALTER TABLE ${DB}.${TABLE} DROP COLUMN c3;"; +# incremental backup +echo "incremental backup start..." +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts + +run_sql "DROP DATABASE $DB;" +# full restore +echo "full restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi +# incremental restore +echo "incremental restore start..." 
+run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR +row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_inc}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" + exit 1 +fi +run_sql "INSERT INTO ${DB}.${TABLE} VALUES (1, 1);" +row_count_insert=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check insert count +if [ "${row_count_insert}" != "$(expr $row_count_inc + 1)" ];then + echo "TEST: [$TEST_NAME] insert record fail on database $DB" + exit 1 +fi + +run_sql "DROP DATABASE $DB;" diff --git a/tests/config/tikv.toml b/tests/config/tikv.toml index e93a16597..73323d878 100644 --- a/tests/config/tikv.toml +++ b/tests/config/tikv.toml @@ -11,3 +11,4 @@ max-open-files = 4096 [raftstore] # true (default value) for high reliability, this can prevent data loss when power failure. sync-log = false +capacity = "10GB" \ No newline at end of file diff --git a/tests/run.sh b/tests/run.sh index 3cedc7093..a4edb762a 100755 --- a/tests/run.sh +++ b/tests/run.sh @@ -24,6 +24,7 @@ TIDB_ADDR="127.0.0.1:4000" TIDB_STATUS_ADDR="127.0.0.1:10080" # actaul tikv_addr are TIKV_ADDR${i} TIKV_ADDR="127.0.0.1:2016" +TIKV_STATUS_ADDR="127.0.0.1:2018" TIKV_COUNT=4 stop_services() { @@ -55,6 +56,7 @@ start_services() { bin/tikv-server \ --pd "$PD_ADDR" \ -A "$TIKV_ADDR$i" \ + --status-addr "$TIKV_STATUS_ADDR$i" \ --log-file "$TEST_DIR/tikv${i}.log" \ -C "tests/config/tikv.toml" \ -s "$TEST_DIR/tikv${i}" & From 4492b8fc8a60bb9b1e275bc95b906c85a477e0d4 Mon Sep 17 00:00:00 2001 From: kennytm Date: Sat, 22 Feb 2020 17:46:38 +0800 Subject: [PATCH 4/5] Reduce TiDB dependencies (#158) * utils: exclude mock_cluster outside of unit test * utils: remove unused ResultSetToStringSlice() * *: abstract away dependencies of tidb/session into a Glue interface * *: fix hound lint * util,mock: move utils.MockCluster to mock.Cluster * restore: fix test build failure Co-authored-by: 3pointer --- cmd/backup.go | 2 +- cmd/cmd.go | 2 + cmd/restore.go | 2 +- pkg/backup/safe_point_test.go | 6 +-- pkg/backup/schema_test.go | 10 ++--- pkg/checksum/executor_test.go | 7 ++-- pkg/conn/conn.go | 7 ++-- pkg/glue/glue.go | 24 +++++++++++ pkg/gluetidb/glue.go | 65 +++++++++++++++++++++++++++++ pkg/{utils => mock}/mock_cluster.go | 16 +++---- pkg/mock/mock_cluster_test.go | 27 ++++++++++++ pkg/restore/backoff_test.go | 5 ++- pkg/restore/client.go | 4 +- pkg/restore/client_test.go | 10 +++-- pkg/restore/db.go | 36 +++++++--------- pkg/restore/db_test.go | 8 ++-- pkg/restore/util.go | 9 ---- pkg/task/backup.go | 5 ++- pkg/task/common.go | 5 ++- pkg/task/restore.go | 7 ++-- pkg/utils/mock_cluster_test.go | 27 ------------ pkg/utils/schema.go | 33 --------------- 22 files changed, 186 insertions(+), 131 deletions(-) create mode 100644 pkg/glue/glue.go create mode 100644 pkg/gluetidb/glue.go rename pkg/{utils => mock}/mock_cluster.go (93%) create mode 100644 pkg/mock/mock_cluster_test.go delete mode 100644 pkg/utils/mock_cluster_test.go diff --git a/cmd/backup.go b/cmd/backup.go index 39aa4fd28..8ae45270c 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -15,7 +15,7 @@ func runBackupCommand(command *cobra.Command, cmdName string) error { if err := cfg.ParseFromFlags(command.Flags()); err != nil { return err } - return task.RunBackup(GetDefaultContext(), cmdName, &cfg) + return task.RunBackup(GetDefaultContext(), tidbGlue, cmdName, &cfg) } // NewBackupCommand return a full 
backup subcommand. diff --git a/cmd/cmd.go b/cmd/cmd.go index fdadaa6f8..83355e5dd 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -13,6 +13,7 @@ import ( "github.com/spf13/cobra" "go.uber.org/zap" + "github.com/pingcap/br/pkg/gluetidb" "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) @@ -21,6 +22,7 @@ var ( initOnce = sync.Once{} defaultContext context.Context hasLogFile uint64 + tidbGlue = gluetidb.Glue{} ) const ( diff --git a/cmd/restore.go b/cmd/restore.go index 2dfec9846..9f7c47bdb 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -14,7 +14,7 @@ func runRestoreCommand(command *cobra.Command, cmdName string) error { if err := cfg.ParseFromFlags(command.Flags()); err != nil { return err } - return task.RunRestore(GetDefaultContext(), cmdName, &cfg) + return task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg) } // NewRestoreCommand returns a restore subcommand diff --git a/pkg/backup/safe_point_test.go b/pkg/backup/safe_point_test.go index 1bea9e210..5a4939191 100644 --- a/pkg/backup/safe_point_test.go +++ b/pkg/backup/safe_point_test.go @@ -8,18 +8,18 @@ import ( pd "github.com/pingcap/pd/client" "github.com/pingcap/tidb/util/testleak" - "github.com/pingcap/br/pkg/utils" + "github.com/pingcap/br/pkg/mock" ) var _ = Suite(&testSaftPointSuite{}) type testSaftPointSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testSaftPointSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } diff --git a/pkg/backup/schema_test.go b/pkg/backup/schema_test.go index f657310bf..a1514ba4a 100644 --- a/pkg/backup/schema_test.go +++ b/pkg/backup/schema_test.go @@ -9,18 +9,18 @@ import ( "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" - "github.com/pingcap/br/pkg/utils" + "github.com/pingcap/br/pkg/mock" ) var _ = Suite(&testBackupSchemaSuite{}) type testBackupSchemaSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testBackupSchemaSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -77,7 +77,7 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { <-updateCh c.Assert(err, IsNil) c.Assert(len(schemas), Equals, 1) - // MockCluster returns a dummy checksum (all fields are 1). + // Cluster returns a dummy checksum (all fields are 1). c.Assert(schemas[0].Crc64Xor, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalKvs, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalBytes, Not(Equals), 0, Commentf("%v", schemas[0])) @@ -97,7 +97,7 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { <-updateCh c.Assert(err, IsNil) c.Assert(len(schemas), Equals, 2) - // MockCluster returns a dummy checksum (all fields are 1). + // Cluster returns a dummy checksum (all fields are 1). 
c.Assert(schemas[0].Crc64Xor, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalKvs, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalBytes, Not(Equals), 0, Commentf("%v", schemas[0])) diff --git a/pkg/checksum/executor_test.go b/pkg/checksum/executor_test.go index 3e6d8078c..e9db6267b 100644 --- a/pkg/checksum/executor_test.go +++ b/pkg/checksum/executor_test.go @@ -12,6 +12,7 @@ import ( "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tipb/go-tipb" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) @@ -22,12 +23,12 @@ func TestT(t *testing.T) { var _ = Suite(&testChecksumSuite{}) type testChecksumSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testChecksumSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -61,7 +62,7 @@ func (s *testChecksumSuite) TestChecksum(c *C) { c.Assert(len(exe1.reqs), Equals, 1) resp, err := exe1.Execute(context.TODO(), s.mock.Storage.GetClient(), func() {}) c.Assert(err, IsNil) - // MockCluster returns a dummy checksum (all fields are 1). + // Cluster returns a dummy checksum (all fields are 1). c.Assert(resp.Checksum, Equals, uint64(1), Commentf("%v", resp)) c.Assert(resp.TotalKvs, Equals, uint64(1), Commentf("%v", resp)) c.Assert(resp.TotalBytes, Equals, uint64(1), Commentf("%v", resp)) diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index 3695a2a0c..6ef1e87eb 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -20,13 +20,14 @@ import ( "github.com/pingcap/log" pd "github.com/pingcap/pd/client" "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/keepalive" + + "github.com/pingcap/br/pkg/glue" ) const ( @@ -87,7 +88,7 @@ func pdRequest( } // NewMgr creates a new Mgr. -func NewMgr(ctx context.Context, pdAddrs string, storage tikv.Storage) (*Mgr, error) { +func NewMgr(ctx context.Context, g glue.Glue, pdAddrs string, storage tikv.Storage) (*Mgr, error) { addrs := strings.Split(pdAddrs, ",") failure := errors.Errorf("pd address (%s) has wrong format", pdAddrs) @@ -130,7 +131,7 @@ func NewMgr(ctx context.Context, pdAddrs string, storage tikv.Storage) (*Mgr, er return nil, errors.Errorf("tikv cluster not health %+v", stores) } - dom, err := session.BootstrapSession(storage) + dom, err := g.BootstrapSession(storage) if err != nil { return nil, errors.Trace(err) } diff --git a/pkg/glue/glue.go b/pkg/glue/glue.go new file mode 100644 index 000000000..bc6fb214e --- /dev/null +++ b/pkg/glue/glue.go @@ -0,0 +1,24 @@ +package glue + +import ( + "context" + + "github.com/pingcap/parser/model" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" +) + +// Glue is an abstraction of TiDB function calls used in BR. +type Glue interface { + BootstrapSession(store kv.Storage) (*domain.Domain, error) + CreateSession(store kv.Storage) (Session, error) +} + +// Session is an abstraction of the session.Session interface. 
+type Session interface { + Execute(ctx context.Context, sql string) error + ShowCreateDatabase(schema *model.DBInfo) (string, error) + ShowCreateTable(table *model.TableInfo, allocator autoid.Allocator) (string, error) + Close() +} diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go new file mode 100644 index 000000000..6b9f2f667 --- /dev/null +++ b/pkg/gluetidb/glue.go @@ -0,0 +1,65 @@ +package gluetidb + +import ( + "bytes" + "context" + + "github.com/pingcap/parser/model" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/session" + + "github.com/pingcap/br/pkg/glue" +) + +// Glue is an implementation of glue.Glue using a new TiDB session. +type Glue struct{} + +type tidbSession struct { + se session.Session +} + +// BootstrapSession implements glue.Glue +func (Glue) BootstrapSession(store kv.Storage) (*domain.Domain, error) { + return session.BootstrapSession(store) +} + +// CreateSession implements glue.Glue +func (Glue) CreateSession(store kv.Storage) (glue.Session, error) { + se, err := session.CreateSession(store) + if err != nil { + return nil, err + } + return &tidbSession{se: se}, nil +} + +// Execute implements glue.Session +func (gs *tidbSession) Execute(ctx context.Context, sql string) error { + _, err := gs.se.Execute(ctx, sql) + return err +} + +// ShowCreateDatabase implements glue.Session +func (gs *tidbSession) ShowCreateDatabase(schema *model.DBInfo) (string, error) { + var buf bytes.Buffer + if err := executor.ConstructResultOfShowCreateDatabase(gs.se, schema, true, &buf); err != nil { + return "", err + } + return buf.String(), nil +} + +// ShowCreateTable implements glue.Session +func (gs *tidbSession) ShowCreateTable(table *model.TableInfo, allocator autoid.Allocator) (string, error) { + var buf bytes.Buffer + if err := executor.ConstructResultOfShowCreateTable(gs.se, table, allocator, &buf); err != nil { + return "", err + } + return buf.String(), nil +} + +// Close implements glue.Session +func (gs *tidbSession) Close() { + gs.se.Close() +} diff --git a/pkg/utils/mock_cluster.go b/pkg/mock/mock_cluster.go similarity index 93% rename from pkg/utils/mock_cluster.go rename to pkg/mock/mock_cluster.go index dc7b87c3c..aee9666ed 100644 --- a/pkg/utils/mock_cluster.go +++ b/pkg/mock/mock_cluster.go @@ -1,4 +1,4 @@ -package utils +package mock import ( "database/sql" @@ -28,8 +28,8 @@ import ( var pprofOnce sync.Once -// MockCluster is mock tidb cluster, includes tikv and pd. -type MockCluster struct { +// Cluster is mock tidb cluster, includes tikv and pd. +type Cluster struct { *server.Server *mocktikv.Cluster mocktikv.MVCCStore @@ -40,8 +40,8 @@ type MockCluster struct { PDClient pd.Client } -// NewMockCluster create a new mock cluster. -func NewMockCluster() (*MockCluster, error) { +// NewCluster create a new mock cluster. +func NewCluster() (*Cluster, error) { pprofOnce.Do(func() { go func() { // Make sure pprof is registered. 
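
One payoff of the Glue/Session indirection introduced above is that restore code can be exercised against something other than a live tidb session. The sketch below shows that idea with the interface narrowed to two methods for brevity; the real glue.Session also exposes ShowCreateDatabase and ShowCreateTable, and the fake here is an invented example, not part of the patch.

package main

import (
	"context"
	"fmt"
)

// sqlExecutor is a narrowed, illustrative slice of glue.Session: just enough
// to show why the indirection helps testing.
type sqlExecutor interface {
	Execute(ctx context.Context, sql string) error
	Close()
}

// recordingSession is a fake that records SQL instead of touching TiDB.
type recordingSession struct {
	executed []string
}

func (r *recordingSession) Execute(_ context.Context, sql string) error {
	r.executed = append(r.executed, sql)
	return nil
}

func (r *recordingSession) Close() {}

func main() {
	var se sqlExecutor = &recordingSession{}
	_ = se.Execute(context.Background(), "CREATE DATABASE IF NOT EXISTS demo")
	se.Close()
	fmt.Println(se.(*recordingSession).executed)
}
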
@@ -72,7 +72,7 @@ func NewMockCluster() (*MockCluster, error) { if err != nil { return nil, err } - return &MockCluster{ + return &Cluster{ Cluster: cluster, MVCCStore: mvccStore, Storage: storage, @@ -82,7 +82,7 @@ func NewMockCluster() (*MockCluster, error) { } // Start runs a mock cluster -func (mock *MockCluster) Start() error { +func (mock *Cluster) Start() error { statusURL, err := url.Parse(tempurl.Alloc()) if err != nil { return err @@ -124,7 +124,7 @@ func (mock *MockCluster) Start() error { } // Stop stops a mock cluster -func (mock *MockCluster) Stop() { +func (mock *Cluster) Stop() { if mock.Domain != nil { mock.Domain.Close() } diff --git a/pkg/mock/mock_cluster_test.go b/pkg/mock/mock_cluster_test.go new file mode 100644 index 000000000..e7ffc6e85 --- /dev/null +++ b/pkg/mock/mock_cluster_test.go @@ -0,0 +1,27 @@ +package mock + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testClusterSuite{}) + +type testClusterSuite struct { + mock *Cluster +} + +func (s *testClusterSuite) SetUpSuite(c *C) { + var err error + s.mock, err = NewCluster() + c.Assert(err, IsNil) +} + +func (s *testClusterSuite) TearDownSuite(c *C) { + testleak.AfterTest(c)() +} + +func (s *testClusterSuite) TestSmoke(c *C) { + c.Assert(s.mock.Start(), IsNil) + s.mock.Stop() +} diff --git a/pkg/restore/backoff_test.go b/pkg/restore/backoff_test.go index 537f0980c..73161a9f6 100644 --- a/pkg/restore/backoff_test.go +++ b/pkg/restore/backoff_test.go @@ -7,18 +7,19 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) var _ = Suite(&testBackofferSuite{}) type testBackofferSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testBackofferSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } diff --git a/pkg/restore/client.go b/pkg/restore/client.go index f45b3d510..a06617084 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -23,6 +23,7 @@ import ( "google.golang.org/grpc/keepalive" "github.com/pingcap/br/pkg/checksum" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" ) @@ -53,11 +54,12 @@ type Client struct { // NewRestoreClient returns a new RestoreClient func NewRestoreClient( ctx context.Context, + g glue.Glue, pdClient pd.Client, store kv.Storage, ) (*Client, error) { ctx, cancel := context.WithCancel(ctx) - db, err := NewDB(store) + db, err := NewDB(g, store) if err != nil { cancel() return nil, errors.Trace(err) diff --git a/pkg/restore/client_test.go b/pkg/restore/client_test.go index 3d608b3b9..b67bbcfd7 100644 --- a/pkg/restore/client_test.go +++ b/pkg/restore/client_test.go @@ -12,18 +12,20 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/br/pkg/gluetidb" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) var _ = Suite(&testRestoreClientSuite{}) type testRestoreClientSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testRestoreClientSuite) SetUpTest(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -36,7 +38,7 @@ func (s *testRestoreClientSuite) TestCreateTables(c *C) { defer s.mock.Stop() client := Client{} - db, err := NewDB(s.mock.Storage) + db, err := NewDB(gluetidb.Glue{}, s.mock.Storage) c.Assert(err, 
IsNil) client.db = db client.ctx = context.Background() @@ -93,7 +95,7 @@ func (s *testRestoreClientSuite) TestIsOnline(c *C) { defer s.mock.Stop() client := Client{} - db, err := NewDB(s.mock.Storage) + db, err := NewDB(gluetidb.Glue{}, s.mock.Storage) c.Assert(err, IsNil) client.db = db client.ctx = context.Background() diff --git a/pkg/restore/db.go b/pkg/restore/db.go index 8c09af16f..22a1a4794 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -1,7 +1,6 @@ package restore import ( - "bytes" "context" "fmt" "sort" @@ -10,27 +9,26 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/pingcap/parser/model" - "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/session" "go.uber.org/zap" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/utils" ) // DB is a TiDB instance, not thread-safe. type DB struct { - se session.Session + se glue.Session } // NewDB returns a new DB -func NewDB(store kv.Storage) (*DB, error) { - se, err := session.CreateSession(store) +func NewDB(g glue.Glue, store kv.Storage) (*DB, error) { + se, err := g.CreateSession(store) if err != nil { return nil, errors.Trace(err) } // Set SQL mode to None for avoiding SQL compatibility problem - _, err = se.Execute(context.Background(), "set @@sql_mode=''") + err = se.Execute(context.Background(), "set @@sql_mode=''") if err != nil { return nil, errors.Trace(err) } @@ -44,7 +42,7 @@ func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error { var err error if ddlJob.BinlogInfo.TableInfo != nil { switchDbSQL := fmt.Sprintf("use %s;", ddlJob.SchemaName) - _, err = db.se.Execute(ctx, switchDbSQL) + err = db.se.Execute(ctx, switchDbSQL) if err != nil { log.Error("switch db failed", zap.String("query", switchDbSQL), @@ -53,7 +51,7 @@ func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error { return errors.Trace(err) } } - _, err = db.se.Execute(ctx, ddlJob.Query) + err = db.se.Execute(ctx, ddlJob.Query) if err != nil { log.Error("execute ddl query failed", zap.String("query", ddlJob.Query), @@ -66,14 +64,12 @@ func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error { // CreateDatabase executes a CREATE DATABASE SQL. func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { - var buf bytes.Buffer - err := executor.ConstructResultOfShowCreateDatabase(db.se, schema, true, &buf) + createSQL, err := db.se.ShowCreateDatabase(schema) if err != nil { log.Error("build create database SQL failed", zap.Stringer("db", schema.Name), zap.Error(err)) return errors.Trace(err) } - createSQL := buf.String() - _, err = db.se.Execute(ctx, createSQL) + err = db.se.Execute(ctx, createSQL) if err != nil { log.Error("create database failed", zap.String("query", createSQL), zap.Error(err)) } @@ -82,9 +78,8 @@ func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { // CreateTable executes a CREATE TABLE SQL. 
func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { - var buf bytes.Buffer schema := table.Info - err := executor.ConstructResultOfShowCreateTable(db.se, schema, newIDAllocator(schema.AutoIncID), &buf) + createSQL, err := db.se.ShowCreateTable(schema, newIDAllocator(schema.AutoIncID)) if err != nil { log.Error( "build create table SQL failed", @@ -93,8 +88,8 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { zap.Error(err)) return errors.Trace(err) } - switchDbSQL := fmt.Sprintf("use %s;", table.Db.Name) - _, err = db.se.Execute(ctx, switchDbSQL) + switchDbSQL := fmt.Sprintf("use %s;", utils.EncloseName(table.Db.Name.O)) + err = db.se.Execute(ctx, switchDbSQL) if err != nil { log.Error("switch db failed", zap.String("SQL", switchDbSQL), @@ -102,13 +97,12 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { zap.Error(err)) return errors.Trace(err) } - createSQL := buf.String() // Insert `IF NOT EXISTS` statement to skip the created tables words := strings.SplitN(createSQL, " ", 3) if len(words) > 2 && strings.ToUpper(words[0]) == "CREATE" && strings.ToUpper(words[1]) == "TABLE" { createSQL = "CREATE TABLE IF NOT EXISTS " + words[2] } - _, err = db.se.Execute(ctx, createSQL) + err = db.se.Execute(ctx, createSQL) if err != nil { log.Error("create table failed", zap.String("SQL", createSQL), @@ -119,9 +113,9 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { } alterAutoIncIDSQL := fmt.Sprintf( "alter table %s auto_increment = %d", - escapeTableName(schema.Name), + utils.EncloseName(schema.Name.O), schema.AutoIncID) - _, err = db.se.Execute(ctx, alterAutoIncIDSQL) + err = db.se.Execute(ctx, alterAutoIncIDSQL) if err != nil { log.Error("alter AutoIncID failed", zap.String("query", alterAutoIncIDSQL), diff --git a/pkg/restore/db_test.go b/pkg/restore/db_test.go index 0151b4da6..b1e9e947c 100644 --- a/pkg/restore/db_test.go +++ b/pkg/restore/db_test.go @@ -13,18 +13,20 @@ import ( "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/gluetidb" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) var _ = Suite(&testRestoreSchemaSuite{}) type testRestoreSchemaSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testRestoreSchemaSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) c.Assert(s.mock.Start(), IsNil) } @@ -70,7 +72,7 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { c.Assert(autoIncID, Equals, uint64(globalAutoID)) // Alter AutoIncID to the next AutoIncID + 100 table.Info.AutoIncID = globalAutoID + 100 - db, err := NewDB(s.mock.Storage) + db, err := NewDB(gluetidb.Glue{}, s.mock.Storage) c.Assert(err, IsNil, Commentf("Error create DB")) tk.MustExec("drop database if exists test;") // Test empty collate value diff --git a/pkg/restore/util.go b/pkg/restore/util.go index 63ee92969..64ccfab19 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -324,12 +324,3 @@ func encodeKeyPrefix(key []byte) []byte { encodedPrefix = append(encodedPrefix, codec.EncodeBytes([]byte{}, key[:len(key)-ungroupedLen])...) return append(encodedPrefix[:len(encodedPrefix)-9], key[len(key)-ungroupedLen:]...) } - -// escape the identifier for pretty-printing. -// For instance, the identifier "foo `bar`" will become "`foo ``bar```". 
-// The sqlMode controls whether to escape with backquotes (`) or double quotes -// (`"`) depending on whether mysql.ModeANSIQuotes is enabled. -func escapeTableName(cis model.CIStr) string { - quote := "`" - return quote + strings.Replace(cis.O, quote, quote+quote, -1) + quote -} diff --git a/pkg/task/backup.go b/pkg/task/backup.go index 240754517..ead4c2351 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -12,6 +12,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" @@ -61,7 +62,7 @@ func (cfg *BackupConfig) ParseFromFlags(flags *pflag.FlagSet) error { } // RunBackup starts a backup task inside the current goroutine. -func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { +func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig) error { ctx, cancel := context.WithCancel(c) defer cancel() @@ -73,7 +74,7 @@ func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { if err != nil { return err } - mgr, err := newMgr(ctx, cfg.PD) + mgr, err := newMgr(ctx, g, cfg.PD) if err != nil { return err } diff --git a/pkg/task/common.go b/pkg/task/common.go index 2433d94b9..c3f866492 100644 --- a/pkg/task/common.go +++ b/pkg/task/common.go @@ -15,6 +15,7 @@ import ( "github.com/spf13/pflag" "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/utils" ) @@ -178,7 +179,7 @@ func (cfg *Config) ParseFromFlags(flags *pflag.FlagSet) error { } // newMgr creates a new mgr at the given PD address. -func newMgr(ctx context.Context, pds []string) (*conn.Mgr, error) { +func newMgr(ctx context.Context, g glue.Glue, pds []string) (*conn.Mgr, error) { pdAddress := strings.Join(pds, ",") if len(pdAddress) == 0 { return nil, errors.New("pd address can not be empty") @@ -189,7 +190,7 @@ func newMgr(ctx context.Context, pds []string) (*conn.Mgr, error) { if err != nil { return nil, err } - return conn.NewMgr(ctx, pdAddress, store.(tikv.Storage)) + return conn.NewMgr(ctx, g, pdAddress, store.(tikv.Storage)) } // GetStorage gets the storage backend from the config. diff --git a/pkg/task/restore.go b/pkg/task/restore.go index 599dcb478..bb00d189d 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -11,6 +11,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/restore" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" @@ -55,17 +56,17 @@ func (cfg *RestoreConfig) ParseFromFlags(flags *pflag.FlagSet) error { } // RunRestore starts a restore task inside the current goroutine. 
-func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error { +func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConfig) error { ctx, cancel := context.WithCancel(c) defer cancel() - mgr, err := newMgr(ctx, cfg.PD) + mgr, err := newMgr(ctx, g, cfg.PD) if err != nil { return err } defer mgr.Close() - client, err := restore.NewRestoreClient(ctx, mgr.GetPDClient(), mgr.GetTiKV()) + client, err := restore.NewRestoreClient(ctx, g, mgr.GetPDClient(), mgr.GetTiKV()) if err != nil { return err } diff --git a/pkg/utils/mock_cluster_test.go b/pkg/utils/mock_cluster_test.go deleted file mode 100644 index 42cacae9c..000000000 --- a/pkg/utils/mock_cluster_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package utils - -import ( - . "github.com/pingcap/check" - "github.com/pingcap/tidb/util/testleak" -) - -var _ = Suite(&testMockClusterSuite{}) - -type testMockClusterSuite struct { - mock *MockCluster -} - -func (s *testMockClusterSuite) SetUpSuite(c *C) { - var err error - s.mock, err = NewMockCluster() - c.Assert(err, IsNil) -} - -func (s *testMockClusterSuite) TearDownSuite(c *C) { - testleak.AfterTest(c)() -} - -func (s *testMockClusterSuite) TestSmoke(c *C) { - c.Assert(s.mock.Start(), IsNil) - s.mock.Stop() -} diff --git a/pkg/utils/schema.go b/pkg/utils/schema.go index 0afe98e5b..e1aec1225 100644 --- a/pkg/utils/schema.go +++ b/pkg/utils/schema.go @@ -2,16 +2,13 @@ package utils import ( "bytes" - "context" "encoding/json" "strings" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/parser/model" - "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/tablecodec" - "github.com/pingcap/tidb/util/sqlexec" ) const ( @@ -106,36 +103,6 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { return databases, nil } -// ResultSetToStringSlice changes the RecordSet to [][]string. 
port from tidb -func ResultSetToStringSlice(ctx context.Context, s session.Session, rs sqlexec.RecordSet) ([][]string, error) { - rows, err := session.GetRows4Test(ctx, s, rs) - if err != nil { - return nil, err - } - err = rs.Close() - if err != nil { - return nil, err - } - sRows := make([][]string, len(rows)) - for i := range rows { - row := rows[i] - iRow := make([]string, row.Len()) - for j := 0; j < row.Len(); j++ { - if row.IsNull(j) { - iRow[j] = "" - } else { - d := row.GetDatum(j, &rs.Fields()[j].Column.FieldType) - iRow[j], err = d.ToString() - if err != nil { - return nil, err - } - } - } - sRows[i] = iRow - } - return sRows, nil -} - // EncloseName formats name in sql func EncloseName(name string) string { return "`" + strings.ReplaceAll(name, "`", "``") + "`" From 3c9d42fdab5f07fb65360ab545613f90531b7eb0 Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Sun, 23 Feb 2020 17:07:58 +0800 Subject: [PATCH 5/5] go.mod: update tidb (#168) Signed-off-by: Neil Shen --- go.mod | 15 ++++++------ go.sum | 77 ++++++++++++++++++++++------------------------------------ 2 files changed, 36 insertions(+), 56 deletions(-) diff --git a/go.mod b/go.mod index 7d9e6b77e..02aaa51ef 100644 --- a/go.mod +++ b/go.mod @@ -18,15 +18,15 @@ require ( github.com/mattn/go-runewidth v0.0.7 // indirect github.com/onsi/ginkgo v1.10.3 // indirect github.com/onsi/gomega v1.7.1 // indirect - github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 + github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 - github.com/pingcap/kvproto v0.0.0-20200210234432-a965739f8162 - github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 - github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d - github.com/pingcap/pd v1.1.0-beta.0.20191219054547-4d65bbefbc6d - github.com/pingcap/tidb v1.1.0-beta.0.20200110130413-8c3ee37c1938 + github.com/pingcap/kvproto v0.0.0-20200217103621-528e82bf7248 + github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd + github.com/pingcap/parser v0.0.0-20200218113622-517beb2e39c2 + github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497 + github.com/pingcap/tidb v1.1.0-beta.0.20200223044457-aedea3ec5e1e github.com/pingcap/tidb-tools v4.0.0-beta+incompatible - github.com/pingcap/tipb v0.0.0-20191227083941-3996eff010dc + github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 github.com/prometheus/client_golang v1.0.0 github.com/sirupsen/logrus v1.4.2 github.com/spf13/cobra v0.0.5 @@ -35,7 +35,6 @@ require ( go.uber.org/zap v1.13.0 golang.org/x/net v0.0.0-20191011234655-491137f69257 // indirect golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 - golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 // indirect google.golang.org/api v0.14.0 google.golang.org/grpc v1.25.1 ) diff --git a/go.sum b/go.sum index 24f73f5c9..a58f8bf55 100644 --- a/go.sum +++ b/go.sum @@ -117,7 +117,6 @@ github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZp github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= 
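
The EncloseName helper kept in pkg/utils/schema.go (shown earlier in this hunk) is what the restore path now uses in place of the removed escapeTableName. A tiny standalone copy, showing its behaviour on an identifier that itself contains a backquote; the function body is transcribed from the hunk, the surrounding program is only for illustration.

package main

import (
	"fmt"
	"strings"
)

// encloseName mirrors utils.EncloseName: wrap the identifier in backquotes
// and double any backquote it already contains.
func encloseName(name string) string {
	return "`" + strings.ReplaceAll(name, "`", "``") + "`"
}

func main() {
	fmt.Println(encloseName("foo `bar`"))           // `foo ``bar```
	fmt.Printf("use %s;\n", encloseName("test_db")) // use `test_db`;
}
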
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= @@ -136,11 +135,12 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -180,7 +180,6 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= @@ -221,7 +220,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -242,7 +240,6 @@ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= 
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -280,12 +277,14 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.3.0 h1:e5+lF2E4Y2WCIxBefVowBuB0iHrUH4HZ8q+6mGF7fJc= github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= +github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4 h1:iRtOAQ6FXkY/BGvst3CDfTva4nTqh6CL8WXvanLdbu0= github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= -github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 h1:rfD9v3+ppLPzoQBgZev0qYCpegrwyFx/BUpkApEiKdY= -github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= +github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 h1:R8gStypOBmpnHEx1qi//SaqxJVI4inOqljg/Aj5/390= +github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= @@ -295,52 +294,52 @@ github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 h1:58naV4XMEqm0h github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d h1:F8vp38kTAckN+v8Jlc98uMBvKIzr1a+UhnLyVYn8Q5Q= github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= +github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798 h1:6DMbRqPI1qzQ8N1xc3+nKY8IxSACd9VqQKkRVvbyoIg= +github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d h1:rCmRK0lCRrHMUbS99BKFYhK9YxJDNw0xB033cQbYo0s= github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d/go.mod h1:fMRU1BA1y+r89AxUoaAar4JjrhUkVDt0o0Np6V8XbDQ= github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20191213111810-93cb7c623c8b/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5 h1:RUxQExD5yubAjWGnw8kcxfO9abbiVHIE1rbuCyQCWDE= -github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5/go.mod 
h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20200210234432-a965739f8162 h1:lsoIoCoXMpcHvW6jHcqP/prA4I6duAp1DVyG2ULz4bM= -github.com/pingcap/kvproto v0.0.0-20200210234432-a965739f8162/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200217103621-528e82bf7248 h1:DhGKu4ACa5v0Z70J1NWrc9ti+OqihhxmyzsK7YDTpVQ= +github.com/pingcap/kvproto v0.0.0-20200217103621-528e82bf7248/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d h1:4QwSJRxmBjTB9ssJNWg2f2bDm5rqnHCUUjMh4N1QOOY= -github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= -github.com/pingcap/pd v1.1.0-beta.0.20191219054547-4d65bbefbc6d h1:Ui80aiLTyd0EZD56o2tjFRYpHfhazBjtBdKeR8UoTFY= -github.com/pingcap/pd v1.1.0-beta.0.20191219054547-4d65bbefbc6d/go.mod h1:CML+b1JVjN+VbDijaIcUSmuPgpDjXEY7UiOx5yDP8eE= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= +github.com/pingcap/parser v0.0.0-20200218113622-517beb2e39c2 h1:DsymejjOFdljM1q0BJ8yBZUYQ7718+7JTO046Rqd/3k= +github.com/pingcap/parser v0.0.0-20200218113622-517beb2e39c2/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497 h1:FzLErYtcXnSxtC469OuVDlgBbh0trJZzNxw0mNKzyls= +github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497/go.mod h1:cfT/xu4Zz+Tkq95QrLgEBZ9ikRcgzy4alHqqoaTftqI= github.com/pingcap/sysutil v0.0.0-20191216090214-5f9620d22b3b h1:EEyo/SCRswLGuSk+7SB86Ak1p8bS6HL1Mi4Dhyuv6zg= github.com/pingcap/sysutil v0.0.0-20191216090214-5f9620d22b3b/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= -github.com/pingcap/tidb v1.1.0-beta.0.20200110130413-8c3ee37c1938 h1:Jt9ENNiS1ZNC9jV2Pd3wdegXQYFq3U6z1xFlzZNMNC8= -github.com/pingcap/tidb v1.1.0-beta.0.20200110130413-8c3ee37c1938/go.mod h1:DlMN+GGqC/WpREnzcH8xgxbXnntjybLhT84AbUSvMVM= +github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd h1:k7CIHMFVKjHsda3PKkiN4zv++NEnexlUwiJEhryWpG0= +github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/tidb v1.1.0-beta.0.20200223044457-aedea3ec5e1e h1:HPSJdnkI6mt0qEIbSkJzVsq99929Ki5VblkJMmlqhI0= +github.com/pingcap/tidb v1.1.0-beta.0.20200223044457-aedea3ec5e1e/go.mod h1:zzO/kysmwHMkr0caH2NmuSAKLdsySXKDQGTCYrb7Gx8= github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible h1:H1jg0aDWz2SLRh3hNBo2HFtnuHtudIUvBumU7syRkic= github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tidb-tools v4.0.0-beta+incompatible h1:+XJdcVLCM8GDgXiMS6lFV59N3XPVOqtNHeWNLVrb2pg= github.com/pingcap/tidb-tools v4.0.0-beta+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= -github.com/pingcap/tipb v0.0.0-20191227083941-3996eff010dc h1:IOKsFObJ4GZwAgyuhdJKg3oKCzWcoBFfHhpq2TOn5H0= -github.com/pingcap/tipb 
v0.0.0-20191227083941-3996eff010dc/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 h1:aJPXrT1u4VfUSGFA2oQVwl4pOXzqe+YI6wed01cjDH4= +github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -359,7 +358,6 @@ github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49 github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca h1:3fECS8atRjByijiI8yYiuwLwQ2ZxXobW7ua/8GRB3pI= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= @@ -401,9 +399,7 @@ github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1 github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod 
h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.0 h1:OHbgr8l656Ub3Fw5k9SWnBfIEwvoHQ+W2y+Aa9D1Uyo= github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20= github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= @@ -415,10 +411,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yookoala/realpath v1.0.0 h1:7OA9pj4FZd+oZDsyvXWQvjn5oBdcHRTV44PpdMSuImQ= github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 h1:lWF4f9Nypl1ZqSb4gLeh/DGvBYVaUYHuiB93teOmwgc= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -449,13 +443,11 @@ go.uber.org/zap v1.12.0 h1:dySoUQPFBGj6xwjmBzageVL8jGi8uxc6bEmJQjA06bw= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -477,13 +469,14 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= 
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -500,8 +493,6 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= -golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191011234655-491137f69257 h1:ry8e2D+cwaV6hk7lb3aRTjjZo24shrbK0e11QEOkTIg= golang.org/x/net v0.0.0-20191011234655-491137f69257/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -531,8 +522,6 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190909082730-f460065e899a h1:mIzbOulag9/gXacgxKlFVwpCOWSfBT3/pDyyCwGA9as= -golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -570,13 +559,11 @@ golang.org/x/tools v0.0.0-20191107010934-f79515f33823 
h1:akkRBeitX2EZP59KdtKw310 golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 h1:BKiPVwWbEdmAh+5CBwk13CYeVJQRDJpDnKgDyMOGz9M= -golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4 h1:Toz2IK7k8rbltAXwNAxKcn9OzqyNfMUhUNjz3sL0NMk= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200206050830-dd0d5d485177 h1:E2vxBajJgSA3TcJhDGTh/kP3VnsvXKl9jSijv+h7svQ= +golang.org/x/tools v0.0.0-20200206050830-dd0d5d485177/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -591,7 +578,6 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -601,14 +587,11 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs= -google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= 
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 h1:6XzpBoANz1NqMNfDXzc2QmHmbb1vyMsvRfoP5rM+K1I= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -617,7 +600,6 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= @@ -629,7 +611,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=