diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 000000000..674895cd1 --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,10 @@ +codecov: + require_ci_to_pass: yes + +coverage: + status: + project: + default: + # Allow the coverage to drop by 3% + threshold: 3% + patch: off diff --git a/.dockerignore b/.dockerignore new file mode 120000 index 000000000..3e4e48b0b --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/.gitignore b/.gitignore index e104ab6e8..e61a56bde 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,5 @@ backupmeta *.ngo *.coverprofile coverage.txt +docker/data/ +docker/logs/ diff --git a/.golangci.yml b/.golangci.yml index 969cac759..1b025678e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -9,7 +9,8 @@ issues: text: "Potential HTTP request made with variable url" linters: - gosec - - path: .go - text: "Use of weak random number generator" + # TODO Remove it. + - path: split_client.go + text: "SA1019:" linters: - - gosec + - staticcheck diff --git a/LICENSE.md b/LICENSE.md index 675c2ec95..4eedc0116 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -186,7 +186,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -198,4 +198,4 @@ Apache License distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License. diff --git a/Makefile b/Makefile index 839a27b9e..779bfdb10 100644 --- a/Makefile +++ b/Makefile @@ -12,25 +12,28 @@ LDFLAGS += -X "$(BR_PKG)/pkg/utils.BRBuildTS=$(shell date -u '+%Y-%m-%d %I:%M:%S LDFLAGS += -X "$(BR_PKG)/pkg/utils.BRGitHash=$(shell git rev-parse HEAD)" LDFLAGS += -X "$(BR_PKG)/pkg/utils.BRGitBranch=$(shell git rev-parse --abbrev-ref HEAD)" -all: check test build +ifeq ("$(WITH_RACE)", "1") + RACEFLAG = -race +endif -release: - GO111MODULE=on go build -ldflags '$(LDFLAGS)' -o bin/br +all: check test build build: - GO111MODULE=on go build -ldflags '$(LDFLAGS)' -race -o bin/br + GO111MODULE=on go build -ldflags '$(LDFLAGS)' ${RACEFLAG} -o bin/br build_for_integration_test: GO111MODULE=on go test -c -cover -covermode=count \ -coverpkg=$(BR_PKG)/... \ -o bin/br.test # build key locker - GO111MODULE=on go build -race -o bin/locker tests/br_key_locked/*.go + GO111MODULE=on go build ${RACEFLAG} -o bin/locker tests/br_key_locked/*.go # build gc - GO111MODULE=on go build -race -o bin/gc tests/br_z_gc_safepoint/*.go + GO111MODULE=on go build ${RACEFLAG} -o bin/gc tests/br_z_gc_safepoint/*.go + # build rawkv client + GO111MODULE=on go build ${RACEFLAG} -o bin/rawkv tests/br_rawkv/*.go test: - GO111MODULE=on go test -race -tags leak ./... + GO111MODULE=on go test ${RACEFLAG} -tags leak ./... 
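The `WITH_RACE` conditional introduced above makes the Go race detector opt-in instead of hard-coded into every build. A minimal usage sketch, assuming GNU make and only the targets defined in this Makefile:

```sh
# Default: build bin/br without the race detector (RACEFLAG expands to nothing).
make build

# Opt in to the race detector for local debugging; RACEFLAG becomes -race.
make build WITH_RACE=1

# The same switch applies to the test target and the integration-test helper binaries.
make test WITH_RACE=1
```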
testcover: GO111MODULE=on retool do overalls \ @@ -46,6 +49,7 @@ integration_test: build build_for_integration_test @which bin/pd-server @which bin/pd-ctl @which bin/go-ycsb + @which bin/minio @which bin/br tests/run.sh @@ -71,6 +75,12 @@ static: --disable interfacer \ --disable goimports \ --disable gofmt \ + --disable wsl \ + --disable funlen \ + --disable whitespace \ + --disable gocognit \ + --disable godox \ + --disable gomnd \ $$($(PACKAGE_DIRECTORIES)) lint: diff --git a/README.md b/README.md index 55444fdec..6207d98eb 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,41 @@ Notice BR supports building with Go version `Go >= 1.13` When BR is built successfully, you can find binary in the `bin` directory. +## Quick start + +```sh +# Start TiDB cluster +docker-compose -f docker-compose.yaml rm -s -v && \ +docker-compose -f docker-compose.yaml build && \ +docker-compose -f docker-compose.yaml up --remove-orphans + +# Attach to control container to run BR +docker exec -it br_control_1 bash + +# Load testing data to TiDB +go-ycsb load mysql -p workload=core \ + -p mysql.host=tidb -p mysql.port=4000 -p mysql.user=root \ + -p recordcount=100000 -p threadcount=100 + +# How many rows do we get? 100000 rows. +mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" + +# Build BR and backup! +make build && \ +bin/br backup full --pd pd0:2379 --storage "local:///data/backup/full" \ + --log-file "/logs/br_backup.log" + +# Let's drop the database. +mysql -uroot -htidb -P4000 -E -e "DROP DATABASE test; SHOW DATABASES;" + +# Restore! +bin/br restore full --pd pd0:2379 --storage "local:///data/backup/full" \ + --log-file "/logs/br_restore.log" + +# How many rows do we get again? Expected to be 100000 rows. +mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" +``` + ## Contributing Contributions are welcomed and greatly appreciated. See [CONTRIBUTING](./CONTRIBUTING.md) diff --git a/cmd/backup.go b/cmd/backup.go index 73ae6106f..3aed2147f 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -1,183 +1,40 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package cmd import ( - "context" - - "github.com/pingcap/errors" - "github.com/pingcap/log" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/session" "github.com/spf13/cobra" - "github.com/spf13/pflag" - "github.com/pingcap/br/pkg/backup" - "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/gluetikv" "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) -const ( - flagBackupTimeago = "timeago" - flagBackupRateLimit = "ratelimit" - flagBackupRateLimitUnit = "ratelimit-unit" - flagBackupConcurrency = "concurrency" - flagBackupChecksum = "checksum" - flagLastBackupTS = "lastbackupts" -) - -func defineBackupFlags(flagSet *pflag.FlagSet) { - flagSet.StringP( - flagBackupTimeago, "", "", - "The history version of the backup task, e.g. 1m, 1h. Do not exceed GCSafePoint") - flagSet.Uint64P( - flagBackupRateLimit, "", 0, "The rate limit of the backup task, MB/s per node") - flagSet.Uint32P( - flagBackupConcurrency, "", 4, "The size of thread pool on each node that execute the backup task") - flagSet.BoolP(flagBackupChecksum, "", true, - "Run checksum after backup") - flagSet.Uint64P(flagLastBackupTS, "", 0, "the last time backup ts") - _ = flagSet.MarkHidden(flagLastBackupTS) - - // Test only flag.
- flagSet.Uint64P( - flagBackupRateLimitUnit, "", utils.MB, "The unit of rate limit of the backup task") - _ = flagSet.MarkHidden(flagBackupRateLimitUnit) -} - -func runBackup(flagSet *pflag.FlagSet, cmdName, db, table string) error { - ctx, cancel := context.WithCancel(defaultContext) - defer cancel() - - mgr, err := GetDefaultMgr() - if err != nil { - return err - } - defer mgr.Close() - - timeago, err := flagSet.GetString(flagBackupTimeago) - if err != nil { - return err - } - - ratelimit, err := flagSet.GetUint64(flagBackupRateLimit) - if err != nil { - return err - } - ratelimitUnit, err := flagSet.GetUint64(flagBackupRateLimitUnit) - if err != nil { - return err - } - ratelimit *= ratelimitUnit - - concurrency, err := flagSet.GetUint32(flagBackupConcurrency) - if err != nil { - return err - } - if concurrency == 0 { - err = errors.New("at least one thread required") - return err - } - - checksum, err := flagSet.GetBool(flagBackupChecksum) - if err != nil { - return err - } - - lastBackupTS, err := flagSet.GetUint64(flagLastBackupTS) - if err != nil { - return nil - } - - u, err := storage.ParseBackendFromFlags(flagSet, FlagStorage) - if err != nil { - return err - } - - client, err := backup.NewBackupClient(ctx, mgr) - if err != nil { - return nil - } - - err = client.SetStorage(ctx, u) - if err != nil { - return err - } - - backupTS, err := client.GetTS(ctx, timeago) - if err != nil { +func runBackupCommand(command *cobra.Command, cmdName string) error { + cfg := task.BackupConfig{Config: task.Config{LogProgress: HasLogFile()}} + if err := cfg.ParseFromFlags(command.Flags()); err != nil { return err } + return task.RunBackup(GetDefaultContext(), tidbGlue, cmdName, &cfg) +} - defer summary.Summary(cmdName) - - ranges, backupSchemas, err := backup.BuildBackupRangeAndSchema( - mgr.GetDomain(), mgr.GetTiKV(), backupTS, db, table) - if err != nil { - return err - } - - // The number of regions need to backup - approximateRegions := 0 - for _, r := range ranges { - var regionCount int - regionCount, err = mgr.GetRegionCount(ctx, r.StartKey, r.EndKey) - if err != nil { - return err - } - approximateRegions += regionCount - } - - summary.CollectInt("backup total regions", approximateRegions) - // Backup - // Redirect to log if there is no log file to avoid unreadable output. 
- updateCh := utils.StartProgress( - ctx, cmdName, int64(approximateRegions), !HasLogFile()) - err = client.BackupRanges( - ctx, ranges, lastBackupTS, backupTS, ratelimit, concurrency, updateCh) - if err != nil { - return err - } - // Backup has finished - close(updateCh) - - // Checksum - backupSchemasConcurrency := backup.DefaultSchemaConcurrency - if backupSchemas.Len() < backupSchemasConcurrency { - backupSchemasConcurrency = backupSchemas.Len() - } - updateCh = utils.StartProgress( - ctx, "Checksum", int64(backupSchemas.Len()), !HasLogFile()) - backupSchemas.SetSkipChecksum(!checksum) - backupSchemas.Start( - ctx, mgr.GetTiKV(), backupTS, uint(backupSchemasConcurrency), updateCh) - - err = client.CompleteMeta(backupSchemas) - if err != nil { - return err - } - - valid, err := client.FastChecksum() - if err != nil { - return err - } - if !valid { - log.Error("backup FastChecksum failed!") - } - // Checksum has finished - close(updateCh) - - err = client.SaveBackupMeta(ctx) - if err != nil { +func runBackupRawCommand(command *cobra.Command, cmdName string) error { + cfg := task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}} + if err := cfg.ParseFromFlags(command.Flags()); err != nil { return err } - return nil + return task.RunBackupRaw(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg) } // NewBackupCommand return a full backup subcommand. func NewBackupCommand() *cobra.Command { command := &cobra.Command{ - Use: "backup", - Short: "backup a TiDB cluster", + Use: "backup", + Short: "backup a TiDB/TiKV cluster", + SilenceUsage: false, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { return err @@ -198,9 +55,10 @@ func NewBackupCommand() *cobra.Command { newFullBackupCommand(), newDbBackupCommand(), newTableBackupCommand(), + newRawBackupCommand(), ) - defineBackupFlags(command.PersistentFlags()) + task.DefineBackupFlags(command.PersistentFlags()) return command } @@ -211,7 +69,7 @@ func newFullBackupCommand() *cobra.Command { Short: "backup all database", RunE: func(command *cobra.Command, _ []string) error { // empty db/table means full backup. 
- return runBackup(command.Flags(), "Full backup", "", "") + return runBackupCommand(command, "Full backup") }, } return command @@ -223,19 +81,10 @@ func newDbBackupCommand() *cobra.Command { Use: "db", Short: "backup a database", RunE: func(command *cobra.Command, _ []string) error { - db, err := command.Flags().GetString(flagDatabase) - if err != nil { - return err - } - if len(db) == 0 { - return errors.Errorf("empty database name is not allowed") - } - return runBackup(command.Flags(), "Database backup", db, "") + return runBackupCommand(command, "Database backup") }, } - command.Flags().StringP(flagDatabase, "", "", "backup a table in the specific db") - _ = command.MarkFlagRequired(flagDatabase) - + task.DefineDatabaseFlags(command) return command } @@ -245,26 +94,24 @@ func newTableBackupCommand() *cobra.Command { Use: "table", Short: "backup a table", RunE: func(command *cobra.Command, _ []string) error { - db, err := command.Flags().GetString(flagDatabase) - if err != nil { - return err - } - if len(db) == 0 { - return errors.Errorf("empty database name is not allowed") - } - table, err := command.Flags().GetString(flagTable) - if err != nil { - return err - } - if len(table) == 0 { - return errors.Errorf("empty table name is not allowed") - } - return runBackup(command.Flags(), "Table backup", db, table) + return runBackupCommand(command, "Table backup") }, } - command.Flags().StringP(flagDatabase, "", "", "backup a table in the specific db") - command.Flags().StringP(flagTable, "t", "", "backup the specific table") - _ = command.MarkFlagRequired(flagDatabase) - _ = command.MarkFlagRequired(flagTable) + task.DefineTableFlags(command) return command +} + +// newRawBackupCommand returns a raw kv range backup subcommand. +func newRawBackupCommand() *cobra.Command { + // TODO: remove experimental tag if it's stable + command := &cobra.Command{ + Use: "raw", + Short: "(experimental) backup a raw kv range from TiKV cluster", + RunE: func(command *cobra.Command, _ []string) error { + return runBackupRawCommand(command, "Raw backup") + }, + } + + task.DefineRawBackupFlags(command) return command } diff --git a/cmd/cmd.go b/cmd/cmd.go index 468c35232..5b2801894 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -1,48 +1,33 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package cmd import ( "context" - "fmt" "net/http" "net/http/pprof" "sync" "sync/atomic" - "github.com/pingcap/errors" "github.com/pingcap/log" - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/util/logutil" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "go.uber.org/zap" - "github.com/pingcap/br/pkg/conn" - "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/gluetidb" + "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) var ( initOnce = sync.Once{} defaultContext context.Context - pdAddress string hasLogFile uint64 - - connOnce = sync.Once{} - defaultMgr *conn.Mgr + tidbGlue = gluetidb.Glue{} ) const ( - // FlagPD is the name of url flag. - FlagPD = "pd" - // FlagCA is the name of CA flag. - FlagCA = "ca" - // FlagCert is the name of cert flag. - FlagCert = "cert" - // FlagKey is the name of key flag. - FlagKey = "key" - // FlagStorage is the name of storage flag. - FlagStorage = "storage" // FlagLogLevel is the name of log-level flag. FlagLogLevel = "log-level" // FlagLogFile is the name of log-file flag. @@ -52,9 +37,6 @@ const ( // FlagSlowLogFile is the name of slow-log-file flag.
FlagSlowLogFile = "slow-log-file" - flagDatabase = "db" - flagTable = "table" - flagVersion = "version" flagVersionShort = "V" ) @@ -65,19 +47,13 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().BoolP(flagVersion, flagVersionShort, false, "Display version information about BR") cmd.SetVersionTemplate("{{printf \"%s\" .Version}}\n") - cmd.PersistentFlags().StringP(FlagPD, "u", "127.0.0.1:2379", "PD address") - cmd.PersistentFlags().String(FlagCA, "", "CA certificate path for TLS connection") - cmd.PersistentFlags().String(FlagCert, "", "Certificate path for TLS connection") - cmd.PersistentFlags().String(FlagKey, "", "Private key path for TLS connection") - cmd.PersistentFlags().StringP(FlagStorage, "s", "", - `specify the url where backup storage, eg, "local:///path/to/save"`) cmd.PersistentFlags().StringP(FlagLogLevel, "L", "info", "Set the log level") cmd.PersistentFlags().String(FlagLogFile, "", "Set the log file path. If not set, logs will output to stdout") cmd.PersistentFlags().String(FlagStatusAddr, "", "Set the HTTP listening address for the status report service. Set to empty string to disable") - storage.DefineFlags(cmd.PersistentFlags()) + task.DefineCommonFlags(cmd.PersistentFlags()) cmd.PersistentFlags().StringP(FlagSlowLogFile, "", "", "Set the slow log file path. If not set, discard slow logs") @@ -112,16 +88,20 @@ func Init(cmd *cobra.Command) (err error) { err = e return } + tidbLogCfg := logutil.LogConfig{} if len(slowLogFilename) != 0 { - slowCfg := logutil.LogConfig{SlowQueryFile: slowLogFilename} - e = logutil.InitLogger(&slowCfg) - if e != nil { - err = e - return - } + tidbLogCfg.SlowQueryFile = slowLogFilename } else { // Hack! Discard slow log by setting log level to PanicLevel logutil.SlowQueryLogger.SetLevel(logrus.PanicLevel) + // Disable annoying TiDB Log. + // TODO: some error logs outputs randomly, we need to fix them in TiDB. + tidbLogCfg.Level = "fatal" + } + e = logutil.InitLogger(&tidbLogCfg) + if e != nil { + err = e + return } // Initialize the pprof server. @@ -140,12 +120,6 @@ func Init(cmd *cobra.Command) (err error) { } } }() - // Set the PD server address. - pdAddress, e = cmd.Flags().GetString(FlagPD) - if e != nil { - err = e - return - } }) return err } @@ -155,30 +129,6 @@ func HasLogFile() bool { return atomic.LoadUint64(&hasLogFile) != uint64(0) } -// GetDefaultMgr returns the default mgr for command line usage. -func GetDefaultMgr() (*conn.Mgr, error) { - if pdAddress == "" { - return nil, errors.New("pd address can not be empty") - } - - // Lazy initialize and defaultMgr - var err error - connOnce.Do(func() { - var storage kv.Storage - storage, err = tikv.Driver{}.Open( - // Disable GC because TiDB enables GC already. - fmt.Sprintf("tikv://%s?disableGC=true", pdAddress)) - if err != nil { - return - } - defaultMgr, err = conn.NewMgr(defaultContext, pdAddress, storage.(tikv.Storage)) - }) - if err != nil { - return nil, err - } - return defaultMgr, nil -} - // SetDefaultContext sets the default context for command line usage. func SetDefaultContext(ctx context.Context) { defaultContext = ctx diff --git a/cmd/restore.go b/cmd/restore.go index eee65ba86..bc74bea84 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -1,40 +1,50 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package cmd import ( - "context" - "strings" - - "github.com/gogo/protobuf/proto" - "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/backup" - "github.com/pingcap/log" "github.com/pingcap/tidb/session" "github.com/spf13/cobra" - flag "github.com/spf13/pflag" - "go.uber.org/zap" - "github.com/pingcap/br/pkg/conn" - "github.com/pingcap/br/pkg/restore" - "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/gluetikv" "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) -var schedulers = map[string]struct{}{ - "balance-leader-scheduler": {}, - "balance-hot-region-scheduler": {}, - "balance-region-scheduler": {}, +func runRestoreCommand(command *cobra.Command, cmdName string) error { + cfg := task.RestoreConfig{Config: task.Config{LogProgress: HasLogFile()}} + if err := cfg.ParseFromFlags(command.Flags()); err != nil { + return err + } + return task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg) +} + +func runRestoreRawCommand(command *cobra.Command, cmdName string) error { + cfg := task.RestoreRawConfig{ + RawKvConfig: task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}}, + } + if err := cfg.ParseFromFlags(command.Flags()); err != nil { + return err + } + return task.RunRestoreRaw(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg) +} + +func runRestoreTiflashReplicaCommand(command *cobra.Command, cmdName string) error { + cfg := task.RestoreConfig{Config: task.Config{LogProgress: HasLogFile()}} + if err := cfg.ParseFromFlags(command.Flags()); err != nil { + return err + } - "shuffle-leader-scheduler": {}, - "shuffle-region-scheduler": {}, - "shuffle-hot-region-scheduler": {}, + return task.RunRestoreTiflashReplica(GetDefaultContext(), tidbGlue, cmdName, &cfg) } // NewRestoreCommand returns a restore subcommand func NewRestoreCommand() *cobra.Command { command := &cobra.Command{ - Use: "restore", - Short: "restore a TiKV cluster from a backup", + Use: "restore", + Short: "restore a TiDB/TiKV cluster", + SilenceUsage: false, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { return err @@ -53,167 +63,20 @@ func NewRestoreCommand() *cobra.Command { newFullRestoreCommand(), newDbRestoreCommand(), newTableRestoreCommand(), + newRawRestoreCommand(), + newTiflashReplicaRestoreCommand(), ) - - command.PersistentFlags().Uint("concurrency", 128, - "The size of thread pool that execute the restore task") - command.PersistentFlags().Uint64("ratelimit", 0, - "The rate limit of the restore task, MB/s per node. 
Set to 0 for unlimited speed.") - command.PersistentFlags().BoolP("checksum", "", true, - "Run checksum after restore") - command.PersistentFlags().BoolP("online", "", false, - "Whether online when restore") - // TODO remove hidden flag if it's stable - _ = command.PersistentFlags().MarkHidden("online") + task.DefineRestoreFlags(command.PersistentFlags()) return command } -func runRestore(flagSet *flag.FlagSet, cmdName, dbName, tableName string) error { - ctx, cancel := context.WithCancel(GetDefaultContext()) - defer cancel() - - mgr, err := GetDefaultMgr() - if err != nil { - return err - } - defer mgr.Close() - - client, err := restore.NewRestoreClient( - ctx, mgr.GetPDClient(), mgr.GetTiKV()) - if err != nil { - return errors.Trace(err) - } - defer client.Close() - err = initRestoreClient(ctx, client, flagSet) - if err != nil { - return errors.Trace(err) - } - - files := make([]*backup.File, 0) - tables := make([]*utils.Table, 0) - - defer summary.Summary(cmdName) - - switch { - case len(dbName) == 0 && len(tableName) == 0: - // full restore - for _, db := range client.GetDatabases() { - err = client.CreateDatabase(db.Schema) - if err != nil { - return errors.Trace(err) - } - for _, table := range db.Tables { - files = append(files, table.Files...) - } - tables = append(tables, db.Tables...) - } - case len(dbName) != 0 && len(tableName) == 0: - // database restore - db := client.GetDatabase(dbName) - err = client.CreateDatabase(db.Schema) - if err != nil { - return errors.Trace(err) - } - for _, table := range db.Tables { - files = append(files, table.Files...) - } - tables = db.Tables - case len(dbName) != 0 && len(tableName) != 0: - // table restore - db := client.GetDatabase(dbName) - err = client.CreateDatabase(db.Schema) - if err != nil { - return errors.Trace(err) - } - table := db.GetTable(tableName) - files = table.Files - tables = append(tables, table) - default: - return errors.New("must set db when table was set") - } - var newTS uint64 - if client.IsIncremental() { - newTS, err = client.GetTS(ctx) - if err != nil { - return err - } - } - summary.CollectInt("restore files", len(files)) - rewriteRules, newTables, err := client.CreateTables(mgr.GetDomain(), tables, newTS) - if err != nil { - return errors.Trace(err) - } - ranges, err := restore.ValidateFileRanges(files, rewriteRules) - if err != nil { - return err - } - summary.CollectInt("restore ranges", len(ranges)) - - // Redirect to log if there is no log file to avoid unreadable output. - updateCh := utils.StartProgress( - ctx, - cmdName, - // Split/Scatter + Download/Ingest - int64(len(ranges)+len(files)), - !HasLogFile()) - - err = restore.SplitRanges(ctx, client, ranges, rewriteRules, updateCh) - if err != nil { - log.Error("split regions failed", zap.Error(err)) - return errors.Trace(err) - } - - if !client.IsIncremental() { - var pdAddr string - pdAddr, err = flagSet.GetString(FlagPD) - if err != nil { - return errors.Trace(err) - } - pdAddrs := strings.Split(pdAddr, ",") - err = client.ResetTS(pdAddrs) - if err != nil { - log.Error("reset pd TS failed", zap.Error(err)) - return errors.Trace(err) - } - } - - removedSchedulers, err := RestorePrepareWork(ctx, client, mgr) - if err != nil { - return errors.Trace(err) - } - - err = client.RestoreAll(rewriteRules, updateCh) - if err != nil { - return errors.Trace(err) - } - - err = RestorePostWork(ctx, client, mgr, removedSchedulers) - if err != nil { - return errors.Trace(err) - } - // Restore has finished. 
- close(updateCh) - - // Checksum - updateCh = utils.StartProgress( - ctx, "Checksum", int64(len(newTables)), !HasLogFile()) - err = client.ValidateChecksum( - ctx, mgr.GetTiKV().GetClient(), tables, newTables, updateCh) - if err != nil { - return err - } - close(updateCh) - - return nil -} - func newFullRestoreCommand() *cobra.Command { command := &cobra.Command{ Use: "full", Short: "restore all tables", RunE: func(cmd *cobra.Command, _ []string) error { - return runRestore(cmd.Flags(), "Full Restore", "", "") + return runRestoreCommand(cmd, "Full restore") }, } return command @@ -224,18 +87,10 @@ func newDbRestoreCommand() *cobra.Command { Use: "db", Short: "restore tables in a database", RunE: func(cmd *cobra.Command, _ []string) error { - db, err := cmd.Flags().GetString(flagDatabase) - if err != nil { - return err - } - if len(db) == 0 { - return errors.New("empty database name is not allowed") - } - return runRestore(cmd.Flags(), "Database Restore", db, "") + return runRestoreCommand(cmd, "Database restore") }, } - command.Flags().String(flagDatabase, "", "database name") - _ = command.MarkFlagRequired(flagDatabase) + task.DefineDatabaseFlags(command) return command } @@ -244,129 +99,33 @@ func newTableRestoreCommand() *cobra.Command { Use: "table", Short: "restore a table", RunE: func(cmd *cobra.Command, _ []string) error { - db, err := cmd.Flags().GetString(flagDatabase) - if err != nil { - return err - } - if len(db) == 0 { - return errors.New("empty database name is not allowed") - } - table, err := cmd.Flags().GetString(flagTable) - if err != nil { - return err - } - if len(table) == 0 { - return errors.New("empty table name is not allowed") - } - return runRestore(cmd.Flags(), "Table Restore", db, table) + return runRestoreCommand(cmd, "Table restore") }, } - - command.Flags().String(flagDatabase, "", "database name") - command.Flags().String(flagTable, "", "table name") - - _ = command.MarkFlagRequired(flagDatabase) - _ = command.MarkFlagRequired(flagTable) + task.DefineTableFlags(command) return command } -func initRestoreClient(ctx context.Context, client *restore.Client, flagSet *flag.FlagSet) error { - u, err := storage.ParseBackendFromFlags(flagSet, FlagStorage) - if err != nil { - return err - } - rateLimit, err := flagSet.GetUint64("ratelimit") - if err != nil { - return err - } - client.SetRateLimit(rateLimit * utils.MB) - s, err := storage.Create(ctx, u) - if err != nil { - return errors.Trace(err) - } - metaData, err := s.Read(ctx, utils.MetaFile) - if err != nil { - return errors.Trace(err) - } - backupMeta := &backup.BackupMeta{} - err = proto.Unmarshal(metaData, backupMeta) - if err != nil { - return errors.Trace(err) - } - err = client.InitBackupMeta(backupMeta, u) - if err != nil { - return errors.Trace(err) - } - - concurrency, err := flagSet.GetUint("concurrency") - if err != nil { - return err - } - client.SetConcurrency(concurrency) - - isOnline, err := flagSet.GetBool("online") - if err != nil { - return err - } - if isOnline { - client.EnableOnline() - } - - return nil -} - -// RestorePrepareWork execute some prepare work before restore -func RestorePrepareWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr) ([]string, error) { - if client.IsOnline() { - return nil, nil - } - err := client.SwitchToImportMode(ctx) - if err != nil { - return nil, errors.Trace(err) - } - existSchedulers, err := mgr.ListSchedulers(ctx) - if err != nil { - return nil, errors.Trace(err) - } - needRemoveSchedulers := make([]string, 0, len(existSchedulers)) - for _, s 
:= range existSchedulers { - if _, ok := schedulers[s]; ok { - needRemoveSchedulers = append(needRemoveSchedulers, s) - } - } - return removePDLeaderScheduler(ctx, mgr, needRemoveSchedulers) -} - -func removePDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, existSchedulers []string) ([]string, error) { - removedSchedulers := make([]string, 0, len(existSchedulers)) - for _, scheduler := range existSchedulers { - err := mgr.RemoveScheduler(ctx, scheduler) - if err != nil { - return nil, err - } - removedSchedulers = append(removedSchedulers, scheduler) +func newTiflashReplicaRestoreCommand() *cobra.Command { + command := &cobra.Command{ + Use: "tiflash-replica", + Short: "restore the tiflash replica before the last restore; it must only be used after the last restore failed", + RunE: func(cmd *cobra.Command, _ []string) error { + return runRestoreTiflashReplicaCommand(cmd, "Restore TiFlash Replica") + }, } - return removedSchedulers, nil + return command } -// RestorePostWork execute some post work after restore -func RestorePostWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr, removedSchedulers []string) error { - if client.IsOnline() { - return nil - } - err := client.SwitchToNormalMode(ctx) - if err != nil { - return errors.Trace(err) +func newRawRestoreCommand() *cobra.Command { + command := &cobra.Command{ + Use: "raw", + Short: "(experimental) restore a raw kv range to TiKV cluster", + RunE: func(cmd *cobra.Command, _ []string) error { + return runRestoreRawCommand(cmd, "Raw restore") + }, } - return addPDLeaderScheduler(ctx, mgr, removedSchedulers) -} -func addPDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, removedSchedulers []string) error { - for _, scheduler := range removedSchedulers { - err := mgr.AddScheduler(ctx, scheduler) - if err != nil { - return err - } - } - return nil + task.DefineRawRestoreFlags(command) return command } diff --git a/cmd/validate.go b/cmd/validate.go index dd1e11fb0..386a7bb47 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package cmd import ( @@ -14,21 +16,22 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - "github.com/pingcap/pd/pkg/mock/mockid" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" + "github.com/pingcap/pd/v4/pkg/mock/mockid" "github.com/spf13/cobra" "go.uber.org/zap" "github.com/pingcap/br/pkg/restore" - "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/rtree" + "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) // NewValidateCommand return a debug subcommand.
func NewValidateCommand() *cobra.Command { meta := &cobra.Command{ - Use: "validate ", - Short: "commands to check/debug backup data", + Use: "validate ", + Short: "commands to check/debug backup data", + SilenceUsage: false, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { return err @@ -55,24 +58,14 @@ func newCheckSumCommand() *cobra.Command { ctx, cancel := context.WithCancel(GetDefaultContext()) defer cancel() - u, err := storage.ParseBackendFromFlags(cmd.Flags(), FlagStorage) - if err != nil { + var cfg task.Config + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { return err } - s, err := storage.Create(ctx, u) - if err != nil { - return errors.Trace(err) - } - metaData, err := s.Read(ctx, utils.MetaFile) + _, s, backupMeta, err := task.ReadBackupMeta(ctx, utils.MetaFile, &cfg) if err != nil { - return errors.Trace(err) - } - - backupMeta := &backup.BackupMeta{} - err = proto.Unmarshal(metaData, backupMeta) - if err != nil { - return errors.Trace(err) + return err } dbs, err := utils.LoadBackupTables(backupMeta) @@ -153,24 +146,14 @@ func newBackupMetaCommand() *cobra.Command { if err != nil { return err } - u, err := storage.ParseBackendFromFlags(cmd.Flags(), FlagStorage) - if err != nil { - return err - } - s, err := storage.Create(ctx, u) - if err != nil { - log.Error("create storage failed", zap.Error(err)) - return errors.Trace(err) - } - data, err := s.Read(ctx, utils.MetaFile) - if err != nil { - log.Error("load backupmeta failed", zap.Error(err)) + + var cfg task.Config + if err = cfg.ParseFromFlags(cmd.Flags()); err != nil { return err } - backupMeta := &backup.BackupMeta{} - err = proto.Unmarshal(data, backupMeta) + _, _, backupMeta, err := task.ReadBackupMeta(ctx, utils.MetaFile, &cfg) if err != nil { - log.Error("parse backupmeta failed", zap.Error(err)) + log.Error("read backupmeta failed", zap.Error(err)) return err } dbs, err := utils.LoadBackupTables(backupMeta) @@ -187,15 +170,15 @@ func newBackupMetaCommand() *cobra.Command { tables = append(tables, db.Tables...) 
} // Check if the ranges of files overlapped - rangeTree := restore_util.NewRangeTree() + rangeTree := rtree.NewRangeTree() for _, file := range files { - if out := rangeTree.InsertRange(restore_util.Range{ + if out := rangeTree.InsertRange(rtree.Range{ StartKey: file.GetStartKey(), EndKey: file.GetEndKey(), }); out != nil { log.Error( "file ranges overlapped", - zap.Stringer("out", out.(*restore_util.Range)), + zap.Stringer("out", out), zap.Stringer("file", file), ) } @@ -206,7 +189,7 @@ for offset := uint64(0); offset < tableIDOffset; offset++ { _, _ = tableIDAllocator.Alloc() // Ignore error } - rewriteRules := &restore_util.RewriteRules{ + rewriteRules := &restore.RewriteRules{ Table: make([]*import_sstpb.RewriteRule, 0), Data: make([]*import_sstpb.RewriteRule, 0), } @@ -217,19 +200,19 @@ newTable := new(model.TableInfo) tableID, _ := tableIDAllocator.Alloc() newTable.ID = int64(tableID) - newTable.Name = table.Schema.Name - newTable.Indices = make([]*model.IndexInfo, len(table.Schema.Indices)) - for i, indexInfo := range table.Schema.Indices { + newTable.Name = table.Info.Name + newTable.Indices = make([]*model.IndexInfo, len(table.Info.Indices)) + for i, indexInfo := range table.Info.Indices { indexID, _ := indexIDAllocator.Alloc() newTable.Indices[i] = &model.IndexInfo{ ID: int64(indexID), Name: indexInfo.Name, } } - rules := restore.GetRewriteRules(newTable, table.Schema, 0) + rules := restore.GetRewriteRules(newTable, table.Info, 0) rewriteRules.Table = append(rewriteRules.Table, rules.Table...) rewriteRules.Data = append(rewriteRules.Data, rules.Data...) - tableIDMap[table.Schema.ID] = int64(tableID) + tableIDMap[table.Info.ID] = int64(tableID) } // Validate rewrite rules for _, file := range files { @@ -242,8 +225,7 @@ return nil }, } - command.Flags().String("path", "", "the path of backupmeta") - command.Flags().Uint64P("offset", "", 0, "the offset of table id alloctor") + command.Flags().Uint64("offset", 0, "the offset of table id allocator") command.Hidden = true return command } @@ -255,24 +237,16 @@ RunE: func(cmd *cobra.Command, args []string) error { ctx, cancel := context.WithCancel(GetDefaultContext()) defer cancel() - u, err := storage.ParseBackendFromFlags(cmd.Flags(), FlagStorage) - if err != nil { - return errors.Trace(err) - } - s, err := storage.Create(ctx, u) - if err != nil { - return errors.Trace(err) + + var cfg task.Config + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { + return err } - metaData, err := s.Read(ctx, utils.MetaFile) + _, s, backupMeta, err := task.ReadBackupMeta(ctx, utils.MetaFile, &cfg) if err != nil { - return errors.Trace(err) + return err } - backupMeta := &backup.BackupMeta{} - err = proto.Unmarshal(metaData, backupMeta) - if err != nil { - return errors.Trace(err) - } backupMetaJSON, err := json.Marshal(backupMeta) if err != nil { return errors.Trace(err) } @@ -310,14 +284,16 @@ RunE: func(cmd *cobra.Command, args []string) error { ctx, cancel := context.WithCancel(GetDefaultContext()) defer cancel() - u, err := storage.ParseBackendFromFlags(cmd.Flags(), FlagStorage) - if err != nil { - return errors.Trace(err) + + var cfg task.Config + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { + return err } - s, err := storage.Create(ctx, u) + _, s, err := task.GetStorage(ctx, &cfg) if err != nil { -
return errors.Trace(err) + return err } + metaData, err := s.Read(ctx, utils.MetaJSONFile) if err != nil { return errors.Trace(err) diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 000000000..4d84c67fa --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,194 @@ +--- +# Source: tidb-docker-compose/templates/docker-compose.yml +version: '2.1' + +services: + control: + image: control:latest + build: + context: . + dockerfile: ./docker/Dockerfile + volumes: + - ./docker/data:/data + - ./docker/logs:/logs + command: -c "/usr/bin/tail -f /dev/null" + depends_on: + - "tidb" + restart: on-failure + + pd0: + image: pingcap/pd:latest + ports: + - "2379" + volumes: + - ./docker/config/pd.toml:/pd.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --name=pd0 + - --client-urls=http://0.0.0.0:2379 + - --peer-urls=http://0.0.0.0:2380 + - --advertise-client-urls=http://pd0:2379 + - --advertise-peer-urls=http://pd0:2380 + - --initial-cluster=pd0=http://pd0:2380 + - --data-dir=/data/pd0 + - --config=/pd.toml + - --log-file=/logs/pd0.log + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv0: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv0:20160 + - --data-dir=/data/tikv0 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv0.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv1: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv1:20160 + - --data-dir=/data/tikv1 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv1.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv2: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv2:20160 + - --data-dir=/data/tikv2 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv2.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv3: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv3:20160 + - --data-dir=/data/tikv3 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv3.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv4: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv4:20160 + - --data-dir=/data/tikv4 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv4.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tidb: + image: 
pingcap/tidb:latest ports: - "4000:4000" - "10080:10080" volumes: - ./docker/config/tidb.toml:/tidb.toml:ro - ./docker/logs:/logs command: - --store=tikv - --path=pd0:2379 - --config=/tidb.toml - --log-file=/logs/tidb.log - --advertise-address=tidb depends_on: - "tikv0" - "tikv1" - "tikv2" - "tikv3" - "tikv4" # sysctls: # net.core.somaxconn: 32768 # ulimits: # nofile: # soft: 1000000 # hard: 1000000 restart: on-failure tidb-vision: image: pingcap/tidb-vision:latest environment: PD_ENDPOINT: pd0:2379 ports: - "8010:8010" restart: on-failure diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 000000000..c93d22ab4 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,24 @@ +FROM golang:1.13.8-buster as builder + +# For loading data to TiDB +WORKDIR /go/src/github.com/pingcap/ +RUN git clone https://github.com/pingcap/go-ycsb.git && \ + cd go-ycsb && \ + make + +FROM golang:1.13.8-buster + +RUN apt-get update && apt-get install -y --no-install-recommends \ + git \ + curl \ + vim \ + less \ + default-mysql-client \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /go/src/github.com/pingcap/br +COPY . . + +COPY --from=builder /go/src/github.com/pingcap/go-ycsb/bin/go-ycsb /go/bin/go-ycsb + +ENTRYPOINT ["/bin/bash"] diff --git a/docker/config/pd.toml b/docker/config/pd.toml new file mode 100644 index 000000000..e6fb173d1 --- /dev/null +++ b/docker/config/pd.toml @@ -0,0 +1,18 @@ +# PD Configuration. +[schedule] +# Disable Region Merge +max-merge-region-size = 0 +max-merge-region-key = 0 +merge-schedule-limit = 0 + +max-snapshot-count = 10 +max-pending-peer-count = 32 +max-store-down-time = "30m" +leader-schedule-limit = 4 +region-schedule-limit = 4 +replica-schedule-limit = 8 +tolerant-size-ratio = 5.0 + +[replication] +# The number of replicas for each region. +max-replicas = 3 diff --git a/docker/config/tidb.toml b/docker/config/tidb.toml new file mode 100644 index 000000000..3ef20cc07 --- /dev/null +++ b/docker/config/tidb.toml @@ -0,0 +1,9 @@ +# Run ddl worker on this tidb-server. +run-ddl = true + +# Schema lease duration. It is very dangerous to change; only do so if you know what you are doing. +lease = "360s" + +# When creating a table, split a separate region for it. It is recommended to +# turn off this option if a large number of tables will be created. +split-table = true diff --git a/docker/config/tikv.toml b/docker/config/tikv.toml new file mode 100644 index 000000000..6528e447f --- /dev/null +++ b/docker/config/tikv.toml @@ -0,0 +1,22 @@ +[raftstore] +# true (default value) for high reliability; this prevents data loss on power failure. +sync-log = true + +[coprocessor] +# Make region split more aggressive. +region-max-keys = 100 +region-split-keys = 80 + +[rocksdb] +# Number of open files that can be used by the DB. You may need to +# increase this if your database has a large working set. Value -1 means +# files opened are always kept open. You can estimate number of files based +# on target_file_size_base and target_file_size_multiplier for level-based +# compaction. +# If max-open-files = -1, RocksDB will prefetch index and filter blocks into +# block cache at startup, so if your database has a large working set, it will +# take several minutes to open the db.
+max-open-files = 1024 + +[raftdb] +max-open-files = 1024 diff --git a/go.mod b/go.mod index 8e50bbf35..94f4022f9 100644 --- a/go.mod +++ b/go.mod @@ -8,35 +8,37 @@ require ( github.com/cheggaaa/pb/v3 v3.0.1 github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect - github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8 // indirect + github.com/fatih/color v1.9.0 // indirect github.com/fsouza/fake-gcs-server v1.15.0 github.com/go-sql-driver/mysql v1.4.1 github.com/gogo/protobuf v1.3.1 - github.com/golang/snappy v0.0.1 // indirect github.com/google/btree v1.0.0 github.com/google/uuid v1.1.1 - github.com/onsi/ginkgo v1.10.3 // indirect - github.com/onsi/gomega v1.7.1 // indirect - github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4 - github.com/pingcap/errors v0.11.4 - github.com/pingcap/kvproto v0.0.0-20191212110315-d6a9d626988c - github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 - github.com/pingcap/parser v0.0.0-20191210060830-bdf23a7ade01 - github.com/pingcap/pd v1.1.0-beta.0.20191212045800-234784c7a9c5 - github.com/pingcap/tidb v1.1.0-beta.0.20191213040028-9009da737834 - github.com/pingcap/tidb-tools v3.1.0-beta.0.20191223064326-e9c7a23a8dcb+incompatible - github.com/pingcap/tipb v0.0.0-20191209145133-44f75c9bef33 + github.com/klauspost/cpuid v1.2.0 // indirect + github.com/mattn/go-runewidth v0.0.7 // indirect + github.com/montanaflynn/stats v0.5.0 // indirect + github.com/onsi/ginkgo v1.11.0 // indirect + github.com/onsi/gomega v1.8.1 // indirect + github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 + github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 + github.com/pingcap/kvproto v0.0.0-20200317112120-78042b285b75 + github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd + github.com/pingcap/parser v0.0.0-20200305120128-bde9faa0df84 + github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 + github.com/pingcap/tidb v1.1.0-beta.0.20200310133602-7c39e5e5e0bc + github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible + github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 github.com/prometheus/client_golang v1.0.0 + github.com/prometheus/common v0.4.1 github.com/sirupsen/logrus v1.4.2 github.com/spf13/cobra v0.0.5 - github.com/spf13/pflag v1.0.3 + github.com/spf13/pflag v1.0.5 + github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285 // indirect + github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect + go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 go.opencensus.io v0.22.2 // indirect - go.uber.org/atomic v1.5.1 // indirect - go.uber.org/zap v1.13.0 - golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect - golang.org/x/net v0.0.0-20191011234655-491137f69257 // indirect + go.uber.org/zap v1.14.0 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 - golang.org/x/tools v0.0.0-20191213032237-7093a17b0467 // indirect google.golang.org/api v0.14.0 google.golang.org/grpc v1.25.1 ) diff --git a/go.sum b/go.sum index 696ccee81..31fd50bcc 100644 --- a/go.sum +++ b/go.sum @@ -20,12 +20,25 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= 
+github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/appleboy/gin-jwt/v2 v2.6.3/go.mod h1:MfPYA4ogzvOcVkRwAxT7quHOtQmVKDpTwxyUrC2DNw0= +github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.26.1 h1:JGQggXhOiNJIqsmbYUl3cYtJZUffeOWlHtxfzGK7WPI= github.com/aws/aws-sdk-go v1.26.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -36,6 +49,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d h1:rQlvB2AYWme2bIB18r/SipGiMEVJYE9U0z+MGoU/LtQ= github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cheggaaa/pb/v3 v3.0.1 h1:m0BngUk2LuSRYdx4fujDKNRXNDpbNCfptPfVT2m6OJY= github.com/cheggaaa/pb/v3 v3.0.1/go.mod h1:SqqeMF/pMOIu3xgGoxtPYhMNQP258xE4x/XRTYua+KU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -48,10 +63,12 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod 
h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142 h1:3jFq2xL4ZajGK4aZY8jz+DAF0FHjI51BXjjSwCzS1Dk= github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -59,19 +76,26 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= +github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM= github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8 h1:LpMLYGyy67BoAFGda1NeOBQwqlv7nUXpm+rIVHGxZZ4= github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= +github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dgraph-io/ristretto v0.0.1 h1:cJwdnj42uV8Jg4+KLrYovLiCgIfz9wtWm6E6KA+1tLs= +github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= -github.com/dgryski/go-farm 
v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -84,30 +108,57 @@ github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3C github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsouza/fake-gcs-server v1.15.0 h1:ss/ztlt10Y64A5qslmxZKsiqW/i28t5DkRtv6qSFaLQ= github.com/fsouza/fake-gcs-server v1.15.0/go.mod h1:HNxAJ/+FY/XSsxuwz8iIYdp2GtMmPbJ8WQjjGMxd6Qk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w= +github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= +github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod 
h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w= -github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -116,16 +167,19 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -149,12 +203,6 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -165,11 +213,15 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/gtank/cryptopasta 
v0.0.0-20170601214702-1f550f6f2f69/go.mod h1:YLEMZOtU+AZ7dhN9T/IpGhXVGly2bvkJQ+zxj3WeVQo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -177,13 +229,21 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jeremywohl/flatten v0.0.0-20190921043622-d936035e55cf h1:Ut4tTtPNmInWiEWJRernsWm688R0RN6PFO8sZhwI0sk= github.com/jeremywohl/flatten v0.0.0-20190921043622-d936035e55cf/go.mod h1:4AmD/VxjWcI5SRB0n6szE2A6s2fsNHDLO0nAlMHgfLQ= +github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/joomcode/errorx v1.0.1/go.mod h1:kgco15ekB6cs+4Xjzo7SPeXzx38PbJzBwbnu9qfVNHQ= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY= @@ -191,9 +251,12 @@ github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSg github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= github.com/klauspost/cpuid 
v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= @@ -201,25 +264,41 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod 
h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -230,6 +309,8 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s= github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.5.0 h1:2EkzeTSqBB4V4bJwWrt5gIIrZmpJBcoIRGS2kWLgzmk= +github.com/montanaflynn/stats v0.5.0/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI= @@ -237,93 +318,114 @@ github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3 github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8= github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.4.3/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34= +github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.3.0 h1:e5+lF2E4Y2WCIxBefVowBuB0iHrUH4HZ8q+6mGF7fJc= github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= +github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8= +github.com/pingcap-incubator/tidb-dashboard v0.0.0-20200302022638-35a6e979dca9/go.mod h1:YUceA4BHY/MTtp63yZLTYP22waFSwMNo9lXq2FDtzVw= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4 h1:iRtOAQ6FXkY/BGvst3CDfTva4nTqh6CL8WXvanLdbu0= github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= +github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= +github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 h1:R8gStypOBmpnHEx1qi//SaqxJVI4inOqljg/Aj5/390= +github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c h1:hvQd3aOLKLF7xvRV6DzvPkKY4QXzfVbjU1BhW0d9yL8= -github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= +github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 h1:58naV4XMEqm0hl9LcYo6cZoGBGiLtefMQMF/vo3XLgQ= +github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d h1:F8vp38kTAckN+v8Jlc98uMBvKIzr1a+UhnLyVYn8Q5Q= +github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= +github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798 h1:6DMbRqPI1qzQ8N1xc3+nKY8IxSACd9VqQKkRVvbyoIg= +github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d h1:rCmRK0lCRrHMUbS99BKFYhK9YxJDNw0xB033cQbYo0s= 
github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d/go.mod h1:fMRU1BA1y+r89AxUoaAar4JjrhUkVDt0o0Np6V8XbDQ= -github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8= -github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= -github.com/pingcap/kvproto v0.0.0-20191030021250-51b332bcb20b/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20191121022655-4c654046831d/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20191202044712-32be31591b03 h1:IyJl+qesVPf3UfFFmKtX69y1K5KC8uXlot3U0QgH7V4= -github.com/pingcap/kvproto v0.0.0-20191202044712-32be31591b03/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20191212110315-d6a9d626988c h1:CwVCq7XA/NvTQ6X9ZAhZlvcEvseUsHiPFQf2mL3LVl4= -github.com/pingcap/kvproto v0.0.0-20191212110315-d6a9d626988c/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd h1:hWDol43WY5PGhsh3+8794bFHY1bPrmu6bTalpssCrGg= -github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw= +github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= +github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= +github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= +github.com/pingcap/kvproto v0.0.0-20200214064158-62d31900d88e/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200221034943-a2aa1d1e20a8/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5 h1:knEvP4R5v5b2T107/Q6VzB0C8/6T7NXB/V7Vl1FtQsg= +github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200317112120-78042b285b75 h1:DB3NTM0ilba/6sW+vccdEnP10bVvrVunDwWvRa0hSKc= +github.com/pingcap/kvproto v0.0.0-20200317112120-78042b285b75/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20191210060830-bdf23a7ade01 h1:q1rGnV/296//bArDP7cDWWaSrhaeEKZY+gIo+Jb0Gyk= -github.com/pingcap/parser v0.0.0-20191210060830-bdf23a7ade01/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= -github.com/pingcap/pd v1.1.0-beta.0.20191210055626-676ddd3fbd2d/go.mod h1:Z/VMtXHpkOP+MnYnk4TL5VHc3ZwO1qHwc89zDuf5n8Q= -github.com/pingcap/pd v1.1.0-beta.0.20191212045800-234784c7a9c5 h1:sbpL1uNynq4yjGh0Xxb8MMePaOOXb9fdml3kB1NMQu4= -github.com/pingcap/pd v1.1.0-beta.0.20191212045800-234784c7a9c5/go.mod h1:NJYtcyKOqSWTJXoMF9CDdQc1xymxyBuQ8QSH6jJWqgc= -github.com/pingcap/sysutil v0.0.0-20191126040022-986c5b3ed9a3 h1:HCNif3lukL83gNC2EBAoh2Qbz36+2p0bm0LjgnNfl1s= -github.com/pingcap/sysutil v0.0.0-20191126040022-986c5b3ed9a3/go.mod h1:Futrrmuw98pEsbEmoPsjw8aKLCmixwHEmT2rF+AsXGw= -github.com/pingcap/tidb v1.1.0-beta.0.20191213040028-9009da737834 h1:eNf7bDY39moIzzcs5+PhLLW0BM2D2yrzFbjW/X42y0s= -github.com/pingcap/tidb v1.1.0-beta.0.20191213040028-9009da737834/go.mod 
h1:VWx47QOXISBHHtZeWrDQlBOdbvth9TE9gei6QpoqJ4g= -github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= -github.com/pingcap/tidb-tools v3.1.0-beta.0.20191223064326-e9c7a23a8dcb+incompatible h1:GxWxXVqA2aAZIgS+bEpasJkkspu9Jom1/oB2NmP7t/o= -github.com/pingcap/tidb-tools v3.1.0-beta.0.20191223064326-e9c7a23a8dcb+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= -github.com/pingcap/tipb v0.0.0-20191209145133-44f75c9bef33 h1:cTSaVv1hue17BCPqt+sURADTFSMpSD26ZuvKRyYIjJs= -github.com/pingcap/tipb v0.0.0-20191209145133-44f75c9bef33/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= +github.com/pingcap/parser v0.0.0-20200305120128-bde9faa0df84 h1:u5FOwUw9muF8mBTZVV1dQhoAKiEo2Ci54CxN9XchEEY= +github.com/pingcap/parser v0.0.0-20200305120128-bde9faa0df84/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 h1:Yrp99FnjHAEuDrSBql2l0IqCtJX7KwJbTsD5hIArkvk= +github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3/go.mod h1:25GfNw6+Jcr9kca5rtmTb4gKCJ4jOpow2zV2S9Dgafs= +github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/sysutil v0.0.0-20200302022240-21c8c70d0ab1 h1:YUnUZ914SHFMsOSe/xgH5DKK/thtRma8X8hcszRo3CA= +github.com/pingcap/sysutil v0.0.0-20200302022240-21c8c70d0ab1/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/sysutil v0.0.0-20200309085538-962fd285f3bb h1:bDbgLaNTRNK6Qw7KjvEqqfCQstY8WMEcXyXTU7yzYKg= +github.com/pingcap/sysutil v0.0.0-20200309085538-962fd285f3bb/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/tidb v1.1.0-beta.0.20200310133602-7c39e5e5e0bc h1:1aW3qTRJZjnosvXt1b75KL73b28XRJWBx6jtTtHsybg= +github.com/pingcap/tidb v1.1.0-beta.0.20200310133602-7c39e5e5e0bc/go.mod h1:WTmfs5zrUGMPw3Enn5FI3buzkU8BDuJ6BhsO/JC239U= +github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible h1:84F7MFMfdAYObrznvRslmVu43aoihrlL+7mMyMlOi0o= +github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= +github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 h1:aJPXrT1u4VfUSGFA2oQVwl4pOXzqe+YI6wed01cjDH4= +github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7 h1:FUL3b97ZY2EPqg2NbXKuMHs5pXJB9hjj1fDHnF2vl28= -github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.19.10+incompatible h1:lA4Pi29JEVIQIgATSeftHSY0rMGI9CLrl2ZvDLiahto= github.com/shirou/gopsutil v2.19.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shurcooL/httpfs 
v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca h1:3fECS8atRjByijiI8yYiuwLwQ2ZxXobW7ua/8GRB3pI= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= @@ -332,57 +434,80 @@ github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI= +github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:gxQT6pBGRuIGunNf/+tSOB5OHvguWi8Tbt82WOkf35E= +github.com/swaggo/gin-swagger v1.2.0/go.mod h1:qlH2+W7zXGZkczuL+r2nEBR2JTT+/lX05Nn6vPhc7OI= 
+github.com/swaggo/http-swagger v0.0.0-20200103000832-0e9263c4b516/go.mod h1:O1lAbCgAAX/KZ80LM/OXwtWFI/5TvZlwxSg8Cq08PV0= +github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y= +github.com/swaggo/swag v1.6.3/go.mod h1:wcc83tB4Mb2aNiL/HP4MFeQdpHUrca+Rp/DRNgWAUio= +github.com/swaggo/swag v1.6.5/go.mod h1:Y7ZLSS0d0DdxhWGVhQdu+Bu1QhaF5k0RD7FKdiAykeY= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d h1:4J9HCZVpvDmj2tiKGSTUnb3Ok/9CEQb9oqu9LHKQQpc= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= +github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285 h1:uSDYjYejelKyceA6DiCsngFof9jAyeaSyX9XC5a1a7Q= +github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= +github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 h1:lYIiVDtZnyTWlNwiAxLj0bbpTcx1BWCFhXjfsvmPdNc= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo= github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1C1PjvOJnJykCzcD5QHbk= github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.0 h1:OHbgr8l656Ub3Fw5k9SWnBfIEwvoHQ+W2y+Aa9D1Uyo= github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= +github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3 h1:ZsIlNwu/G0zbChIZaWOeZ2TPGNmKMt46jZLXi3e8LFc= github.com/unrolled/render 
v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= -github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20= -github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v0.3.0 h1:PaXOb61mWeZJxc1Ji2xJjpVg9QfPo0rrB+lHyBxGNSU= github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yookoala/realpath v1.0.0 h1:7OA9pj4FZd+oZDsyvXWQvjn5oBdcHRTV44PpdMSuImQ= github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 h1:lWF4f9Nypl1ZqSb4gLeh/DGvBYVaUYHuiB93teOmwgc= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -390,8 +515,8 @@ go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= -go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4= go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= @@ -400,6 +525,8 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= 
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -407,15 +534,18 @@ go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.12.0 h1:dySoUQPFBGj6xwjmBzageVL8jGi8uxc6bEmJQjA06bw= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/zap v1.14.0 h1:/pduUoebOeeJzTDFuoMgC6nRkiasr1sBCIEorly7m4o= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -436,13 +566,17 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod
h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -456,11 +590,15 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191011234655-491137f69257 h1:ry8e2D+cwaV6hk7lb3aRTjjZo24shrbK0e11QEOkTIg= -golang.org/x/net v0.0.0-20191011234655-491137f69257/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -471,12 +609,15 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -485,12 +626,14 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190909082730-f460065e899a h1:mIzbOulag9/gXacgxKlFVwpCOWSfBT3/pDyyCwGA9as= -golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -511,7 +654,10 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -525,10 +671,18 @@ golang.org/x/tools v0.0.0-20191107010934-f79515f33823 h1:akkRBeitX2EZP59KdtKw310 golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191213032237-7093a17b0467 h1:Jybbe55FT+YYZIJGWmJIA4ZGcglFuZOduakIW3+gHXY= -golang.org/x/tools v0.0.0-20191213032237-7093a17b0467/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200301222351-066e0c02454c h1:FD7jysxM+EJqg5UYYy3XYDsAiUickFsn4UiaanJkf8c= +golang.org/x/tools v0.0.0-20200301222351-066e0c02454c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb h1:iKlO7ROJc6SttHKlxzwGytRtBUqX4VARrNTgP2YLX5M= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -542,7 +696,6 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/genproto
v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -552,22 +705,21 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs= -google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 h1:6XzpBoANz1NqMNfDXzc2QmHmbb1vyMsvRfoP5rM+K1I= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -578,7 +730,10 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/go-playground/validator.v9 v9.31.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= @@ -586,15 +741,23 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/main.go b/main.go index 103699614..7b7cbfc97 100644 --- a/main.go +++ b/main.go @@ -7,7 +7,6 @@ import ( "os/signal" "syscall" - "github.com/pingcap/errors" "github.com/spf13/cobra" "github.com/pingcap/br/cmd" @@ -42,7 +41,7 @@ func main() { Use: "br", Short: "br is a TiDB/TiKV cluster backup restore tool.", TraverseChildren: true, - SilenceUsage: true, + SilenceUsage: false, } cmd.AddFlags(rootCmd) cmd.SetDefaultContext(ctx) @@ -53,7 +52,6 @@ func main() { ) rootCmd.SetArgs(os.Args[1:]) if err := rootCmd.Execute(); err != nil { - rootCmd.Println(errors.ErrorStack(err)) os.Exit(1) } } diff --git a/metrics/grafana/br.json b/metrics/grafana/br.json new file mode 100644 index 000000000..d211b4914 --- /dev/null +++ b/metrics/grafana/br.json @@ -0,0 +1,1690 @@ +{ + 
"__inputs": [ + { + "name": "DS_TEST-CLUSTER", + "label": "test-cluster", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.1.6" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "panel", + "id": "heatmap", + "name": "Heatmap", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "${DS_TEST-CLUSTER}", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 16, + "iteration": 1577953179687, + "links": [], + "panels": [ + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 15, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tikv_thread_cpu_seconds_total{instance=~\"$instance\", name=~\"backup_worker.*\"}[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "backup-worker", + "metric": "tikv_thread_cpu_seconds_total", + "refId": "A", + "step": 4 + }, + { + "expr": "sum(rate(tikv_thread_cpu_seconds_total{instance=~\"$instance\", name=~\"backup_endpoint\"}[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "backup-endpoint", + "metric": "tikv_thread_cpu_seconds_total", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Backup CPU Utilization", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 13, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "max": true, + "min": false, + "rightSide": true, + 
"show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_disk_io_time_seconds_total[1m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} - {{device}}", + "metric": "tikv_thread_cpu_seconds_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "IO Utilization", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 2, + "description": "", + "fill": 1, + "gridPos": { + "h": 7, + "w": 7, + "x": 0, + "y": 8 + }, + "id": 10, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(tikv_backup_error_counter[1m])", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{error}} {{instance}}", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Backup Errors", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 9, + "x": 7, + "y": 8 + }, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(rate(tikv_backup_range_size_bytes_sum{instance=~\"$instance\"}[1m]))", + "format": "time_series", + "hide": true, + "intervalFactor": 2, + "legendFormat": "backup-flow", + "metric": "", + "refId": "A", + "step": 4 + }, + { + "expr": "rate(tikv_backup_range_size_bytes_sum[1m])", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "metric": "", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "BackupSST Generation Throughput", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 8 + }, + "id": 6, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_backup_range_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}} - 99%", + "metric": "", + "refId": "A", + "step": 4 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(tikv_backup_range_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}} - 95%", + "refId": "B", + "step": 4 + }, + { + "expr": "sum(rate(tikv_backup_range_duration_seconds_sum{instance=~\"$instance\"}[1m])) by (type) / sum(rate(tikv_backup_range_duration_seconds_count{instance=~\"$instance\"}[1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}} - avg", + "refId": "C", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "One Backup Range Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 15 + 
}, + "id": 8, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_backup_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": " 99%", + "metric": "", + "refId": "A", + "step": 4 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(tikv_backup_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "95%", + "refId": "B", + "step": 4 + }, + { + "expr": "sum(rate(tikv_backup_request_duration_seconds_sum{instance=~\"$instance\"}[1m])) / sum(rate(tikv_backup_request_duration_seconds_count{instance=~\"$instance\"}[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "avg", + "refId": "C", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "One Backup Subtask Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "description": "", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 15 + }, + "id": 12, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(1, sum(rate(tikv_coprocessor_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le,req))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{req}}-100%", + "refId": "E" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_coprocessor_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le,req))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{req}}-99%", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Checksum Request Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 1, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": 
null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Backup", + "type": "row" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 17, + "panels": [], + "title": "Restore", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 2 + }, + "id": 21, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tikv_thread_cpu_seconds_total{instance=~\"$instance\"}[1m])) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 2 + }, + "id": 19, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_disk_io_time_seconds_total[1m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} - {{device}}", + "metric": "tikv_thread_cpu_seconds_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "IO Utilization", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + 
"show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "description": " \tThe number of leaders on each TiKV instance", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 10 + }, + "id": 25, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(tikv_raftstore_region_count{instance=~\"$instance\", type=\"leader\"}) by (instance)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + }, + { + "expr": "delta(tikv_raftstore_region_count{instance=~\"$instance\", type=\"leader\"}[30s]) < -10", + "format": "time_series", + "hide": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Leader", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "description": "The number of Regions on each TiKV instance", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 10 + }, + "id": 29, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(tikv_raftstore_region_count{instance=~\"$instance\", type=\"region\"}) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Region", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 
0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 18 + }, + "id": 33, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_import_download_duration_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}-99%", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(tikv_import_download_duration_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}-95%", + "refId": "B" + }, + { + "expr": "sum(rate(tikv_import_download_duration_bucket{instance=~\"$instance\"}[1m])) by (type) / sum(rate(tikv_import_download_duration_bucket{instance=~\"$instance\"}[1m])) by (type)", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "{{type}}-avg", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_import_ingest_duration_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}-99%", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(tikv_import_ingest_duration_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}-95%", + "refId": "E" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Process SST Duration", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 31, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + 
"spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tikv_import_download_bytes_sum{instance=~\"$instance\"}[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "download-flow", + "refId": "A" + }, + { + "expr": "rate(tikv_import_download_bytes_sum[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DownLoad SST Throughput", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 26 + }, + "id": 27, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(tikv_import_error_counter[1m])", + "format": "time_series", + "hide": true, + "intervalFactor": 2, + "legendFormat": "{{error}}-{{instance}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Restore Errors", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "description": "", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 26 + }, + "id": 23, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(1, sum(rate(tikv_coprocessor_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le,req))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{req}}-100%", + 
"refId": "E" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_coprocessor_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le,req))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{req}}-99%", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Checksum Request Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 1, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 18, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": ".*", + "current": {}, + "datasource": "${DS_TEST-CLUSTER}", + "definition": "label_values(tikv_engine_size_bytes, instance)", + "hide": 0, + "includeAll": true, + "label": "Instance", + "multi": false, + "name": "instance", + "options": [], + "query": "label_values(tikv_engine_size_bytes, instance)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Test-Cluster-Backup & Restore", + "uid": "AzvioWLWz", + "version": 25 +} diff --git a/pkg/backup/check.go b/pkg/backup/check.go new file mode 100644 index 000000000..38b2d927d --- /dev/null +++ b/pkg/backup/check.go @@ -0,0 +1,35 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package backup + +import ( + "encoding/hex" + + "github.com/google/btree" + "github.com/pingcap/log" + "go.uber.org/zap" + + "github.com/pingcap/br/pkg/rtree" +) + +// checkDupFiles checks if there are any files are duplicated. +func checkDupFiles(rangeTree *rtree.RangeTree) { + // Name -> SHA256 + files := make(map[string][]byte) + rangeTree.Ascend(func(i btree.Item) bool { + rg := i.(*rtree.Range) + for _, f := range rg.Files { + old, ok := files[f.Name] + if ok { + log.Error("dup file", + zap.String("Name", f.Name), + zap.String("SHA256_1", hex.EncodeToString(old)), + zap.String("SHA256_2", hex.EncodeToString(f.Sha256)), + ) + } else { + files[f.Name] = f.Sha256 + } + } + return true + }) +} diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 5cba2d9bf..9693b6b5f 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package backup import ( @@ -11,21 +13,26 @@ import ( "github.com/gogo/protobuf/proto" "github.com/google/btree" "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/backup" + kvproto "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" + "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/ranger" "go.uber.org/zap" + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" @@ -33,7 +40,7 @@ import ( // ClientMgr manages connections needed by backup. type ClientMgr interface { - GetBackupClient(ctx context.Context, storeID uint64) (backup.BackupClient, error) + GetBackupClient(ctx context.Context, storeID uint64) (kvproto.BackupClient, error) GetPDClient() pd.Client GetTiKV() tikv.Storage GetLockResolver() *tikv.LockResolver @@ -50,9 +57,9 @@ type Client struct { mgr ClientMgr clusterID uint64 - backupMeta backup.BackupMeta + backupMeta kvproto.BackupMeta storage storage.ExternalStorage - backend *backup.StorageBackend + backend *kvproto.StorageBackend } // NewBackupClient returns a new backup client @@ -67,29 +74,33 @@ func NewBackupClient(ctx context.Context, mgr ClientMgr) (*Client, error) { } // GetTS returns the latest timestamp. 
-func (bc *Client) GetTS(ctx context.Context, timeAgo string) (uint64, error) { - p, l, err := bc.mgr.GetPDClient().GetTS(ctx) - if err != nil { - return 0, errors.Trace(err) - } - backupTS := oracle.ComposeTS(p, l) - - if timeAgo != "" { - duration, err := time.ParseDuration(timeAgo) +func (bc *Client) GetTS(ctx context.Context, duration time.Duration, ts uint64) (uint64, error) { + var ( + backupTS uint64 + err error + ) + if ts > 0 { + backupTS = ts + } else { + p, l, err := bc.mgr.GetPDClient().GetTS(ctx) if err != nil { return 0, errors.Trace(err) } - if duration <= 0 { + backupTS = oracle.ComposeTS(p, l) + + switch { + case duration < 0: return 0, errors.New("negative timeago is not allowed") - } - log.Info("backup time ago", zap.Duration("timeago", duration)) + case duration > 0: + log.Info("backup time ago", zap.Duration("timeago", duration)) - backupTime := oracle.GetTimeFromTS(backupTS) - backupAgo := backupTime.Add(-duration) - if backupTS < oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) { - return 0, errors.New("backup ts overflow please choose a smaller timeago") + backupTime := oracle.GetTimeFromTS(backupTS) + backupAgo := backupTime.Add(-duration) + if backupTS < oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) { + return 0, errors.New("backup ts overflow please choose a smaller timeago") + } + backupTS = oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) } - backupTS = oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) } // check backup time do not exceed GCSafePoint @@ -102,9 +113,9 @@ func (bc *Client) GetTS(ctx context.Context, timeAgo string) (uint64, error) { } // SetStorage set ExternalStorage for client -func (bc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend) error { +func (bc *Client) SetStorage(ctx context.Context, backend *kvproto.StorageBackend, sendCreds bool) error { var err error - bc.storage, err = storage.Create(ctx, backend) + bc.storage, err = storage.Create(ctx, backend, sendCreds) if err != nil { return err } @@ -121,7 +132,12 @@ func (bc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend } // SaveBackupMeta saves the current backup meta at the given path. 
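With this change `GetTS` no longer parses the `--timeago` string itself: callers hand it a parsed `time.Duration` plus an optional explicit TS. A usage sketch (the `Client` is assumed to come from `NewBackupClient`; `explicitTS` stands in for a user-supplied TS):

```go
package backup

import (
	"context"
	"time"
)

func exampleGetTS(ctx context.Context, bc *Client, explicitTS uint64) error {
	// Zero duration, zero TS: back up at the current PD timestamp.
	if _, err := bc.GetTS(ctx, 0, 0); err != nil {
		return err
	}
	// A positive duration shifts the snapshot backwards (here 90s ago);
	// negative durations and overflows are rejected with an error.
	if _, err := bc.GetTS(ctx, 90*time.Second, 0); err != nil {
		return err
	}
	// A non-zero TS takes precedence over any duration.
	_, err := bc.GetTS(ctx, time.Minute, explicitTS)
	return err
}
```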
-func (bc *Client) SaveBackupMeta(ctx context.Context) error { +func (bc *Client) SaveBackupMeta(ctx context.Context, ddlJobs []*model.Job) error { + ddlJobsData, err := json.Marshal(ddlJobs) + if err != nil { + return errors.Trace(err) + } + bc.backupMeta.Ddls = ddlJobsData backupMetaData, err := proto.Marshal(&bc.backupMeta) if err != nil { return errors.Trace(err) @@ -129,7 +145,7 @@ func (bc *Client) SaveBackupMeta(ctx context.Context) error { log.Debug("backup meta", zap.Reflect("meta", bc.backupMeta)) backendURL := storage.FormatBackendURL(bc.backend) - log.Info("save backup meta", zap.Stringer("path", &backendURL)) + log.Info("save backup meta", zap.Stringer("path", &backendURL), zap.Int("jobs", len(ddlJobs))) return bc.storage.Write(ctx, utils.MetaFile, backupMetaData) } @@ -173,63 +189,27 @@ func appendRanges(tbl *model.TableInfo, tblID int64) ([]kv.KeyRange, error) { func BuildBackupRangeAndSchema( dom *domain.Domain, storage kv.Storage, + tableFilter *filter.Filter, backupTS uint64, - dbName, tableName string, -) ([]Range, *Schemas, error) { - SystemDatabases := [3]string{ - "information_schema", - "performance_schema", - "mysql", - } - +) ([]rtree.Range, *Schemas, error) { info, err := dom.GetSnapshotInfoSchema(backupTS) if err != nil { return nil, nil, errors.Trace(err) } - var dbInfos []*model.DBInfo - var cTableName model.CIStr - switch { - case len(dbName) == 0 && len(tableName) != 0: - return nil, nil, errors.New("no database is not specified") - case len(dbName) != 0 && len(tableName) == 0: - // backup database - cDBName := model.NewCIStr(dbName) - dbInfo, exist := info.SchemaByName(cDBName) - if !exist { - return nil, nil, errors.Errorf("schema %s not found", dbName) - } - dbInfos = append(dbInfos, dbInfo) - case len(dbName) != 0 && len(tableName) != 0: - // backup table - cTableName = model.NewCIStr(tableName) - cDBName := model.NewCIStr(dbName) - dbInfo, exist := info.SchemaByName(cDBName) - if !exist { - return nil, nil, errors.Errorf("schema %s not found", dbName) - } - dbInfos = append(dbInfos, dbInfo) - case len(dbName) == 0 && len(tableName) == 0: - // backup full - dbInfos = info.AllSchemas() - } - ranges := make([]Range, 0) + ranges := make([]rtree.Range, 0) backupSchemas := newBackupSchemas() -LoadDb: - for _, dbInfo := range dbInfos { + for _, dbInfo := range info.AllSchemas() { // skip system databases - for _, sysDbName := range SystemDatabases { - if sysDbName == dbInfo.Name.L { - continue LoadDb - } - } - dbData, err := json.Marshal(dbInfo) - if err != nil { - return nil, nil, errors.Trace(err) + if util.IsMemOrSysDB(dbInfo.Name.L) { + continue } - idAlloc := autoid.NewAllocator(storage, dbInfo.ID, false) + + var dbData []byte + idAlloc := autoid.NewAllocator(storage, dbInfo.ID, false, autoid.RowIDAllocType) + for _, tableInfo := range dbInfo.Tables { - if len(cTableName.L) != 0 && cTableName.L != tableInfo.Name.L { + if !tableFilter.Match(&filter.Table{Schema: dbInfo.Name.L, Name: tableInfo.Name.L}) { // Skip tables other than the given table. 
continue } @@ -243,12 +223,18 @@ LoadDb: zap.Stringer("table", tableInfo.Name), zap.Int64("AutoIncID", globalAutoID)) + if dbData == nil { + dbData, err = json.Marshal(dbInfo) + if err != nil { + return nil, nil, errors.Trace(err) + } + } tableData, err := json.Marshal(tableInfo) if err != nil { return nil, nil, errors.Trace(err) } - schema := backup.Schema{ + schema := kvproto.Schema{ Db: dbData, Table: tableData, } @@ -259,7 +245,7 @@ LoadDb: return nil, nil, err } for _, r := range tableRanges { - ranges = append(ranges, Range{ + ranges = append(ranges, rtree.Range{ StartKey: r.StartKey, EndKey: r.EndKey, }) @@ -267,23 +253,62 @@ LoadDb: } } - if len(cTableName.L) != 0 { - // Must find the given table. - if backupSchemas.Len() == 0 { - return nil, nil, errors.Errorf("table %s not found", cTableName) - } + if backupSchemas.Len() == 0 { + return nil, nil, errors.New("nothing to backup") } return ranges, backupSchemas, nil } +// GetBackupDDLJobs returns the DDL jobs that were done in (lastBackupTS, backupTS] +func GetBackupDDLJobs(dom *domain.Domain, lastBackupTS, backupTS uint64) ([]*model.Job, error) { + snapMeta, err := dom.GetSnapshotMeta(backupTS) + if err != nil { + return nil, errors.Trace(err) + } + lastSnapMeta, err := dom.GetSnapshotMeta(lastBackupTS) + if err != nil { + return nil, errors.Trace(err) + } + lastSchemaVersion, err := lastSnapMeta.GetSchemaVersion() + if err != nil { + return nil, errors.Trace(err) + } + allJobs := make([]*model.Job, 0) + defaultJobs, err := snapMeta.GetAllDDLJobsInQueue(meta.DefaultJobListKey) + if err != nil { + return nil, errors.Trace(err) + } + log.Debug("get default jobs", zap.Int("jobs", len(defaultJobs))) + allJobs = append(allJobs, defaultJobs...) + addIndexJobs, err := snapMeta.GetAllDDLJobsInQueue(meta.AddIndexJobListKey) + if err != nil { + return nil, errors.Trace(err) + } + log.Debug("get add index jobs", zap.Int("jobs", len(addIndexJobs))) + allJobs = append(allJobs, addIndexJobs...) + historyJobs, err := snapMeta.GetAllHistoryDDLJobs() + if err != nil { + return nil, errors.Trace(err) + } + log.Debug("get history jobs", zap.Int("jobs", len(historyJobs))) + allJobs = append(allJobs, historyJobs...) + + completedJobs := make([]*model.Job, 0) + for _, job := range allJobs { + if (job.State == model.JobStateDone || job.State == model.JobStateSynced) && + (job.BinlogInfo != nil && job.BinlogInfo.SchemaVersion > lastSchemaVersion) { + completedJobs = append(completedJobs, job) + } + } + log.Debug("get completed jobs", zap.Int("jobs", len(completedJobs))) + return completedJobs, nil +} + // BackupRanges make a backup of the given key ranges. func (bc *Client) BackupRanges( ctx context.Context, - ranges []Range, - lastBackupTS uint64, - backupTS uint64, - rateLimit uint64, - concurrency uint32, + ranges []rtree.Range, + req kvproto.BackupRequest, updateCh chan<- struct{}, ) error { start := time.Now() @@ -297,8 +322,8 @@ func (bc *Client) BackupRanges( defer cancel() go func() { for _, r := range ranges { - err := bc.backupRange( - ctx, r.StartKey, r.EndKey, lastBackupTS, backupTS, rateLimit, concurrency, updateCh) + err := bc.BackupRange( + ctx, r.StartKey, r.EndKey, req, updateCh) if err != nil { errCh <- err return @@ -307,17 +332,24 @@ func (bc *Client) BackupRanges( close(errCh) }() - // Check GC safepoint every 30s. - t := time.NewTicker(time.Second * 30) + // Check GC safepoint every 5s.
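The filter at the end of `GetBackupDDLJobs` is the core of incremental DDL backup: a job is kept only if it completed and the schema version it produced postdates the previous backup. Restated as a standalone predicate (a hypothetical helper, not part of this patch):

```go
package backup

import "github.com/pingcap/parser/model"

// isIncrementalDDLJob mirrors the selection rule in GetBackupDDLJobs: the
// job must have finished (Done or Synced), and its resulting schema
// version must be newer than the one captured at lastBackupTS.
func isIncrementalDDLJob(job *model.Job, lastSchemaVersion int64) bool {
	completed := job.State == model.JobStateDone || job.State == model.JobStateSynced
	newer := job.BinlogInfo != nil && job.BinlogInfo.SchemaVersion > lastSchemaVersion
	return completed && newer
}
```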
+ t := time.NewTicker(time.Second * 5) defer t.Stop() finished := false for { - err := CheckGCSafepoint(ctx, bc.mgr.GetPDClient(), backupTS) + err := CheckGCSafepoint(ctx, bc.mgr.GetPDClient(), req.EndVersion) if err != nil { log.Error("check GC safepoint failed", zap.Error(err)) return err } + if req.StartVersion > 0 { + err = CheckGCSafepoint(ctx, bc.mgr.GetPDClient(), req.StartVersion) + if err != nil { + log.Error("check GC safepoint for last backup ts failed", zap.Error(err)) + return err + } + } if finished { // Return error (if there is any) before finishing backup. return err @@ -337,14 +369,11 @@ } } -// backupRange make a backup of the given key range. -func (bc *Client) backupRange( +// BackupRange makes a backup of the given key range. +func (bc *Client) BackupRange( ctx context.Context, startKey, endKey []byte, - lastBackupTS uint64, - backupTS uint64, - rateLimit uint64, - concurrency uint32, + req kvproto.BackupRequest, updateCh chan<- struct{}, ) (err error) { start := time.Now() @@ -355,65 +384,70 @@ if err != nil { summary.CollectFailureUnit(key, err) } else { - summary.CollectSuccessUnit(key, elapsed) + summary.CollectSuccessUnit(key, 1, elapsed) } }() log.Info("backup started", zap.Binary("StartKey", startKey), zap.Binary("EndKey", endKey), - zap.Uint64("RateLimit", rateLimit), - zap.Uint32("Concurrency", concurrency)) + zap.Uint64("RateLimit", req.RateLimit), + zap.Uint32("Concurrency", req.Concurrency)) ctx, cancel := context.WithCancel(ctx) defer cancel() var allStores []*metapb.Store - allStores, err = bc.mgr.GetPDClient().GetAllStores(ctx, pd.WithExcludeTombstone()) + allStores, err = conn.GetAllTiKVStores(ctx, bc.mgr.GetPDClient(), conn.SkipTiFlash) if err != nil { return errors.Trace(err) } - req := backup.BackupRequest{ - ClusterId: bc.clusterID, - StartKey: startKey, - EndKey: endKey, - StartVersion: lastBackupTS, - EndVersion: backupTS, - StorageBackend: bc.backend, - RateLimit: rateLimit, - Concurrency: concurrency, - } + req.ClusterId = bc.clusterID + req.StartKey = startKey + req.EndKey = endKey + req.StorageBackend = bc.backend + push := newPushDown(ctx, bc.mgr, len(allStores)) - var results RangeTree + var results rtree.RangeTree results, err = push.pushBackup(req, allStores, updateCh) if err != nil { return err } - log.Info("finish backup push down", zap.Int("Ok", results.len())) + log.Info("finish backup push down", zap.Int("Ok", results.Len())) // Find and backup remaining ranges. // TODO: test fine grained backup.
err = bc.fineGrainedBackup( - ctx, startKey, endKey, lastBackupTS, - backupTS, rateLimit, concurrency, results, updateCh) + ctx, startKey, endKey, req.StartVersion, + req.EndVersion, req.RateLimit, req.Concurrency, results, updateCh) if err != nil { return err } - bc.backupMeta.StartVersion = lastBackupTS - bc.backupMeta.EndVersion = backupTS - log.Info("backup time range", - zap.Reflect("StartVersion", lastBackupTS), - zap.Reflect("EndVersion", backupTS)) - - results.tree.Ascend(func(i btree.Item) bool { - r := i.(*Range) + bc.backupMeta.StartVersion = req.StartVersion + bc.backupMeta.EndVersion = req.EndVersion + bc.backupMeta.IsRawKv = req.IsRawKv + if req.IsRawKv { + bc.backupMeta.RawRanges = append(bc.backupMeta.RawRanges, + &kvproto.RawRange{StartKey: startKey, EndKey: endKey, Cf: req.Cf}) + log.Info("backup raw ranges", + zap.ByteString("startKey", startKey), + zap.ByteString("endKey", endKey), + zap.String("cf", req.Cf)) + } else { + log.Info("backup time range", + zap.Reflect("StartVersion", req.StartVersion), + zap.Reflect("EndVersion", req.EndVersion)) + } + + results.Ascend(func(i btree.Item) bool { + r := i.(*rtree.Range) bc.backupMeta.Files = append(bc.backupMeta.Files, r.Files...) return true }) // Check if there are duplicated files. - results.checkDupFiles() + checkDupFiles(&results) return nil } @@ -451,21 +485,21 @@ func (bc *Client) fineGrainedBackup( backupTS uint64, rateLimit uint64, concurrency uint32, - rangeTree RangeTree, + rangeTree rtree.RangeTree, updateCh chan<- struct{}, ) error { bo := tikv.NewBackoffer(ctx, backupFineGrainedMaxBackoff) for { // Step1, check whether there is any incomplete range - incomplete := rangeTree.getIncompleteRange(startKey, endKey) + incomplete := rangeTree.GetIncompleteRange(startKey, endKey) if len(incomplete) == 0 { return nil } log.Info("start fine grained backup", zap.Int("incomplete", len(incomplete))) // Step2, retry backup on incomplete range - respCh := make(chan *backup.BackupResponse, 4) + respCh := make(chan *kvproto.BackupResponse, 4) errCh := make(chan error, 4) - retry := make(chan Range, 4) + retry := make(chan rtree.Range, 4) max := &struct { ms int @@ -524,7 +558,7 @@ func (bc *Client) fineGrainedBackup( zap.Binary("StartKey", resp.StartKey), zap.Binary("EndKey", resp.EndKey), ) - rangeTree.put(resp.StartKey, resp.EndKey, resp.Files) + rangeTree.Put(resp.StartKey, resp.EndKey, resp.Files) // Update progress updateCh <- struct{}{} @@ -550,15 +584,15 @@ func onBackupResponse( bo *tikv.Backoffer, backupTS uint64, lockResolver *tikv.LockResolver, - resp *backup.BackupResponse, -) (*backup.BackupResponse, int, error) { + resp *kvproto.BackupResponse, +) (*kvproto.BackupResponse, int, error) { log.Debug("onBackupResponse", zap.Reflect("resp", resp)) if resp.Error == nil { return resp, 0, nil } backoffMs := 0 switch v := resp.Error.Detail.(type) { - case *backup.Error_KvError: + case *kvproto.Error_KvError: if lockErr := v.KvError.Locked; lockErr != nil { // Try to resolve lock. log.Warn("backup occur kv error", zap.Reflect("error", v)) @@ -576,7 +610,7 @@ func onBackupResponse( log.Error("unexpect kv error", zap.Reflect("KvError", v.KvError)) return nil, backoffMs, errors.Errorf("onBackupResponse error %v", v) - case *backup.Error_RegionError: + case *kvproto.Error_RegionError: regionErr := v.RegionError // Ignore following errors. if !(regionErr.EpochNotMatch != nil || @@ -594,7 +628,7 @@ func onBackupResponse( // TODO: a better backoff. 
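For the raw-kv path added above, the backup meta records the raw key range and column family instead of an MVCC time range. A sketch of the resulting meta (keys and CF are illustrative):

```go
package backup

import kvproto "github.com/pingcap/kvproto/pkg/backup"

// exampleRawMeta shows the shape of the meta BackupRange produces for a
// raw-kv request: IsRawKv is set and each backed-up range is appended to
// RawRanges together with its column family.
func exampleRawMeta() kvproto.BackupMeta {
	meta := kvproto.BackupMeta{IsRawKv: true}
	meta.RawRanges = append(meta.RawRanges, &kvproto.RawRange{
		StartKey: []byte("a"),
		EndKey:   []byte("z"),
		Cf:       "default",
	})
	return meta
}
```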
backoffMs = 1000 /* 1s */ return nil, backoffMs, nil - case *backup.Error_ClusterIdError: + case *kvproto.Error_ClusterIdError: log.Error("backup occur cluster ID error", zap.Reflect("error", v)) err := errors.Errorf("%v", resp.Error) @@ -610,12 +644,12 @@ func onBackupResponse( func (bc *Client) handleFineGrained( ctx context.Context, bo *tikv.Backoffer, - rg Range, + rg rtree.Range, lastBackupTS uint64, backupTS uint64, rateLimit uint64, concurrency uint32, - respCh chan<- *backup.BackupResponse, + respCh chan<- *kvproto.BackupResponse, ) (int, error) { leader, pderr := bc.findRegionLeader(ctx, rg.StartKey) if pderr != nil { @@ -624,7 +658,7 @@ func (bc *Client) handleFineGrained( storeID := leader.GetStoreId() max := 0 - req := backup.BackupRequest{ + req := kvproto.BackupRequest{ ClusterId: bc.clusterID, StartKey: rg.StartKey, // TODO: the range may cross region. EndKey: rg.EndKey, @@ -643,7 +677,7 @@ func (bc *Client) handleFineGrained( err = SendBackup( ctx, storeID, client, req, // Handle responses with the same backoffer. - func(resp *backup.BackupResponse) error { + func(resp *kvproto.BackupResponse) error { response, backoffMs, err1 := onBackupResponse(bo, backupTS, lockResolver, resp) if err1 != nil { @@ -668,9 +702,9 @@ func (bc *Client) handleFineGrained( func SendBackup( ctx context.Context, storeID uint64, - client backup.BackupClient, - req backup.BackupRequest, - respFn func(*backup.BackupResponse) error, + client kvproto.BackupClient, + req kvproto.BackupRequest, + respFn func(*kvproto.BackupResponse) error, ) error { log.Info("try backup", zap.Any("backup request", req)) ctx, cancel := context.WithCancel(ctx) @@ -737,8 +771,8 @@ func (bc *Client) FastChecksum() (bool, error) { totalBytes += file.TotalBytes } - summary.CollectSuccessUnit(summary.TotalKV, totalKvs) - summary.CollectSuccessUnit(summary.TotalBytes, totalBytes) + summary.CollectSuccessUnit(summary.TotalKV, 1, totalKvs) + summary.CollectSuccessUnit(summary.TotalBytes, 1, totalBytes) if schema.Crc64Xor == checksum && schema.TotalKvs == totalKvs && schema.TotalBytes == totalBytes { log.Info("fast checksum success", zap.Stringer("db", dbInfo.Name), zap.Stringer("table", tblInfo.Name)) diff --git a/pkg/backup/client_test.go b/pkg/backup/client_test.go index 44ca1ad5a..63f3d5d5f 100644 --- a/pkg/backup/client_test.go +++ b/pkg/backup/client_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
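After the `kvproto` rename, `SendBackup`'s response handler is typed against `kvproto.BackupResponse`. A sketch of a caller with a trivial logging handler (`handleFineGrained` above shows the real one; the abort-on-error behavior matches how the callback's error is propagated there):

```go
package backup

import (
	"context"

	kvproto "github.com/pingcap/kvproto/pkg/backup"
	"github.com/pingcap/log"
	"go.uber.org/zap"
)

func exampleSendBackup(ctx context.Context, storeID uint64,
	cli kvproto.BackupClient, req kvproto.BackupRequest) error {
	return SendBackup(ctx, storeID, cli, req,
		// Each streamed response is handed to this callback; returning a
		// non-nil error stops the loop and surfaces the error.
		func(resp *kvproto.BackupResponse) error {
			log.Info("range backed up",
				zap.Binary("StartKey", resp.StartKey),
				zap.Binary("EndKey", resp.EndKey))
			return nil
		})
}
```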
+ package backup import ( @@ -50,16 +52,10 @@ func (r *testBackup) TestGetTS(c *C) { deviation = 100 ) - // timeago not valid - timeAgo := "invalid" - _, err = r.backupClient.GetTS(r.ctx, timeAgo) - c.Assert(err, ErrorMatches, "time: invalid duration invalid") - // timeago not work - timeAgo = "" expectedDuration := 0 currentTs := time.Now().UnixNano() / int64(time.Millisecond) - ts, err := r.backupClient.GetTS(r.ctx, timeAgo) + ts, err := r.backupClient.GetTS(r.ctx, 0, 0) c.Assert(err, IsNil) pdTs := oracle.ExtractPhysical(ts) duration := int(currentTs - pdTs) @@ -67,10 +63,9 @@ func (r *testBackup) TestGetTS(c *C) { c.Assert(duration, Less, expectedDuration+deviation) // timeago = "1.5m" - timeAgo = "1.5m" expectedDuration = 90000 currentTs = time.Now().UnixNano() / int64(time.Millisecond) - ts, err = r.backupClient.GetTS(r.ctx, timeAgo) + ts, err = r.backupClient.GetTS(r.ctx, 90*time.Second, 0) c.Assert(err, IsNil) pdTs = oracle.ExtractPhysical(ts) duration = int(currentTs - pdTs) @@ -78,13 +73,11 @@ func (r *testBackup) TestGetTS(c *C) { c.Assert(duration, Less, expectedDuration+deviation) // timeago = "-1m" - timeAgo = "-1m" - _, err = r.backupClient.GetTS(r.ctx, timeAgo) + _, err = r.backupClient.GetTS(r.ctx, -time.Minute, 0) c.Assert(err, ErrorMatches, "negative timeago is not allowed") // timeago = "1000000h" overflows - timeAgo = "1000000h" - _, err = r.backupClient.GetTS(r.ctx, timeAgo) + _, err = r.backupClient.GetTS(r.ctx, 1000000*time.Hour, 0) c.Assert(err, ErrorMatches, "backup ts overflow.*") // timeago = "10h" exceed GCSafePoint @@ -93,9 +86,15 @@ func (r *testBackup) TestGetTS(c *C) { now := oracle.ComposeTS(p, l) _, err = r.backupClient.mgr.GetPDClient().UpdateGCSafePoint(r.ctx, now) c.Assert(err, IsNil) - timeAgo = "10h" - _, err = r.backupClient.GetTS(r.ctx, timeAgo) + _, err = r.backupClient.GetTS(r.ctx, 10*time.Hour, 0) c.Assert(err, ErrorMatches, "GC safepoint [0-9]+ exceed TS [0-9]+") + + // timeago and backupts both exist, use backupts + backupts := oracle.ComposeTS(p+10, l) + ts, err = r.backupClient.GetTS(r.ctx, time.Minute, backupts) + c.Assert(err, IsNil) + c.Assert(ts, Equals, backupts) + } func (r *testBackup) TestBuildTableRange(c *C) { diff --git a/pkg/backup/metrics.go b/pkg/backup/metrics.go index fb982cc24..67d5fe1e5 100644 --- a/pkg/backup/metrics.go +++ b/pkg/backup/metrics.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( diff --git a/pkg/backup/push.go b/pkg/backup/push.go index 23c4f01d4..4aaffa7e2 100644 --- a/pkg/backup/push.go +++ b/pkg/backup/push.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( @@ -9,6 +11,8 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "go.uber.org/zap" + + "github.com/pingcap/br/pkg/rtree" ) // pushDown warps a backup task. @@ -35,9 +39,9 @@ func (push *pushDown) pushBackup( req backup.BackupRequest, stores []*metapb.Store, updateCh chan<- struct{}, -) (RangeTree, error) { +) (rtree.RangeTree, error) { // Push down backup tasks to all tikv instances. - res := newRangeTree() + res := rtree.NewRangeTree() wg := new(sync.WaitGroup) for _, s := range stores { storeID := s.GetId() @@ -82,7 +86,7 @@ func (push *pushDown) pushBackup( } if resp.GetError() == nil { // None error means range has been backuped successfully.
- res.put( + res.Put( resp.GetStartKey(), resp.GetEndKey(), resp.GetFiles()) // Update progress diff --git a/pkg/backup/safe_point.go b/pkg/backup/safe_point.go index bb73bc7d9..d4d431ded 100644 --- a/pkg/backup/safe_point.go +++ b/pkg/backup/safe_point.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( @@ -5,7 +7,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "go.uber.org/zap" ) diff --git a/pkg/backup/safe_point_test.go b/pkg/backup/safe_point_test.go index 1bea9e210..cdc071686 100644 --- a/pkg/backup/safe_point_test.go +++ b/pkg/backup/safe_point_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( @@ -5,21 +7,21 @@ import ( "sync" . "github.com/pingcap/check" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/util/testleak" - "github.com/pingcap/br/pkg/utils" + "github.com/pingcap/br/pkg/mock" ) var _ = Suite(&testSaftPointSuite{}) type testSaftPointSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testSaftPointSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } diff --git a/pkg/backup/schema.go b/pkg/backup/schema.go index 66e4beec7..18583d094 100644 --- a/pkg/backup/schema.go +++ b/pkg/backup/schema.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( diff --git a/pkg/backup/schema_test.go b/pkg/backup/schema_test.go index 3d10fd967..3b3bef897 100644 --- a/pkg/backup/schema_test.go +++ b/pkg/backup/schema_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( @@ -5,21 +7,22 @@ import ( "math" . "github.com/pingcap/check" + "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" - "github.com/pingcap/br/pkg/utils" + "github.com/pingcap/br/pkg/mock" ) var _ = Suite(&testBackupSchemaSuite{}) type testBackupSchemaSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testBackupSchemaSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -34,28 +37,32 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { tk := testkit.NewTestKit(c, s.mock.Storage) // Table t1 is not exist. + testFilter, err := filter.New(false, &filter.Rules{ + DoTables: []*filter.Table{{Schema: "test", Name: "t1"}}, + }) + c.Assert(err, IsNil) _, backupSchemas, err := BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "test", "t1") + s.mock.Domain, s.mock.Storage, testFilter, math.MaxUint64) c.Assert(err, NotNil) c.Assert(backupSchemas, IsNil) // Database is not exist. + fooFilter, err := filter.New(false, &filter.Rules{ + DoTables: []*filter.Table{{Schema: "foo", Name: "t1"}}, + }) + c.Assert(err, IsNil) _, backupSchemas, err = BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "foo", "t1") + s.mock.Domain, s.mock.Storage, fooFilter, math.MaxUint64) c.Assert(err, NotNil) c.Assert(backupSchemas, IsNil) // Empty databse. 
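`BuildBackupRangeAndSchema` now selects tables through a tidb-tools `filter.Filter` instead of raw db/table name arguments. A sketch of constructing one, mirroring the rules used in the rewritten tests (an empty `Rules{}` matches every non-system table):

```go
package backup

import "github.com/pingcap/tidb-tools/pkg/filter"

// exampleTableFilter keeps only test.t1, as in the updated
// TestBuildBackupRangeAndSchema.
func exampleTableFilter() (*filter.Filter, error) {
	return filter.New(false, &filter.Rules{
		DoTables: []*filter.Table{{Schema: "test", Name: "t1"}},
	})
}
```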
- _, backupSchemas, err = BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "", "") - c.Assert(err, IsNil) - c.Assert(backupSchemas, NotNil) - c.Assert(backupSchemas.Len(), Equals, 0) - updateCh := make(chan struct{}, 2) - backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 1, updateCh) - schemas, err := backupSchemas.finishTableChecksum() + noFilter, err := filter.New(false, &filter.Rules{}) c.Assert(err, IsNil) - c.Assert(len(schemas), Equals, 0) + _, backupSchemas, err = BuildBackupRangeAndSchema( + s.mock.Domain, s.mock.Storage, noFilter, math.MaxUint64) + c.Assert(err, NotNil) + c.Assert(backupSchemas, IsNil) tk.MustExec("use test") tk.MustExec("drop table if exists t1;") @@ -63,15 +70,16 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { tk.MustExec("insert into t1 values (10);") _, backupSchemas, err = BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "test", "t1") + s.mock.Domain, s.mock.Storage, testFilter, math.MaxUint64) c.Assert(err, IsNil) c.Assert(backupSchemas.Len(), Equals, 1) + updateCh := make(chan struct{}, 2) backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 1, updateCh) - schemas, err = backupSchemas.finishTableChecksum() + schemas, err := backupSchemas.finishTableChecksum() <-updateCh c.Assert(err, IsNil) c.Assert(len(schemas), Equals, 1) - // MockCluster returns a dummy checksum (all fields are 1). + // Cluster returns a dummy checksum (all fields are 1). c.Assert(schemas[0].Crc64Xor, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalKvs, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalBytes, Not(Equals), 0, Commentf("%v", schemas[0])) @@ -82,7 +90,7 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { tk.MustExec("insert into t2 values (11);") _, backupSchemas, err = BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "", "") + s.mock.Domain, s.mock.Storage, noFilter, math.MaxUint64) c.Assert(err, IsNil) c.Assert(backupSchemas.Len(), Equals, 2) backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 2, updateCh) @@ -91,7 +99,7 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { <-updateCh c.Assert(err, IsNil) c.Assert(len(schemas), Equals, 2) - // MockCluster returns a dummy checksum (all fields are 1). + // Cluster returns a dummy checksum (all fields are 1). c.Assert(schemas[0].Crc64Xor, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalKvs, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalBytes, Not(Equals), 0, Commentf("%v", schemas[0])) diff --git a/pkg/checksum/executor.go b/pkg/checksum/executor.go index 2ca5cf66d..fac944fa0 100644 --- a/pkg/checksum/executor.go +++ b/pkg/checksum/executor.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package checksum import ( @@ -61,7 +63,7 @@ func buildChecksumRequest( reqs := make([]*kv.Request, 0, (len(newTable.Indices)+1)*(len(partDefs)+1)) var oldTableID int64 if oldTable != nil { - oldTableID = oldTable.Schema.ID + oldTableID = oldTable.Info.ID } rs, err := buildRequest(newTable, newTable.ID, oldTable, oldTableID, startTS) if err != nil { @@ -72,7 +74,7 @@ func buildChecksumRequest( for _, partDef := range partDefs { var oldPartID int64 if oldTable != nil { - for _, oldPartDef := range oldTable.Schema.Partition.Definitions { + for _, oldPartDef := range oldTable.Info.Partition.Definitions { if oldPartDef.Name == partDef.Name { oldPartID = oldPartDef.ID } @@ -108,7 +110,7 @@ func buildRequest( } var oldIndexInfo *model.IndexInfo if oldTable != nil { - for _, oldIndex := range oldTable.Schema.Indices { + for _, oldIndex := range oldTable.Info.Indices { if oldIndex.Name == indexInfo.Name { oldIndexInfo = oldIndex break @@ -117,7 +119,7 @@ func buildRequest( if oldIndexInfo == nil { log.Panic("index not found", zap.Reflect("table", tableInfo), - zap.Reflect("oldTable", oldTable.Schema), + zap.Reflect("oldTable", oldTable.Info), zap.Stringer("index", indexInfo.Name)) } } diff --git a/pkg/checksum/executor_test.go b/pkg/checksum/executor_test.go index ca68628e2..43c90761d 100644 --- a/pkg/checksum/executor_test.go +++ b/pkg/checksum/executor_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package checksum import ( @@ -12,6 +14,7 @@ import ( "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tipb/go-tipb" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) @@ -22,12 +25,12 @@ func TestT(t *testing.T) { var _ = Suite(&testChecksumSuite{}) type testChecksumSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testChecksumSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -61,7 +64,7 @@ func (s *testChecksumSuite) TestChecksum(c *C) { c.Assert(len(exe1.reqs), Equals, 1) resp, err := exe1.Execute(context.TODO(), s.mock.Storage.GetClient(), func() {}) c.Assert(err, IsNil) - // MockCluster returns a dummy checksum (all fields are 1). + // Cluster returns a dummy checksum (all fields are 1). c.Assert(resp.Checksum, Equals, uint64(1), Commentf("%v", resp)) c.Assert(resp.TotalKvs, Equals, uint64(1), Commentf("%v", resp)) c.Assert(resp.TotalBytes, Equals, uint64(1), Commentf("%v", resp)) @@ -83,7 +86,7 @@ func (s *testChecksumSuite) TestChecksum(c *C) { // Test rewrite rules tk.MustExec("alter table t1 add index i2(a);") tableInfo1 = s.getTableInfo(c, "test", "t1") - oldTable := utils.Table{Schema: tableInfo1} + oldTable := utils.Table{Info: tableInfo1} exe2, err = NewExecutorBuilder(tableInfo2, math.MaxUint64). SetOldTable(&oldTable).Build() c.Assert(err, IsNil) diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index 3695a2a0c..c7c4a9c9e 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -1,8 +1,11 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package conn import ( "bytes" "context" + "crypto/tls" "encoding/json" "fmt" "io" @@ -18,15 +21,18 @@ import ( "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" + + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/utils" ) const ( @@ -34,6 +40,7 @@ const ( clusterVersionPrefix = "pd/api/v1/config/cluster-version" regionCountPrefix = "pd/api/v1/stats/region" schdulerPrefix = "pd/api/v1/schedulers" + maxMsgSize = int(128 * utils.MB) // pd.ScanRegion may return a large response ) // Mgr manages connections to a TiDB cluster. @@ -43,12 +50,14 @@ type Mgr struct { addrs []string cli *http.Client } + tlsConf *tls.Config dom *domain.Domain storage tikv.Storage grpcClis struct { mu sync.Mutex clis map[uint64]*grpc.ClientConn } + ownsStorage bool } type pdHTTPRequest func(context.Context, string, string, *http.Client, string, io.Reader) ([]byte, error) @@ -57,9 +66,6 @@ func pdRequest( ctx context.Context, addr string, prefix string, cli *http.Client, method string, body io.Reader) ([]byte, error) { - if addr != "" && !strings.HasPrefix("http", addr) { - addr = "http://" + addr - } u, err := url.Parse(addr) if err != nil { return nil, errors.Trace(err) @@ -86,13 +92,90 @@ func pdRequest( return r, nil } +// StoreBehavior is the action to do in GetAllTiKVStores when a non-TiKV +// store (e.g. TiFlash store) is found. +type StoreBehavior uint8 + +const ( + // ErrorOnTiFlash causes GetAllTiKVStores to return error when the store is + // found to be a TiFlash node. + ErrorOnTiFlash StoreBehavior = 0 + // SkipTiFlash causes GetAllTiKVStores to skip the store when it is found to + // be a TiFlash node. + SkipTiFlash StoreBehavior = 1 + // TiFlashOnly caused GetAllTiKVStores to skip the store which is not a + // TiFlash node. + TiFlashOnly StoreBehavior = 2 +) + +// GetAllTiKVStores returns all TiKV stores registered to the PD client. The +// stores must not be a tombstone and must never contain a label `engine=tiflash`. +func GetAllTiKVStores( + ctx context.Context, + pdClient pd.Client, + storeBehavior StoreBehavior, +) ([]*metapb.Store, error) { + // get all live stores. + stores, err := pdClient.GetAllStores(ctx, pd.WithExcludeTombstone()) + if err != nil { + return nil, err + } + + // filter out all stores which are TiFlash. + j := 0 +skipStore: + for _, store := range stores { + var isTiFlash bool + for _, label := range store.Labels { + if label.Key == "engine" && label.Value == "tiflash" { + if storeBehavior == SkipTiFlash { + continue skipStore + } else if storeBehavior == ErrorOnTiFlash { + return nil, errors.Errorf( + "cannot restore to a cluster with active TiFlash stores (store %d at %s)", store.Id, store.Address) + } + isTiFlash = true + } + } + if !isTiFlash && storeBehavior == TiFlashOnly { + continue skipStore + } + stores[j] = store + j++ + } + return stores[:j], nil +} + // NewMgr creates a new Mgr. 
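`GetAllTiKVStores` gives each caller an explicit policy for mixed TiKV/TiFlash clusters. (Note that its doc comment, "must never contain a label `engine=tiflash`", predates the `TiFlashOnly` mode, which returns exactly those stores.) A short usage sketch of the behaviors, assuming an already-connected `pd.Client`:

```go
package conn

import (
	"context"

	pd "github.com/pingcap/pd/v4/client"
)

// listStores demonstrates the intended call sites: backup tolerates TiFlash
// stores by skipping them, while restore refuses to run at all when TiFlash
// replicas are active.
func listStores(ctx context.Context, pdClient pd.Client) error {
	tikvStores, err := GetAllTiKVStores(ctx, pdClient, SkipTiFlash)
	if err != nil {
		return err
	}
	_ = tikvStores // push backup tasks only to these

	// Restore path: fail fast on any active TiFlash store.
	if _, err := GetAllTiKVStores(ctx, pdClient, ErrorOnTiFlash); err != nil {
		return err
	}
	return nil
}
```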
-func NewMgr(ctx context.Context, pdAddrs string, storage tikv.Storage) (*Mgr, error) { +func NewMgr( + ctx context.Context, + g glue.Glue, + pdAddrs string, + storage tikv.Storage, + tlsConf *tls.Config, + securityOption pd.SecurityOption, + storeBehavior StoreBehavior, +) (*Mgr, error) { addrs := strings.Split(pdAddrs, ",") failure := errors.Errorf("pd address (%s) has wrong format", pdAddrs) cli := &http.Client{Timeout: 30 * time.Second} + if tlsConf != nil { + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = tlsConf + cli.Transport = transport + } + + processedAddrs := make([]string, 0, len(addrs)) for _, addr := range addrs { + if addr != "" && !strings.HasPrefix("http", addr) { + if tlsConf != nil { + addr = "https://" + addr + } else { + addr = "http://" + addr + } + } + processedAddrs = append(processedAddrs, addr) _, failure = pdRequest(ctx, addr, clusterVersionPrefix, cli, http.MethodGet, nil) // TODO need check cluster version >= 3.1 when br release if failure == nil { @@ -103,7 +186,12 @@ func NewMgr(ctx context.Context, pdAddrs string, storage tikv.Storage) (*Mgr, er return nil, errors.Annotatef(failure, "pd address (%s) not available, please check network", pdAddrs) } - pdClient, err := pd.NewClient(addrs, pd.SecurityOption{}) + maxCallMsgSize := []grpc.DialOption{ + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(maxMsgSize)), + } + pdClient, err := pd.NewClient( + addrs, securityOption, pd.WithGRPCDialOptions(maxCallMsgSize...)) if err != nil { log.Error("fail to create pd client", zap.Error(err)) return nil, err @@ -111,7 +199,7 @@ func NewMgr(ctx context.Context, pdAddrs string, storage tikv.Storage) (*Mgr, er log.Info("new mgr", zap.String("pdAddrs", pdAddrs)) // Check live tikv. - stores, err := pdClient.GetAllStores(ctx, pd.WithExcludeTombstone()) + stores, err := GetAllTiKVStores(ctx, pdClient, storeBehavior) if err != nil { log.Error("fail to get store", zap.Error(err)) return nil, err @@ -130,17 +218,19 @@ func NewMgr(ctx context.Context, pdAddrs string, storage tikv.Storage) (*Mgr, er return nil, errors.Errorf("tikv cluster not health %+v", stores) } - dom, err := session.BootstrapSession(storage) + dom, err := g.GetDomain(storage) if err != nil { return nil, errors.Trace(err) } mgr := &Mgr{ - pdClient: pdClient, - storage: storage, - dom: dom, + pdClient: pdClient, + storage: storage, + dom: dom, + tlsConf: tlsConf, + ownsStorage: g.OwnsStorage(), } - mgr.pdHTTP.addrs = addrs + mgr.pdHTTP.addrs = processedAddrs mgr.pdHTTP.cli = cli mgr.grpcClis.clis = make(map[uint64]*grpc.ClientConn) return mgr, nil @@ -216,6 +306,9 @@ func (mgr *Mgr) getGrpcConnLocked(ctx context.Context, storeID uint64) (*grpc.Cl return nil, errors.Trace(err) } opt := grpc.WithInsecure() + if mgr.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(mgr.tlsConf)) + } ctx, cancel := context.WithTimeout(ctx, dialTimeout) keepAlive := 10 keepAliveTimeout := 3 @@ -268,6 +361,11 @@ func (mgr *Mgr) GetTiKV() tikv.Storage { return mgr.storage } +// GetTLSConfig returns the tls config +func (mgr *Mgr) GetTLSConfig() *tls.Config { + return mgr.tlsConf +} + // GetLockResolver gets the LockResolver. func (mgr *Mgr) GetLockResolver() *tikv.LockResolver { return mgr.storage.GetLockResolver() @@ -348,9 +446,14 @@ func (mgr *Mgr) Close() { // Gracefully shutdown domain so it does not affect other TiDB DDL. 
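One thing worth flagging in the hunk above: the scheme check keeps the preexisting argument order `strings.HasPrefix("http", addr)`, which asks whether the literal `"http"` starts with `addr` rather than the reverse, so any address that already carries a scheme is still re-prefixed. A corrected sketch of the presumable intent:

```go
package conn

import (
	"crypto/tls"
	"strings"
)

// normalizePDAddr prepends a scheme matching the TLS configuration.
// Note the argument order: the address first, the prefix second.
func normalizePDAddr(addr string, tlsConf *tls.Config) string {
	if addr == "" || strings.HasPrefix(addr, "http") {
		return addr
	}
	if tlsConf != nil {
		return "https://" + addr
	}
	return "http://" + addr
}
```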
// Must close domain before closing storage, otherwise it gets stuck forever. - mgr.dom.Close() + if mgr.ownsStorage { + if mgr.dom != nil { + mgr.dom.Close() + } + + atomic.StoreUint32(&tikv.ShuttingDown, 1) + mgr.storage.Close() + } - atomic.StoreUint32(&tikv.ShuttingDown, 1) - mgr.storage.Close() mgr.pdClient.Close() } diff --git a/pkg/conn/conn_test.go b/pkg/conn/conn_test.go index 90516ae92..26278035b 100644 --- a/pkg/conn/conn_test.go +++ b/pkg/conn/conn_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package conn import ( @@ -13,8 +15,9 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/pd/server/core" - "github.com/pingcap/pd/server/statistics" + pd "github.com/pingcap/pd/v4/client" + "github.com/pingcap/pd/v4/server/core" + "github.com/pingcap/pd/v4/server/statistics" "github.com/pingcap/tidb/util/codec" ) @@ -147,3 +150,102 @@ func (s *testClientSuite) TestRegionCount(c *C) { c.Assert(err, IsNil) c.Assert(resp, Equals, 2) } + +type fakePDClient struct { + pd.Client + stores []*metapb.Store +} + +func (fpdc fakePDClient) GetAllStores(context.Context, ...pd.GetStoreOption) ([]*metapb.Store, error) { + return append([]*metapb.Store{}, fpdc.stores...), nil +} + +func (s *testClientSuite) TestGetAllTiKVStores(c *C) { + testCases := []struct { + stores []*metapb.Store + storeBehavior StoreBehavior + expectedStores map[uint64]int + expectedError string + }{ + { + stores: []*metapb.Store{ + {Id: 1}, + }, + storeBehavior: SkipTiFlash, + expectedStores: map[uint64]int{1: 1}, + }, + { + stores: []*metapb.Store{ + {Id: 1}, + }, + storeBehavior: ErrorOnTiFlash, + expectedStores: map[uint64]int{1: 1}, + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + }, + storeBehavior: SkipTiFlash, + expectedStores: map[uint64]int{1: 1}, + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + }, + storeBehavior: ErrorOnTiFlash, + expectedError: "cannot restore to a cluster with active TiFlash stores.*", + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + {Id: 3}, + {Id: 4, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tikv"}}}, + {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, + {Id: 6, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, + }, + storeBehavior: SkipTiFlash, + expectedStores: map[uint64]int{1: 1, 3: 1, 4: 1, 6: 1}, + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + {Id: 3}, + {Id: 4, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tikv"}}}, + {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, + {Id: 6, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, + }, + storeBehavior: ErrorOnTiFlash, + expectedError: "cannot restore to a cluster with active TiFlash stores.*", + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + {Id: 3}, + {Id: 4, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tikv"}}}, + {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, + {Id: 6, 
Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, + }, + storeBehavior: TiFlashOnly, + expectedStores: map[uint64]int{2: 1, 5: 1}, + }, + } + + for _, testCase := range testCases { + pdClient := fakePDClient{stores: testCase.stores} + stores, err := GetAllTiKVStores(context.Background(), pdClient, testCase.storeBehavior) + if len(testCase.expectedError) != 0 { + c.Assert(err, ErrorMatches, testCase.expectedError) + continue + } + foundStores := make(map[uint64]int) + for _, store := range stores { + foundStores[store.Id]++ + } + c.Assert(foundStores, DeepEquals, testCase.expectedStores) + } +} diff --git a/pkg/glue/glue.go b/pkg/glue/glue.go new file mode 100644 index 000000000..f2f3ff55e --- /dev/null +++ b/pkg/glue/glue.go @@ -0,0 +1,32 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package glue + +import ( + "context" + + "github.com/pingcap/parser/model" + pd "github.com/pingcap/pd/v4/client" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" +) + +// Glue is an abstraction of TiDB function calls used in BR. +type Glue interface { + GetDomain(store kv.Storage) (*domain.Domain, error) + CreateSession(store kv.Storage) (Session, error) + Open(path string, option pd.SecurityOption) (kv.Storage, error) + + // OwnsStorage returns whether the storage returned by Open() is owned + // If this method returns false, the connection manager will never close the storage. + OwnsStorage() bool +} + +// Session is an abstraction of the session.Session interface. +type Session interface { + Execute(ctx context.Context, sql string) error + ShowCreateDatabase(schema *model.DBInfo) (string, error) + ShowCreateTable(table *model.TableInfo, allocator autoid.Allocator) (string, error) + Close() +} diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go new file mode 100644 index 000000000..80756d2c2 --- /dev/null +++ b/pkg/gluetidb/glue.go @@ -0,0 +1,81 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package gluetidb + +import ( + "bytes" + "context" + + "github.com/pingcap/parser/model" + pd "github.com/pingcap/pd/v4/client" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/session" + + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/gluetikv" +) + +// Glue is an implementation of glue.Glue using a new TiDB session. 
+type Glue struct { + tikvGlue gluetikv.Glue +} + +type tidbSession struct { + se session.Session +} + +// GetDomain implements glue.Glue +func (Glue) GetDomain(store kv.Storage) (*domain.Domain, error) { + return session.GetDomain(store) +} + +// CreateSession implements glue.Glue +func (Glue) CreateSession(store kv.Storage) (glue.Session, error) { + se, err := session.CreateSession(store) + if err != nil { + return nil, err + } + return &tidbSession{se: se}, nil +} + +// Open implements glue.Glue +func (g Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { + return g.tikvGlue.Open(path, option) +} + +// OwnsStorage implements glue.Glue +func (Glue) OwnsStorage() bool { + return true +} + +// Execute implements glue.Session +func (gs *tidbSession) Execute(ctx context.Context, sql string) error { + _, err := gs.se.Execute(ctx, sql) + return err +} + +// ShowCreateDatabase implements glue.Session +func (gs *tidbSession) ShowCreateDatabase(schema *model.DBInfo) (string, error) { + var buf bytes.Buffer + if err := executor.ConstructResultOfShowCreateDatabase(gs.se, schema, true, &buf); err != nil { + return "", err + } + return buf.String(), nil +} + +// ShowCreateTable implements glue.Session +func (gs *tidbSession) ShowCreateTable(table *model.TableInfo, allocator autoid.Allocator) (string, error) { + var buf bytes.Buffer + if err := executor.ConstructResultOfShowCreateTable(gs.se, table, allocator, &buf); err != nil { + return "", err + } + return buf.String(), nil +} + +// Close implements glue.Session +func (gs *tidbSession) Close() { + gs.se.Close() +} diff --git a/pkg/gluetikv/glue.go b/pkg/gluetikv/glue.go new file mode 100644 index 000000000..e63b35b95 --- /dev/null +++ b/pkg/gluetikv/glue.go @@ -0,0 +1,43 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package gluetikv + +import ( + pd "github.com/pingcap/pd/v4/client" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv" + + "github.com/pingcap/br/pkg/glue" +) + +// Glue is an implementation of glue.Glue that accesses only TiKV without TiDB. +type Glue struct{} + +// GetDomain implements glue.Glue +func (Glue) GetDomain(store kv.Storage) (*domain.Domain, error) { + return nil, nil +} + +// CreateSession implements glue.Glue +func (Glue) CreateSession(store kv.Storage) (glue.Session, error) { + return nil, nil +} + +// Open implements glue.Glue +func (Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { + if option.CAPath != "" { + conf := config.GetGlobalConfig() + conf.Security.ClusterSSLCA = option.CAPath + conf.Security.ClusterSSLCert = option.CertPath + conf.Security.ClusterSSLKey = option.KeyPath + config.StoreGlobalConfig(conf) + } + return tikv.Driver{}.Open(path) +} + +// OwnsStorage implements glue.Glue +func (Glue) OwnsStorage() bool { + return true +} diff --git a/pkg/utils/mock_cluster.go b/pkg/mock/mock_cluster.go similarity index 90% rename from pkg/utils/mock_cluster.go rename to pkg/mock/mock_cluster.go index dc7b87c3c..f43702ed3 100644 --- a/pkg/utils/mock_cluster.go +++ b/pkg/mock/mock_cluster.go @@ -1,4 +1,6 @@ -package utils +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
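The two new glue packages split cleanly: `gluetidb` bootstraps a real TiDB session and domain, while `gluetikv` returns `nil` for both and only knows how to open a TiKV storage (note how `NewDB` and `Mgr.Close` elsewhere in this patch guard against the nil session and domain). A hypothetical selector, not part of the patch, to make the intent concrete:

```go
package example

import (
	"github.com/pingcap/br/pkg/glue"
	"github.com/pingcap/br/pkg/gluetidb"
	"github.com/pingcap/br/pkg/gluetikv"
)

// pickGlue: SQL-level backup/restore needs the TiDB-backed glue; raw KV
// tasks can run against the TiKV-only glue, whose GetDomain and
// CreateSession deliberately return nil.
func pickGlue(rawKV bool) glue.Glue {
	if rawKV {
		return gluetikv.Glue{}
	}
	return gluetidb.Glue{}
}
```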
+ +package mock import ( "database/sql" @@ -14,8 +16,8 @@ import ( "github.com/go-sql-driver/mysql" "github.com/pingcap/log" - pd "github.com/pingcap/pd/client" - "github.com/pingcap/pd/pkg/tempurl" + pd "github.com/pingcap/pd/v4/client" + "github.com/pingcap/pd/v4/pkg/tempurl" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" @@ -28,8 +30,8 @@ import ( var pprofOnce sync.Once -// MockCluster is mock tidb cluster, includes tikv and pd. -type MockCluster struct { +// Cluster is mock tidb cluster, includes tikv and pd. +type Cluster struct { *server.Server *mocktikv.Cluster mocktikv.MVCCStore @@ -40,8 +42,8 @@ type MockCluster struct { PDClient pd.Client } -// NewMockCluster create a new mock cluster. -func NewMockCluster() (*MockCluster, error) { +// NewCluster create a new mock cluster. +func NewCluster() (*Cluster, error) { pprofOnce.Do(func() { go func() { // Make sure pprof is registered. @@ -72,7 +74,7 @@ func NewMockCluster() (*MockCluster, error) { if err != nil { return nil, err } - return &MockCluster{ + return &Cluster{ Cluster: cluster, MVCCStore: mvccStore, Storage: storage, @@ -82,7 +84,7 @@ func NewMockCluster() (*MockCluster, error) { } // Start runs a mock cluster -func (mock *MockCluster) Start() error { +func (mock *Cluster) Start() error { statusURL, err := url.Parse(tempurl.Alloc()) if err != nil { return err @@ -124,7 +126,7 @@ func (mock *MockCluster) Start() error { } // Stop stops a mock cluster -func (mock *MockCluster) Stop() { +func (mock *Cluster) Stop() { if mock.Domain != nil { mock.Domain.Close() } diff --git a/pkg/mock/mock_cluster_test.go b/pkg/mock/mock_cluster_test.go new file mode 100644 index 000000000..1db0f5a8c --- /dev/null +++ b/pkg/mock/mock_cluster_test.go @@ -0,0 +1,29 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package mock + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testClusterSuite{}) + +type testClusterSuite struct { + mock *Cluster +} + +func (s *testClusterSuite) SetUpSuite(c *C) { + var err error + s.mock, err = NewCluster() + c.Assert(err, IsNil) +} + +func (s *testClusterSuite) TearDownSuite(c *C) { + testleak.AfterTest(c)() +} + +func (s *testClusterSuite) TestSmoke(c *C) { + c.Assert(s.mock.Start(), IsNil) + s.mock.Stop() +} diff --git a/pkg/restore/backoff.go b/pkg/restore/backoff.go new file mode 100644 index 000000000..21048dd13 --- /dev/null +++ b/pkg/restore/backoff.go @@ -0,0 +1,118 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ +package restore + +import ( + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/log" + "go.uber.org/zap" + + "github.com/pingcap/br/pkg/utils" +) + +var ( + errEpochNotMatch = errors.NewNoStackError("epoch not match") + errKeyNotInRegion = errors.NewNoStackError("key not in region") + errRegionNotFound = errors.NewNoStackError("region not found") + errResp = errors.NewNoStackError("response error") + errRewriteRuleNotFound = errors.NewNoStackError("rewrite rule not found") + errRangeIsEmpty = errors.NewNoStackError("range is empty") + errGrpc = errors.NewNoStackError("gRPC error") + + // TODO: add `error` field to `DownloadResponse` for distinguish the errors of gRPC + // and the errors of request + errBadFormat = errors.NewNoStackError("bad format") + errWrongKeyPrefix = errors.NewNoStackError("wrong key prefix") + errFileCorrupted = errors.NewNoStackError("file corrupted") + errCannotRead = errors.NewNoStackError("cannot read externel storage") +) + +const ( + importSSTRetryTimes = 16 + importSSTWaitInterval = 10 * time.Millisecond + importSSTMaxWaitInterval = 1 * time.Second + + downloadSSTRetryTimes = 8 + downloadSSTWaitInterval = 10 * time.Millisecond + downloadSSTMaxWaitInterval = 1 * time.Second + + resetTsRetryTime = 16 + resetTSWaitInterval = 50 * time.Millisecond + resetTSMaxWaitInterval = 500 * time.Millisecond +) + +type importerBackoffer struct { + attempt int + delayTime time.Duration + maxDelayTime time.Duration +} + +func newImportSSTBackoffer() utils.Backoffer { + return &importerBackoffer{ + attempt: importSSTRetryTimes, + delayTime: importSSTWaitInterval, + maxDelayTime: importSSTMaxWaitInterval, + } +} + +func newDownloadSSTBackoffer() utils.Backoffer { + return &importerBackoffer{ + attempt: downloadSSTRetryTimes, + delayTime: downloadSSTWaitInterval, + maxDelayTime: downloadSSTMaxWaitInterval, + } +} + +func (bo *importerBackoffer) NextBackoff(err error) time.Duration { + switch errors.Cause(err) { + case errResp, errGrpc, errEpochNotMatch: + bo.delayTime = 2 * bo.delayTime + bo.attempt-- + case errRangeIsEmpty, errRewriteRuleNotFound: + // Excepted error, finish the operation + bo.delayTime = 0 + bo.attempt = 0 + default: + // Unexcepted error + bo.delayTime = 0 + bo.attempt = 0 + log.Warn("unexcepted error, stop to retry", zap.Error(err)) + } + if bo.delayTime > bo.maxDelayTime { + return bo.maxDelayTime + } + return bo.delayTime +} + +func (bo *importerBackoffer) Attempt() int { + return bo.attempt +} + +type pdReqBackoffer struct { + attempt int + delayTime time.Duration + maxDelayTime time.Duration +} + +func newPDReqBackoffer() utils.Backoffer { + return &pdReqBackoffer{ + attempt: resetTsRetryTime, + delayTime: resetTSWaitInterval, + maxDelayTime: resetTSMaxWaitInterval, + } +} + +func (bo *pdReqBackoffer) NextBackoff(err error) time.Duration { + bo.delayTime = 2 * bo.delayTime + bo.attempt-- + if bo.delayTime > bo.maxDelayTime { + return bo.maxDelayTime + } + return bo.delayTime +} + +func (bo *pdReqBackoffer) Attempt() int { + return bo.attempt +} diff --git a/pkg/restore/backoff_test.go b/pkg/restore/backoff_test.go new file mode 100644 index 000000000..11accedd2 --- /dev/null +++ b/pkg/restore/backoff_test.go @@ -0,0 +1,61 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package restore + +import ( + "context" + "time" + + . 
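Since `NextBackoff` doubles the stored delay on every retriable error and only caps the value it returns, the effective `importSST` schedule is 20ms, 40ms, 80ms, ... flattening out at the 1s ceiling for the remainder of the 16 attempts. A small in-package restatement of that schedule, using the constants defined in `backoff.go` above:

```go
package restore

import "time"

// importSSTSchedule reproduces what importerBackoffer.NextBackoff returns
// for an uninterrupted run of retriable errors such as errResp or errGrpc.
func importSSTSchedule() []time.Duration {
	delay := importSSTWaitInterval
	out := make([]time.Duration, 0, importSSTRetryTimes)
	for attempt := importSSTRetryTimes; attempt > 0; attempt-- {
		delay *= 2 // doubled before it is handed back, so the first wait is 20ms
		if delay > importSSTMaxWaitInterval {
			out = append(out, importSSTMaxWaitInterval)
		} else {
			out = append(out, delay)
		}
	}
	return out
}
```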
"github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" + + "github.com/pingcap/br/pkg/mock" + "github.com/pingcap/br/pkg/utils" +) + +var _ = Suite(&testBackofferSuite{}) + +type testBackofferSuite struct { + mock *mock.Cluster +} + +func (s *testBackofferSuite) SetUpSuite(c *C) { + var err error + s.mock, err = mock.NewCluster() + c.Assert(err, IsNil) +} + +func (s *testBackofferSuite) TearDownSuite(c *C) { + testleak.AfterTest(c)() +} + +func (s *testBackofferSuite) TestImporterBackoffer(c *C) { + var counter int + err := utils.WithRetry(context.Background(), func() error { + defer func() { counter++ }() + switch counter { + case 0: + return errGrpc + case 1: + return errResp + case 2: + return errRangeIsEmpty + } + return nil + }, newImportSSTBackoffer()) + c.Assert(counter, Equals, 3) + c.Assert(err, Equals, errRangeIsEmpty) + + counter = 0 + backoffer := importerBackoffer{ + attempt: 10, + delayTime: time.Nanosecond, + maxDelayTime: time.Nanosecond, + } + err = utils.WithRetry(context.Background(), func() error { + defer func() { counter++ }() + return errResp + }, &backoffer) + c.Assert(counter, Equals, 10) + c.Assert(err, Equals, errResp) +} diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 9714edc2a..2453f2974 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -1,79 +1,99 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( + "bytes" "context" + "crypto/tls" + "encoding/hex" + "encoding/json" "fmt" "math" + "sort" + "strconv" "sync" "time" + "github.com/gogo/protobuf/proto" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - pd "github.com/pingcap/pd/client" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" + pd "github.com/pingcap/pd/v4/client" + "github.com/pingcap/pd/v4/server/schedule/placement" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "github.com/pingcap/br/pkg/checksum" + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" ) -const ( - resetTsRetryTime = 16 - resetTSWaitInterval = 50 * time.Millisecond - resetTSMaxWaitInterval = 500 * time.Millisecond - - // defaultChecksumConcurrency is the default number of the concurrent - // checksum tasks. - defaultChecksumConcurrency = 64 -) +// defaultChecksumConcurrency is the default number of the concurrent +// checksum tasks. 
+const defaultChecksumConcurrency = 64 // Client sends requests to restore files type Client struct { ctx context.Context cancel context.CancelFunc - pdClient pd.Client - fileImporter FileImporter - workerPool *utils.WorkerPool - tableWorkerPool *utils.WorkerPool + pdClient pd.Client + toolClient SplitClient + fileImporter FileImporter + workerPool *utils.WorkerPool + tlsConf *tls.Config databases map[string]*utils.Database + ddlJobs []*model.Job backupMeta *backup.BackupMeta db *DB rateLimit uint64 isOnline bool hasSpeedLimited bool + + restoreStores []uint64 + + storage storage.ExternalStorage + backend *backup.StorageBackend } // NewRestoreClient returns a new RestoreClient func NewRestoreClient( ctx context.Context, + g glue.Glue, pdClient pd.Client, store kv.Storage, + tlsConf *tls.Config, ) (*Client, error) { ctx, cancel := context.WithCancel(ctx) - db, err := NewDB(store) + db, err := NewDB(g, store) if err != nil { cancel() return nil, errors.Trace(err) } return &Client{ - ctx: ctx, - cancel: cancel, - pdClient: pdClient, - tableWorkerPool: utils.NewWorkerPool(128, "table"), - db: db, + ctx: ctx, + cancel: cancel, + pdClient: pdClient, + toolClient: NewSplitClient(pdClient, tlsConf), + db: db, + tlsConf: tlsConf, }, nil } @@ -82,6 +102,17 @@ func (rc *Client) SetRateLimit(rateLimit uint64) { rc.rateLimit = rateLimit } +// SetStorage set ExternalStorage for client +func (rc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) error { + var err error + rc.storage, err = storage.Create(ctx, backend, sendCreds) + if err != nil { + return err + } + rc.backend = backend + return nil +} + // GetPDClient returns a pd client. func (rc *Client) GetPDClient() pd.Client { return rc.pdClient @@ -94,26 +125,97 @@ func (rc *Client) IsOnline() bool { // Close a client func (rc *Client) Close() { - rc.db.Close() + // rc.db can be nil in raw kv mode. + if rc.db != nil { + rc.db.Close() + } rc.cancel() log.Info("Restore client closed") } // InitBackupMeta loads schemas from BackupMeta to initialize RestoreClient func (rc *Client) InitBackupMeta(backupMeta *backup.BackupMeta, backend *backup.StorageBackend) error { - databases, err := utils.LoadBackupTables(backupMeta) - if err != nil { - return errors.Trace(err) + if !backupMeta.IsRawKv { + databases, err := utils.LoadBackupTables(backupMeta) + if err != nil { + return errors.Trace(err) + } + rc.databases = databases + + var ddlJobs []*model.Job + err = json.Unmarshal(backupMeta.GetDdls(), &ddlJobs) + if err != nil { + return errors.Trace(err) + } + rc.ddlJobs = ddlJobs } - rc.databases = databases rc.backupMeta = backupMeta + log.Info("load backupmeta", zap.Int("databases", len(rc.databases)), zap.Int("jobs", len(rc.ddlJobs))) - metaClient := restore_util.NewClient(rc.pdClient) - importClient := NewImportClient(metaClient) - rc.fileImporter = NewFileImporter(rc.ctx, metaClient, importClient, backend, rc.rateLimit) + metaClient := NewSplitClient(rc.pdClient, rc.tlsConf) + importClient := NewImportClient(metaClient, rc.tlsConf) + rc.fileImporter = NewFileImporter(rc.ctx, metaClient, importClient, backend, backupMeta.IsRawKv, rc.rateLimit) return nil } +// IsRawKvMode checks whether the backup data is in raw kv format, in which case transactional recover is forbidden. +func (rc *Client) IsRawKvMode() bool { + return rc.backupMeta.IsRawKv +} + +// GetFilesInRawRange gets all files that are in the given range or intersects with the given range. 
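`SetStorage` has to run before the backup meta is acted on: `RemoveTiFlashReplica` later rewrites `backupmeta` through `rc.storage`, and `InitBackupMeta` is what fans the meta out into schemas and DDL jobs. A minimal wiring sketch, assuming the meta was already fetched and unmarshalled by the caller:

```go
package restore

import (
	"context"

	"github.com/pingcap/kvproto/pkg/backup"
)

// initClient shows the call order the task layer is expected to follow.
func initClient(ctx context.Context, rc *Client, backend *backup.StorageBackend, meta *backup.BackupMeta) error {
	// Attach external storage first; it is needed if the backupmeta must be
	// rewritten (e.g. when TiFlash replica counts are stripped).
	if err := rc.SetStorage(ctx, backend, true /* sendCreds */); err != nil {
		return err
	}
	// Loads databases and DDL jobs for transactional backups, or just keeps
	// the raw ranges when backupMeta.IsRawKv is set.
	return rc.InitBackupMeta(meta, backend)
}
```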
+func (rc *Client) GetFilesInRawRange(startKey []byte, endKey []byte, cf string) ([]*backup.File, error) { + if !rc.IsRawKvMode() { + return nil, errors.New("the backup data is not in raw kv mode") + } + + for _, rawRange := range rc.backupMeta.RawRanges { + // First check whether the given range is backup-ed. If not, we cannot perform the restore. + if rawRange.Cf != cf { + continue + } + + if (len(rawRange.EndKey) > 0 && bytes.Compare(startKey, rawRange.EndKey) >= 0) || + (len(endKey) > 0 && bytes.Compare(rawRange.StartKey, endKey) >= 0) { + // The restoring range is totally out of the current range. Skip it. + continue + } + + if bytes.Compare(startKey, rawRange.StartKey) < 0 || + utils.CompareEndKey(endKey, rawRange.EndKey) > 0 { + // Only partial of the restoring range is in the current backup-ed range. So the given range can't be fully + // restored. + return nil, errors.New("the given range to restore is not fully covered by the range that was backed up") + } + + // We have found the range that contains the given range. Find all necessary files. + files := make([]*backup.File, 0) + + for _, file := range rc.backupMeta.Files { + if file.Cf != cf { + continue + } + + if len(file.EndKey) > 0 && bytes.Compare(file.EndKey, startKey) < 0 { + // The file is before the range to be restored. + continue + } + if len(endKey) > 0 && bytes.Compare(endKey, file.StartKey) <= 0 { + // The file is after the range to be restored. + // The specified endKey is exclusive, so when it equals to a file's startKey, the file is still skipped. + continue + } + + files = append(files, file) + } + + // There should be at most one backed up range that covers the restoring range. + return files, nil + } + + return nil, errors.New("no backup data in the range") +} + // SetConcurrency sets the concurrency of dbs tables files func (rc *Client) SetConcurrency(c uint) { rc.workerPool = utils.NewWorkerPool(c, "file") @@ -124,6 +226,11 @@ func (rc *Client) EnableOnline() { rc.isOnline = true } +// GetTLSConfig returns the tls config +func (rc *Client) GetTLSConfig() *tls.Config { + return rc.tlsConf +} + // GetTS gets a new timestamp from PD func (rc *Client) GetTS(ctx context.Context) (uint64, error) { p, l, err := rc.pdClient.GetTS(ctx) @@ -139,13 +246,25 @@ func (rc *Client) ResetTS(pdAddrs []string) error { restoreTS := rc.backupMeta.GetEndVersion() log.Info("reset pd timestamp", zap.Uint64("ts", restoreTS)) i := 0 - return withRetry(func() error { + return utils.WithRetry(rc.ctx, func() error { + idx := i % len(pdAddrs) + i++ + return utils.ResetTS(pdAddrs[idx], restoreTS, rc.tlsConf) + }, newPDReqBackoffer()) +} + +// GetPlacementRules return the current placement rules +func (rc *Client) GetPlacementRules(pdAddrs []string) ([]placement.Rule, error) { + var placementRules []placement.Rule + i := 0 + errRetry := utils.WithRetry(rc.ctx, func() error { + var err error idx := i % len(pdAddrs) - return utils.ResetTS(pdAddrs[idx], restoreTS) - }, func(e error) bool { i++ - return true - }, resetTsRetryTime, resetTSWaitInterval, resetTSMaxWaitInterval) + placementRules, err = utils.GetPlacementRules(pdAddrs[idx], rc.tlsConf) + return err + }, newPDReqBackoffer()) + return placementRules, errRetry } // GetDatabases returns all databases. @@ -162,6 +281,11 @@ func (rc *Client) GetDatabase(name string) *utils.Database { return rc.databases[name] } +// GetDDLJobs returns ddl jobs +func (rc *Client) GetDDLJobs() []*model.Job { + return rc.ddlJobs +} + // GetTableSchema returns the schema of a table from TiDB. 
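The coverage check in `GetFilesInRawRange` leans on the convention that an empty end key means "+infinity", which is why it compares end keys with `utils.CompareEndKey` instead of `bytes.Compare`. A pure-function restatement of the same check, with no client state:

```go
package restore

import "bytes"

// rangeFullyCovered reports whether [startKey, endKey) fits entirely inside
// the backed-up range [rangeStart, rangeEnd), treating an empty end key as
// +inf on either side.
func rangeFullyCovered(startKey, endKey, rangeStart, rangeEnd []byte) bool {
	if bytes.Compare(startKey, rangeStart) < 0 {
		return false // the restore range starts before the backed-up range
	}
	if len(rangeEnd) == 0 {
		return true // the backed-up range extends to +inf
	}
	// A finite backed-up end requires a finite, no-larger restore end.
	return len(endKey) != 0 && bytes.Compare(endKey, rangeEnd) <= 0
}
```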
func (rc *Client) GetTableSchema( dom *domain.Domain, @@ -189,8 +313,8 @@ func (rc *Client) CreateTables( dom *domain.Domain, tables []*utils.Table, newTS uint64, -) (*restore_util.RewriteRules, []*model.TableInfo, error) { - rewriteRules := &restore_util.RewriteRules{ +) (*RewriteRules, []*model.TableInfo, error) { + rewriteRules := &RewriteRules{ Table: make([]*import_sstpb.RewriteRule, 0), Data: make([]*import_sstpb.RewriteRule, 0), } @@ -200,11 +324,11 @@ func (rc *Client) CreateTables( if err != nil { return nil, nil, err } - newTableInfo, err := rc.GetTableSchema(dom, table.Db.Name, table.Schema.Name) + newTableInfo, err := rc.GetTableSchema(dom, table.Db.Name, table.Info.Name) if err != nil { return nil, nil, err } - rules := GetRewriteRules(newTableInfo, table.Schema, newTS) + rules := GetRewriteRules(newTableInfo, table.Info, newTS) rewriteRules.Table = append(rewriteRules.Table, rules.Table...) rewriteRules.Data = append(rewriteRules.Data, rules.Data...) newTables = append(newTables, newTableInfo) @@ -212,9 +336,97 @@ func (rc *Client) CreateTables( return rewriteRules, newTables, nil } +// RemoveTiFlashReplica removes all the tiflash replicas of a table +// TODO: remove this after tiflash supports restore +func (rc *Client) RemoveTiFlashReplica(tables []*utils.Table, placementRules []placement.Rule) error { + schemas := make([]*backup.Schema, 0, len(tables)) + var updateReplica bool + for _, table := range tables { + if rule := utils.SearchPlacementRule(table.Info.ID, placementRules, placement.Learner); rule != nil { + table.TiFlashReplicas = rule.Count + updateReplica = true + } + tableData, err := json.Marshal(table.Info) + if err != nil { + return errors.Trace(err) + } + dbData, err := json.Marshal(table.Db) + if err != nil { + return errors.Trace(err) + } + schemas = append(schemas, &backup.Schema{ + Db: dbData, + Table: tableData, + Crc64Xor: table.Crc64Xor, + TotalKvs: table.TotalKvs, + TotalBytes: table.TotalBytes, + TiflashReplicas: uint32(table.TiFlashReplicas), + }) + } + + if updateReplica { + // Update backup meta + rc.backupMeta.Schemas = schemas + backupMetaData, err := proto.Marshal(rc.backupMeta) + if err != nil { + return errors.Trace(err) + } + backendURL := storage.FormatBackendURL(rc.backend) + log.Info("update backup meta", zap.Stringer("path", &backendURL)) + err = rc.storage.Write(rc.ctx, utils.SavedMetaFile, backupMetaData) + if err != nil { + return errors.Trace(err) + } + } + + for _, table := range tables { + if table.TiFlashReplicas > 0 { + err := rc.db.AlterTiflashReplica(rc.ctx, table, 0) + if err != nil { + return errors.Trace(err) + } + } + } + return nil +} + +// RecoverTiFlashReplica recovers all the tiflash replicas of a table +// TODO: remove this after tiflash supports restore +func (rc *Client) RecoverTiFlashReplica(tables []*utils.Table) error { + for _, table := range tables { + if table.TiFlashReplicas > 0 { + err := rc.db.AlterTiflashReplica(rc.ctx, table, table.TiFlashReplicas) + if err != nil { + return errors.Trace(err) + } + } + } + return nil +} + +// ExecDDLs executes the queries of the ddl jobs. +func (rc *Client) ExecDDLs(ddlJobs []*model.Job) error { + // Sort the ddl jobs by schema version in ascending order. 
+ sort.Slice(ddlJobs, func(i, j int) bool { + return ddlJobs[i].BinlogInfo.SchemaVersion < ddlJobs[j].BinlogInfo.SchemaVersion + }) + + for _, job := range ddlJobs { + err := rc.db.ExecDDL(rc.ctx, job) + if err != nil { + return errors.Trace(err) + } + log.Info("execute ddl query", + zap.String("db", job.SchemaName), + zap.String("query", job.Query), + zap.Int64("historySchemaVersion", job.BinlogInfo.SchemaVersion)) + } + return nil +} + func (rc *Client) setSpeedLimit() error { if !rc.hasSpeedLimited && rc.rateLimit != 0 { - stores, err := rc.pdClient.GetAllStores(rc.ctx, pd.WithExcludeTombstone()) + stores, err := conn.GetAllTiKVStores(rc.ctx, rc.pdClient, conn.SkipTiFlash) if err != nil { return err } @@ -229,31 +441,26 @@ func (rc *Client) setSpeedLimit() error { return nil } -// RestoreTable tries to restore the data of a table. -func (rc *Client) RestoreTable( - table *utils.Table, - rewriteRules *restore_util.RewriteRules, +// RestoreFiles tries to restore the files. +func (rc *Client) RestoreFiles( + files []*backup.File, + rewriteRules *RewriteRules, updateCh chan<- struct{}, ) (err error) { start := time.Now() defer func() { elapsed := time.Since(start) - log.Info("restore table", - zap.Stringer("table", table.Schema.Name), zap.Duration("take", elapsed)) - key := fmt.Sprintf("%s.%s", table.Db.Name.String(), table.Schema.Name.String()) - if err != nil { - summary.CollectFailureUnit(key, err) - } else { - summary.CollectSuccessUnit(key, elapsed) + if err == nil { + log.Info("Restore Files", + zap.Int("files", len(files)), zap.Duration("take", elapsed)) + summary.CollectSuccessUnit("files", len(files), elapsed) } }() - log.Debug("start to restore table", - zap.Stringer("table", table.Schema.Name), - zap.Stringer("db", table.Db.Name), - zap.Array("files", files(table.Files)), + log.Debug("start to restore files", + zap.Int("files", len(files)), ) - errCh := make(chan error, len(table.Files)) + errCh := make(chan error, len(files)) wg := new(sync.WaitGroup) defer close(errCh) err = rc.setSpeedLimit() @@ -261,7 +468,7 @@ func (rc *Client) RestoreTable( return err } - for _, file := range table.Files { + for _, file := range files { wg.Add(1) fileReplica := file rc.workerPool.Apply( @@ -275,99 +482,76 @@ func (rc *Client) RestoreTable( } }) } - for range table.Files { + for i := range files { err := <-errCh if err != nil { + summary.CollectFailureUnit(fmt.Sprintf("file:%d", i), err) rc.cancel() wg.Wait() log.Error( - "restore table failed", - zap.Stringer("table", table.Schema.Name), - zap.Stringer("db", table.Db.Name), + "restore files failed", zap.Error(err), ) return err } } - log.Info( - "finish to restore table", - zap.Stringer("table", table.Schema.Name), - zap.Stringer("db", table.Db.Name), - ) return nil } -// RestoreDatabase tries to restore the data of a database -func (rc *Client) RestoreDatabase( - db *utils.Database, - rewriteRules *restore_util.RewriteRules, - updateCh chan<- struct{}, -) (err error) { +// RestoreRaw tries to restore raw keys in the specified range. 
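`RestoreFiles` above (and `RestoreRaw` below) share one fan-out shape: a bounded worker pool, a buffered error channel sized to the task count, and a drain loop that doubles as the wait. Because every task sends exactly once, receiving `len(tasks)` values both surfaces the first failure and guarantees the deferred `close` happens after all sends. A condensed sketch of that skeleton:

```go
package restore

import (
	"context"
	"sync"

	"github.com/pingcap/br/pkg/utils"
)

// fanOut runs tasks on the pool and returns the first error, mirroring the
// structure of RestoreFiles and RestoreRaw.
func fanOut(ctx context.Context, pool *utils.WorkerPool, tasks []func() error) error {
	errCh := make(chan error, len(tasks))
	wg := new(sync.WaitGroup)
	defer close(errCh)
	for _, task := range tasks {
		wg.Add(1)
		t := task // capture a per-iteration copy
		pool.Apply(func() {
			defer wg.Done()
			select {
			case <-ctx.Done():
				errCh <- nil // cancelled elsewhere; report nothing
			case errCh <- t():
			}
		})
	}
	for range tasks {
		if err := <-errCh; err != nil {
			wg.Wait() // let in-flight tasks finish before errCh is closed
			return err
		}
	}
	return nil
}
```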
+func (rc *Client) RestoreRaw(startKey []byte, endKey []byte, files []*backup.File, updateCh chan<- struct{}) error { start := time.Now() defer func() { elapsed := time.Since(start) - log.Info("Restore Database", zap.Stringer("db", db.Schema.Name), zap.Duration("take", elapsed)) + log.Info("Restore Raw", + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("endKey", hex.EncodeToString(endKey)), + zap.Duration("take", elapsed)) }() - errCh := make(chan error, len(db.Tables)) + errCh := make(chan error, len(files)) wg := new(sync.WaitGroup) defer close(errCh) - for _, table := range db.Tables { - wg.Add(1) - tblReplica := table - rc.tableWorkerPool.Apply(func() { - defer wg.Done() - select { - case <-rc.ctx.Done(): - errCh <- nil - case errCh <- rc.RestoreTable( - tblReplica, rewriteRules, updateCh): - } - }) - } - for range db.Tables { - err = <-errCh - if err != nil { - wg.Wait() - return err - } + + err := rc.fileImporter.SetRawRange(startKey, endKey) + if err != nil { + + return errors.Trace(err) } - return nil -} -// RestoreAll tries to restore all the data of backup files. -func (rc *Client) RestoreAll( - rewriteRules *restore_util.RewriteRules, - updateCh chan<- struct{}, -) (err error) { - start := time.Now() - defer func() { - elapsed := time.Since(start) - log.Info("Restore All", zap.Duration("take", elapsed)) - }() - errCh := make(chan error, len(rc.databases)) - wg := new(sync.WaitGroup) - defer close(errCh) - for _, db := range rc.databases { + emptyRules := &RewriteRules{} + for _, file := range files { wg.Add(1) - dbReplica := db - rc.tableWorkerPool.Apply(func() { - defer wg.Done() - select { - case <-rc.ctx.Done(): - errCh <- nil - case errCh <- rc.RestoreDatabase( - dbReplica, rewriteRules, updateCh): - } - }) + fileReplica := file + rc.workerPool.Apply( + func() { + defer wg.Done() + select { + case <-rc.ctx.Done(): + errCh <- nil + case errCh <- rc.fileImporter.Import(fileReplica, emptyRules): + updateCh <- struct{}{} + } + }) } - - for range rc.databases { - err = <-errCh + for range files { + err := <-errCh if err != nil { + rc.cancel() wg.Wait() + log.Error( + "restore raw range failed", + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("endKey", hex.EncodeToString(endKey)), + zap.Error(err), + ) return err } } + log.Info( + "finish to restore raw range", + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("endKey", hex.EncodeToString(endKey)), + ) return nil } @@ -382,7 +566,7 @@ func (rc *Client) SwitchToNormalMode(ctx context.Context) error { } func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMode) error { - stores, err := rc.pdClient.GetAllStores(ctx, pd.WithExcludeTombstone()) + stores, err := conn.GetAllTiKVStores(ctx, rc.pdClient, conn.SkipTiFlash) if err != nil { return errors.Trace(err) } @@ -390,6 +574,9 @@ func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMo bfConf.MaxDelay = time.Second * 3 for _, store := range stores { opt := grpc.WithInsecure() + if rc.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(rc.tlsConf)) + } gctx, cancel := context.WithTimeout(ctx, time.Second*5) keepAlive := 10 keepAliveTimeout := 3 @@ -475,7 +662,7 @@ func (rc *Client) ValidateChecksum( checksumResp.TotalBytes != table.TotalBytes { log.Error("failed in validate checksum", zap.String("database", table.Db.Name.L), - zap.String("table", table.Schema.Name.L), + zap.String("table", table.Info.Name.L), zap.Uint64("origin tidb crc64", table.Crc64Xor), 
zap.Uint64("calculated crc64", checksumResp.Checksum), zap.Uint64("origin tidb total kvs", table.TotalKvs), @@ -502,6 +689,159 @@ func (rc *Client) ValidateChecksum( return nil } +const ( + restoreLabelKey = "exclusive" + restoreLabelValue = "restore" +) + +// LoadRestoreStores loads the stores used to restore data. +func (rc *Client) LoadRestoreStores(ctx context.Context) error { + if !rc.isOnline { + return nil + } + + stores, err := rc.pdClient.GetAllStores(ctx) + if err != nil { + return err + } + for _, s := range stores { + if s.GetState() != metapb.StoreState_Up { + continue + } + for _, l := range s.GetLabels() { + if l.GetKey() == restoreLabelKey && l.GetValue() == restoreLabelValue { + rc.restoreStores = append(rc.restoreStores, s.GetId()) + break + } + } + } + log.Info("load restore stores", zap.Uint64s("store-ids", rc.restoreStores)) + return nil +} + +// ResetRestoreLabels removes the exclusive labels of the restore stores. +func (rc *Client) ResetRestoreLabels(ctx context.Context) error { + if !rc.isOnline { + return nil + } + log.Info("start reseting store labels") + return rc.toolClient.SetStoresLabel(ctx, rc.restoreStores, restoreLabelKey, "") +} + +// SetupPlacementRules sets rules for the tables' regions. +func (rc *Client) SetupPlacementRules(ctx context.Context, tables []*model.TableInfo) error { + if !rc.isOnline || len(rc.restoreStores) == 0 { + return nil + } + log.Info("start setting placement rules") + rule, err := rc.toolClient.GetPlacementRule(ctx, "pd", "default") + if err != nil { + return err + } + rule.Index = 100 + rule.Override = true + rule.LabelConstraints = append(rule.LabelConstraints, placement.LabelConstraint{ + Key: restoreLabelKey, + Op: "in", + Values: []string{restoreLabelValue}, + }) + for _, t := range tables { + rule.ID = rc.getRuleID(t.ID) + rule.StartKeyHex = hex.EncodeToString(codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(t.ID))) + rule.EndKeyHex = hex.EncodeToString(codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(t.ID+1))) + err = rc.toolClient.SetPlacementRule(ctx, rule) + if err != nil { + return err + } + } + log.Info("finish setting placement rules") + return nil +} + +// WaitPlacementSchedule waits PD to move tables to restore stores. 
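`SetupPlacementRules` scopes one rule per table: the key range is the encoded table prefix `[t, t+1)` and a label constraint pins replicas to stores labelled `exclusive=restore`. A sketch of the per-table rule construction, assuming PD's default rule was fetched as the base (as the code above does; the field names are exactly those used in the hunk):

```go
package restore

import (
	"encoding/hex"
	"strconv"

	"github.com/pingcap/pd/v4/server/schedule/placement"
	"github.com/pingcap/tidb/tablecodec"
	"github.com/pingcap/tidb/util/codec"
)

// restoreRuleFor derives the per-table placement rule used during an online
// restore; Index and Override let it shadow the default rule.
func restoreRuleFor(base placement.Rule, tableID int64) placement.Rule {
	rule := base
	rule.ID = "restore-t" + strconv.FormatInt(tableID, 10)
	rule.Index = 100
	rule.Override = true
	rule.StartKeyHex = hex.EncodeToString(codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(tableID)))
	rule.EndKeyHex = hex.EncodeToString(codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(tableID+1)))
	rule.LabelConstraints = append(rule.LabelConstraints, placement.LabelConstraint{
		Key:    restoreLabelKey,
		Op:     "in",
		Values: []string{restoreLabelValue},
	})
	return rule
}
```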
+func (rc *Client) WaitPlacementSchedule(ctx context.Context, tables []*model.TableInfo) error { + if !rc.isOnline || len(rc.restoreStores) == 0 { + return nil + } + log.Info("start waiting placement schedule") + ticker := time.NewTicker(time.Second * 10) + defer ticker.Stop() + for { + select { + case <-ticker.C: + ok, progress, err := rc.checkRegions(ctx, tables) + if err != nil { + return err + } + if ok { + log.Info("finish waiting placement schedule") + return nil + } + log.Info("placement schedule progress: " + progress) + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (rc *Client) checkRegions(ctx context.Context, tables []*model.TableInfo) (bool, string, error) { + for i, t := range tables { + start := codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(t.ID)) + end := codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(t.ID+1)) + ok, regionProgress, err := rc.checkRange(ctx, start, end) + if err != nil { + return false, "", err + } + if !ok { + return false, fmt.Sprintf("table %v/%v, %s", i, len(tables), regionProgress), nil + } + } + return true, "", nil +} + +func (rc *Client) checkRange(ctx context.Context, start, end []byte) (bool, string, error) { + regions, err := rc.toolClient.ScanRegions(ctx, start, end, -1) + if err != nil { + return false, "", err + } + for i, r := range regions { + NEXT_PEER: + for _, p := range r.Region.GetPeers() { + for _, storeID := range rc.restoreStores { + if p.GetStoreId() == storeID { + continue NEXT_PEER + } + } + return false, fmt.Sprintf("region %v/%v", i, len(regions)), nil + } + } + return true, "", nil +} + +// ResetPlacementRules removes placement rules for tables. +func (rc *Client) ResetPlacementRules(ctx context.Context, tables []*model.TableInfo) error { + if !rc.isOnline || len(rc.restoreStores) == 0 { + return nil + } + log.Info("start reseting placement rules") + var failedTables []int64 + for _, t := range tables { + err := rc.toolClient.DeletePlacementRule(ctx, "pd", rc.getRuleID(t.ID)) + if err != nil { + log.Info("failed to delete placement rule for table", zap.Int64("table-id", t.ID)) + failedTables = append(failedTables, t.ID) + } + } + if len(failedTables) > 0 { + return errors.Errorf("failed to delete placement rules for tables %v", failedTables) + } + return nil +} + +func (rc *Client) getRuleID(tableID int64) string { + return "restore-t" + strconv.FormatInt(tableID, 10) +} + // IsIncremental returns whether this backup is incremental func (rc *Client) IsIncremental() bool { return !(rc.backupMeta.StartVersion == rc.backupMeta.EndVersion || diff --git a/pkg/restore/client_test.go b/pkg/restore/client_test.go index 5007f1281..3f8cb71f8 100644 --- a/pkg/restore/client_test.go +++ b/pkg/restore/client_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package restore import ( @@ -12,18 +14,20 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/br/pkg/gluetidb" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) var _ = Suite(&testRestoreClientSuite{}) type testRestoreClientSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testRestoreClientSuite) SetUpTest(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -36,7 +40,7 @@ func (s *testRestoreClientSuite) TestCreateTables(c *C) { defer s.mock.Stop() client := Client{} - db, err := NewDB(s.mock.Storage) + db, err := NewDB(gluetidb.Glue{}, s.mock.Storage) c.Assert(err, IsNil) client.db = db client.ctx = context.Background() @@ -52,7 +56,7 @@ func (s *testRestoreClientSuite) TestCreateTables(c *C) { for i := len(tables) - 1; i >= 0; i-- { tables[i] = &utils.Table{ Db: dbSchema, - Schema: &model.TableInfo{ + Info: &model.TableInfo{ ID: int64(i), Name: model.NewCIStr("test" + strconv.Itoa(i)), Columns: []*model.ColumnInfo{{ @@ -93,7 +97,7 @@ func (s *testRestoreClientSuite) TestIsOnline(c *C) { defer s.mock.Stop() client := Client{} - db, err := NewDB(s.mock.Storage) + db, err := NewDB(gluetidb.Glue{}, s.mock.Storage) c.Assert(err, IsNil) client.db = db client.ctx = context.Background() diff --git a/pkg/restore/db.go b/pkg/restore/db.go index b114b7629..be24a1ad9 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -1,35 +1,40 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( - "bytes" "context" "fmt" + "sort" "strings" "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/pingcap/parser/model" - "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/session" "go.uber.org/zap" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/utils" ) // DB is a TiDB instance, not thread-safe. type DB struct { - se session.Session + se glue.Session } // NewDB returns a new DB -func NewDB(store kv.Storage) (*DB, error) { - se, err := session.CreateSession(store) +func NewDB(g glue.Glue, store kv.Storage) (*DB, error) { + se, err := g.CreateSession(store) if err != nil { return nil, errors.Trace(err) } + // The session may be nil in raw kv mode + if se == nil { + return nil, nil + } // Set SQL mode to None for avoiding SQL compatibility problem - _, err = se.Execute(context.Background(), "set @@sql_mode=''") + err = se.Execute(context.Background(), "set @@sql_mode=''") if err != nil { return nil, errors.Trace(err) } @@ -38,38 +43,59 @@ func NewDB(store kv.Storage) (*DB, error) { }, nil } +// ExecDDL executes the query of a ddl job. +func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error { + var err error + if ddlJob.BinlogInfo.TableInfo != nil { + switchDbSQL := fmt.Sprintf("use %s;", utils.EncloseName(ddlJob.SchemaName)) + err = db.se.Execute(ctx, switchDbSQL) + if err != nil { + log.Error("switch db failed", + zap.String("query", switchDbSQL), + zap.String("db", ddlJob.SchemaName), + zap.Error(err)) + return errors.Trace(err) + } + } + err = db.se.Execute(ctx, ddlJob.Query) + if err != nil { + log.Error("execute ddl query failed", + zap.String("query", ddlJob.Query), + zap.String("db", ddlJob.SchemaName), + zap.Int64("historySchemaVersion", ddlJob.BinlogInfo.SchemaVersion), + zap.Error(err)) + } + return errors.Trace(err) +} + // CreateDatabase executes a CREATE DATABASE SQL. 
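`ExecDDL` and `CreateTable` now route identifiers through `utils.EncloseName`, which is referenced but not defined anywhere in this patch. Its conventional shape would be MySQL-style backtick quoting; the sketch below is an assumption about that helper, not its actual body:

```go
package utils

import "strings"

// encloseName is a guess at what utils.EncloseName does: wrap an identifier
// in backticks, doubling any embedded backtick, so statements like
// "use <name>;" stay safe for unusual database names.
func encloseName(name string) string {
	return "`" + strings.ReplaceAll(name, "`", "``") + "`"
}
```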
func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { - var buf bytes.Buffer - err := executor.ConstructResultOfShowCreateDatabase(db.se, schema, true, &buf) + createSQL, err := db.se.ShowCreateDatabase(schema) if err != nil { log.Error("build create database SQL failed", zap.Stringer("db", schema.Name), zap.Error(err)) return errors.Trace(err) } - createSQL := buf.String() - _, err = db.se.Execute(ctx, createSQL) + err = db.se.Execute(ctx, createSQL) if err != nil { - log.Error("create database failed", zap.String("SQL", createSQL), zap.Error(err)) - return errors.Trace(err) + log.Error("create database failed", zap.String("query", createSQL), zap.Error(err)) } - return nil + return errors.Trace(err) } // CreateTable executes a CREATE TABLE SQL. func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { - var buf bytes.Buffer - schema := table.Schema - err := executor.ConstructResultOfShowCreateTable(db.se, schema, newIDAllocator(schema.AutoIncID), &buf) + tableInfo := table.Info + createSQL, err := db.se.ShowCreateTable(tableInfo, newIDAllocator(tableInfo.AutoIncID)) if err != nil { log.Error( "build create table SQL failed", zap.Stringer("db", table.Db.Name), - zap.Stringer("table", schema.Name), + zap.Stringer("table", tableInfo.Name), zap.Error(err)) return errors.Trace(err) } - switchDbSQL := fmt.Sprintf("use %s;", table.Db.Name) - _, err = db.se.Execute(ctx, switchDbSQL) + switchDbSQL := fmt.Sprintf("use %s;", utils.EncloseName(table.Db.Name.O)) + err = db.se.Execute(ctx, switchDbSQL) if err != nil { log.Error("switch db failed", zap.String("SQL", switchDbSQL), @@ -77,34 +103,68 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { zap.Error(err)) return errors.Trace(err) } - createSQL := buf.String() // Insert `IF NOT EXISTS` statement to skip the created tables words := strings.SplitN(createSQL, " ", 3) if len(words) > 2 && strings.ToUpper(words[0]) == "CREATE" && strings.ToUpper(words[1]) == "TABLE" { createSQL = "CREATE TABLE IF NOT EXISTS " + words[2] } - _, err = db.se.Execute(ctx, createSQL) + err = db.se.Execute(ctx, createSQL) if err != nil { log.Error("create table failed", zap.String("SQL", createSQL), zap.Stringer("db", table.Db.Name), - zap.Stringer("table", table.Schema.Name), + zap.Stringer("table", table.Info.Name), zap.Error(err)) return errors.Trace(err) } alterAutoIncIDSQL := fmt.Sprintf( "alter table %s auto_increment = %d", - escapeTableName(schema.Name), - schema.AutoIncID) - _, err = db.se.Execute(ctx, alterAutoIncIDSQL) + utils.EncloseName(tableInfo.Name.O), + tableInfo.AutoIncID) + err = db.se.Execute(ctx, alterAutoIncIDSQL) if err != nil { log.Error("alter AutoIncID failed", - zap.String("SQL", alterAutoIncIDSQL), + zap.String("query", alterAutoIncIDSQL), + zap.Stringer("db", table.Db.Name), + zap.Stringer("table", table.Info.Name), + zap.Error(err)) + } + + return errors.Trace(err) +} + +// AlterTiflashReplica alters the replica count of tiflash +func (db *DB) AlterTiflashReplica(ctx context.Context, table *utils.Table, count int) error { + switchDbSQL := fmt.Sprintf("use %s;", utils.EncloseName(table.Db.Name.O)) + err := db.se.Execute(ctx, switchDbSQL) + if err != nil { + log.Error("switch db failed", + zap.String("SQL", switchDbSQL), zap.Stringer("db", table.Db.Name), - zap.Stringer("table", table.Schema.Name), zap.Error(err)) return errors.Trace(err) } + alterTiFlashSQL := fmt.Sprintf( + "alter table %s set tiflash replica %d", + utils.EncloseName(table.Info.Name.O), + count, + ) + err = 
db.se.Execute(ctx, alterTiFlashSQL)
+	if err != nil {
+		log.Error("alter tiflash replica failed",
+			zap.String("query", alterTiFlashSQL),
+			zap.Stringer("db", table.Db.Name),
+			zap.Stringer("table", table.Info.Name),
+			zap.Error(err))
+		return err
+	} else if table.TiFlashReplicas > 0 {
+		log.Warn("alter tiflash replica done",
+			zap.Stringer("db", table.Db.Name),
+			zap.Stringer("table", table.Info.Name),
+			zap.Int("originalReplicaCount", table.TiFlashReplicas),
+			zap.Int("replicaCount", count))
+	}
 	return nil
 }
 
@@ -112,3 +172,64 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error {
 func (db *DB) Close() {
 	db.se.Close()
 }
+
+// FilterDDLJobs filters the DDL jobs relevant to the given tables.
+func FilterDDLJobs(allDDLJobs []*model.Job, tables []*utils.Table) (ddlJobs []*model.Job) {
+	// Sort the ddl jobs by schema version in descending order.
+	sort.Slice(allDDLJobs, func(i, j int) bool {
+		return allDDLJobs[i].BinlogInfo.SchemaVersion > allDDLJobs[j].BinlogInfo.SchemaVersion
+	})
+	dbs := getDatabases(tables)
+	for _, db := range dbs {
+		// These maps are for solving some corner cases,
+		// e.g. let "t=2" indicate that the id of database "t" is 2. If the ddl execution sequence is:
+		// rename "a" to "b"(a=1) -> drop "b"(b=1) -> create "b"(b=2) -> rename "b" to "a"(a=2),
+		// then we cannot find the "create" DDL by name and id directly.
+		// To cover this case, we must find all names and ids the database/table ever had.
+		dbIDs := make(map[int64]bool)
+		dbIDs[db.ID] = true
+		dbNames := make(map[string]bool)
+		dbNames[db.Name.String()] = true
+		for _, job := range allDDLJobs {
+			if job.BinlogInfo.DBInfo != nil {
+				if dbIDs[job.SchemaID] || dbNames[job.BinlogInfo.DBInfo.Name.String()] {
+					ddlJobs = append(ddlJobs, job)
+					// For jobs executed with the old id, like step 2 in the example above.
+					dbIDs[job.SchemaID] = true
+					// For jobs executed after a rename, like step 3 in the example above.
+					dbNames[job.BinlogInfo.DBInfo.Name.String()] = true
+				}
+			}
+		}
+	}
+
+	for _, table := range tables {
+		tableIDs := make(map[int64]bool)
+		tableIDs[table.Info.ID] = true
+		tableNames := make(map[string]bool)
+		tableNames[table.Info.Name.String()] = true
+		for _, job := range allDDLJobs {
+			if job.BinlogInfo.TableInfo != nil {
+				if tableIDs[job.TableID] || tableNames[job.BinlogInfo.TableInfo.Name.String()] {
+					ddlJobs = append(ddlJobs, job)
+					tableIDs[job.TableID] = true
+					// For truncate table, the id may be changed.
+					tableIDs[job.BinlogInfo.TableInfo.ID] = true
+					tableNames[job.BinlogInfo.TableInfo.Name.String()] = true
+				}
+			}
+		}
+	}
+	return ddlJobs
+}
+
+func getDatabases(tables []*utils.Table) (dbs []*model.DBInfo) {
+	dbIDs := make(map[int64]bool)
+	for _, table := range tables {
+		if !dbIDs[table.Db.ID] {
+			dbs = append(dbs, table.Db)
+			dbIDs[table.Db.ID] = true
+		}
+	}
+	return
+}
diff --git a/pkg/restore/db_test.go b/pkg/restore/db_test.go
index 9583f7f8c..3f77a53dd 100644
--- a/pkg/restore/db_test.go
+++ b/pkg/restore/db_test.go
@@ -1,3 +1,5 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
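
Aside: the rename corner case documented in FilterDDLJobs above is easier to follow with a concrete trace. The sketch below re-implements just the map-growing idea on toy job records (`toyJob` and `filterByDB` are illustrative stand-ins, not BR's `model.Job` or the `FilterDDLJobs` above), scanning jobs newest-first to mirror the descending schema-version sort:

```go
package main

import "fmt"

// toyJob is an illustrative stand-in for model.Job: the schema id the job ran
// under and the database name recorded in its binlog info after the job.
type toyJob struct {
	schemaID int64
	dbName   string
	query    string
}

// filterByDB mirrors the map-growing loop in FilterDDLJobs: any job matching a
// known id or name teaches us more ids/names to match in older jobs.
func filterByDB(jobsNewestFirst []toyJob, curID int64, curName string) []toyJob {
	ids := map[int64]bool{curID: true}
	names := map[string]bool{curName: true}
	var kept []toyJob
	for _, j := range jobsNewestFirst {
		if ids[j.schemaID] || names[j.dbName] {
			kept = append(kept, j)
			ids[j.schemaID] = true // learn ids used before a drop/create
			names[j.dbName] = true // learn names the database held in between
		}
	}
	return kept
}

func main() {
	// Newest first: rename b->a (a=2), create b (b=2), drop b (b=1), rename a->b (a=1).
	jobs := []toyJob{
		{2, "a", `RENAME "b" TO "a"`},
		{2, "b", `CREATE "b"`},
		{1, "b", `DROP "b"`},
		{1, "b", `RENAME "a" TO "b"`},
	}
	// Restoring database "a", whose current id is 2: all four jobs are kept.
	for _, j := range filterByDB(jobs, 2, "a") {
		fmt.Println(j.query)
	}
}
```
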
+ package restore import ( @@ -12,32 +14,34 @@ import ( "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/gluetidb" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) var _ = Suite(&testRestoreSchemaSuite{}) type testRestoreSchemaSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testRestoreSchemaSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) + c.Assert(s.mock.Start(), IsNil) } func TestT(t *testing.T) { TestingT(t) } func (s *testRestoreSchemaSuite) TearDownSuite(c *C) { + s.mock.Stop() testleak.AfterTest(c)() } func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { - c.Assert(s.mock.Start(), IsNil) - defer s.mock.Stop() - tk := testkit.NewTestKit(c, s.mock.Storage) tk.MustExec("use test") tk.MustExec("set @@sql_mode=''") @@ -60,17 +64,17 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { tableInfo, err := info.TableByName(model.NewCIStr("test"), model.NewCIStr("\"t\"")) c.Assert(err, IsNil, Commentf("Error get table info: %s", err)) table := utils.Table{ - Schema: tableInfo.Meta(), - Db: dbInfo, + Info: tableInfo.Meta(), + Db: dbInfo, } // Get the next AutoIncID - idAlloc := autoid.NewAllocator(s.mock.Storage, dbInfo.ID, false) - globalAutoID, err := idAlloc.NextGlobalAutoID(table.Schema.ID) + idAlloc := autoid.NewAllocator(s.mock.Storage, dbInfo.ID, false, autoid.RowIDAllocType) + globalAutoID, err := idAlloc.NextGlobalAutoID(table.Info.ID) c.Assert(err, IsNil, Commentf("Error allocate next auto id")) c.Assert(autoIncID, Equals, uint64(globalAutoID)) // Alter AutoIncID to the next AutoIncID + 100 - table.Schema.AutoIncID = globalAutoID + 100 - db, err := NewDB(s.mock.Storage) + table.Info.AutoIncID = globalAutoID + 100 + db, err := NewDB(gluetidb.Glue{}, s.mock.Storage) c.Assert(err, IsNil, Commentf("Error create DB")) tk.MustExec("drop database if exists test;") // Test empty collate value @@ -92,3 +96,39 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { c.Assert(err, IsNil, Commentf("Error query auto inc id: %s", err)) c.Assert(autoIncID, Equals, uint64(globalAutoID+100)) } + +func (s *testRestoreSchemaSuite) TestFilterDDLJobs(c *C) { + tk := testkit.NewTestKit(c, s.mock.Storage) + tk.MustExec("CREATE DATABASE IF NOT EXISTS test_db;") + tk.MustExec("CREATE TABLE IF NOT EXISTS test_db.test_table (c1 INT);") + lastTs, err := s.mock.GetOracle().GetTimestamp(context.Background()) + c.Assert(err, IsNil, Commentf("Error get last ts: %s", err)) + tk.MustExec("RENAME TABLE test_db.test_table to test_db.test_table1;") + tk.MustExec("DROP TABLE test_db.test_table1;") + tk.MustExec("DROP DATABASE test_db;") + tk.MustExec("CREATE DATABASE test_db;") + tk.MustExec("USE test_db;") + tk.MustExec("CREATE TABLE test_table1 (c2 CHAR(255));") + tk.MustExec("RENAME TABLE test_table1 to test_table;") + tk.MustExec("TRUNCATE TABLE test_table;") + + ts, err := s.mock.GetOracle().GetTimestamp(context.Background()) + c.Assert(err, IsNil, Commentf("Error get ts: %s", err)) + allDDLJobs, err := backup.GetBackupDDLJobs(s.mock.Domain, lastTs, ts) + c.Assert(err, IsNil, Commentf("Error get ddl jobs: %s", err)) + infoSchema, err := s.mock.Domain.GetSnapshotInfoSchema(ts) + c.Assert(err, IsNil, Commentf("Error get snapshot info schema: %s", err)) + dbInfo, ok := infoSchema.SchemaByName(model.NewCIStr("test_db")) + c.Assert(ok, IsTrue, Commentf("DB info not 
exist")) + tableInfo, err := infoSchema.TableByName(model.NewCIStr("test_db"), model.NewCIStr("test_table")) + c.Assert(err, IsNil, Commentf("Error get table info: %s", err)) + tables := []*utils.Table{{ + Db: dbInfo, + Info: tableInfo.Meta(), + }} + ddlJobs := FilterDDLJobs(allDDLJobs, tables) + for _, job := range ddlJobs { + c.Logf("get ddl job: %s", job.Query) + } + c.Assert(len(ddlJobs), Equals, 7) +} diff --git a/pkg/restore/import.go b/pkg/restore/import.go index fc09b7b16..fec07a870 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -1,7 +1,12 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( + "bytes" "context" + "crypto/tls" + "strings" "sync" "time" @@ -11,31 +16,17 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/log" - "github.com/pingcap/pd/pkg/codec" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" + "github.com/pingcap/pd/v4/pkg/codec" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" ) -var ( - errNotLeader = errors.New("not leader") - errEpochNotMatch = errors.New("epoch not match") - errRewriteRuleNotFound = errors.New("rewrite rule not found") - errRangeIsEmpty = errors.New("range is empty") -) - -const ( - importScanResgionTime = 10 * time.Second - importFileRetryTimes = 16 - importFileWaitInterval = 10 * time.Millisecond - importFileMaxWaitInterval = 1 * time.Second - - downloadSSTRetryTimes = 8 - downloadSSTWaitInterval = 10 * time.Millisecond - downloadSSTMaxWaitInterval = 1 * time.Second -) +const importScanRegionTime = 10 * time.Second +const scanRegionPaginationLimit = int(128) // ImporterClient is used to import a file to TiKV type ImporterClient interface { @@ -60,15 +51,17 @@ type ImporterClient interface { type importClient struct { mu sync.Mutex - metaClient restore_util.Client + metaClient SplitClient clients map[uint64]import_sstpb.ImportSSTClient + tlsConf *tls.Config } // NewImportClient returns a new ImporterClient -func NewImportClient(metaClient restore_util.Client) ImporterClient { +func NewImportClient(metaClient SplitClient, tlsConf *tls.Config) ImporterClient { return &importClient{ metaClient: metaClient, clients: make(map[uint64]import_sstpb.ImportSSTClient), + tlsConf: tlsConf, } } @@ -122,7 +115,11 @@ func (ic *importClient) getImportClient( if err != nil { return nil, err } - conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure()) + opt := grpc.WithInsecure() + if ic.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(ic.tlsConf)) + } + conn, err := grpc.Dial(store.GetAddress(), opt) if err != nil { return nil, err } @@ -133,11 +130,15 @@ func (ic *importClient) getImportClient( // FileImporter used to import a file to TiKV. type FileImporter struct { - metaClient restore_util.Client + metaClient SplitClient importClient ImporterClient backend *backup.StorageBackend rateLimit uint64 + isRawKvMode bool + rawStartKey []byte + rawEndKey []byte + ctx context.Context cancel context.CancelFunc } @@ -145,9 +146,10 @@ type FileImporter struct { // NewFileImporter returns a new file importClient. 
func NewFileImporter( ctx context.Context, - metaClient restore_util.Client, + metaClient SplitClient, importClient ImporterClient, backend *backup.StorageBackend, + isRawKvMode bool, rateLimit uint64, ) FileImporter { ctx, cancel := context.WithCancel(ctx) @@ -157,92 +159,144 @@ func NewFileImporter( ctx: ctx, cancel: cancel, importClient: importClient, + isRawKvMode: isRawKvMode, rateLimit: rateLimit, } } +// SetRawRange sets the range to be restored in raw kv mode. +func (importer *FileImporter) SetRawRange(startKey, endKey []byte) error { + if !importer.isRawKvMode { + return errors.New("file importer is not in raw kv mode") + } + importer.rawStartKey = startKey + importer.rawEndKey = endKey + return nil +} + // Import tries to import a file. // All rules must contain encoded keys. -func (importer *FileImporter) Import(file *backup.File, rewriteRules *restore_util.RewriteRules) error { +func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRules) error { log.Debug("import file", zap.Stringer("file", file)) // Rewrite the start key and end key of file to scan regions - startKey, endKey, err := rewriteFileKeys(file, rewriteRules) + var startKey, endKey []byte + var err error + if importer.isRawKvMode { + startKey = file.StartKey + endKey = file.EndKey + } else { + startKey, endKey, err = rewriteFileKeys(file, rewriteRules) + } if err != nil { return err } log.Debug("rewrite file keys", zap.Stringer("file", file), zap.Binary("startKey", startKey), - zap.Binary("endKey", endKey), - ) - err = withRetry(func() error { - ctx, cancel := context.WithTimeout(importer.ctx, importScanResgionTime) + zap.Binary("endKey", endKey)) + err = utils.WithRetry(importer.ctx, func() error { + ctx, cancel := context.WithTimeout(importer.ctx, importScanRegionTime) defer cancel() // Scan regions covered by the file range - regionInfos, err := importer.metaClient.ScanRegions(ctx, startKey, endKey, 0) - if err != nil { - return errors.Trace(err) + regionInfos, errScanRegion := paginateScanRegion( + ctx, importer.metaClient, startKey, endKey, scanRegionPaginationLimit) + if errScanRegion != nil { + return errors.Trace(errScanRegion) } log.Debug("scan regions", zap.Stringer("file", file), zap.Int("count", len(regionInfos))) // Try to download and ingest the file in every region for _, regionInfo := range regionInfos { - var downloadMeta *import_sstpb.SSTMeta info := regionInfo // Try to download file. 
- err = withRetry(func() error { - var err error - var isEmpty bool - downloadMeta, isEmpty, err = importer.downloadSST(info, file, rewriteRules) - if err != nil { - if err != errRewriteRuleNotFound { - log.Warn("download file failed", - zap.Stringer("file", file), - zap.Stringer("region", info.Region), - zap.Binary("startKey", startKey), - zap.Binary("endKey", endKey), - zap.Error(err), - ) - } - return err - } - if isEmpty { - log.Info( - "file don't have any key in this region, skip it", - zap.Stringer("file", file), - zap.Stringer("region", info.Region), - ) - return errRangeIsEmpty + var downloadMeta *import_sstpb.SSTMeta + errDownload := utils.WithRetry(importer.ctx, func() error { + var e error + if importer.isRawKvMode { + downloadMeta, e = importer.downloadRawKVSST(info, file) + } else { + downloadMeta, e = importer.downloadSST(info, file, rewriteRules) } - return nil - }, func(e error) bool { - // Scan regions may return some regions which cannot match any rewrite rule, - // like [t{tableID}, t{tableID}_r), those regions should be skipped - return e != errRewriteRuleNotFound && e != errRangeIsEmpty - }, downloadSSTRetryTimes, downloadSSTWaitInterval, downloadSSTMaxWaitInterval) - if err != nil { - if err == errRewriteRuleNotFound || err == errRangeIsEmpty { + return e + }, newDownloadSSTBackoffer()) + if errDownload != nil { + if errDownload == errRewriteRuleNotFound || errDownload == errRangeIsEmpty { // Skip this region continue } - return err + log.Error("download file failed", + zap.Stringer("file", file), + zap.Stringer("region", info.Region), + zap.Binary("startKey", startKey), + zap.Binary("endKey", endKey), + zap.Error(errDownload)) + return errDownload + } + + ingestResp, errIngest := importer.ingestSST(downloadMeta, info) + ingestRetry: + for errIngest == nil { + errPb := ingestResp.GetError() + if errPb == nil { + // Ingest success + break ingestRetry + } + switch { + case errPb.NotLeader != nil: + // If error is `NotLeader`, update the region info and retry + var newInfo *RegionInfo + if newLeader := errPb.GetNotLeader().GetLeader(); newLeader != nil { + newInfo = &RegionInfo{ + Leader: newLeader, + Region: info.Region, + } + } else { + // Slow path, get region from PD + newInfo, errIngest = importer.metaClient.GetRegion( + importer.ctx, info.Region.GetStartKey()) + if errIngest != nil { + break ingestRetry + } + } + log.Debug("ingest sst returns not leader error, retry it", + zap.Stringer("region", info.Region), + zap.Stringer("newLeader", newInfo.Leader)) + + if !checkRegionEpoch(newInfo, info) { + errIngest = errors.AddStack(errEpochNotMatch) + break ingestRetry + } + ingestResp, errIngest = importer.ingestSST(downloadMeta, newInfo) + case errPb.EpochNotMatch != nil: + // TODO handle epoch not match error + // 1. retry download if needed + // 2. 
retry ingest + errIngest = errors.AddStack(errEpochNotMatch) + break ingestRetry + case errPb.RegionNotFound != nil: + errIngest = errors.AddStack(errRegionNotFound) + break ingestRetry + case errPb.KeyNotInRegion != nil: + errIngest = errors.AddStack(errKeyNotInRegion) + break ingestRetry + default: + errIngest = errors.Errorf("ingest error %s", errPb) + break ingestRetry + } } - err = importer.ingestSST(downloadMeta, info) - if err != nil { - log.Warn("ingest file failed", + + if errIngest != nil { + log.Error("ingest file failed", zap.Stringer("file", file), zap.Stringer("range", downloadMeta.GetRange()), zap.Stringer("region", info.Region), - zap.Error(err), - ) - return err + zap.Error(errIngest)) + return errIngest } - summary.CollectSuccessUnit(summary.TotalKV, file.TotalKvs) - summary.CollectSuccessUnit(summary.TotalBytes, file.TotalBytes) + summary.CollectSuccessUnit(summary.TotalKV, 1, file.TotalKvs) + summary.CollectSuccessUnit(summary.TotalBytes, 1, file.TotalBytes) } return nil - }, func(e error) bool { - return true - }, importFileRetryTimes, importFileWaitInterval, importFileMaxWaitInterval) + }, newImportSSTBackoffer()) return err } @@ -255,36 +309,29 @@ func (importer *FileImporter) setDownloadSpeedLimit(storeID uint64) error { } func (importer *FileImporter) downloadSST( - regionInfo *restore_util.RegionInfo, + regionInfo *RegionInfo, file *backup.File, - rewriteRules *restore_util.RewriteRules, -) (*import_sstpb.SSTMeta, bool, error) { + rewriteRules *RewriteRules, +) (*import_sstpb.SSTMeta, error) { id, err := uuid.New().MarshalBinary() if err != nil { - return nil, true, errors.Trace(err) + return nil, errors.Trace(err) } // Assume one region reflects to one rewrite rule _, key, err := codec.DecodeBytes(regionInfo.Region.GetStartKey()) if err != nil { - return nil, true, err + return nil, err } regionRule := matchNewPrefix(key, rewriteRules) if regionRule == nil { - log.Debug("cannot find rewrite rule, skip region", - zap.Stringer("region", regionInfo.Region), - zap.Array("tableRule", rules(rewriteRules.Table)), - zap.Array("dataRule", rules(rewriteRules.Data)), - zap.Binary("key", key), - ) - return nil, true, errRewriteRuleNotFound + return nil, errors.Trace(errRewriteRuleNotFound) } rule := import_sstpb.RewriteRule{ OldKeyPrefix: encodeKeyPrefix(regionRule.GetOldKeyPrefix()), NewKeyPrefix: encodeKeyPrefix(regionRule.GetNewKeyPrefix()), } sstMeta := getSSTMetaFromFile(id, file, regionInfo.Region, &rule) - sstMeta.RegionId = regionInfo.Region.GetId() - sstMeta.RegionEpoch = regionInfo.Region.GetRegionEpoch() + req := &import_sstpb.DownloadRequest{ Sst: sstMeta, StorageBackend: importer.backend, @@ -299,21 +346,70 @@ func (importer *FileImporter) downloadSST( for _, peer := range regionInfo.Region.GetPeers() { resp, err = importer.importClient.DownloadSST(importer.ctx, peer.GetStoreId(), req) if err != nil { - return nil, true, err + return nil, extractDownloadSSTError(err) } if resp.GetIsEmpty() { - return &sstMeta, true, nil + return nil, errors.Trace(errRangeIsEmpty) } } sstMeta.Range.Start = truncateTS(resp.Range.GetStart()) sstMeta.Range.End = truncateTS(resp.Range.GetEnd()) - return &sstMeta, false, nil + return &sstMeta, nil +} + +func (importer *FileImporter) downloadRawKVSST( + regionInfo *RegionInfo, + file *backup.File, +) (*import_sstpb.SSTMeta, error) { + id, err := uuid.New().MarshalBinary() + if err != nil { + return nil, errors.Trace(err) + } + // Empty rule + var rule import_sstpb.RewriteRule + sstMeta := getSSTMetaFromFile(id, file, regionInfo.Region, 
&rule) + + // Cut the SST file's range to fit in the restoring range. + if bytes.Compare(importer.rawStartKey, sstMeta.Range.GetStart()) > 0 { + sstMeta.Range.Start = importer.rawStartKey + } + // TODO: importer.RawEndKey is exclusive but sstMeta.Range.End is inclusive. How to exclude importer.RawEndKey? + if len(importer.rawEndKey) > 0 && bytes.Compare(importer.rawEndKey, sstMeta.Range.GetEnd()) < 0 { + sstMeta.Range.End = importer.rawEndKey + } + if bytes.Compare(sstMeta.Range.GetStart(), sstMeta.Range.GetEnd()) > 0 { + return nil, errors.Trace(errRangeIsEmpty) + } + + req := &import_sstpb.DownloadRequest{ + Sst: sstMeta, + StorageBackend: importer.backend, + Name: file.GetName(), + RewriteRule: rule, + } + log.Debug("download SST", + zap.Stringer("sstMeta", &sstMeta), + zap.Stringer("region", regionInfo.Region), + ) + var resp *import_sstpb.DownloadResponse + for _, peer := range regionInfo.Region.GetPeers() { + resp, err = importer.importClient.DownloadSST(importer.ctx, peer.GetStoreId(), req) + if err != nil { + return nil, extractDownloadSSTError(err) + } + if resp.GetIsEmpty() { + return nil, errors.Trace(errRangeIsEmpty) + } + } + sstMeta.Range.Start = resp.Range.GetStart() + sstMeta.Range.End = resp.Range.GetEnd() + return &sstMeta, nil } func (importer *FileImporter) ingestSST( sstMeta *import_sstpb.SSTMeta, - regionInfo *restore_util.RegionInfo, -) error { + regionInfo *RegionInfo, +) (*import_sstpb.IngestResponse, error) { leader := regionInfo.Leader if leader == nil { leader = regionInfo.Region.GetPeers()[0] @@ -327,20 +423,34 @@ func (importer *FileImporter) ingestSST( Context: reqCtx, Sst: sstMeta, } - log.Debug("download SST", zap.Stringer("sstMeta", sstMeta)) + log.Debug("ingest SST", zap.Stringer("sstMeta", sstMeta), zap.Reflect("leader", leader)) resp, err := importer.importClient.IngestSST(importer.ctx, leader.GetStoreId(), req) if err != nil { - return err + return nil, errors.Trace(err) } - respErr := resp.GetError() - if respErr != nil { - if respErr.EpochNotMatch != nil { - return errEpochNotMatch - } - if respErr.NotLeader != nil { - return errNotLeader - } - return errors.Errorf("ingest failed: %v", respErr) + return resp, nil +} + +func checkRegionEpoch(new, old *RegionInfo) bool { + if new.Region.GetId() == old.Region.GetId() && + new.Region.GetRegionEpoch().GetVersion() == old.Region.GetRegionEpoch().GetVersion() && + new.Region.GetRegionEpoch().GetConfVer() == old.Region.GetRegionEpoch().GetConfVer() { + return true } - return nil + return false +} + +func extractDownloadSSTError(e error) error { + err := errGrpc + switch { + case strings.Contains(e.Error(), "bad format"): + err = errBadFormat + case strings.Contains(e.Error(), "wrong prefix"): + err = errWrongKeyPrefix + case strings.Contains(e.Error(), "corrupted"): + err = errFileCorrupted + case strings.Contains(e.Error(), "Cannot read"): + err = errCannotRead + } + return errors.Annotatef(err, "%s", e) } diff --git a/pkg/restore/range.go b/pkg/restore/range.go new file mode 100644 index 000000000..0d5192ca9 --- /dev/null +++ b/pkg/restore/range.go @@ -0,0 +1,70 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+
+package restore
+
+import (
+	"github.com/pingcap/errors"
+	"github.com/pingcap/kvproto/pkg/import_sstpb"
+	"github.com/pingcap/kvproto/pkg/metapb"
+	"github.com/pingcap/log"
+	"github.com/pingcap/tidb/tablecodec"
+	"go.uber.org/zap"
+
+	"github.com/pingcap/br/pkg/rtree"
+)
+
+// sortRanges checks that the ranges do not overlap and sorts them.
+func sortRanges(ranges []rtree.Range, rewriteRules *RewriteRules) ([]rtree.Range, error) {
+	rangeTree := rtree.NewRangeTree()
+	for _, rg := range ranges {
+		if rewriteRules != nil {
+			startID := tablecodec.DecodeTableID(rg.StartKey)
+			endID := tablecodec.DecodeTableID(rg.EndKey)
+			var rule *import_sstpb.RewriteRule
+			if startID == endID {
+				rg.StartKey, rule = replacePrefix(rg.StartKey, rewriteRules)
+				if rule == nil {
+					log.Warn("cannot find rewrite rule", zap.Binary("key", rg.StartKey))
+				} else {
+					log.Debug(
+						"rewrite start key",
+						zap.Binary("key", rg.StartKey),
+						zap.Stringer("rule", rule))
+				}
+				rg.EndKey, rule = replacePrefix(rg.EndKey, rewriteRules)
+				if rule == nil {
+					log.Warn("cannot find rewrite rule", zap.Binary("key", rg.EndKey))
+				} else {
+					log.Debug(
+						"rewrite end key",
+						zap.Binary("key", rg.EndKey),
+						zap.Stringer("rule", rule))
+				}
+			} else {
+				log.Warn("table id does not match",
+					zap.Binary("startKey", rg.StartKey),
+					zap.Binary("endKey", rg.EndKey),
+					zap.Int64("startID", startID),
+					zap.Int64("endID", endID))
+				return nil, errors.New("table id does not match")
+			}
+		}
+		if out := rangeTree.InsertRange(rg); out != nil {
+			return nil, errors.Errorf("ranges overlapped: %s, %s", out, rg)
+		}
+	}
+	sortedRanges := rangeTree.GetSortedRanges()
+	return sortedRanges, nil
+}
+
+// RegionInfo includes a region and the leader of the region.
+type RegionInfo struct {
+	Region *metapb.Region
+	Leader *metapb.Peer
+}
+
+// RewriteRules contains rules for rewriting keys of tables.
+type RewriteRules struct {
+	Table []*import_sstpb.RewriteRule
+	Data  []*import_sstpb.RewriteRule
+}
diff --git a/pkg/restore/range_test.go b/pkg/restore/range_test.go
new file mode 100644
index 000000000..37561f6b4
--- /dev/null
+++ b/pkg/restore/range_test.go
@@ -0,0 +1,79 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
+package restore
+
+import (
+	"bytes"
+
+	.
"github.com/pingcap/check" + "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/tidb/tablecodec" + + "github.com/pingcap/br/pkg/rtree" +) + +type testRangeSuite struct{} + +var _ = Suite(&testRangeSuite{}) + +type rangeEquals struct { + *CheckerInfo +} + +var RangeEquals Checker = &rangeEquals{ + &CheckerInfo{Name: "RangeEquals", Params: []string{"obtained", "expected"}}, +} + +func (checker *rangeEquals) Check(params []interface{}, names []string) (result bool, error string) { + obtained := params[0].([]rtree.Range) + expected := params[1].([]rtree.Range) + if len(obtained) != len(expected) { + return false, "" + } + for i := range obtained { + if !bytes.Equal(obtained[i].StartKey, expected[i].StartKey) || + !bytes.Equal(obtained[i].EndKey, expected[i].EndKey) { + return false, "" + } + } + return true, "" +} + +func (s *testRangeSuite) TestSortRange(c *C) { + dataRules := []*import_sstpb.RewriteRule{ + {OldKeyPrefix: tablecodec.GenTableRecordPrefix(1), NewKeyPrefix: tablecodec.GenTableRecordPrefix(4)}, + {OldKeyPrefix: tablecodec.GenTableRecordPrefix(2), NewKeyPrefix: tablecodec.GenTableRecordPrefix(5)}, + } + rewriteRules := &RewriteRules{ + Table: make([]*import_sstpb.RewriteRule, 0), + Data: dataRules, + } + ranges1 := []rtree.Range{ + {StartKey: append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...), + EndKey: append(tablecodec.GenTableRecordPrefix(1), []byte("bbb")...), Files: nil}, + } + rs1, err := sortRanges(ranges1, rewriteRules) + c.Assert(err, IsNil, Commentf("sort range1 failed: %v", err)) + c.Assert(rs1, RangeEquals, []rtree.Range{ + {StartKey: append(tablecodec.GenTableRecordPrefix(4), []byte("aaa")...), + EndKey: append(tablecodec.GenTableRecordPrefix(4), []byte("bbb")...), Files: nil}, + }) + + ranges2 := []rtree.Range{ + {StartKey: append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...), + EndKey: append(tablecodec.GenTableRecordPrefix(2), []byte("bbb")...), Files: nil}, + } + _, err = sortRanges(ranges2, rewriteRules) + c.Assert(err, ErrorMatches, ".*table id does not match.*") + + ranges3 := initRanges() + rewriteRules1 := initRewriteRules() + rs3, err := sortRanges(ranges3, rewriteRules1) + c.Assert(err, IsNil, Commentf("sort range1 failed: %v", err)) + c.Assert(rs3, RangeEquals, []rtree.Range{ + {StartKey: []byte("bbd"), EndKey: []byte("bbf"), Files: nil}, + {StartKey: []byte("bbf"), EndKey: []byte("bbj"), Files: nil}, + {StartKey: []byte("xxa"), EndKey: []byte("xxe"), Files: nil}, + {StartKey: []byte("xxe"), EndKey: []byte("xxz"), Files: nil}, + }) +} diff --git a/pkg/restore/split.go b/pkg/restore/split.go new file mode 100644 index 000000000..03153097a --- /dev/null +++ b/pkg/restore/split.go @@ -0,0 +1,413 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package restore + +import ( + "bytes" + "context" + "strings" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/pingcap/log" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/codec" + "go.uber.org/zap" + + "github.com/pingcap/br/pkg/rtree" +) + +// Constants for split retry machinery. 
+const (
+	SplitRetryTimes       = 32
+	SplitRetryInterval    = 50 * time.Millisecond
+	SplitMaxRetryInterval = time.Second
+
+	SplitCheckMaxRetryTimes = 64
+	SplitCheckInterval      = 8 * time.Millisecond
+	SplitMaxCheckInterval   = time.Second
+
+	ScatterWaitMaxRetryTimes = 64
+	ScatterWaitInterval      = 50 * time.Millisecond
+	ScatterMaxWaitInterval   = time.Second
+	ScatterWaitUpperInterval = 180 * time.Second
+
+	RejectStoreCheckRetryTimes  = 64
+	RejectStoreCheckInterval    = 100 * time.Millisecond
+	RejectStoreMaxCheckInterval = 2 * time.Second
+)
+
+// RegionSplitter is an executor of region splits, driven by rewrite rules and ranges.
+type RegionSplitter struct {
+	client SplitClient
+}
+
+// NewRegionSplitter returns a new RegionSplitter.
+func NewRegionSplitter(client SplitClient) *RegionSplitter {
+	return &RegionSplitter{
+		client: client,
+	}
+}
+
+// OnSplitFunc is called before splitting a range.
+type OnSplitFunc func(key [][]byte)
+
+// Split executes a region split. It first splits regions by the rewrite rules,
+// then by the end key of each range.
+// tableRules includes the prefix of a table, since some ranges may have
+// a prefix with a record sequence or index sequence.
+// note: all ranges and rewrite rules must use raw keys.
+func (rs *RegionSplitter) Split(
+	ctx context.Context,
+	ranges []rtree.Range,
+	rewriteRules *RewriteRules,
+	rejectStores map[uint64]bool,
+	onSplit OnSplitFunc,
+) error {
+	if len(ranges) == 0 {
+		return nil
+	}
+	startTime := time.Now()
+	// Sort the ranges to get the min and max keys of all the ranges.
+	sortedRanges, errSplit := sortRanges(ranges, rewriteRules)
+	if errSplit != nil {
+		return errors.Trace(errSplit)
+	}
+	minKey := codec.EncodeBytes([]byte{}, sortedRanges[0].StartKey)
+	maxKey := codec.EncodeBytes([]byte{}, sortedRanges[len(sortedRanges)-1].EndKey)
+	for _, rule := range rewriteRules.Table {
+		if bytes.Compare(minKey, rule.GetNewKeyPrefix()) > 0 {
+			minKey = rule.GetNewKeyPrefix()
+		}
+		if bytes.Compare(maxKey, rule.GetNewKeyPrefix()) < 0 {
+			maxKey = rule.GetNewKeyPrefix()
+		}
+	}
+	for _, rule := range rewriteRules.Data {
+		if bytes.Compare(minKey, rule.GetNewKeyPrefix()) > 0 {
+			minKey = rule.GetNewKeyPrefix()
+		}
+		if bytes.Compare(maxKey, rule.GetNewKeyPrefix()) < 0 {
+			maxKey = rule.GetNewKeyPrefix()
+		}
+	}
+	interval := SplitRetryInterval
+	scatterRegions := make([]*RegionInfo, 0)
+	allRegions := make([]*RegionInfo, 0)
+SplitRegions:
+	for i := 0; i < SplitRetryTimes; i++ {
+		regions, errScan := paginateScanRegion(ctx, rs.client, minKey, maxKey, scanRegionPaginationLimit)
+		if errScan != nil {
+			return errors.Trace(errScan)
+		}
+		allRegions = append(allRegions, regions...)
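+		// allRegions accumulates every region seen across retry attempts; it is
+		// only read later, when waiting for peers on rejected stores to go away.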
+ if len(regions) == 0 { + log.Warn("cannot scan any region") + return nil + } + splitKeyMap := getSplitKeys(rewriteRules, sortedRanges, regions) + regionMap := make(map[uint64]*RegionInfo) + for _, region := range regions { + regionMap[region.Region.GetId()] = region + } + for regionID, keys := range splitKeyMap { + var newRegions []*RegionInfo + region := regionMap[regionID] + newRegions, errSplit = rs.splitAndScatterRegions(ctx, region, keys) + if errSplit != nil { + if strings.Contains(errSplit.Error(), "no valid key") { + for _, key := range keys { + log.Error("no valid key", + zap.Binary("startKey", region.Region.StartKey), + zap.Binary("endKey", region.Region.EndKey), + zap.Binary("key", codec.EncodeBytes([]byte{}, key))) + } + return errors.Trace(errSplit) + } + interval = 2 * interval + if interval > SplitMaxRetryInterval { + interval = SplitMaxRetryInterval + } + time.Sleep(interval) + if i > 3 { + log.Warn("splitting regions failed, retry it", zap.Error(errSplit), zap.ByteStrings("keys", keys)) + } + continue SplitRegions + } + log.Debug("split regions", zap.Stringer("region", region.Region), zap.ByteStrings("keys", keys)) + scatterRegions = append(scatterRegions, newRegions...) + onSplit(keys) + } + break + } + if errSplit != nil { + return errors.Trace(errSplit) + } + if len(rejectStores) > 0 { + startTime = time.Now() + log.Info("start to wait for removing rejected stores", zap.Reflect("rejectStores", rejectStores)) + for _, region := range allRegions { + if !rs.waitForRemoveRejectStores(ctx, region, rejectStores) { + log.Error("waiting for removing rejected stores failed", + zap.Stringer("region", region.Region)) + return errors.New("waiting for removing rejected stores failed") + } + } + log.Info("waiting for removing rejected stores done", + zap.Int("regions", len(allRegions)), zap.Duration("take", time.Since(startTime))) + } + log.Info("start to wait for scattering regions", + zap.Int("regions", len(scatterRegions)), zap.Duration("take", time.Since(startTime))) + startTime = time.Now() + scatterCount := 0 + for _, region := range scatterRegions { + rs.waitForScatterRegion(ctx, region) + if time.Since(startTime) > ScatterWaitUpperInterval { + break + } + scatterCount++ + } + if scatterCount == len(scatterRegions) { + log.Info("waiting for scattering regions done", + zap.Int("regions", len(scatterRegions)), zap.Duration("take", time.Since(startTime))) + } else { + log.Warn("waiting for scattering regions timeout", + zap.Int("scatterCount", scatterCount), + zap.Int("regions", len(scatterRegions)), + zap.Duration("take", time.Since(startTime))) + } + return nil +} + +func (rs *RegionSplitter) hasRegion(ctx context.Context, regionID uint64) (bool, error) { + regionInfo, err := rs.client.GetRegionByID(ctx, regionID) + if err != nil { + return false, err + } + return regionInfo != nil, nil +} + +func (rs *RegionSplitter) isScatterRegionFinished(ctx context.Context, regionID uint64) (bool, error) { + resp, err := rs.client.GetOperator(ctx, regionID) + if err != nil { + return false, err + } + // Heartbeat may not be sent to PD + if respErr := resp.GetHeader().GetError(); respErr != nil { + if respErr.GetType() == pdpb.ErrorType_REGION_NOT_FOUND { + return true, nil + } + return false, errors.Errorf("get operator error: %s", respErr.GetType()) + } + retryTimes := ctx.Value(retryTimes).(int) + if retryTimes > 3 { + log.Warn("get operator", zap.Uint64("regionID", regionID), zap.Stringer("resp", resp)) + } + // If the current operator of the region is not 'scatter-region', we could 
assume + // that 'scatter-operator' has finished or timeout + ok := string(resp.GetDesc()) != "scatter-region" || resp.GetStatus() != pdpb.OperatorStatus_RUNNING + return ok, nil +} + +func (rs *RegionSplitter) hasRejectStorePeer( + ctx context.Context, + regionID uint64, + rejectStores map[uint64]bool, +) (bool, error) { + regionInfo, err := rs.client.GetRegionByID(ctx, regionID) + if err != nil { + return false, err + } + if regionInfo == nil { + return false, nil + } + for _, peer := range regionInfo.Region.GetPeers() { + if rejectStores[peer.GetStoreId()] { + return true, nil + } + } + retryTimes := ctx.Value(retryTimes).(int) + if retryTimes > 10 { + log.Warn("get region info", zap.Stringer("region", regionInfo.Region)) + } + return false, nil +} + +func (rs *RegionSplitter) waitForSplit(ctx context.Context, regionID uint64) { + interval := SplitCheckInterval + for i := 0; i < SplitCheckMaxRetryTimes; i++ { + ok, err := rs.hasRegion(ctx, regionID) + if err != nil { + log.Warn("wait for split failed", zap.Error(err)) + return + } + if ok { + break + } + interval = 2 * interval + if interval > SplitMaxCheckInterval { + interval = SplitMaxCheckInterval + } + time.Sleep(interval) + } +} + +type retryTimeKey struct{} + +var retryTimes = new(retryTimeKey) + +func (rs *RegionSplitter) waitForScatterRegion(ctx context.Context, regionInfo *RegionInfo) { + interval := ScatterWaitInterval + regionID := regionInfo.Region.GetId() + for i := 0; i < ScatterWaitMaxRetryTimes; i++ { + ctx1 := context.WithValue(ctx, retryTimes, i) + ok, err := rs.isScatterRegionFinished(ctx1, regionID) + if err != nil { + log.Warn("scatter region failed: do not have the region", + zap.Stringer("region", regionInfo.Region)) + return + } + if ok { + break + } + interval = 2 * interval + if interval > ScatterMaxWaitInterval { + interval = ScatterMaxWaitInterval + } + time.Sleep(interval) + } +} + +func (rs *RegionSplitter) waitForRemoveRejectStores( + ctx context.Context, + regionInfo *RegionInfo, + rejectStores map[uint64]bool, +) bool { + interval := RejectStoreCheckInterval + regionID := regionInfo.Region.GetId() + for i := 0; i < RejectStoreCheckRetryTimes; i++ { + ctx1 := context.WithValue(ctx, retryTimes, i) + ok, err := rs.hasRejectStorePeer(ctx1, regionID, rejectStores) + if err != nil { + log.Warn("wait for rejecting store failed", + zap.Stringer("region", regionInfo.Region), + zap.Error(err)) + return false + } + // Do not have any peer in the rejected store, return true + if !ok { + return true + } + interval = 2 * interval + if interval > RejectStoreMaxCheckInterval { + interval = RejectStoreMaxCheckInterval + } + time.Sleep(interval) + } + + return false +} + +func (rs *RegionSplitter) splitAndScatterRegions( + ctx context.Context, regionInfo *RegionInfo, keys [][]byte, +) ([]*RegionInfo, error) { + newRegions, err := rs.client.BatchSplitRegions(ctx, regionInfo, keys) + if err != nil { + return nil, err + } + for _, region := range newRegions { + // Wait for a while until the regions successfully splits. 
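+		// waitForSplit polls GetRegionByID with capped exponential backoff;
+		// a failed scatter is only logged because scattering is best-effort.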
+		rs.waitForSplit(ctx, region.Region.Id)
+		if err = rs.client.ScatterRegion(ctx, region); err != nil {
+			log.Warn("scatter region failed", zap.Stringer("region", region.Region), zap.Error(err))
+		}
+	}
+	return newRegions, nil
+}
+
+// getSplitKeys checks whether the regions should be split by the new prefixes
+// of the rewrite rules and the end keys of the ranges, and groups the split
+// keys by region id.
+func getSplitKeys(rewriteRules *RewriteRules, ranges []rtree.Range, regions []*RegionInfo) map[uint64][][]byte {
+	splitKeyMap := make(map[uint64][][]byte)
+	checkKeys := make([][]byte, 0)
+	for _, rule := range rewriteRules.Table {
+		checkKeys = append(checkKeys, rule.GetNewKeyPrefix())
+	}
+	for _, rule := range rewriteRules.Data {
+		checkKeys = append(checkKeys, rule.GetNewKeyPrefix())
+	}
+	for _, rg := range ranges {
+		checkKeys = append(checkKeys, truncateRowKey(rg.EndKey))
+	}
+	for _, key := range checkKeys {
+		if region := needSplit(key, regions); region != nil {
+			splitKeys, ok := splitKeyMap[region.Region.GetId()]
+			if !ok {
+				splitKeys = make([][]byte, 0, 1)
+			}
+			splitKeyMap[region.Region.GetId()] = append(splitKeys, key)
+			log.Debug("get key for split region",
+				zap.Binary("key", key),
+				zap.Binary("startKey", region.Region.StartKey),
+				zap.Binary("endKey", region.Region.EndKey))
+		}
+	}
+	return splitKeyMap
+}
+
+// needSplit checks whether a key is a necessary split point;
+// if it is, the region to split is returned.
+func needSplit(splitKey []byte, regions []*RegionInfo) *RegionInfo {
+	// If splitKey is the max key.
+	if len(splitKey) == 0 {
+		return nil
+	}
+	splitKey = codec.EncodeBytes([]byte{}, splitKey)
+	for _, region := range regions {
+		// If splitKey is the boundary of the region
+		if bytes.Equal(splitKey, region.Region.GetStartKey()) {
+			return nil
+		}
+		// If splitKey is in a region
+		if bytes.Compare(splitKey, region.Region.GetStartKey()) > 0 && beforeEnd(splitKey, region.Region.GetEndKey()) {
+			return region
+		}
+	}
+	return nil
+}
+
+var (
+	tablePrefix  = []byte{'t'}
+	idLen        = 8
+	recordPrefix = []byte("_r")
+)
+
+func truncateRowKey(key []byte) []byte {
+	if bytes.HasPrefix(key, tablePrefix) &&
+		len(key) > tablecodec.RecordRowKeyLen &&
+		bytes.HasPrefix(key[len(tablePrefix)+idLen:], recordPrefix) {
+		return key[:tablecodec.RecordRowKeyLen]
+	}
+	return key
+}
+
+func beforeEnd(key []byte, end []byte) bool {
+	return bytes.Compare(key, end) < 0 || len(end) == 0
+}
+
+func replacePrefix(s []byte, rewriteRules *RewriteRules) ([]byte, *import_sstpb.RewriteRule) {
+	// Search the data rules first.
+	for _, rule := range rewriteRules.Data {
+		if bytes.HasPrefix(s, rule.GetOldKeyPrefix()) {
+			return append(append([]byte{}, rule.GetNewKeyPrefix()...), s[len(rule.GetOldKeyPrefix()):]...), rule
+		}
+	}
+	for _, rule := range rewriteRules.Table {
+		if bytes.HasPrefix(s, rule.GetOldKeyPrefix()) {
+			return append(append([]byte{}, rule.GetNewKeyPrefix()...), s[len(rule.GetOldKeyPrefix()):]...), rule
+		}
+	}
+
+	return s, nil
+}
diff --git a/pkg/restore/split_client.go b/pkg/restore/split_client.go
new file mode 100644
index 000000000..d9b5f8677
--- /dev/null
+++ b/pkg/restore/split_client.go
@@ -0,0 +1,363 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
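
Aside on replacePrefix above: the nested append builds the rewritten key in a fresh slice, so the rule's NewKeyPrefix backing array is never aliased by later appends. A minimal demonstration of the idiom:

```go
package main

import (
	"bytes"
	"fmt"
)

// rewrite swaps oldPrefix for newPrefix, copying into a fresh slice exactly
// like replacePrefix does, so later appends cannot clobber newPrefix.
func rewrite(key, oldPrefix, newPrefix []byte) []byte {
	if !bytes.HasPrefix(key, oldPrefix) {
		return key
	}
	return append(append([]byte{}, newPrefix...), key[len(oldPrefix):]...)
}

func main() {
	out := rewrite([]byte("cc_row1"), []byte("cc"), []byte("bb"))
	fmt.Println(string(out)) // bb_row1
}
```
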
+
+package restore
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/kvproto/pkg/kvrpcpb"
+	"github.com/pingcap/kvproto/pkg/metapb"
+	"github.com/pingcap/kvproto/pkg/pdpb"
+	"github.com/pingcap/kvproto/pkg/tikvpb"
+	pd "github.com/pingcap/pd/v4/client"
+	"github.com/pingcap/pd/v4/server/schedule/placement"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+)
+
+// SplitClient is an external client used by RegionSplitter.
+type SplitClient interface {
+	// GetStore gets a store by a store id.
+	GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error)
+	// GetRegion gets a region which includes a specified key.
+	GetRegion(ctx context.Context, key []byte) (*RegionInfo, error)
+	// GetRegionByID gets a region by a region id.
+	GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error)
+	// SplitRegion splits a region at a key; if the key is not included in the region, it returns nil.
+	// note: the key should not be encoded
+	SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error)
+	// BatchSplitRegions splits a region at a batch of keys.
+	// note: the keys should not be encoded
+	BatchSplitRegions(ctx context.Context, regionInfo *RegionInfo, keys [][]byte) ([]*RegionInfo, error)
+	// ScatterRegion scatters a specified region.
+	ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error
+	// GetOperator gets the status of the operator of the specified region.
+	GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error)
+	// ScanRegions gets a list of regions, starting from the region that contains key.
+	// Limit limits the maximum number of regions returned.
+	ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error)
+	// GetPlacementRule loads a placement rule from PD.
+	GetPlacementRule(ctx context.Context, groupID, ruleID string) (placement.Rule, error)
+	// SetPlacementRule inserts or updates a placement rule in PD.
+	SetPlacementRule(ctx context.Context, rule placement.Rule) error
+	// DeletePlacementRule removes a placement rule from PD.
+	DeletePlacementRule(ctx context.Context, groupID, ruleID string) error
+	// SetStoresLabel adds or updates the specified label of the stores.
+	// If labelValue is empty, it clears the label.
+	SetStoresLabel(ctx context.Context, stores []uint64, labelKey, labelValue string) error
+}
+
+// pdClient is a wrapper of the PD client that can be used by RegionSplitter.
+type pdClient struct {
+	mu         sync.Mutex
+	client     pd.Client
+	tlsConf    *tls.Config
+	storeCache map[uint64]*metapb.Store
+}
+
+// NewSplitClient returns a client used by RegionSplitter.
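+// The returned client caches store information fetched from PD in memory and,
+// when tlsConf is non-nil, dials TiKV stores over TLS (see BatchSplitRegions).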
+func NewSplitClient(client pd.Client, tlsConf *tls.Config) SplitClient { + return &pdClient{ + client: client, + tlsConf: tlsConf, + storeCache: make(map[uint64]*metapb.Store), + } +} + +func (c *pdClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { + c.mu.Lock() + defer c.mu.Unlock() + store, ok := c.storeCache[storeID] + if ok { + return store, nil + } + store, err := c.client.GetStore(ctx, storeID) + if err != nil { + return nil, err + } + c.storeCache[storeID] = store + return store, nil + +} + +func (c *pdClient) GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) { + region, leader, err := c.client.GetRegion(ctx, key) + if err != nil { + return nil, err + } + if region == nil { + return nil, nil + } + return &RegionInfo{ + Region: region, + Leader: leader, + }, nil +} + +func (c *pdClient) GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) { + region, leader, err := c.client.GetRegionByID(ctx, regionID) + if err != nil { + return nil, err + } + if region == nil { + return nil, nil + } + return &RegionInfo{ + Region: region, + Leader: leader, + }, nil +} + +func (c *pdClient) SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error) { + var peer *metapb.Peer + if regionInfo.Leader != nil { + peer = regionInfo.Leader + } else { + if len(regionInfo.Region.Peers) == 0 { + return nil, errors.New("region does not have peer") + } + peer = regionInfo.Region.Peers[0] + } + storeID := peer.GetStoreId() + store, err := c.GetStore(ctx, storeID) + if err != nil { + return nil, err + } + conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure()) + if err != nil { + return nil, err + } + defer conn.Close() + + client := tikvpb.NewTikvClient(conn) + resp, err := client.SplitRegion(ctx, &kvrpcpb.SplitRegionRequest{ + Context: &kvrpcpb.Context{ + RegionId: regionInfo.Region.Id, + RegionEpoch: regionInfo.Region.RegionEpoch, + Peer: peer, + }, + SplitKey: key, + }) + if err != nil { + return nil, err + } + if resp.RegionError != nil { + return nil, errors.Errorf("split region failed: region=%v, key=%x, err=%v", regionInfo.Region, key, resp.RegionError) + } + + // BUG: Left is deprecated, it may be nil even if split is succeed! + // Assume the new region is the left one. + newRegion := resp.GetLeft() + if newRegion == nil { + regions := resp.GetRegions() + for _, r := range regions { + if bytes.Equal(r.GetStartKey(), regionInfo.Region.GetStartKey()) { + newRegion = r + break + } + } + } + if newRegion == nil { + return nil, errors.New("split region failed: new region is nil") + } + var leader *metapb.Peer + // Assume the leaders will be at the same store. 
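+	// If the old region had no known leader, leave the new leader nil; callers
+	// such as ingestSST fall back to the first peer.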
+ if regionInfo.Leader != nil { + for _, p := range newRegion.GetPeers() { + if p.GetStoreId() == regionInfo.Leader.GetStoreId() { + leader = p + break + } + } + } + return &RegionInfo{ + Region: newRegion, + Leader: leader, + }, nil +} + +func (c *pdClient) BatchSplitRegions( + ctx context.Context, regionInfo *RegionInfo, keys [][]byte, +) ([]*RegionInfo, error) { + var peer *metapb.Peer + if regionInfo.Leader != nil { + peer = regionInfo.Leader + } else { + if len(regionInfo.Region.Peers) == 0 { + return nil, errors.New("region does not have peer") + } + peer = regionInfo.Region.Peers[0] + } + + storeID := peer.GetStoreId() + store, err := c.GetStore(ctx, storeID) + if err != nil { + return nil, err + } + opt := grpc.WithInsecure() + if c.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(c.tlsConf)) + } + conn, err := grpc.Dial(store.GetAddress(), opt) + if err != nil { + return nil, err + } + defer conn.Close() + client := tikvpb.NewTikvClient(conn) + resp, err := client.SplitRegion(ctx, &kvrpcpb.SplitRegionRequest{ + Context: &kvrpcpb.Context{ + RegionId: regionInfo.Region.Id, + RegionEpoch: regionInfo.Region.RegionEpoch, + Peer: peer, + }, + SplitKeys: keys, + }) + if err != nil { + return nil, err + } + if resp.RegionError != nil { + return nil, errors.Errorf("split region failed: region=%v, err=%v", regionInfo.Region, resp.RegionError) + } + + regions := resp.GetRegions() + newRegionInfos := make([]*RegionInfo, 0, len(regions)) + for _, region := range regions { + // Skip the original region + if region.GetId() == regionInfo.Region.GetId() { + continue + } + var leader *metapb.Peer + // Assume the leaders will be at the same store. + if regionInfo.Leader != nil { + for _, p := range region.GetPeers() { + if p.GetStoreId() == regionInfo.Leader.GetStoreId() { + leader = p + break + } + } + } + newRegionInfos = append(newRegionInfos, &RegionInfo{ + Region: region, + Leader: leader, + }) + } + return newRegionInfos, nil +} + +func (c *pdClient) ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error { + return c.client.ScatterRegion(ctx, regionInfo.Region.GetId()) +} + +func (c *pdClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) { + return c.client.GetOperator(ctx, regionID) +} + +func (c *pdClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) { + regions, leaders, err := c.client.ScanRegions(ctx, key, endKey, limit) + if err != nil { + return nil, err + } + regionInfos := make([]*RegionInfo, 0, len(regions)) + for i := range regions { + regionInfos = append(regionInfos, &RegionInfo{ + Region: regions[i], + Leader: leaders[i], + }) + } + return regionInfos, nil +} + +func (c *pdClient) GetPlacementRule(ctx context.Context, groupID, ruleID string) (placement.Rule, error) { + var rule placement.Rule + addr := c.getPDAPIAddr() + if addr == "" { + return rule, errors.New("failed to add stores labels: no leader") + } + req, _ := http.NewRequestWithContext(ctx, "GET", addr+path.Join("/pd/api/v1/config/rule", groupID, ruleID), nil) + res, err := http.DefaultClient.Do(req) + if err != nil { + return rule, errors.WithStack(err) + } + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return rule, errors.WithStack(err) + } + res.Body.Close() + err = json.Unmarshal(b, &rule) + if err != nil { + return rule, errors.WithStack(err) + } + return rule, nil +} + +func (c *pdClient) SetPlacementRule(ctx context.Context, rule placement.Rule) error { + addr := c.getPDAPIAddr() + if 
addr == "" { + return errors.New("failed to add stores labels: no leader") + } + m, _ := json.Marshal(rule) + req, _ := http.NewRequestWithContext(ctx, "POST", addr+path.Join("/pd/api/v1/config/rule"), bytes.NewReader(m)) + res, err := http.DefaultClient.Do(req) + if err != nil { + return errors.WithStack(err) + } + return errors.Trace(res.Body.Close()) +} + +func (c *pdClient) DeletePlacementRule(ctx context.Context, groupID, ruleID string) error { + addr := c.getPDAPIAddr() + if addr == "" { + return errors.New("failed to add stores labels: no leader") + } + req, _ := http.NewRequestWithContext(ctx, "DELETE", addr+path.Join("/pd/api/v1/config/rule", groupID, ruleID), nil) + res, err := http.DefaultClient.Do(req) + if err != nil { + return errors.WithStack(err) + } + return errors.Trace(res.Body.Close()) +} + +func (c *pdClient) SetStoresLabel( + ctx context.Context, stores []uint64, labelKey, labelValue string, +) error { + b := []byte(fmt.Sprintf(`{"%s": "%s"}`, labelKey, labelValue)) + addr := c.getPDAPIAddr() + if addr == "" { + return errors.New("failed to add stores labels: no leader") + } + for _, id := range stores { + req, _ := http.NewRequestWithContext( + ctx, "POST", + addr+path.Join("/pd/api/v1/store", strconv.FormatUint(id, 10), "label"), + bytes.NewReader(b), + ) + res, err := http.DefaultClient.Do(req) + if err != nil { + return errors.WithStack(err) + } + err = res.Body.Close() + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +func (c *pdClient) getPDAPIAddr() string { + addr := c.client.GetLeaderAddr() + if addr != "" && !strings.HasPrefix(addr, "http") { + addr = "http://" + addr + } + return strings.TrimRight(addr, "/") +} diff --git a/pkg/restore/split_test.go b/pkg/restore/split_test.go new file mode 100644 index 000000000..06dab1cf1 --- /dev/null +++ b/pkg/restore/split_test.go @@ -0,0 +1,332 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package restore + +import ( + "bytes" + "context" + "sync" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/pingcap/pd/v4/server/core" + "github.com/pingcap/pd/v4/server/schedule/placement" + "github.com/pingcap/tidb/util/codec" + + "github.com/pingcap/br/pkg/rtree" +) + +type testClient struct { + mu sync.RWMutex + stores map[uint64]*metapb.Store + regions map[uint64]*RegionInfo + regionsInfo *core.RegionsInfo // For now it's only used in ScanRegions + nextRegionID uint64 +} + +func newTestClient(stores map[uint64]*metapb.Store, regions map[uint64]*RegionInfo, nextRegionID uint64) *testClient { + regionsInfo := core.NewRegionsInfo() + for _, regionInfo := range regions { + regionsInfo.AddRegion(core.NewRegionInfo(regionInfo.Region, regionInfo.Leader)) + } + return &testClient{ + stores: stores, + regions: regions, + regionsInfo: regionsInfo, + nextRegionID: nextRegionID, + } +} + +func (c *testClient) GetAllRegions() map[uint64]*RegionInfo { + c.mu.RLock() + defer c.mu.RUnlock() + return c.regions +} + +func (c *testClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { + c.mu.RLock() + defer c.mu.RUnlock() + store, ok := c.stores[storeID] + if !ok { + return nil, errors.Errorf("store not found") + } + return store, nil +} + +func (c *testClient) GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) { + c.mu.RLock() + defer c.mu.RUnlock() + for _, region := range c.regions { + if bytes.Compare(key, region.Region.StartKey) >= 0 && + (len(region.Region.EndKey) == 0 || bytes.Compare(key, region.Region.EndKey) < 0) { + return region, nil + } + } + return nil, errors.Errorf("region not found: key=%s", string(key)) +} + +func (c *testClient) GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) { + c.mu.RLock() + defer c.mu.RUnlock() + region, ok := c.regions[regionID] + if !ok { + return nil, errors.Errorf("region not found: id=%d", regionID) + } + return region, nil +} + +func (c *testClient) SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error) { + c.mu.Lock() + defer c.mu.Unlock() + var target *RegionInfo + splitKey := codec.EncodeBytes([]byte{}, key) + for _, region := range c.regions { + if bytes.Compare(splitKey, region.Region.StartKey) >= 0 && + (len(region.Region.EndKey) == 0 || bytes.Compare(splitKey, region.Region.EndKey) < 0) { + target = region + } + } + if target == nil { + return nil, errors.Errorf("region not found: key=%s", string(key)) + } + newRegion := &RegionInfo{ + Region: &metapb.Region{ + Peers: target.Region.Peers, + Id: c.nextRegionID, + StartKey: target.Region.StartKey, + EndKey: splitKey, + }, + } + c.regions[c.nextRegionID] = newRegion + c.nextRegionID++ + target.Region.StartKey = splitKey + c.regions[target.Region.Id] = target + return newRegion, nil +} + +func (c *testClient) BatchSplitRegions( + ctx context.Context, regionInfo *RegionInfo, keys [][]byte, +) ([]*RegionInfo, error) { + c.mu.Lock() + defer c.mu.Unlock() + newRegions := make([]*RegionInfo, 0) + for _, key := range keys { + var target *RegionInfo + splitKey := codec.EncodeBytes([]byte{}, key) + for _, region := range c.regions { + if bytes.Compare(splitKey, region.Region.GetStartKey()) > 0 && + beforeEnd(splitKey, region.Region.GetEndKey()) { + target = region + } + } + if target == nil { + continue + } + newRegion := &RegionInfo{ + Region: &metapb.Region{ + Peers: target.Region.Peers, + Id: c.nextRegionID, + StartKey: 
target.Region.StartKey, + EndKey: splitKey, + }, + } + c.regions[c.nextRegionID] = newRegion + c.nextRegionID++ + target.Region.StartKey = splitKey + c.regions[target.Region.Id] = target + newRegions = append(newRegions, newRegion) + } + return newRegions, nil +} + +func (c *testClient) ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error { + return nil +} + +func (c *testClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) { + return &pdpb.GetOperatorResponse{ + Header: new(pdpb.ResponseHeader), + }, nil +} + +func (c *testClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) { + infos := c.regionsInfo.ScanRange(key, endKey, limit) + regions := make([]*RegionInfo, 0, len(infos)) + for _, info := range infos { + regions = append(regions, &RegionInfo{ + Region: info.GetMeta(), + Leader: info.GetLeader(), + }) + } + return regions, nil +} + +func (c *testClient) GetPlacementRule(ctx context.Context, groupID, ruleID string) (r placement.Rule, err error) { + return +} + +func (c *testClient) SetPlacementRule(ctx context.Context, rule placement.Rule) error { + return nil +} + +func (c *testClient) DeletePlacementRule(ctx context.Context, groupID, ruleID string) error { + return nil +} + +func (c *testClient) SetStoresLabel(ctx context.Context, stores []uint64, labelKey, labelValue string) error { + return nil +} + +// region: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, ) +// range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj) +// rewrite rules: aa -> xx, cc -> bb +// expected regions after split: +// [, aay), [aay, bb), [bb, bba), [bba, bbf), [bbf, bbh), [bbh, bbj), +// [bbj, cca), [cca, xx), [xx, xxe), [xxe, xxz), [xxz, ) +func (s *testRestoreUtilSuite) TestSplit(c *C) { + client := initTestClient() + ranges := initRanges() + rewriteRules := initRewriteRules() + regionSplitter := NewRegionSplitter(client) + + ctx := context.Background() + err := regionSplitter.Split(ctx, ranges, rewriteRules, map[uint64]bool{}, func(key [][]byte) {}) + if err != nil { + c.Assert(err, IsNil, Commentf("split regions failed: %v", err)) + } + regions := client.GetAllRegions() + if !validateRegions(regions) { + for _, region := range regions { + c.Logf("region: %v\n", region.Region) + } + c.Log("get wrong result") + c.Fail() + } +} + +// region: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, ) +func initTestClient() *testClient { + peers := make([]*metapb.Peer, 1) + peers[0] = &metapb.Peer{ + Id: 1, + StoreId: 1, + } + keys := [6]string{"", "aay", "bba", "bbh", "cca", ""} + regions := make(map[uint64]*RegionInfo) + for i := uint64(1); i < 6; i++ { + startKey := []byte(keys[i-1]) + if len(startKey) != 0 { + startKey = codec.EncodeBytes([]byte{}, startKey) + } + endKey := []byte(keys[i]) + if len(endKey) != 0 { + endKey = codec.EncodeBytes([]byte{}, endKey) + } + regions[i] = &RegionInfo{ + Region: &metapb.Region{ + Id: i, + Peers: peers, + StartKey: startKey, + EndKey: endKey, + }, + } + } + stores := make(map[uint64]*metapb.Store) + stores[1] = &metapb.Store{ + Id: 1, + } + return newTestClient(stores, regions, 6) +} + +// range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj) +func initRanges() []rtree.Range { + var ranges [4]rtree.Range + ranges[0] = rtree.Range{ + StartKey: []byte("aaa"), + EndKey: []byte("aae"), + } + ranges[1] = rtree.Range{ + StartKey: []byte("aae"), + EndKey: []byte("aaz"), + } + ranges[2] = rtree.Range{ + StartKey: []byte("ccd"), + EndKey: []byte("ccf"), + } + ranges[3] = rtree.Range{ + 
StartKey: []byte("ccf"), + EndKey: []byte("ccj"), + } + return ranges[:] +} + +func initRewriteRules() *RewriteRules { + var rules [2]*import_sstpb.RewriteRule + rules[0] = &import_sstpb.RewriteRule{ + OldKeyPrefix: []byte("aa"), + NewKeyPrefix: []byte("xx"), + } + rules[1] = &import_sstpb.RewriteRule{ + OldKeyPrefix: []byte("cc"), + NewKeyPrefix: []byte("bb"), + } + return &RewriteRules{ + Table: rules[:], + Data: rules[:], + } +} + +// expected regions after split: +// [, aay), [aay, bb), [bb, bba), [bba, bbf), [bbf, bbh), [bbh, bbj), +// [bbj, cca), [cca, xx), [xx, xxe), [xxe, xxz), [xxz, ) +func validateRegions(regions map[uint64]*RegionInfo) bool { + keys := [12]string{"", "aay", "bb", "bba", "bbf", "bbh", "bbj", "cca", "xx", "xxe", "xxz", ""} + if len(regions) != 11 { + return false + } +FindRegion: + for i := 1; i < len(keys); i++ { + for _, region := range regions { + startKey := []byte(keys[i-1]) + if len(startKey) != 0 { + startKey = codec.EncodeBytes([]byte{}, startKey) + } + endKey := []byte(keys[i]) + if len(endKey) != 0 { + endKey = codec.EncodeBytes([]byte{}, endKey) + } + if bytes.Equal(region.Region.GetStartKey(), startKey) && + bytes.Equal(region.Region.GetEndKey(), endKey) { + continue FindRegion + } + } + return false + } + return true +} + +func (s *testRestoreUtilSuite) TestNeedSplit(c *C) { + regions := []*RegionInfo{ + { + Region: &metapb.Region{ + StartKey: codec.EncodeBytes([]byte{}, []byte("b")), + EndKey: codec.EncodeBytes([]byte{}, []byte("d")), + }, + }, + } + // Out of region + c.Assert(needSplit([]byte("a"), regions), IsNil) + // Region start key + c.Assert(needSplit([]byte("b"), regions), IsNil) + // In region + region := needSplit([]byte("c"), regions) + c.Assert(bytes.Compare(region.Region.GetStartKey(), codec.EncodeBytes([]byte{}, []byte("b"))), Equals, 0) + c.Assert(bytes.Compare(region.Region.GetEndKey(), codec.EncodeBytes([]byte{}, []byte("d"))), Equals, 0) + // Region end key + c.Assert(needSplit([]byte("d"), regions), IsNil) + // Out of region + c.Assert(needSplit([]byte("e"), regions), IsNil) +} diff --git a/pkg/restore/util.go b/pkg/restore/util.go index 126e864fd..c49c07994 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -1,8 +1,11 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package restore import ( "bytes" "context" + "encoding/hex" "strings" "time" @@ -13,35 +16,18 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" + "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" - "go.uber.org/zap/zapcore" + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/summary" ) var recordPrefixSep = []byte("_r") -type files []*backup.File - -func (fs files) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range fs { - arr.AppendString(fs[i].String()) - } - return nil -} - -type rules []*import_sstpb.RewriteRule - -func (rs rules) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range rs { - arr.AppendString(rs[i].String()) - } - return nil -} - // idAllocator always returns a specified ID type idAllocator struct { id int64 @@ -51,10 +37,15 @@ func newIDAllocator(id int64) *idAllocator { return &idAllocator{id: id} } -func (alloc *idAllocator) Alloc(tableID int64, n uint64) (min int64, max int64, err error) { +func (alloc *idAllocator) Alloc(tableID int64, n uint64, increment, offset int64) (min int64, max int64, err error) { return alloc.id, alloc.id, nil } +func (alloc *idAllocator) AllocSeqCache(sequenceID int64) (min int64, max int64, round int64, err error) { + // TODO fix this function after support backup sequence + return 0, 0, 0, nil +} + func (alloc *idAllocator) Rebase(tableID, newBase int64, allocIDs bool) error { return nil } @@ -71,12 +62,16 @@ func (alloc *idAllocator) NextGlobalAutoID(tableID int64) (int64, error) { return alloc.id, nil } +func (alloc *idAllocator) GetType() autoid.AllocatorType { + return autoid.RowIDAllocType +} + // GetRewriteRules returns the rewrite rule of the new table and the old table. func GetRewriteRules( newTable *model.TableInfo, oldTable *model.TableInfo, newTimeStamp uint64, -) *restore_util.RewriteRules { +) *RewriteRules { tableIDs := make(map[int64]int64) tableIDs[oldTable.ID] = newTable.ID if oldTable.Partition != nil { @@ -119,7 +114,7 @@ func GetRewriteRules( } } - return &restore_util.RewriteRules{ + return &RewriteRules{ Table: tableRules, Data: dataRules, } @@ -159,46 +154,17 @@ func getSSTMetaFromFile( Start: rangeStart, End: rangeEnd, }, + RegionId: region.GetId(), + RegionEpoch: region.GetRegionEpoch(), } } -type retryableFunc func() error -type continueFunc func(error) bool - -func withRetry( - retryableFunc retryableFunc, - continueFunc continueFunc, - attempts uint, - delayTime time.Duration, - maxDelayTime time.Duration, -) error { - var lastErr error - for i := uint(0); i < attempts; i++ { - err := retryableFunc() - if err != nil { - lastErr = err - // If this is the last attempt, do not wait - if !continueFunc(err) || i == attempts-1 { - break - } - delayTime = 2 * delayTime - if delayTime > maxDelayTime { - delayTime = maxDelayTime - } - time.Sleep(delayTime) - } else { - return nil - } - } - return lastErr -} - // ValidateFileRanges checks and returns the ranges of the files. 
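+// It rejects a file whose start key and end key decode to different table IDs.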
func ValidateFileRanges(
 	files []*backup.File,
-	rewriteRules *restore_util.RewriteRules,
-) ([]restore_util.Range, error) {
-	ranges := make([]restore_util.Range, 0, len(files))
+	rewriteRules *RewriteRules,
+) ([]rtree.Range, error) {
+	ranges := make([]rtree.Range, 0, len(files))
 	fileAppended := make(map[string]bool)
 
 	for _, file := range files {
@@ -217,7 +183,7 @@ func ValidateFileRanges(
 				zap.Stringer("file", file))
 			return nil, errors.New("table ids dont match")
 		}
-		ranges = append(ranges, restore_util.Range{
+		ranges = append(ranges, rtree.Range{
 			StartKey: file.GetStartKey(),
 			EndKey:   file.GetEndKey(),
 		})
@@ -227,8 +193,41 @@ func ValidateFileRanges(
 	return ranges, nil
 }
 
+// AttachFilesToRanges attaches files to ranges.
+// It panics if the ranges overlap or a file has no matching range.
+func AttachFilesToRanges(
+	files []*backup.File,
+	ranges []rtree.Range,
+) []rtree.Range {
+	rangeTree := rtree.NewRangeTree()
+	for _, rg := range ranges {
+		rangeTree.Update(rg)
+	}
+	for _, f := range files {
+		rg := rangeTree.Find(&rtree.Range{
+			StartKey: f.GetStartKey(),
+			EndKey:   f.GetEndKey(),
+		})
+		if rg == nil {
+			log.Fatal("range not found",
+				zap.Binary("startKey", f.GetStartKey()),
+				zap.Binary("endKey", f.GetEndKey()))
+		}
+		file := *f
+		rg.Files = append(rg.Files, &file)
+	}
+	if rangeTree.Len() != len(ranges) {
+		log.Fatal("ranges overlapped",
+			zap.Int("ranges length", len(ranges)),
+			zap.Int("tree length", rangeTree.Len()))
+	}
+	sortedRanges := rangeTree.GetSortedRanges()
+	return sortedRanges
+}
+
 // ValidateFileRewriteRule uses rewrite rules to validate the ranges of a file
-func ValidateFileRewriteRule(file *backup.File, rewriteRules *restore_util.RewriteRules) error {
+func ValidateFileRewriteRule(file *backup.File, rewriteRules *RewriteRules) error {
 	// Check if the start key has a matched rewrite key
 	_, startRule := rewriteRawKey(file.GetStartKey(), rewriteRules)
 	if rewriteRules != nil && startRule == nil {
@@ -269,7 +268,7 @@ func ValidateFileRewriteRule(file *backup.File, rewriteRules *restore_util.Rewri
 }
 
 // Rewrites a raw key and returns a encoded key
-func rewriteRawKey(key []byte, rewriteRules *restore_util.RewriteRules) ([]byte, *import_sstpb.RewriteRule) {
+func rewriteRawKey(key []byte, rewriteRules *RewriteRules) ([]byte, *import_sstpb.RewriteRule) {
 	if rewriteRules == nil {
 		return codec.EncodeBytes([]byte{}, key), nil
 	}
@@ -281,7 +280,7 @@ func rewriteRawKey(key []byte, rewriteRules *restore_util.RewriteRules) ([]byte,
 	return nil, nil
 }
 
-func matchOldPrefix(key []byte, rewriteRules *restore_util.RewriteRules) *import_sstpb.RewriteRule {
+func matchOldPrefix(key []byte, rewriteRules *RewriteRules) *import_sstpb.RewriteRule {
 	for _, rule := range rewriteRules.Data {
 		if bytes.HasPrefix(key, rule.GetOldKeyPrefix()) {
 			return rule
@@ -295,7 +294,7 @@ func matchOldPrefix(key []byte, rewriteRules *restore_util.RewriteRules) *import
 	return nil
 }
 
-func matchNewPrefix(key []byte, rewriteRules *restore_util.RewriteRules) *import_sstpb.RewriteRule {
+func matchNewPrefix(key []byte, rewriteRules *RewriteRules) *import_sstpb.RewriteRule {
 	for _, rule := range rewriteRules.Data {
 		if bytes.HasPrefix(key, rule.GetNewKeyPrefix()) {
 			return rule
@@ -319,8 +318,8 @@ func truncateTS(key []byte) []byte {
 func SplitRanges(
 	ctx context.Context,
 	client *Client,
-	ranges []restore_util.Range,
-	rewriteRules *restore_util.RewriteRules,
+	ranges []rtree.Range,
+	rewriteRules *RewriteRules,
 	updateCh chan<- struct{},
 ) error {
 	start := time.Now()
@@ -328,21 +327,34 @@ func SplitRanges(
 		elapsed := 
time.Since(start)
 		summary.CollectDuration("split region", elapsed)
 	}()
-	splitter := restore_util.NewRegionSplitter(restore_util.NewClient(client.GetPDClient()))
-	return splitter.Split(ctx, ranges, rewriteRules, func(keys [][]byte) {
+	splitter := NewRegionSplitter(NewSplitClient(client.GetPDClient(), client.GetTLSConfig()))
+	tiflashStores, err := conn.GetAllTiKVStores(ctx, client.GetPDClient(), conn.TiFlashOnly)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	storeMap := make(map[uint64]bool)
+	for _, store := range tiflashStores {
+		storeMap[store.GetId()] = true
+	}
+
+	return splitter.Split(ctx, ranges, rewriteRules, storeMap, func(keys [][]byte) {
 		for range keys {
 			updateCh <- struct{}{}
 		}
 	})
 }
 
-func rewriteFileKeys(file *backup.File, rewriteRules *restore_util.RewriteRules) (startKey, endKey []byte, err error) {
+func rewriteFileKeys(file *backup.File, rewriteRules *RewriteRules) (startKey, endKey []byte, err error) {
 	startID := tablecodec.DecodeTableID(file.GetStartKey())
 	endID := tablecodec.DecodeTableID(file.GetEndKey())
 	var rule *import_sstpb.RewriteRule
 	if startID == endID {
 		startKey, rule = rewriteRawKey(file.GetStartKey(), rewriteRules)
 		if rewriteRules != nil && rule == nil {
+			log.Error("cannot find rewrite rule",
+				zap.Binary("startKey", file.GetStartKey()),
+				zap.Reflect("rewrite table", rewriteRules.Table),
+				zap.Reflect("rewrite data", rewriteRules.Data))
 			err = errors.New("cannot find rewrite rule for start key")
 			return
 		}
@@ -369,11 +381,34 @@ func encodeKeyPrefix(key []byte) []byte {
 	return append(encodedPrefix[:len(encodedPrefix)-9], key[len(key)-ungroupedLen:]...)
 }
 
-// escape the identifier for pretty-printing.
-// For instance, the identifier "foo `bar`" will become "`foo ``bar```".
-// The sqlMode controls whether to escape with backquotes (`) or double quotes
-// (`"`) depending on whether mysql.ModeANSIQuotes is enabled.
-func escapeTableName(cis model.CIStr) string {
-	quote := "`"
-	return quote + strings.Replace(cis.O, quote, quote+quote, -1) + quote
+// paginateScanRegion scans all regions in [startKey, endKey) through
+// paginated ScanRegions calls and returns them at once. Scanning in
+// batches of `limit` keeps each response within the max gRPC message size.
+func paginateScanRegion(
+	ctx context.Context, client SplitClient, startKey, endKey []byte, limit int,
+) ([]*RegionInfo, error) {
+	if len(endKey) != 0 && bytes.Compare(startKey, endKey) >= 0 {
+		return nil, errors.Errorf("startKey >= endKey, startKey %s, endkey %s",
+			hex.EncodeToString(startKey), hex.EncodeToString(endKey))
+	}
+
+	regions := []*RegionInfo{}
+	for {
+		batch, err := client.ScanRegions(ctx, startKey, endKey, limit)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		regions = append(regions, batch...)
+		if len(batch) < limit {
+			// No more regions.
+			break
+		}
+		startKey = batch[len(batch)-1].Region.GetEndKey()
+		if len(startKey) == 0 ||
+			(len(endKey) > 0 && bytes.Compare(startKey, endKey) >= 0) {
+			// The whole key space has been scanned.
+			break
+		}
+	}
+	return regions, nil
 }
diff --git a/pkg/restore/util_test.go b/pkg/restore/util_test.go
index 5da5c9ab7..d1a738fdb 100644
--- a/pkg/restore/util_test.go
+++ b/pkg/restore/util_test.go
@@ -1,12 +1,17 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
 package restore
 
 import (
+	"context"
+	"encoding/binary"
+
+	. 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/codec" ) var _ = Suite(&testRestoreUtilSuite{}) @@ -34,7 +39,7 @@ func (s *testRestoreUtilSuite) TestGetSSTMetaFromFile(c *C) { } func (s *testRestoreUtilSuite) TestValidateFileRanges(c *C) { - rules := &restore_util.RewriteRules{ + rules := &RewriteRules{ Table: []*import_sstpb.RewriteRule{&import_sstpb.RewriteRule{ OldKeyPrefix: []byte(tablecodec.EncodeTablePrefix(1)), NewKeyPrefix: []byte(tablecodec.EncodeTablePrefix(2)), @@ -104,3 +109,105 @@ func (s *testRestoreUtilSuite) TestValidateFileRanges(c *C) { ) c.Assert(err, ErrorMatches, "unexpected rewrite rules") } + +func (s *testRestoreUtilSuite) TestPaginateScanRegion(c *C) { + peers := make([]*metapb.Peer, 1) + peers[0] = &metapb.Peer{ + Id: 1, + StoreId: 1, + } + stores := make(map[uint64]*metapb.Store) + stores[1] = &metapb.Store{ + Id: 1, + } + + makeRegions := func(num uint64) (map[uint64]*RegionInfo, []*RegionInfo) { + regionsMap := make(map[uint64]*RegionInfo, num) + regions := make([]*RegionInfo, 0, num) + endKey := make([]byte, 8) + for i := uint64(0); i < num-1; i++ { + ri := &RegionInfo{ + Region: &metapb.Region{ + Id: i + 1, + Peers: peers, + }, + } + + if i != 0 { + startKey := make([]byte, 8) + binary.BigEndian.PutUint64(startKey, i) + ri.Region.StartKey = codec.EncodeBytes([]byte{}, startKey) + } + endKey = make([]byte, 8) + binary.BigEndian.PutUint64(endKey, i+1) + ri.Region.EndKey = codec.EncodeBytes([]byte{}, endKey) + + regionsMap[i] = ri + regions = append(regions, ri) + } + + if num == 1 { + endKey = []byte{} + } else { + endKey = codec.EncodeBytes([]byte{}, endKey) + } + ri := &RegionInfo{ + Region: &metapb.Region{ + Id: num, + Peers: peers, + StartKey: endKey, + EndKey: []byte{}, + }, + } + regionsMap[num] = ri + regions = append(regions, ri) + + return regionsMap, regions + } + + ctx := context.Background() + regionMap := make(map[uint64]*RegionInfo) + regions := []*RegionInfo{} + batch, err := paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(1) + batch, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(2) + batch, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(3) + batch, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(8) + batch, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(8) + batch, err = paginateScanRegion( + ctx, newTestClient(stores, regionMap, 0), regions[1].Region.StartKey, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions[1:]) + + batch, err = paginateScanRegion( + ctx, newTestClient(stores, regionMap, 0), []byte{}, regions[6].Region.EndKey, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions[:7]) + 
+	batch, err = paginateScanRegion(
+		ctx, newTestClient(stores, regionMap, 0), regions[1].Region.StartKey, regions[1].Region.EndKey, 3)
+	c.Assert(err, IsNil)
+	c.Assert(batch, DeepEquals, regions[1:2])
+
+	_, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{2}, []byte{1}, 3)
+	c.Assert(err, ErrorMatches, "startKey >= endKey.*")
+}
diff --git a/pkg/backup/range_tree.go b/pkg/rtree/rtree.go
similarity index 61%
rename from pkg/backup/range_tree.go
rename to pkg/rtree/rtree.go
index 4d4b3c695..08b757af5 100644
--- a/pkg/backup/range_tree.go
+++ b/pkg/rtree/rtree.go
@@ -1,8 +1,10 @@
-package backup
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
+package rtree
 
 import (
 	"bytes"
-	"encoding/hex"
+	"fmt"
 
 	"github.com/google/btree"
 	"github.com/pingcap/kvproto/pkg/backup"
@@ -15,10 +17,15 @@ type Range struct {
 	StartKey []byte
 	EndKey   []byte
 	Files    []*backup.File
-	Error    *backup.Error
 }
 
-func (rg *Range) intersect(
+// String formats a range to a string
+func (rg *Range) String() string {
+	return fmt.Sprintf("[%x %x]", rg.StartKey, rg.EndKey)
+}
+
+// Intersect returns the intersection of the range with [start, end).
+func (rg *Range) Intersect(
 	start, end []byte,
 ) (subStart, subEnd []byte, isIntersect bool) {
 	// empty mean the max end key
@@ -49,8 +56,8 @@
 	return
 }
 
-// contains check if the range contains the given key, [start, end)
-func (rg *Range) contains(key []byte) bool {
+// Contains checks if the range contains the given key, [start, end)
+func (rg *Range) Contains(key []byte) bool {
 	start, end := rg.StartKey, rg.EndKey
 	return bytes.Compare(key, start) >= 0 &&
 		(len(end) == 0 || bytes.Compare(key, end) < 0)
@@ -65,31 +72,29 @@ func (rg *Range) Less(than btree.Item) bool {
 
 var _ btree.Item = &Range{}
 
-// RangeTree is the result of a backup task
+// RangeTree is a sorted tree for Ranges.
+// All the ranges it stores do not overlap.
 type RangeTree struct {
-	tree *btree.BTree
+	*btree.BTree
 }
 
-func newRangeTree() RangeTree {
+// NewRangeTree returns an empty range tree.
+func NewRangeTree() RangeTree {
	return RangeTree{
-		tree: btree.New(32),
+		BTree: btree.New(32),
 	}
 }
 
-func (rangeTree *RangeTree) len() int {
-	return rangeTree.tree.Len()
-}
-
-// find is a helper function to find an item that contains the range start
+// Find is a helper function to find an item that contains the range start
 // key.
-func (rangeTree *RangeTree) find(rg *Range) *Range {
+func (rangeTree *RangeTree) Find(rg *Range) *Range {
 	var ret *Range
-	rangeTree.tree.DescendLessOrEqual(rg, func(i btree.Item) bool {
+	rangeTree.DescendLessOrEqual(rg, func(i btree.Item) bool {
 		ret = i.(*Range)
 		return false
 	})
 
-	if ret == nil || !ret.contains(rg.StartKey) {
+	if ret == nil || !ret.Contains(rg.StartKey) {
 		return nil
 	}
 
@@ -104,13 +109,13 @@ func (rangeTree *RangeTree) getOverlaps(rg *Range) []*Range {
 	// find() will return Range of range_a
 	// and both startKey of range_a and range_b are less than endKey of range_d,
 	// thus they are regarded as overlapped ranges.
-	found := rangeTree.find(rg)
+	found := rangeTree.Find(rg)
 	if found == nil {
 		found = rg
 	}
 	var overlaps []*Range
-	rangeTree.tree.AscendGreaterOrEqual(found, func(i btree.Item) bool {
+	rangeTree.AscendGreaterOrEqual(found, func(i btree.Item) bool {
 		over := i.(*Range)
 		if len(rg.EndKey) > 0 && bytes.Compare(rg.EndKey, over.StartKey) <= 0 {
 			return false
@@ -121,31 +126,57 @@ func (rangeTree *RangeTree) getOverlaps(rg *Range) []*Range {
 	return overlaps
 }
 
-func (rangeTree *RangeTree) update(rg *Range) {
-	overlaps := rangeTree.getOverlaps(rg)
+// Update inserts the range into the tree and deletes any overlapping ranges.
+func (rangeTree *RangeTree) Update(rg Range) {
+	overlaps := rangeTree.getOverlaps(&rg)
 	// Range has backuped, overwrite overlapping range.
 	for _, item := range overlaps {
 		log.Info("delete overlapping range",
 			zap.Binary("StartKey", item.StartKey),
 			zap.Binary("EndKey", item.EndKey),
 		)
-		rangeTree.tree.Delete(item)
+		rangeTree.Delete(item)
 	}
-	rangeTree.tree.ReplaceOrInsert(rg)
+	rangeTree.ReplaceOrInsert(&rg)
 }
 
-func (rangeTree *RangeTree) put(
+// Put forms a range and inserts it into the tree.
+func (rangeTree *RangeTree) Put(
 	startKey, endKey []byte, files []*backup.File,
 ) {
-	rg := &Range{
+	rg := Range{
 		StartKey: startKey,
 		EndKey:   endKey,
 		Files:    files,
 	}
-	rangeTree.update(rg)
+	rangeTree.Update(rg)
+}
+
+// InsertRange inserts a range into the range tree.
+// It returns the pre-existing overlapped range that was replaced, if any.
+func (rangeTree *RangeTree) InsertRange(rg Range) *Range {
+	out := rangeTree.ReplaceOrInsert(&rg)
+	if out == nil {
+		return nil
+	}
+	return out.(*Range)
+}
+
+// GetSortedRanges collects and returns sorted ranges.
+func (rangeTree *RangeTree) GetSortedRanges() []Range {
+	sortedRanges := make([]Range, 0, rangeTree.Len())
+	rangeTree.Ascend(func(rg btree.Item) bool {
+		if rg == nil {
+			return false
+		}
+		sortedRanges = append(sortedRanges, *rg.(*Range))
+		return true
+	})
+	return sortedRanges
 }
 
-func (rangeTree *RangeTree) getIncompleteRange(
+// GetIncompleteRange returns the sub-ranges of [startKey, endKey) that are
+// not yet covered by the tree.
+func (rangeTree *RangeTree) GetIncompleteRange(
 	startKey, endKey []byte,
 ) []Range {
 	if len(startKey) != 0 && bytes.Equal(startKey, endKey) {
@@ -155,14 +186,14 @@ func (rangeTree *RangeTree) getIncompleteRange(
 	requsetRange := Range{StartKey: startKey, EndKey: endKey}
 	lastEndKey := startKey
 	pviot := &Range{StartKey: startKey}
-	if first := rangeTree.find(pviot); first != nil {
+	if first := rangeTree.Find(pviot); first != nil {
 		pviot.StartKey = first.StartKey
 	}
-	rangeTree.tree.AscendGreaterOrEqual(pviot, func(i btree.Item) bool {
+	rangeTree.AscendGreaterOrEqual(pviot, func(i btree.Item) bool {
 		rg := i.(*Range)
 		if bytes.Compare(lastEndKey, rg.StartKey) < 0 {
 			start, end, isIntersect :=
-				requsetRange.intersect(lastEndKey, rg.StartKey)
+				requsetRange.Intersect(lastEndKey, rg.StartKey)
 			if isIntersect {
 				// There is a gap between the last item and the current item.
incomplete = @@ -176,7 +207,7 @@ func (rangeTree *RangeTree) getIncompleteRange( // Check whether we need append the last range if !bytes.Equal(lastEndKey, endKey) && len(lastEndKey) != 0 && (len(endKey) == 0 || bytes.Compare(lastEndKey, endKey) < 0) { - start, end, isIntersect := requsetRange.intersect(lastEndKey, endKey) + start, end, isIntersect := requsetRange.Intersect(lastEndKey, endKey) if isIntersect { incomplete = append(incomplete, Range{StartKey: start, EndKey: end}) @@ -184,24 +215,3 @@ func (rangeTree *RangeTree) getIncompleteRange( } return incomplete } - -func (rangeTree *RangeTree) checkDupFiles() { - // Name -> SHA256 - files := make(map[string][]byte) - rangeTree.tree.Ascend(func(i btree.Item) bool { - rg := i.(*Range) - for _, f := range rg.Files { - old, ok := files[f.Name] - if ok { - log.Error("dup file", - zap.String("Name", f.Name), - zap.String("SHA256_1", hex.EncodeToString(old)), - zap.String("SHA256_2", hex.EncodeToString(f.Sha256)), - ) - } else { - files[f.Name] = f.Sha256 - } - } - return true - }) -} diff --git a/pkg/backup/range_tree_test.go b/pkg/rtree/rtree_test.go similarity index 64% rename from pkg/backup/range_tree_test.go rename to pkg/rtree/rtree_test.go index a7c2d1cd1..d3e151e25 100644 --- a/pkg/backup/range_tree_test.go +++ b/pkg/rtree/rtree_test.go @@ -1,17 +1,6 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package backup +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
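Note on the rename above: `pkg/backup/range_tree.go` becomes the standalone `pkg/rtree` package with an exported API. A hedged usage sketch based on the signatures in this patch (`NewRangeTree`, `Put`, `GetIncompleteRange`); treat it as illustrative, not as code from this PR:

```go
package main

import (
	"fmt"

	"github.com/pingcap/br/pkg/rtree"
)

func main() {
	tree := rtree.NewRangeTree()
	// Record two backed-up ranges; [c, d) is left uncovered.
	tree.Put([]byte("a"), []byte("c"), nil)
	tree.Put([]byte("d"), []byte("f"), nil)

	// GetIncompleteRange reports the gap that still needs backing up.
	for _, rg := range tree.GetIncompleteRange([]byte("a"), []byte("f")) {
		fmt.Println(rg.String()) // prints "[63 64]", i.e. the gap [c, d)
	}
}
```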
+ +package rtree import ( "fmt" @@ -31,63 +20,19 @@ func newRange(start, end []byte) *Range { } } -func (s *testRangeTreeSuite) TestRangeIntersect(c *C) { - rg := newRange([]byte("a"), []byte("c")) - - start, end, isIntersect := rg.intersect([]byte(""), []byte("")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("a")) - c.Assert(end, DeepEquals, []byte("c")) - - start, end, isIntersect = rg.intersect([]byte(""), []byte("a")) - c.Assert(isIntersect, Equals, false) - c.Assert(start, DeepEquals, []byte(nil)) - c.Assert(end, DeepEquals, []byte(nil)) - - start, end, isIntersect = rg.intersect([]byte(""), []byte("b")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("a")) - c.Assert(end, DeepEquals, []byte("b")) - - start, end, isIntersect = rg.intersect([]byte("a"), []byte("b")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("a")) - c.Assert(end, DeepEquals, []byte("b")) - - start, end, isIntersect = rg.intersect([]byte("aa"), []byte("b")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("aa")) - c.Assert(end, DeepEquals, []byte("b")) - - start, end, isIntersect = rg.intersect([]byte("b"), []byte("c")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("b")) - c.Assert(end, DeepEquals, []byte("c")) - - start, end, isIntersect = rg.intersect([]byte(""), []byte{1}) - c.Assert(isIntersect, Equals, false) - c.Assert(start, DeepEquals, []byte(nil)) - c.Assert(end, DeepEquals, []byte(nil)) - - start, end, isIntersect = rg.intersect([]byte("c"), []byte("")) - c.Assert(isIntersect, Equals, false) - c.Assert(start, DeepEquals, []byte(nil)) - c.Assert(end, DeepEquals, []byte(nil)) -} - func (s *testRangeTreeSuite) TestRangeTree(c *C) { - rangeTree := newRangeTree() - c.Assert(rangeTree.tree.Get(newRange([]byte(""), []byte(""))), IsNil) + rangeTree := NewRangeTree() + c.Assert(rangeTree.Get(newRange([]byte(""), []byte(""))), IsNil) search := func(key []byte) *Range { - rg := rangeTree.tree.Get(newRange(key, []byte(""))) + rg := rangeTree.Get(newRange(key, []byte(""))) if rg == nil { return nil } return rg.(*Range) } assertIncomplete := func(startKey, endKey []byte, ranges []Range) { - incomplete := rangeTree.getIncompleteRange(startKey, endKey) + incomplete := rangeTree.GetIncompleteRange(startKey, endKey) c.Logf("%#v %#v\n%#v\n%#v\n", startKey, endKey, incomplete, ranges) c.Assert(len(incomplete), Equals, len(ranges)) for idx, rg := range incomplete { @@ -111,8 +56,8 @@ func (s *testRangeTreeSuite) TestRangeTree(c *C) { rangeC := newRange([]byte("c"), []byte("d")) rangeD := newRange([]byte("d"), []byte("")) - rangeTree.update(rangeA) - c.Assert(rangeTree.len(), Equals, 1) + rangeTree.Update(*rangeA) + c.Assert(rangeTree.Len(), Equals, 1) assertIncomplete([]byte("a"), []byte("b"), []Range{}) assertIncomplete([]byte(""), []byte(""), []Range{ @@ -120,8 +65,8 @@ func (s *testRangeTreeSuite) TestRangeTree(c *C) { {StartKey: []byte("b"), EndKey: []byte("")}, }) - rangeTree.update(rangeC) - c.Assert(rangeTree.len(), Equals, 2) + rangeTree.Update(*rangeC) + c.Assert(rangeTree.Len(), Equals, 2) assertIncomplete([]byte("a"), []byte("c"), []Range{ {StartKey: []byte("b"), EndKey: []byte("c")}, }) @@ -136,55 +81,99 @@ func (s *testRangeTreeSuite) TestRangeTree(c *C) { }) c.Assert(search([]byte{}), IsNil) - c.Assert(search([]byte("a")), Equals, rangeA) + c.Assert(search([]byte("a")), DeepEquals, rangeA) c.Assert(search([]byte("b")), IsNil) - c.Assert(search([]byte("c")), Equals, rangeC) + 
c.Assert(search([]byte("c")), DeepEquals, rangeC) c.Assert(search([]byte("d")), IsNil) - rangeTree.update(rangeB) - c.Assert(rangeTree.len(), Equals, 3) - c.Assert(search([]byte("b")), Equals, rangeB) + rangeTree.Update(*rangeB) + c.Assert(rangeTree.Len(), Equals, 3) + c.Assert(search([]byte("b")), DeepEquals, rangeB) assertIncomplete([]byte(""), []byte(""), []Range{ {StartKey: []byte(""), EndKey: []byte("a")}, {StartKey: []byte("d"), EndKey: []byte("")}, }) - rangeTree.update(rangeD) - c.Assert(rangeTree.len(), Equals, 4) - c.Assert(search([]byte("d")), Equals, rangeD) + rangeTree.Update(*rangeD) + c.Assert(rangeTree.Len(), Equals, 4) + c.Assert(search([]byte("d")), DeepEquals, rangeD) assertIncomplete([]byte(""), []byte(""), []Range{ {StartKey: []byte(""), EndKey: []byte("a")}, }) // None incomplete for any range after insert range 0 - rangeTree.update(range0) - c.Assert(rangeTree.len(), Equals, 5) + rangeTree.Update(*range0) + c.Assert(rangeTree.Len(), Equals, 5) // Overwrite range B and C. rangeBD := newRange([]byte("b"), []byte("d")) - rangeTree.update(rangeBD) - c.Assert(rangeTree.len(), Equals, 4) + rangeTree.Update(*rangeBD) + c.Assert(rangeTree.Len(), Equals, 4) assertAllComplete() // Overwrite range BD, c-d should be empty - rangeTree.update(rangeB) - c.Assert(rangeTree.len(), Equals, 4) + rangeTree.Update(*rangeB) + c.Assert(rangeTree.Len(), Equals, 4) assertIncomplete([]byte(""), []byte(""), []Range{ {StartKey: []byte("c"), EndKey: []byte("d")}, }) - rangeTree.update(rangeC) - c.Assert(rangeTree.len(), Equals, 5) + rangeTree.Update(*rangeC) + c.Assert(rangeTree.Len(), Equals, 5) assertAllComplete() } +func (s *testRangeTreeSuite) TestRangeIntersect(c *C) { + rg := newRange([]byte("a"), []byte("c")) + + start, end, isIntersect := rg.Intersect([]byte(""), []byte("")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("a")) + c.Assert(end, DeepEquals, []byte("c")) + + start, end, isIntersect = rg.Intersect([]byte(""), []byte("a")) + c.Assert(isIntersect, Equals, false) + c.Assert(start, DeepEquals, []byte(nil)) + c.Assert(end, DeepEquals, []byte(nil)) + + start, end, isIntersect = rg.Intersect([]byte(""), []byte("b")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("a")) + c.Assert(end, DeepEquals, []byte("b")) + + start, end, isIntersect = rg.Intersect([]byte("a"), []byte("b")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("a")) + c.Assert(end, DeepEquals, []byte("b")) + + start, end, isIntersect = rg.Intersect([]byte("aa"), []byte("b")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("aa")) + c.Assert(end, DeepEquals, []byte("b")) + + start, end, isIntersect = rg.Intersect([]byte("b"), []byte("c")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("b")) + c.Assert(end, DeepEquals, []byte("c")) + + start, end, isIntersect = rg.Intersect([]byte(""), []byte{1}) + c.Assert(isIntersect, Equals, false) + c.Assert(start, DeepEquals, []byte(nil)) + c.Assert(end, DeepEquals, []byte(nil)) + + start, end, isIntersect = rg.Intersect([]byte("c"), []byte("")) + c.Assert(isIntersect, Equals, false) + c.Assert(start, DeepEquals, []byte(nil)) + c.Assert(end, DeepEquals, []byte(nil)) +} + func BenchmarkRangeTreeUpdate(b *testing.B) { - rangeTree := newRangeTree() + rangeTree := NewRangeTree() for i := 0; i < b.N; i++ { - item := &Range{ + item := Range{ StartKey: []byte(fmt.Sprintf("%20d", i)), EndKey: []byte(fmt.Sprintf("%20d", i+1))} - rangeTree.update(item) + 
rangeTree.Update(item) } } diff --git a/pkg/storage/flags.go b/pkg/storage/flags.go index 51fd98af1..c828f57a1 100644 --- a/pkg/storage/flags.go +++ b/pkg/storage/flags.go @@ -1,55 +1,21 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( - "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/backup" "github.com/spf13/pflag" ) -const ( - // flagSendCredentialOption specify whether to send credentials to tikv - flagSendCredentialOption = "send-credentials-to-tikv" -) - -var ( - sendCredential bool -) - // DefineFlags adds flags to the flag set corresponding to all backend options. func DefineFlags(flags *pflag.FlagSet) { - flags.BoolP(flagSendCredentialOption, "c", true, - "Whether send credentials to tikv") defineS3Flags(flags) defineGCSFlags(flags) } -// GetBackendOptionsFromFlags obtains the backend options from the flag set. -func GetBackendOptionsFromFlags(flags *pflag.FlagSet) (options BackendOptions, err error) { - sendCredential, err = flags.GetBool(flagSendCredentialOption) - if err != nil { - err = errors.Trace(err) - return - } - - if options.S3, err = getBackendOptionsFromS3Flags(flags); err != nil { - return - } - if options.GCS, err = getBackendOptionsFromGCSFlags(flags); err != nil { - return - } - return -} - -// ParseBackendFromFlags is a convenient function to consecutively call -// GetBackendOptionsFromFlags and ParseBackend. -func ParseBackendFromFlags(flags *pflag.FlagSet, storageFlag string) (*backup.StorageBackend, error) { - u, err := flags.GetString(storageFlag) - if err != nil { - return nil, errors.Trace(err) - } - opts, err := GetBackendOptionsFromFlags(flags) - if err != nil { - return nil, err +// ParseFromFlags obtains the backend options from the flag set. +func (options *BackendOptions) ParseFromFlags(flags *pflag.FlagSet) error { + if err := options.S3.parseFromFlags(flags); err != nil { + return err } - return ParseBackend(u, &opts) + return options.GCS.parseFromFlags(flags) } diff --git a/pkg/storage/gcs.go b/pkg/storage/gcs.go index a0df5b03e..4af3ea059 100644 --- a/pkg/storage/gcs.go +++ b/pkg/storage/gcs.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( @@ -46,55 +48,35 @@ func (options *GCSBackendOptions) apply(gcs *backup.GCS) error { } func defineGCSFlags(flags *pflag.FlagSet) { - flags.String(gcsEndpointOption, "", "Set the GCS endpoint URL") - flags.String(gcsStorageClassOption, "", - `Specify the GCS storage class for objects. -If it is not set, objects uploaded are -followed by the default storage class of the bucket. -See https://cloud.google.com/storage/docs/storage-classes -for valid values.`) - flags.String(gcsPredefinedACL, "", - `Specify the GCS predefined acl for objects. -If it is not set, objects uploaded are -followed by the acl of bucket scope. -See https://cloud.google.com/storage/docs/access-control/lists#predefined-acl -for valid values.`) - flags.String(gcsCredentialsFile, "", - `Set the GCS credentials file path. 
-You can get one from -https://console.cloud.google.com/apis/credentials.`) - - _ = flags.MarkHidden(gcsEndpointOption) - _ = flags.MarkHidden(gcsStorageClassOption) - _ = flags.MarkHidden(gcsPredefinedACL) - _ = flags.MarkHidden(gcsCredentialsFile) + // TODO: remove experimental tag if it's stable + flags.String(gcsEndpointOption, "", "(experimental) Set the GCS endpoint URL") + flags.String(gcsStorageClassOption, "", "(experimental) Specify the GCS storage class for objects") + flags.String(gcsPredefinedACL, "", "(experimental) Specify the GCS predefined acl for objects") + flags.String(gcsCredentialsFile, "", "(experimental) Set the GCS credentials file path") } -func getBackendOptionsFromGCSFlags(flags *pflag.FlagSet) (options GCSBackendOptions, err error) { +func (options *GCSBackendOptions) parseFromFlags(flags *pflag.FlagSet) error { + var err error options.Endpoint, err = flags.GetString(gcsEndpointOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.StorageClass, err = flags.GetString(gcsStorageClassOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.PredefinedACL, err = flags.GetString(gcsPredefinedACL) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.CredentialsFile, err = flags.GetString(gcsCredentialsFile) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } - return + return nil } type gcsStorage struct { @@ -142,11 +124,16 @@ func (s *gcsStorage) FileExists(ctx context.Context, name string) (bool, error) return true, nil } -func newGCSStorage(ctx context.Context, gcs *backup.GCS) (*gcsStorage, error) { - return newGCSStorageWithHTTPClient(ctx, gcs, nil) +func newGCSStorage(ctx context.Context, gcs *backup.GCS, sendCredential bool) (*gcsStorage, error) { + return newGCSStorageWithHTTPClient(ctx, gcs, nil, sendCredential) } -func newGCSStorageWithHTTPClient(ctx context.Context, gcs *backup.GCS, hclient *http.Client) (*gcsStorage, error) { +func newGCSStorageWithHTTPClient( // revive:disable-line:flag-parameter + ctx context.Context, + gcs *backup.GCS, + hclient *http.Client, + sendCredential bool, +) (*gcsStorage, error) { var clientOps []option.ClientOption if gcs.CredentialsBlob == "" { creds, err := google.FindDefaultCredentials(ctx, storage.ScopeReadWrite) diff --git a/pkg/storage/gcs_test.go b/pkg/storage/gcs_test.go index da990cfe7..60a26f616 100644 --- a/pkg/storage/gcs_test.go +++ b/pkg/storage/gcs_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
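Note on the flag refactor above: backend options are now parsed through methods on the option structs rather than free functions. A hedged wiring sketch using the signatures in this patch (`DefineFlags`, `BackendOptions.ParseFromFlags`, `ParseBackend`); the flag set name and URL are illustrative assumptions:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"

	"github.com/pingcap/br/pkg/storage"
)

func main() {
	// Define the S3/GCS backend flags, then parse them into options.
	flags := pflag.NewFlagSet("storage", pflag.ContinueOnError)
	storage.DefineFlags(flags)
	_ = flags.Parse(nil)

	var opts storage.BackendOptions
	if err := opts.ParseFromFlags(flags); err != nil {
		panic(err)
	}
	backend, err := storage.ParseBackend("local:///tmp/backup", &opts)
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed backend: %T\n", backend.Backend)
}
```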
+ package storage import ( @@ -28,7 +30,7 @@ func (r *testStorageSuite) TestGCS(c *C) { PredefinedAcl: "private", CredentialsBlob: "Fake Credentials", } - stg, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + stg, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), false) c.Assert(err, IsNil) err = stg.Write(ctx, "key", []byte("data")) @@ -66,7 +68,6 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { server.CreateBucket(bucketName) { - sendCredential = true gcs := &backup.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -74,13 +75,12 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "FakeCredentials", } - _, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), true) c.Assert(err, IsNil) c.Assert(gcs.CredentialsBlob, Equals, "FakeCredentials") } { - sendCredential = false gcs := &backup.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -88,7 +88,7 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "FakeCredentials", } - _, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), false) c.Assert(err, IsNil) c.Assert(gcs.CredentialsBlob, Equals, "") } @@ -106,7 +106,6 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { defer os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") c.Assert(err, IsNil) - sendCredential = true gcs := &backup.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -114,7 +113,7 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "", } - _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), true) c.Assert(err, IsNil) c.Assert(gcs.CredentialsBlob, Equals, `{"type": "service_account"}`) } @@ -132,7 +131,6 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { defer os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") c.Assert(err, IsNil) - sendCredential = false gcs := &backup.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -140,13 +138,12 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "", } - _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), false) c.Assert(err, IsNil) c.Assert(gcs.CredentialsBlob, Equals, "") } { - sendCredential = true os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") gcs := &backup.GCS{ Bucket: bucketName, @@ -155,7 +152,7 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "", } - _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), true) c.Assert(err, NotNil) } } diff --git a/pkg/storage/local.go b/pkg/storage/local.go index 77ca7f6a4..d2555a978 100644 --- a/pkg/storage/local.go +++ b/pkg/storage/local.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/local_unix.go b/pkg/storage/local_unix.go index be0050e83..aedf7c637 100644 --- a/pkg/storage/local_unix.go +++ b/pkg/storage/local_unix.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ // +build !windows package storage diff --git a/pkg/storage/local_windows.go b/pkg/storage/local_windows.go index a3ab2b784..cb784fad4 100644 --- a/pkg/storage/local_windows.go +++ b/pkg/storage/local_windows.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + // +build windows package storage diff --git a/pkg/storage/noop.go b/pkg/storage/noop.go index 17b1dea55..1ee698342 100644 --- a/pkg/storage/noop.go +++ b/pkg/storage/noop.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import "context" diff --git a/pkg/storage/parse.go b/pkg/storage/parse.go index c470d5458..d75e7663d 100644 --- a/pkg/storage/parse.go +++ b/pkg/storage/parse.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/parse_test.go b/pkg/storage/parse_test.go index d72b8a5b3..3f1bc4d4f 100644 --- a/pkg/storage/parse_test.go +++ b/pkg/storage/parse_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( @@ -19,7 +21,7 @@ var _ = Suite(&testStorageSuite{}) func (r *testStorageSuite) TestCreateStorage(c *C) { _, err := ParseBackend("1invalid:", nil) - c.Assert(err, ErrorMatches, "parse 1invalid:: first path segment in URL cannot contain colon") + c.Assert(err, ErrorMatches, "parse (.*)1invalid:(.*): first path segment in URL cannot contain colon") _, err = ParseBackend("net:storage", nil) c.Assert(err, ErrorMatches, "storage net not support yet") diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index 5db54556c..bf24b9a2b 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( @@ -102,59 +104,51 @@ func (options *S3BackendOptions) apply(s3 *backup.S3) error { } func defineS3Flags(flags *pflag.FlagSet) { - flags.String(s3EndpointOption, "", "Set the S3 endpoint URL, please specify the http or https scheme explicitly") - flags.String(s3RegionOption, "", "Set the S3 region, e.g. us-east-1") - flags.String(s3StorageClassOption, "", "Set the S3 storage class, e.g. STANDARD") - flags.String(s3SSEOption, "", "Set the S3 server-side encryption algorithm, e.g. AES256") - flags.String(s3ACLOption, "", "Set the S3 canned ACLs, e.g. authenticated-read") - flags.String(s3ProviderOption, "", "Set the S3 provider, e.g. aws, alibaba, ceph") - - _ = flags.MarkHidden(s3EndpointOption) - _ = flags.MarkHidden(s3RegionOption) - _ = flags.MarkHidden(s3StorageClassOption) - _ = flags.MarkHidden(s3SSEOption) - _ = flags.MarkHidden(s3ACLOption) - _ = flags.MarkHidden(s3ProviderOption) + // TODO: remove experimental tag if it's stable + flags.String(s3EndpointOption, "", + "(experimental) Set the S3 endpoint URL, please specify the http or https scheme explicitly") + flags.String(s3RegionOption, "", "(experimental) Set the S3 region, e.g. us-east-1") + flags.String(s3StorageClassOption, "", "(experimental) Set the S3 storage class, e.g. STANDARD") + flags.String(s3SSEOption, "", "(experimental) Set the S3 server-side encryption algorithm, e.g. AES256") + flags.String(s3ACLOption, "", "(experimental) Set the S3 canned ACLs, e.g. authenticated-read") + flags.String(s3ProviderOption, "", "(experimental) Set the S3 provider, e.g. 
aws, alibaba, ceph") } -func getBackendOptionsFromS3Flags(flags *pflag.FlagSet) (options S3BackendOptions, err error) { +func (options *S3BackendOptions) parseFromFlags(flags *pflag.FlagSet) error { + var err error options.Endpoint, err = flags.GetString(s3EndpointOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.Region, err = flags.GetString(s3RegionOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.SSE, err = flags.GetString(s3SSEOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.ACL, err = flags.GetString(s3ACLOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.StorageClass, err = flags.GetString(s3StorageClassOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.ForcePathStyle = true options.Provider, err = flags.GetString(s3ProviderOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } - - return options, err + return nil } // newS3Storage initialize a new s3 storage for metadata -func newS3Storage(backend *backup.S3) (*S3Storage, error) { +func newS3Storage( // revive:disable-line:flag-parameter + backend *backup.S3, + sendCredential bool, +) (*S3Storage, error) { qs := *backend awsConfig := aws.NewConfig(). WithMaxRetries(maxRetries). diff --git a/pkg/storage/s3_test.go b/pkg/storage/s3_test.go index 92a5a8737..bd35b6faf 100644 --- a/pkg/storage/s3_test.go +++ b/pkg/storage/s3_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( @@ -72,7 +74,7 @@ func (r *testStorageSuite) TestApply(c *C) { options: S3BackendOptions{ Endpoint: "!http:12345", }, - errMsg: "parse !http:12345: first path segment in URL cannot contain colon", + errMsg: "parse (.*)!http:12345(.*): first path segment in URL cannot contain colon", errReturn: true, }, } @@ -236,7 +238,7 @@ func (r *testStorageSuite) TestS3Storage(c *C) { testFn := func(test *testcase, c *C) { c.Log(test.name) ctx := aws.BackgroundContext() - sendCredential = test.sendCredential + sendCredential := test.sendCredential if test.hackCheck { checkS3Bucket = func(svc *s3.S3, bucket string) error { return nil } } @@ -245,7 +247,7 @@ func (r *testStorageSuite) TestS3Storage(c *C) { S3: test.s3, }, } - _, err := Create(ctx, s3) + _, err := Create(ctx, s3, sendCredential) if test.errReturn { c.Assert(err, NotNil) return diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 173638bdd..91143ca54 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
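Note on the storage.go change below: `Create` now takes the credential toggle explicitly instead of reading the old `sendCredential` package global, so each caller decides whether credentials are forwarded. A hedged sketch, assuming the signatures in this patch:

```go
package main

import (
	"context"

	"github.com/pingcap/br/pkg/storage"
)

func main() {
	ctx := context.Background()
	backend, err := storage.ParseBackend("local:///tmp/backup", &storage.BackendOptions{})
	if err != nil {
		panic(err)
	}
	// false: do not forward cloud credentials (the former global toggle).
	store, err := storage.Create(ctx, backend, false)
	if err != nil {
		panic(err)
	}
	if err := store.Write(ctx, "ping", []byte("pong")); err != nil {
		panic(err)
	}
}
```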
+ package storage import ( @@ -18,7 +20,7 @@ type ExternalStorage interface { } // Create creates ExternalStorage -func Create(ctx context.Context, backend *backup.StorageBackend) (ExternalStorage, error) { +func Create(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) (ExternalStorage, error) { switch backend := backend.Backend.(type) { case *backup.StorageBackend_Local: return newLocalStorage(backend.Local.Path) @@ -26,14 +28,14 @@ func Create(ctx context.Context, backend *backup.StorageBackend) (ExternalStorag if backend.S3 == nil { return nil, errors.New("s3 config not found") } - return newS3Storage(backend.S3) + return newS3Storage(backend.S3, sendCreds) case *backup.StorageBackend_Noop: return newNoopStorage(), nil case *backup.StorageBackend_Gcs: if backend.Gcs == nil { return nil, errors.New("GCS config not found") } - return newGCSStorage(ctx, backend.Gcs) + return newGCSStorage(ctx, backend.Gcs, sendCreds) default: return nil, errors.Errorf("storage %T is not supported yet", backend) } diff --git a/pkg/summary/collector.go b/pkg/summary/collector.go index cd5aac6c6..ee465d60b 100644 --- a/pkg/summary/collector.go +++ b/pkg/summary/collector.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package summary import ( @@ -25,7 +27,7 @@ const ( type LogCollector interface { SetUnit(unit string) - CollectSuccessUnit(name string, arg interface{}) + CollectSuccessUnit(name string, unitCount int, arg interface{}) CollectFailureUnit(name string, reason error) @@ -36,25 +38,34 @@ type LogCollector interface { Summary(name string) } -var collector = newLogCollector() +type logFunc func(msg string, fields ...zap.Field) + +var collector = newLogCollector(log.Info) type logCollector struct { - mu sync.Mutex - unit string - unitCount int - successCosts map[string]time.Duration - successData map[string]uint64 - failureReasons map[string]error - fields []zap.Field + mu sync.Mutex + unit string + successUnitCount int + failureUnitCount int + successCosts map[string]time.Duration + successData map[string]uint64 + failureReasons map[string]error + durations map[string]time.Duration + ints map[string]int + + log logFunc } -func newLogCollector() LogCollector { +func newLogCollector(log logFunc) LogCollector { return &logCollector{ - unitCount: 0, - fields: make([]zap.Field, 0), - successCosts: make(map[string]time.Duration), - successData: make(map[string]uint64), - failureReasons: make(map[string]error), + successUnitCount: 0, + failureUnitCount: 0, + successCosts: make(map[string]time.Duration), + successData: make(map[string]uint64), + failureReasons: make(map[string]error), + durations: make(map[string]time.Duration), + ints: make(map[string]int), + log: log, } } @@ -64,7 +75,7 @@ func (tc *logCollector) SetUnit(unit string) { tc.unit = unit } -func (tc *logCollector) CollectSuccessUnit(name string, arg interface{}) { +func (tc *logCollector) CollectSuccessUnit(name string, unitCount int, arg interface{}) { tc.mu.Lock() defer tc.mu.Unlock() @@ -72,7 +83,7 @@ func (tc *logCollector) CollectSuccessUnit(name string, arg interface{}) { case time.Duration: if _, ok := tc.successCosts[name]; !ok { tc.successCosts[name] = v - tc.unitCount++ + tc.successUnitCount += unitCount } else { tc.successCosts[name] += v } @@ -90,26 +101,27 @@ func (tc *logCollector) CollectFailureUnit(name string, reason error) { defer tc.mu.Unlock() if _, ok := tc.failureReasons[name]; !ok { tc.failureReasons[name] = reason - tc.unitCount++ + tc.failureUnitCount++ } } func (tc 
*logCollector) CollectDuration(name string, t time.Duration) {
 	tc.mu.Lock()
 	defer tc.mu.Unlock()
-	tc.fields = append(tc.fields, zap.Duration(name, t))
+	tc.durations[name] += t
 }
 
 func (tc *logCollector) CollectInt(name string, t int) {
 	tc.mu.Lock()
 	defer tc.mu.Unlock()
-	tc.fields = append(tc.fields, zap.Int(name, t))
+	tc.ints[name] += t
 }
 
 func (tc *logCollector) Summary(name string) {
 	tc.mu.Lock()
 	defer func() {
-		tc.fields = tc.fields[:0]
+		tc.durations = make(map[string]time.Duration)
+		tc.ints = make(map[string]int)
 		tc.successCosts = make(map[string]time.Duration)
 		tc.failureReasons = make(map[string]error)
 		tc.mu.Unlock()
@@ -119,27 +131,25 @@ func (tc *logCollector) Summary(name string) {
 	switch tc.unit {
 	case BackupUnit:
 		msg = fmt.Sprintf("total backup ranges: %d, total success: %d, total failed: %d",
-			tc.unitCount, len(tc.successCosts), len(tc.failureReasons))
-		if len(tc.failureReasons) != 0 {
-			msg += ", failed ranges"
-		}
+			tc.failureUnitCount+tc.successUnitCount, tc.successUnitCount, tc.failureUnitCount)
 	case RestoreUnit:
-		msg = fmt.Sprintf("total restore tables: %d, total success: %d, total failed: %d",
-			tc.unitCount, len(tc.successCosts), len(tc.failureReasons))
-		if len(tc.failureReasons) != 0 {
-			msg += ", failed tables"
-		}
+		msg = fmt.Sprintf("total restore files: %d, total success: %d, total failed: %d",
+			tc.failureUnitCount+tc.successUnitCount, tc.successUnitCount, tc.failureUnitCount)
+	}
+
+	logFields := make([]zap.Field, 0, len(tc.durations)+len(tc.ints))
+	for key, val := range tc.durations {
+		logFields = append(logFields, zap.Duration(key, val))
+	}
+	for key, val := range tc.ints {
+		logFields = append(logFields, zap.Int(key, val))
 	}
 
-	logFields := tc.fields
 	if len(tc.failureReasons) != 0 {
-		names := make([]string, 0, len(tc.failureReasons))
-		for name := range tc.failureReasons {
-			// logFields = append(logFields, zap.NamedError(name, reason))
-			names = append(names, name)
+		for unitName, reason := range tc.failureReasons {
+			logFields = append(logFields, zap.String("unitName", unitName), zap.Error(reason))
 		}
-		logFields = append(logFields, zap.Strings(msg, names))
-		log.Info(name+" summary", logFields...)
+		tc.log(name+" Failed summary: "+msg, logFields...)
 		return
 	}
 	totalCost := time.Duration(0)
@@ -162,7 +172,7 @@ func (tc *logCollector) Summary(name string) {
 		msg += fmt.Sprintf(", %s: %d", name, data)
 	}
 
-	log.Info(name+" summary: "+msg, logFields...)
+	tc.log(name+" Success summary: "+msg, logFields...)
 }
 
 // SetLogCollector allow pass LogCollector outside
diff --git a/pkg/summary/collector_test.go b/pkg/summary/collector_test.go
new file mode 100644
index 000000000..7dff32dd1
--- /dev/null
+++ b/pkg/summary/collector_test.go
@@ -0,0 +1,48 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
+package summary
+
+import (
+	"testing"
+	"time"
+
+	. "github.com/pingcap/check"
+	"go.uber.org/zap"
+)
+
+func TestT(t *testing.T) {
+	TestingT(t)
+}
+
+var _ = Suite(&testCollectorSuite{})
+
+type testCollectorSuite struct {
+}
+
+func (suit *testCollectorSuite) TestSumDurationInt(c *C) {
+	fields := []zap.Field{}
+	logger := func(msg string, fs ...zap.Field) {
+		fields = append(fields, fs...)
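+		// Capture every field the summary emits; the assertions below check the aggregated sums.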
+ } + col := newLogCollector(logger) + col.CollectDuration("a", time.Second) + col.CollectDuration("b", time.Second) + col.CollectDuration("b", time.Second) + col.CollectInt("c", 2) + col.CollectInt("c", 2) + col.Summary("foo") + + c.Assert(len(fields), Equals, 3) + assertContains := func(field zap.Field) { + for _, f := range fields { + if f.Key == field.Key { + c.Assert(f, DeepEquals, field) + return + } + } + c.Error(fields, "do not contain", field) + } + assertContains(zap.Duration("a", time.Second)) + assertContains(zap.Duration("b", 2*time.Second)) + assertContains(zap.Int("c", 4)) +} diff --git a/pkg/summary/summary.go b/pkg/summary/summary.go index 88d3fb143..3ffdedf8a 100644 --- a/pkg/summary/summary.go +++ b/pkg/summary/summary.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package summary import "time" @@ -8,8 +10,8 @@ func SetUnit(unit string) { } // CollectSuccessUnit collects success time costs -func CollectSuccessUnit(name string, arg interface{}) { - collector.CollectSuccessUnit(name, arg) +func CollectSuccessUnit(name string, unitCount int, arg interface{}) { + collector.CollectSuccessUnit(name, unitCount, arg) } // CollectFailureUnit collects fail reason diff --git a/pkg/task/backup.go b/pkg/task/backup.go new file mode 100644 index 000000000..5944a22a0 --- /dev/null +++ b/pkg/task/backup.go @@ -0,0 +1,243 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package task + +import ( + "context" + "strconv" + "time" + + "github.com/pingcap/errors" + kvproto "github.com/pingcap/kvproto/pkg/backup" + "github.com/pingcap/log" + "github.com/pingcap/parser/model" + "github.com/pingcap/parser/mysql" + "github.com/pingcap/tidb-tools/pkg/filter" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" + "github.com/spf13/pflag" + "go.uber.org/zap" + + "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" +) + +const ( + flagBackupTimeago = "timeago" + flagBackupTS = "backupts" + flagLastBackupTS = "lastbackupts" + + defaultBackupConcurrency = 4 +) + +// BackupConfig is the configuration specific for backup tasks. +type BackupConfig struct { + Config + + TimeAgo time.Duration `json:"time-ago" toml:"time-ago"` + BackupTS uint64 `json:"backup-ts" toml:"backup-ts"` + LastBackupTS uint64 `json:"last-backup-ts" toml:"last-backup-ts"` +} + +// DefineBackupFlags defines common flags for the backup command. +func DefineBackupFlags(flags *pflag.FlagSet) { + flags.Duration( + flagBackupTimeago, 0, + "The history version of the backup task, e.g. 1m, 1h. Do not exceed GCSafePoint") + + // TODO: remove experimental tag if it's stable + flags.Uint64(flagLastBackupTS, 0, "(experimental) the last time backup ts,"+ + " use for incremental backup, support TSO only") + flags.String(flagBackupTS, "", "the backup ts support TSO or datetime,"+ + " e.g. '400036290571534337', '2018-05-11 01:42:23'") +} + +// ParseFromFlags parses the backup-related flags from the flag set. 
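+// It rejects a negative timeago and applies the default backup concurrency when none is set.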
+func (cfg *BackupConfig) ParseFromFlags(flags *pflag.FlagSet) error { + timeAgo, err := flags.GetDuration(flagBackupTimeago) + if err != nil { + return errors.Trace(err) + } + if timeAgo < 0 { + return errors.New("negative timeago is not allowed") + } + cfg.TimeAgo = timeAgo + cfg.LastBackupTS, err = flags.GetUint64(flagLastBackupTS) + if err != nil { + return errors.Trace(err) + } + backupTS, err := flags.GetString(flagBackupTS) + if err != nil { + return errors.Trace(err) + } + cfg.BackupTS, err = parseTSString(backupTS) + if err != nil { + return errors.Trace(err) + } + + if err = cfg.Config.ParseFromFlags(flags); err != nil { + return errors.Trace(err) + } + if cfg.Config.Concurrency == 0 { + cfg.Config.Concurrency = defaultBackupConcurrency + } + return nil +} + +// RunBackup starts a backup task inside the current goroutine. +func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig) error { + defer summary.Summary(cmdName) + ctx, cancel := context.WithCancel(c) + defer cancel() + + u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions) + if err != nil { + return err + } + tableFilter, err := filter.New(cfg.CaseSensitive, &cfg.Filter) + if err != nil { + return err + } + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.SkipTiFlash) + if err != nil { + return err + } + defer mgr.Close() + + client, err := backup.NewBackupClient(ctx, mgr) + if err != nil { + return err + } + if err = client.SetStorage(ctx, u, cfg.SendCreds); err != nil { + return err + } + + backupTS, err := client.GetTS(ctx, cfg.TimeAgo, cfg.BackupTS) + if err != nil { + return err + } + + ranges, backupSchemas, err := backup.BuildBackupRangeAndSchema( + mgr.GetDomain(), mgr.GetTiKV(), tableFilter, backupTS) + if err != nil { + return err + } + + ddlJobs := make([]*model.Job, 0) + if cfg.LastBackupTS > 0 { + if backupTS < cfg.LastBackupTS { + log.Error("LastBackupTS is larger than current TS") + return errors.New("LastBackupTS is larger than current TS") + } + err = backup.CheckGCSafepoint(ctx, mgr.GetPDClient(), cfg.LastBackupTS) + if err != nil { + log.Error("Check gc safepoint for last backup ts failed", zap.Error(err)) + return err + } + ddlJobs, err = backup.GetBackupDDLJobs(mgr.GetDomain(), cfg.LastBackupTS, backupTS) + if err != nil { + return err + } + } + + // The number of regions need to backup + approximateRegions := 0 + for _, r := range ranges { + var regionCount int + regionCount, err = mgr.GetRegionCount(ctx, r.StartKey, r.EndKey) + if err != nil { + return err + } + approximateRegions += regionCount + } + + summary.CollectInt("backup total regions", approximateRegions) + + // Backup + // Redirect to log if there is no log file to avoid unreadable output. 
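+	// updateCh advances once per backed-up region and is closed below after BackupRanges returns.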
+ updateCh := utils.StartProgress( + ctx, cmdName, int64(approximateRegions), !cfg.LogProgress) + + req := kvproto.BackupRequest{ + StartVersion: cfg.LastBackupTS, + EndVersion: backupTS, + RateLimit: cfg.RateLimit, + Concurrency: cfg.Concurrency, + } + err = client.BackupRanges( + ctx, ranges, req, updateCh) + if err != nil { + return err + } + // Backup has finished + close(updateCh) + + // Checksum + backupSchemasConcurrency := backup.DefaultSchemaConcurrency + if backupSchemas.Len() < backupSchemasConcurrency { + backupSchemasConcurrency = backupSchemas.Len() + } + updateCh = utils.StartProgress( + ctx, "Checksum", int64(backupSchemas.Len()), !cfg.LogProgress) + backupSchemas.SetSkipChecksum(!cfg.Checksum) + backupSchemas.Start( + ctx, mgr.GetTiKV(), backupTS, uint(backupSchemasConcurrency), updateCh) + + err = client.CompleteMeta(backupSchemas) + if err != nil { + return err + } + + if cfg.LastBackupTS == 0 { + var valid bool + valid, err = client.FastChecksum() + if err != nil { + return err + } + if !valid { + log.Error("backup FastChecksum mismatch!") + return errors.Errorf("mismatched checksum") + } + + } else { + // Since we don't support checksum for incremental data, fast checksum should be skipped. + log.Info("Skip fast checksum in incremental backup") + } + // Checksum has finished + close(updateCh) + + err = client.SaveBackupMeta(ctx, ddlJobs) + if err != nil { + return err + } + return nil +} + +// parseTSString port from tidb setSnapshotTS +func parseTSString(ts string) (uint64, error) { + if len(ts) == 0 { + return 0, nil + } + if tso, err := strconv.ParseUint(ts, 10, 64); err == nil { + return tso, nil + } + + loc := time.Local + sc := &stmtctx.StatementContext{ + TimeZone: loc, + } + t, err := types.ParseTime(sc, ts, mysql.TypeTimestamp, types.MaxFsp) + if err != nil { + return 0, errors.Trace(err) + } + t1, err := t.GoTime(loc) + if err != nil { + return 0, errors.Trace(err) + } + return variable.GoTimeToTS(t1), nil +} diff --git a/pkg/task/backup_raw.go b/pkg/task/backup_raw.go new file mode 100644 index 000000000..d9deaccba --- /dev/null +++ b/pkg/task/backup_raw.go @@ -0,0 +1,145 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package task + +import ( + "bytes" + "context" + + "github.com/pingcap/errors" + kvproto "github.com/pingcap/kvproto/pkg/backup" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/rtree" + "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" +) + +const ( + flagKeyFormat = "format" + flagTiKVColumnFamily = "cf" + flagStartKey = "start" + flagEndKey = "end" +) + +// RawKvConfig is the common config for rawkv backup and restore. +type RawKvConfig struct { + Config + + StartKey []byte `json:"start-key" toml:"start-key"` + EndKey []byte `json:"end-key" toml:"end-key"` + CF string `json:"cf" toml:"cf"` +} + +// DefineRawBackupFlags defines common flags for the backup command. 
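An aside on `parseTSString` above: it accepts either a raw TSO or a datetime because a TiDB TSO is just a packed timestamp. The following standalone sketch (not part of the patch) shows the layout this relies on, namely milliseconds-since-epoch in the high bits and an 18-bit logical counter in the low bits; the printed value matches the arithmetic asserted in `backup_test.go` below when the local zone is UTC.

```go
package main

import (
	"fmt"
	"time"
)

// logicalBits is the width of the logical counter in a TiDB TSO.
const logicalBits = 18

// composeTS builds a TSO with a zero logical component from a wall-clock time.
func composeTS(physical time.Time) uint64 {
	return uint64(physical.UnixNano()/int64(time.Millisecond)) << logicalBits
}

func main() {
	// 2018-05-11 01:42:23 UTC -> 400032515489792000
	fmt.Println(composeTS(time.Date(2018, 5, 11, 1, 42, 23, 0, time.UTC)))
}
```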
+func DefineRawBackupFlags(command *cobra.Command) {
+	command.Flags().StringP(flagKeyFormat, "", "hex", "start/end key format, supports raw|escaped|hex")
+	command.Flags().StringP(flagTiKVColumnFamily, "", "default", "backup the specified cf, correspond to tikv cf")
+	command.Flags().StringP(flagStartKey, "", "", "backup raw kv start key, key is inclusive")
+	command.Flags().StringP(flagEndKey, "", "", "backup raw kv end key, key is exclusive")
+}
+
+// ParseFromFlags parses the backup-related flags from the flag set.
+func (cfg *RawKvConfig) ParseFromFlags(flags *pflag.FlagSet) error {
+	format, err := flags.GetString(flagKeyFormat)
+	if err != nil {
+		return err
+	}
+	start, err := flags.GetString(flagStartKey)
+	if err != nil {
+		return err
+	}
+	cfg.StartKey, err = utils.ParseKey(format, start)
+	if err != nil {
+		return err
+	}
+	end, err := flags.GetString(flagEndKey)
+	if err != nil {
+		return err
+	}
+	cfg.EndKey, err = utils.ParseKey(format, end)
+	if err != nil {
+		return err
+	}
+
+	if bytes.Compare(cfg.StartKey, cfg.EndKey) >= 0 {
+		return errors.New("endKey must be greater than startKey")
+	}
+
+	cfg.CF, err = flags.GetString(flagTiKVColumnFamily)
+	if err != nil {
+		return err
+	}
+	if err = cfg.Config.ParseFromFlags(flags); err != nil {
+		return errors.Trace(err)
+	}
+	return nil
+}
+
+// RunBackupRaw starts a raw kv backup task inside the current goroutine.
+func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *RawKvConfig) error {
+	defer summary.Summary(cmdName)
+	ctx, cancel := context.WithCancel(c)
+	defer cancel()
+
+	u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions)
+	if err != nil {
+		return err
+	}
+	mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.SkipTiFlash)
+	if err != nil {
+		return err
+	}
+	defer mgr.Close()
+
+	client, err := backup.NewBackupClient(ctx, mgr)
+	if err != nil {
+		return err
+	}
+	if err = client.SetStorage(ctx, u, cfg.SendCreds); err != nil {
+		return err
+	}
+
+	backupRange := rtree.Range{StartKey: cfg.StartKey, EndKey: cfg.EndKey}
+
+	// The number of regions that need to be backed up
+	approximateRegions, err := mgr.GetRegionCount(ctx, backupRange.StartKey, backupRange.EndKey)
+	if err != nil {
+		return err
+	}
+
+	summary.CollectInt("backup total regions", approximateRegions)
+
+	// Backup
+	// Redirect to log if there is no log file to avoid unreadable output.
+	updateCh := utils.StartProgress(
+		ctx, cmdName, int64(approximateRegions), !cfg.LogProgress)
+
+	req := kvproto.BackupRequest{
+		StartVersion: 0,
+		EndVersion:   0,
+		RateLimit:    cfg.RateLimit,
+		Concurrency:  cfg.Concurrency,
+		IsRawKv:      true,
+		Cf:           cfg.CF,
+	}
+
+	err = client.BackupRange(ctx, backupRange.StartKey, backupRange.EndKey, req, updateCh)
+	if err != nil {
+		return err
+	}
+	// Backup has finished
+	close(updateCh)
+
+	// Save the backup metadata (raw kv backups have no checksum step).
+	err = client.SaveBackupMeta(ctx, nil)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/pkg/task/backup_test.go b/pkg/task/backup_test.go
new file mode 100644
index 000000000..6bd60515b
--- /dev/null
+++ b/pkg/task/backup_test.go
@@ -0,0 +1,36 @@
+package task
+
+import (
+	"testing"
+	"time"
+
+	. "github.com/pingcap/check"
+)
+
+var _ = Suite(&testBackupSuite{})
+
+func TestT(t *testing.T) {
+	TestingT(t)
+}
+
+type testBackupSuite struct{}
+
+func (s *testBackupSuite) TestParseTSString(c *C) {
+	var (
+		ts  uint64
+		err error
+	)
+
+	ts, err = parseTSString("")
+	c.Assert(err, IsNil)
+	c.Assert(int(ts), Equals, 0)
+
+	ts, err = parseTSString("400036290571534337")
+	c.Assert(err, IsNil)
+	c.Assert(int(ts), Equals, 400036290571534337)
+
+	_, offset := time.Now().Local().Zone()
+	ts, err = parseTSString("2018-05-11 01:42:23")
+	c.Assert(err, IsNil)
+	c.Assert(int(ts), Equals, 400032515489792000-(offset*1000)<<18)
+}
diff --git a/pkg/task/common.go b/pkg/task/common.go
new file mode 100644
index 000000000..61186abe1
--- /dev/null
+++ b/pkg/task/common.go
@@ -0,0 +1,288 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
+package task
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/pingcap/errors"
+	"github.com/pingcap/kvproto/pkg/backup"
+	pd "github.com/pingcap/pd/v4/client"
+	"github.com/pingcap/tidb-tools/pkg/filter"
+	"github.com/pingcap/tidb/store/tikv"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+	"go.etcd.io/etcd/pkg/transport"
+
+	"github.com/pingcap/br/pkg/conn"
+	"github.com/pingcap/br/pkg/glue"
+	"github.com/pingcap/br/pkg/storage"
+	"github.com/pingcap/br/pkg/utils"
+)
+
+const (
+	// flagSendCreds specifies whether to send credentials to tikv
+	flagSendCreds = "send-credentials-to-tikv"
+	// flagStorage is the name of storage flag.
+	flagStorage = "storage"
+	// flagPD is the name of the PD URL flag.
+	flagPD = "pd"
+	// flagCA is the name of TLS CA flag.
+	flagCA = "ca"
+	// flagCert is the name of TLS cert flag.
+	flagCert = "cert"
+	// flagKey is the name of TLS key flag.
+	flagKey = "key"
+
+	flagDatabase = "db"
+	flagTable    = "table"
+
+	flagRateLimit     = "ratelimit"
+	flagRateLimitUnit = "ratelimit-unit"
+	flagConcurrency   = "concurrency"
+	flagChecksum      = "checksum"
+)
+
+// TLSConfig is the common configuration for TLS connection.
+type TLSConfig struct {
+	CA   string `json:"ca" toml:"ca"`
+	Cert string `json:"cert" toml:"cert"`
+	Key  string `json:"key" toml:"key"`
+}
+
+// IsEnabled checks whether TLS is enabled.
+func (tls *TLSConfig) IsEnabled() bool {
+	return tls.CA != ""
+}
+
+// ToTLSConfig generates a tls.Config.
+func (tls *TLSConfig) ToTLSConfig() (*tls.Config, error) {
+	tlsInfo := transport.TLSInfo{
+		CertFile:      tls.Cert,
+		KeyFile:       tls.Key,
+		TrustedCAFile: tls.CA,
+	}
+	tlsConfig, err := tlsInfo.ClientConfig()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return tlsConfig, nil
+}
+
+// Config is the common configuration for all BRIE tasks.
+type Config struct {
+	storage.BackendOptions
+
+	Storage     string    `json:"storage" toml:"storage"`
+	PD          []string  `json:"pd" toml:"pd"`
+	TLS         TLSConfig `json:"tls" toml:"tls"`
+	RateLimit   uint64    `json:"rate-limit" toml:"rate-limit"`
+	Concurrency uint32    `json:"concurrency" toml:"concurrency"`
+	Checksum    bool      `json:"checksum" toml:"checksum"`
+	SendCreds   bool      `json:"send-credentials-to-tikv" toml:"send-credentials-to-tikv"`
+	// LogProgress being true means the progress bar is printed to the log instead of stdout.
+	LogProgress bool `json:"log-progress" toml:"log-progress"`
+
+	CaseSensitive bool         `json:"case-sensitive" toml:"case-sensitive"`
+	Filter        filter.Rules `json:"black-white-list" toml:"black-white-list"`
+}
+
+// DefineCommonFlags defines the flags common to all BRIE commands.
+func DefineCommonFlags(flags *pflag.FlagSet) {
+	flags.BoolP(flagSendCreds, "c", true, "Whether to send credentials to TiKV")
+	flags.StringP(flagStorage, "s", "", `specify the URL of the backup storage, e.g. "s3:///path/to/save"`)
+	flags.StringSliceP(flagPD, "u", []string{"127.0.0.1:2379"}, "PD address")
+	flags.String(flagCA, "", "CA certificate path for TLS connection")
+	flags.String(flagCert, "", "Certificate path for TLS connection")
+	flags.String(flagKey, "", "Private key path for TLS connection")
+
+	flags.Uint64(flagRateLimit, 0, "The rate limit of the task, MB/s per node")
+	flags.Bool(flagChecksum, true, "Run checksum at end of task")
+
+	// Default concurrency is different for backup and restore.
+	// Leave it 0 and let them adjust the value.
+	flags.Uint32(flagConcurrency, 0, "The size of thread pool on each node that executes the task")
+	// It may confuse users, so just hide it.
+	_ = flags.MarkHidden(flagConcurrency)
+
+	flags.Uint64(flagRateLimitUnit, utils.MB, "The unit of rate limit")
+	_ = flags.MarkHidden(flagRateLimitUnit)
+
+	storage.DefineFlags(flags)
+}
+
+// DefineDatabaseFlags defines the required --db flag.
+func DefineDatabaseFlags(command *cobra.Command) {
+	command.Flags().String(flagDatabase, "", "database name")
+	_ = command.MarkFlagRequired(flagDatabase)
+}
+
+// DefineTableFlags defines the required --db and --table flags.
+func DefineTableFlags(command *cobra.Command) {
+	DefineDatabaseFlags(command)
+	command.Flags().StringP(flagTable, "t", "", "table name")
+	_ = command.MarkFlagRequired(flagTable)
+}
+
+// ParseFromFlags parses the TLS config from the flag set.
+func (tls *TLSConfig) ParseFromFlags(flags *pflag.FlagSet) error {
+	var err error
+	tls.CA, err = flags.GetString(flagCA)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	tls.Cert, err = flags.GetString(flagCert)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	tls.Key, err = flags.GetString(flagKey)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return nil
+}
+
+// ParseFromFlags parses the config from the flag set.
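The parser below turns `--db`/`--table` into tidb-tools filter rules. A minimal self-contained sketch of the resulting behavior (`filter.New` and `filter.Match` are used exactly this way elsewhere in this patch; the `false` mirrors `cfg.CaseSensitive`'s zero value):

```go
package main

import (
	"fmt"

	"github.com/pingcap/tidb-tools/pkg/filter"
)

func main() {
	// What cfg.Filter ends up as after `br ... --db test --table usertable`.
	rules := &filter.Rules{
		DoTables: []*filter.Table{{Schema: "test", Name: "usertable"}},
	}
	f, err := filter.New(false, rules) // false: case-insensitive matching
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Match(&filter.Table{Schema: "test", Name: "usertable"})) // true
	fmt.Println(f.Match(&filter.Table{Schema: "test", Name: "other"}))     // false
}
```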
+func (cfg *Config) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + cfg.Storage, err = flags.GetString(flagStorage) + if err != nil { + return errors.Trace(err) + } + cfg.SendCreds, err = flags.GetBool(flagSendCreds) + if err != nil { + return errors.Trace(err) + } + cfg.PD, err = flags.GetStringSlice(flagPD) + if err != nil { + return errors.Trace(err) + } + if len(cfg.PD) == 0 { + return errors.New("must provide at least one PD server address") + } + cfg.Concurrency, err = flags.GetUint32(flagConcurrency) + if err != nil { + return errors.Trace(err) + } + cfg.Checksum, err = flags.GetBool(flagChecksum) + if err != nil { + return errors.Trace(err) + } + + var rateLimit, rateLimitUnit uint64 + rateLimit, err = flags.GetUint64(flagRateLimit) + if err != nil { + return errors.Trace(err) + } + rateLimitUnit, err = flags.GetUint64(flagRateLimitUnit) + if err != nil { + return errors.Trace(err) + } + cfg.RateLimit = rateLimit * rateLimitUnit + + if dbFlag := flags.Lookup(flagDatabase); dbFlag != nil { + db := escapeFilterName(dbFlag.Value.String()) + if len(db) == 0 { + return errors.New("empty database name is not allowed") + } + if tblFlag := flags.Lookup(flagTable); tblFlag != nil { + tbl := escapeFilterName(tblFlag.Value.String()) + if len(tbl) == 0 { + return errors.New("empty table name is not allowed") + } + cfg.Filter.DoTables = []*filter.Table{{Schema: db, Name: tbl}} + } else { + cfg.Filter.DoDBs = []string{db} + } + } + + if err := cfg.BackendOptions.ParseFromFlags(flags); err != nil { + return err + } + return cfg.TLS.ParseFromFlags(flags) +} + +// newMgr creates a new mgr at the given PD address. +func newMgr( + ctx context.Context, + g glue.Glue, + pds []string, + tlsConfig TLSConfig, + storeBehavior conn.StoreBehavior, +) (*conn.Mgr, error) { + var ( + tlsConf *tls.Config + err error + ) + pdAddress := strings.Join(pds, ",") + if len(pdAddress) == 0 { + return nil, errors.New("pd address can not be empty") + } + + securityOption := pd.SecurityOption{} + if tlsConfig.IsEnabled() { + securityOption.CAPath = tlsConfig.CA + securityOption.CertPath = tlsConfig.Cert + securityOption.KeyPath = tlsConfig.Key + tlsConf, err = tlsConfig.ToTLSConfig() + if err != nil { + return nil, err + } + } + + // Disable GC because TiDB enables GC already. + store, err := g.Open(fmt.Sprintf("tikv://%s?disableGC=true", pdAddress), securityOption) + if err != nil { + return nil, err + } + return conn.NewMgr(ctx, g, pdAddress, store.(tikv.Storage), tlsConf, securityOption, storeBehavior) +} + +// GetStorage gets the storage backend from the config. +func GetStorage( + ctx context.Context, + cfg *Config, +) (*backup.StorageBackend, storage.ExternalStorage, error) { + u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions) + if err != nil { + return nil, nil, err + } + s, err := storage.Create(ctx, u, cfg.SendCreds) + if err != nil { + return nil, nil, errors.Annotate(err, "create storage failed") + } + return u, s, nil +} + +// ReadBackupMeta reads the backupmeta file from the storage. 
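One helper referenced above deserves a call-out: `escapeFilterName` (defined just below, at the end of common.go) guards against table-filter's convention that a leading `~` marks a regular expression. A self-contained mirror of the same logic:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// escapeFilterName mirrors the helper in common.go: a literal name starting
// with '~' must be regexp-quoted and anchored, or the filter would treat it
// as a pattern.
func escapeFilterName(name string) string {
	if !strings.HasPrefix(name, "~") {
		return name
	}
	return "~^" + regexp.QuoteMeta(name) + "$"
}

func main() {
	fmt.Println(escapeFilterName("users")) // users (passed through unchanged)
	fmt.Println(escapeFilterName("~tmp"))  // ~^~tmp$ (quoted and anchored)
}
```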
+func ReadBackupMeta(
+	ctx context.Context,
+	fileName string,
+	cfg *Config,
+) (*backup.StorageBackend, storage.ExternalStorage, *backup.BackupMeta, error) {
+	u, s, err := GetStorage(ctx, cfg)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	metaData, err := s.Read(ctx, fileName)
+	if err != nil {
+		return nil, nil, nil, errors.Annotate(err, "load backupmeta failed")
+	}
+	backupMeta := &backup.BackupMeta{}
+	if err = proto.Unmarshal(metaData, backupMeta); err != nil {
+		return nil, nil, nil, errors.Annotate(err, "parse backupmeta failed")
+	}
+	return u, s, backupMeta, nil
+}
+
+func escapeFilterName(name string) string {
+	if !strings.HasPrefix(name, "~") {
+		return name
+	}
+	return "~^" + regexp.QuoteMeta(name) + "$"
+}
diff --git a/pkg/task/restore.go b/pkg/task/restore.go
new file mode 100644
index 000000000..7d5dd6846
--- /dev/null
+++ b/pkg/task/restore.go
@@ -0,0 +1,418 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
+package task
+
+import (
+	"context"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/kvproto/pkg/backup"
+	"github.com/pingcap/log"
+	"github.com/pingcap/parser/model"
+	"github.com/pingcap/tidb-tools/pkg/filter"
+	"github.com/spf13/pflag"
+	"go.uber.org/zap"
+
+	"github.com/pingcap/br/pkg/conn"
+	"github.com/pingcap/br/pkg/glue"
+	"github.com/pingcap/br/pkg/restore"
+	"github.com/pingcap/br/pkg/rtree"
+	"github.com/pingcap/br/pkg/storage"
+	"github.com/pingcap/br/pkg/summary"
+	"github.com/pingcap/br/pkg/utils"
+)
+
+const (
+	flagOnline = "online"
+)
+
+var schedulers = map[string]struct{}{
+	"balance-leader-scheduler":     {},
+	"balance-hot-region-scheduler": {},
+	"balance-region-scheduler":     {},
+
+	"shuffle-leader-scheduler":     {},
+	"shuffle-region-scheduler":     {},
+	"shuffle-hot-region-scheduler": {},
+}
+
+const (
+	defaultRestoreConcurrency = 128
+	maxRestoreBatchSizeLimit  = 256
+)
+
+// RestoreConfig is the configuration specific for restore tasks.
+type RestoreConfig struct {
+	Config
+
+	Online bool `json:"online" toml:"online"`
+}
+
+// DefineRestoreFlags defines common flags for the restore command.
+func DefineRestoreFlags(flags *pflag.FlagSet) {
+	// TODO remove experimental tag if it's stable
+	flags.Bool(flagOnline, false, "(experimental) Whether to do an online restore")
+}
+
+// ParseFromFlags parses the restore-related flags from the flag set.
+func (cfg *RestoreConfig) ParseFromFlags(flags *pflag.FlagSet) error {
+	var err error
+	cfg.Online, err = flags.GetBool(flagOnline)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	err = cfg.Config.ParseFromFlags(flags)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	if cfg.Config.Concurrency == 0 {
+		cfg.Config.Concurrency = defaultRestoreConcurrency
+	}
+	return nil
+}
+
+// RunRestore starts a restore task inside the current goroutine.
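Before RunRestore proper, one implementation detail worth understanding: the sst-restore loop below consumes `ranges` in fixed-size batches using a full (three-index) slice expression. A minimal standalone model of that slicing, with the element type simplified to `int`:

```go
package main

import "fmt"

func main() {
	ranges := []int{1, 2, 3, 4, 5, 6, 7}
	batchSize := 3
	for len(ranges) > 0 {
		n := batchSize
		if n > len(ranges) {
			n = len(ranges)
		}
		// ranges[0:n:n] caps the batch's capacity at n, so later appends to
		// the batch cannot overwrite the not-yet-consumed tail of ranges.
		var batch []int
		ranges, batch = ranges[n:], ranges[0:n:n]
		fmt.Println(batch) // [1 2 3], then [4 5 6], then [7]
	}
}
```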
+func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConfig) error {
+	defer summary.Summary(cmdName)
+	ctx, cancel := context.WithCancel(c)
+	defer cancel()
+
+	mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.SkipTiFlash)
+	if err != nil {
+		return err
+	}
+	defer mgr.Close()
+
+	client, err := restore.NewRestoreClient(ctx, g, mgr.GetPDClient(), mgr.GetTiKV(), mgr.GetTLSConfig())
+	if err != nil {
+		return err
+	}
+	defer client.Close()
+
+	u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions)
+	if err != nil {
+		return err
+	}
+	if err = client.SetStorage(ctx, u, cfg.SendCreds); err != nil {
+		return err
+	}
+	client.SetRateLimit(cfg.RateLimit)
+	client.SetConcurrency(uint(cfg.Concurrency))
+	if cfg.Online {
+		client.EnableOnline()
+	}
+	err = client.LoadRestoreStores(ctx)
+	if err != nil {
+		return err
+	}
+
+	u, _, backupMeta, err := ReadBackupMeta(ctx, utils.MetaFile, &cfg.Config)
+	if err != nil {
+		return err
+	}
+	if err = client.InitBackupMeta(backupMeta, u); err != nil {
+		return err
+	}
+
+	if client.IsRawKvMode() {
+		return errors.New("cannot do transactional restore from raw kv data")
+	}
+
+	files, tables, err := filterRestoreFiles(client, cfg)
+	if err != nil {
+		return err
+	}
+	if len(files) == 0 {
+		return errors.New("all files are filtered out from the backup archive, nothing to restore")
+	}
+
+	var newTS uint64
+	if client.IsIncremental() {
+		newTS, err = client.GetTS(ctx)
+		if err != nil {
+			return err
+		}
+	}
+	ddlJobs := restore.FilterDDLJobs(client.GetDDLJobs(), tables)
+	err = client.ExecDDLs(ddlJobs)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	rewriteRules, newTables, err := client.CreateTables(mgr.GetDomain(), tables, newTS)
+	if err != nil {
+		return err
+	}
+	placementRules, err := client.GetPlacementRules(cfg.PD)
+	if err != nil {
+		return err
+	}
+	err = client.RemoveTiFlashReplica(tables, placementRules)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		_ = client.RecoverTiFlashReplica(tables)
+	}()
+
+	ranges, err := restore.ValidateFileRanges(files, rewriteRules)
+	if err != nil {
+		return err
+	}
+	summary.CollectInt("restore ranges", len(ranges))
+
+	if err = splitPrepareWork(ctx, client, newTables); err != nil {
+		return err
+	}
+
+	ranges = restore.AttachFilesToRanges(files, ranges)
+
+	// Redirect to log if there is no log file to avoid unreadable output.
+	updateCh := utils.StartProgress(
+		ctx,
+		cmdName,
+		// Split/Scatter + Download/Ingest
+		int64(len(ranges)+len(files)),
+		!cfg.LogProgress)
+
+	clusterCfg, err := restorePreWork(ctx, client, mgr)
+	if err != nil {
+		return err
+	}
+
+	// Do not reset timestamp if we are doing incremental restore, because
+	// we are not allowed to decrease timestamp.
+	if !client.IsIncremental() {
+		if err = client.ResetTS(cfg.PD); err != nil {
+			log.Error("reset pd TS failed", zap.Error(err))
+			return err
+		}
+	}
+
+	// Restore sst files in batch.
+	batchSize := int(cfg.Concurrency)
+	if batchSize > maxRestoreBatchSizeLimit {
+		batchSize = maxRestoreBatchSizeLimit // 256
+	}
+	for {
+		if len(ranges) == 0 {
+			break
+		}
+		if batchSize > len(ranges) {
+			batchSize = len(ranges)
+		}
+		var rangeBatch []rtree.Range
+		ranges, rangeBatch = ranges[batchSize:], ranges[0:batchSize:batchSize]
+
+		// Split regions by the given rangeBatch.
+		err = restore.SplitRanges(ctx, client, rangeBatch, rewriteRules, updateCh)
+		if err != nil {
+			log.Error("split regions failed", zap.Error(err))
+			return err
+		}
+
+		// Collect related files in the given rangeBatch.
+		fileBatch := make([]*backup.File, 0, 2*len(rangeBatch))
+		for _, rg := range rangeBatch {
+			fileBatch = append(fileBatch, rg.Files...)
+		}
+
+		// After split, we can restore backup files.
+		err = client.RestoreFiles(fileBatch, rewriteRules, updateCh)
+		if err != nil {
+			break
+		}
+	}
+
+	// Always run the post-work even on error, so we don't get stuck in
+	// import mode or leave the schedulers removed. Keep the restore error
+	// around so it is not silently overwritten by the post-work call.
+	errRestore := err
+	err = restorePostWork(ctx, client, mgr, clusterCfg)
+	if err != nil {
+		return err
+	}
+	if errRestore != nil {
+		return errRestore
+	}
+
+	if err = splitPostWork(ctx, client, newTables); err != nil {
+		return err
+	}
+
+	// Restore has finished.
+	close(updateCh)
+
+	// Checksum
+	updateCh = utils.StartProgress(
+		ctx, "Checksum", int64(len(newTables)), !cfg.LogProgress)
+	err = client.ValidateChecksum(
+		ctx, mgr.GetTiKV().GetClient(), tables, newTables, updateCh)
+	if err != nil {
+		return err
+	}
+	close(updateCh)
+
+	return nil
+}
+
+func filterRestoreFiles(
+	client *restore.Client,
+	cfg *RestoreConfig,
+) (files []*backup.File, tables []*utils.Table, err error) {
+	tableFilter, err := filter.New(cfg.CaseSensitive, &cfg.Filter)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	for _, db := range client.GetDatabases() {
+		createdDatabase := false
+		for _, table := range db.Tables {
+			if !tableFilter.Match(&filter.Table{Schema: db.Info.Name.O, Name: table.Info.Name.O}) {
+				continue
+			}
+
+			if !createdDatabase {
+				if err = client.CreateDatabase(db.Info); err != nil {
+					return nil, nil, err
+				}
+				createdDatabase = true
+			}
+
+			files = append(files, table.Files...)
+			tables = append(tables, table)
+		}
+	}
+
+	return
+}
+
+// restorePreWork executes some preparatory work before restore
+func restorePreWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr) ([]string, error) {
+	if client.IsOnline() {
+		return nil, nil
+	}
+
+	if err := client.SwitchToImportMode(ctx); err != nil {
+		return nil, err
+	}
+
+	existSchedulers, err := mgr.ListSchedulers(ctx)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	needRemoveSchedulers := make([]string, 0, len(existSchedulers))
+	for _, s := range existSchedulers {
+		if _, ok := schedulers[s]; ok {
+			needRemoveSchedulers = append(needRemoveSchedulers, s)
+		}
+	}
+	return removePDLeaderScheduler(ctx, mgr, needRemoveSchedulers)
+}
+
+func removePDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, existSchedulers []string) ([]string, error) {
+	removedSchedulers := make([]string, 0, len(existSchedulers))
+	for _, scheduler := range existSchedulers {
+		err := mgr.RemoveScheduler(ctx, scheduler)
+		if err != nil {
+			return nil, err
+		}
+		removedSchedulers = append(removedSchedulers, scheduler)
+	}
+	return removedSchedulers, nil
+}
+
+// restorePostWork executes some post-work after restore
+func restorePostWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr, removedSchedulers []string) error {
+	if client.IsOnline() {
+		return nil
+	}
+	if err := client.SwitchToNormalMode(ctx); err != nil {
+		return err
+	}
+	return addPDLeaderScheduler(ctx, mgr, removedSchedulers)
+}
+
+func addPDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, removedSchedulers []string) error {
+	for _, scheduler := range removedSchedulers {
+		err := mgr.AddScheduler(ctx, scheduler)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func splitPrepareWork(ctx context.Context, client *restore.Client, tables []*model.TableInfo) error {
+	err := client.SetupPlacementRules(ctx, tables)
+	if err != nil {
+		log.Error("setup placement rules failed", zap.Error(err))
+		return errors.Trace(err)
+	}
+
+	err = client.WaitPlacementSchedule(ctx, tables)
+	if err != nil {
+		log.Error("wait placement schedule failed", zap.Error(err))
+		return errors.Trace(err)
+	}
+	return nil
+}
+
+func splitPostWork(ctx context.Context, client *restore.Client, tables []*model.TableInfo) error {
+	err := client.ResetPlacementRules(ctx, tables)
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	err = client.ResetRestoreLabels(ctx)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return nil
+}
+
+// RunRestoreTiflashReplica restores the replica of tiflash saved in the last restore.
+func RunRestoreTiflashReplica(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConfig) error {
+	defer summary.Summary(cmdName)
+	ctx, cancel := context.WithCancel(c)
+	defer cancel()
+
+	mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.SkipTiFlash)
+	if err != nil {
+		return err
+	}
+	defer mgr.Close()
+
+	// Load saved backupmeta
+	_, _, backupMeta, err := ReadBackupMeta(ctx, utils.SavedMetaFile, &cfg.Config)
+	if err != nil {
+		return err
+	}
+	dbs, err := utils.LoadBackupTables(backupMeta)
+	if err != nil {
+		return err
+	}
+	se, err := restore.NewDB(g, mgr.GetTiKV())
+	if err != nil {
+		return err
+	}
+
+	tables := make([]*utils.Table, 0)
+	for _, db := range dbs {
+		tables = append(tables, db.Tables...)
+	}
+	updateCh := utils.StartProgress(
+		ctx, "RecoverTiflashReplica", int64(len(tables)), !cfg.LogProgress)
+	for _, t := range tables {
+		log.Info("get table", zap.Stringer("name", t.Info.Name),
+			zap.Int("replica", t.TiFlashReplicas))
+		if t.TiFlashReplicas > 0 {
+			err := se.AlterTiflashReplica(ctx, t, t.TiFlashReplicas)
+			if err != nil {
+				return err
+			}
+			updateCh <- struct{}{}
+		}
+	}
+	summary.CollectInt("recover tables", len(tables))
+
+	return nil
+}
diff --git a/pkg/task/restore_raw.go b/pkg/task/restore_raw.go
new file mode 100644
index 000000000..308a44b4e
--- /dev/null
+++ b/pkg/task/restore_raw.go
@@ -0,0 +1,130 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
+package task
+
+import (
+	"context"
+
+	"github.com/pingcap/errors"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+
+	"github.com/pingcap/br/pkg/conn"
+	"github.com/pingcap/br/pkg/glue"
+	"github.com/pingcap/br/pkg/restore"
+	"github.com/pingcap/br/pkg/summary"
+	"github.com/pingcap/br/pkg/utils"
+)
+
+// RestoreRawConfig is the configuration specific for raw kv restore tasks.
+type RestoreRawConfig struct {
+	RawKvConfig
+
+	Online bool `json:"online" toml:"online"`
+}
+
+// DefineRawRestoreFlags defines common flags for the raw restore command.
+func DefineRawRestoreFlags(command *cobra.Command) {
+	command.Flags().StringP(flagKeyFormat, "", "hex", "start/end key format, supports raw|escaped|hex")
+	command.Flags().StringP(flagTiKVColumnFamily, "", "default", "restore the specified cf, correspond to tikv cf")
+	command.Flags().StringP(flagStartKey, "", "", "restore raw kv start key, key is inclusive")
+	command.Flags().StringP(flagEndKey, "", "", "restore raw kv end key, key is exclusive")
+
+	command.Flags().Bool(flagOnline, false, "Whether to do an online restore")
+	// TODO remove hidden flag if it's stable
+	_ = command.Flags().MarkHidden(flagOnline)
+}
+
+// ParseFromFlags parses the restore-related flags from the flag set.
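The raw backup/restore key flags above accept three formats, decoded by `utils.ParseKey` (added later in this patch, in pkg/utils/key.go). As a quick illustration, all three of these spell the same key:

```go
package main

import (
	"fmt"

	"github.com/pingcap/br/pkg/utils"
)

func main() {
	// Equivalent spellings of []byte("abc") for --start/--end, per --format.
	raw, _ := utils.ParseKey("raw", "abc")
	hexed, _ := utils.ParseKey("hex", "616263")
	escaped, _ := utils.ParseKey("escaped", `a\x62c`)
	fmt.Printf("%s %s %s\n", raw, hexed, escaped) // abc abc abc
}
```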
+func (cfg *RestoreRawConfig) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + cfg.Online, err = flags.GetBool(flagOnline) + if err != nil { + return errors.Trace(err) + } + return cfg.RawKvConfig.ParseFromFlags(flags) +} + +// RunRestoreRaw starts a raw kv restore task inside the current goroutine. +func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreRawConfig) error { + defer summary.Summary(cmdName) + ctx, cancel := context.WithCancel(c) + defer cancel() + + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.ErrorOnTiFlash) + if err != nil { + return err + } + defer mgr.Close() + + client, err := restore.NewRestoreClient(ctx, g, mgr.GetPDClient(), mgr.GetTiKV(), mgr.GetTLSConfig()) + if err != nil { + return err + } + defer client.Close() + client.SetRateLimit(cfg.RateLimit) + client.SetConcurrency(uint(cfg.Concurrency)) + if cfg.Online { + client.EnableOnline() + } + + u, _, backupMeta, err := ReadBackupMeta(ctx, utils.MetaFile, &cfg.Config) + if err != nil { + return err + } + if err = client.InitBackupMeta(backupMeta, u); err != nil { + return err + } + + if !client.IsRawKvMode() { + return errors.New("cannot do raw restore from transactional data") + } + + files, err := client.GetFilesInRawRange(cfg.StartKey, cfg.EndKey, cfg.CF) + if err != nil { + return errors.Trace(err) + } + + if len(files) == 0 { + return errors.New("all files are filtered out from the backup archive, nothing to restore") + } + summary.CollectInt("restore files", len(files)) + + ranges, err := restore.ValidateFileRanges(files, nil) + if err != nil { + return errors.Trace(err) + } + + // Redirect to log if there is no log file to avoid unreadable output. + // TODO: How to show progress? + updateCh := utils.StartProgress( + ctx, + "Raw Restore", + // Split/Scatter + Download/Ingest + int64(len(ranges)+len(files)), + !cfg.LogProgress) + + err = restore.SplitRanges(ctx, client, ranges, nil, updateCh) + if err != nil { + return errors.Trace(err) + } + + removedSchedulers, err := restorePreWork(ctx, client, mgr) + if err != nil { + return errors.Trace(err) + } + + err = client.RestoreRaw(cfg.StartKey, cfg.EndKey, files, updateCh) + if err != nil { + return errors.Trace(err) + } + + err = restorePostWork(ctx, client, mgr, removedSchedulers) + if err != nil { + return errors.Trace(err) + } + // Restore has finished. + close(updateCh) + + return nil +} diff --git a/pkg/utils/key.go b/pkg/utils/key.go new file mode 100644 index 000000000..8caeb2833 --- /dev/null +++ b/pkg/utils/key.go @@ -0,0 +1,90 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+
+package utils
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/pingcap/errors"
+)
+
+// ParseKey parses a key in the given format (raw, escaped, or hex).
+func ParseKey(format, key string) ([]byte, error) {
+	switch format {
+	case "raw":
+		return []byte(key), nil
+	case "escaped":
+		return unescapedKey(key)
+	case "hex":
+		key, err := hex.DecodeString(key)
+		if err != nil {
+			return nil, errors.WithStack(err)
+		}
+		return key, nil
+	}
+	return nil, errors.New("unknown format")
+}
+
+// Ref PD: https://github.com/pingcap/pd/blob/master/tools/pd-ctl/pdctl/command/region_command.go#L334
+func unescapedKey(text string) ([]byte, error) {
+	var buf []byte
+	r := bytes.NewBuffer([]byte(text))
+	for {
+		c, err := r.ReadByte()
+		if err != nil {
+			if err != io.EOF {
+				return nil, errors.WithStack(err)
+			}
+			break
+		}
+		if c != '\\' {
+			buf = append(buf, c)
+			continue
+		}
+		n := r.Next(1)
+		if len(n) == 0 {
+			return nil, io.EOF
+		}
+		// See: https://golang.org/ref/spec#Rune_literals
+		if idx := strings.IndexByte(`abfnrtv\'"`, n[0]); idx != -1 {
+			buf = append(buf, []byte("\a\b\f\n\r\t\v\\'\"")[idx])
+			continue
+		}
+
+		switch n[0] {
+		case 'x':
+			fmt.Sscanf(string(r.Next(2)), "%02x", &c)
+			buf = append(buf, c)
+		default:
+			n = append(n, r.Next(2)...)
+			_, err := fmt.Sscanf(string(n), "%03o", &c)
+			if err != nil {
+				return nil, errors.WithStack(err)
+			}
+			buf = append(buf, c)
+		}
+	}
+	return buf, nil
+}
+
+// CompareEndKey compares two keys that BOTH represent the EXCLUSIVE ending of some range. An empty end key is the very
+// end, so an empty key is greater than any other keys.
+// Please note that this function is not applicable if any one argument is not an EXCLUSIVE ending of a range.
+func CompareEndKey(a, b []byte) int {
+	if len(a) == 0 {
+		if len(b) == 0 {
+			return 0
+		}
+		return 1
+	}
+
+	if len(b) == 0 {
+		return -1
+	}
+
+	return bytes.Compare(a, b)
+}
diff --git a/pkg/utils/key_test.go b/pkg/utils/key_test.go
new file mode 100644
index 000000000..3e20bae24
--- /dev/null
+++ b/pkg/utils/key_test.go
@@ -0,0 +1,54 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
+package utils
+
+import (
+	"encoding/hex"
+
+	. "github.com/pingcap/check"
+)
+
+type testKeySuite struct{}
+
+var _ = Suite(&testKeySuite{})
+
+func (r *testKeySuite) TestParseKey(c *C) {
+	rawKey := "1234"
+	parsedKey, err := ParseKey("raw", rawKey)
+	c.Assert(err, IsNil)
+	c.Assert(parsedKey, BytesEquals, []byte(rawKey))
+
+	escapedKey := "\\a\\x1"
+	parsedKey, err = ParseKey("escaped", escapedKey)
+	c.Assert(err, IsNil)
+	c.Assert(parsedKey, BytesEquals, []byte("\a\x01"))
+
+	hexKey := hex.EncodeToString([]byte("1234"))
+	parsedKey, err = ParseKey("hex", hexKey)
+	c.Assert(err, IsNil)
+	c.Assert(parsedKey, BytesEquals, []byte("1234"))
+
+	_, err = ParseKey("notSupport", rawKey)
+	c.Assert(err, ErrorMatches, "*unknown format*")
+}
+
+func (r *testKeySuite) TestCompareEndKey(c *C) {
+	res := CompareEndKey([]byte("1"), []byte("2"))
+	c.Assert(res, Less, 0)
+
+	res = CompareEndKey([]byte("1"), []byte("1"))
+	c.Assert(res, Equals, 0)
+
+	res = CompareEndKey([]byte("2"), []byte("1"))
+	c.Assert(res, Greater, 0)
+
+	res = CompareEndKey([]byte("1"), []byte(""))
+	c.Assert(res, Less, 0)
+
+	res = CompareEndKey([]byte(""), []byte(""))
+	c.Assert(res, Equals, 0)
+
+	res = CompareEndKey([]byte(""), []byte("1"))
+	c.Assert(res, Greater, 0)
+}
diff --git a/pkg/utils/mock_cluster_test.go b/pkg/utils/mock_cluster_test.go
deleted file mode 100644
index 42cacae9c..000000000
--- a/pkg/utils/mock_cluster_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package utils
-
-import (
-	. "github.com/pingcap/check"
-	"github.com/pingcap/tidb/util/testleak"
-)
-
-var _ = Suite(&testMockClusterSuite{})
-
-type testMockClusterSuite struct {
-	mock *MockCluster
-}
-
-func (s *testMockClusterSuite) SetUpSuite(c *C) {
-	var err error
-	s.mock, err = NewMockCluster()
-	c.Assert(err, IsNil)
-}
-
-func (s *testMockClusterSuite) TearDownSuite(c *C) {
-	testleak.AfterTest(c)()
-}
-
-func (s *testMockClusterSuite) TestSmoke(c *C) {
-	c.Assert(s.mock.Start(), IsNil)
-	s.mock.Stop()
-}
diff --git a/pkg/utils/pd.go b/pkg/utils/pd.go
new file mode 100644
index 000000000..7a65a2ac4
--- /dev/null
+++ b/pkg/utils/pd.go
@@ -0,0 +1,107 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
+package utils
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/pd/v4/pkg/codec"
+	"github.com/pingcap/pd/v4/server/schedule/placement"
+	"github.com/pingcap/tidb/tablecodec"
+)
+
+const (
+	resetTSURL       = "/pd/api/v1/admin/reset-ts"
+	placementRuleURL = "/pd/api/v1/config/rules"
+)
+
+// ResetTS resets the timestamp of PD to a bigger value
+func ResetTS(pdAddr string, ts uint64, tlsConf *tls.Config) error {
+	req, err := json.Marshal(struct {
+		TSO string `json:"tso,omitempty"`
+	}{TSO: fmt.Sprintf("%d", ts)})
+	if err != nil {
+		return err
+	}
+	cli := &http.Client{Timeout: 30 * time.Second}
+	prefix := "http://"
+	if tlsConf != nil {
+		prefix = "https://"
+		transport := http.DefaultTransport.(*http.Transport).Clone()
+		transport.TLSClientConfig = tlsConf
+		cli.Transport = transport
+	}
+	reqURL := prefix + pdAddr + resetTSURL
+	resp, err := cli.Post(reqURL, "application/json", strings.NewReader(string(req)))
+	if err != nil {
+		return errors.Trace(err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusForbidden {
+		buf := new(bytes.Buffer)
+		_, _ = buf.ReadFrom(resp.Body)
+		return errors.Errorf("pd resets TS failed: req=%v, resp=%v, err=%v", string(req), buf.String(), err)
+	}
+	return nil
+}
+
+// GetPlacementRules returns the current placement rules.
+func GetPlacementRules(pdAddr string, tlsConf *tls.Config) ([]placement.Rule, error) {
+	cli := &http.Client{Timeout: 30 * time.Second}
+	prefix := "http://"
+	if tlsConf != nil {
+		prefix = "https://"
+		transport := http.DefaultTransport.(*http.Transport).Clone()
+		transport.TLSClientConfig = tlsConf
+		cli.Transport = transport
+	}
+	reqURL := prefix + pdAddr + placementRuleURL
+	resp, err := cli.Get(reqURL)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	defer resp.Body.Close()
+	buf := new(bytes.Buffer)
+	_, err = buf.ReadFrom(resp.Body)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	if resp.StatusCode == http.StatusPreconditionFailed {
+		return []placement.Rule{}, nil
+	}
+	if resp.StatusCode != http.StatusOK {
+		return nil, errors.Errorf("get placement rules failed: resp=%v, err=%v, code=%d", buf.String(), err, resp.StatusCode)
+	}
+	var rules []placement.Rule
+	err = json.Unmarshal(buf.Bytes(), &rules)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return rules, nil
+}
+
+// SearchPlacementRule returns the placement rule matched to the table, or nil.
+func SearchPlacementRule(tableID int64, placementRules []placement.Rule, role placement.PeerRoleType) *placement.Rule {
+	for _, rule := range placementRules {
+		key, err := hex.DecodeString(rule.StartKeyHex)
+		if err != nil {
+			continue
+		}
+		_, decoded, err := codec.DecodeBytes(key)
+		if err != nil {
+			continue
+		}
+		if rule.Role == role && tableID == tablecodec.DecodeTableID(decoded) {
+			return &rule
+		}
+	}
+	return nil
+}
diff --git a/pkg/utils/progress.go b/pkg/utils/progress.go
index 8c66093f0..da6b20364 100644
--- a/pkg/utils/progress.go
+++ b/pkg/utils/progress.go
@@ -1,3 +1,5 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
 package utils
 
 import (
@@ -55,7 +57,7 @@ func (pp *ProgressPrinter) goPrintProgress(
 		bar.Set(pb.Color, true)
 		bar.SetWriter(&wrappedWriter{name: pp.name})
 	} else {
-		tmpl := `{{string . "barName" | red}} {{ bar . "<" "-" (cycle . "-" "\\" "|" "/" ) "." ">"}} {{percent .}}`
+		tmpl := `{{string . "barName" | green}} {{ bar . "<" "-" (cycle . "-" "\\" "|" "/" ) "." ">"}} {{percent .}}`
 		bar = pb.ProgressBarTemplate(tmpl).Start64(pp.total)
 		bar.Set("barName", pp.name)
 	}
diff --git a/pkg/utils/progress_test.go b/pkg/utils/progress_test.go
index 7c1d9c947..0d76abd8f 100644
--- a/pkg/utils/progress_test.go
+++ b/pkg/utils/progress_test.go
@@ -1,3 +1,5 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
 package utils
 
 import (
diff --git a/pkg/utils/retry.go b/pkg/utils/retry.go
new file mode 100644
index 000000000..1dbbcdad2
--- /dev/null
+++ b/pkg/utils/retry.go
@@ -0,0 +1,42 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
+package utils
+
+import (
+	"context"
+	"time"
+)
+
+// RetryableFunc represents a retryable operation.
+type RetryableFunc func() error
+
+// Backoffer implements a backoff policy for retrying operations.
+type Backoffer interface {
+	// NextBackoff returns a duration to wait before retrying again
+	NextBackoff(err error) time.Duration
+	// Attempt returns the number of remaining attempts
+	Attempt() int
+}
+
+// WithRetry retries a given operation with a backoff policy.
+func WithRetry(
+	ctx context.Context,
+	retryableFunc RetryableFunc,
+	backoffer Backoffer,
+) error {
+	var lastErr error
+	for backoffer.Attempt() > 0 {
+		err := retryableFunc()
+		if err != nil {
+			lastErr = err
+			select {
+			case <-ctx.Done():
+				return lastErr
+			case <-time.After(backoffer.NextBackoff(err)):
+			}
+		} else {
+			return nil
+		}
+	}
+	return lastErr
+}
diff --git a/pkg/utils/schema.go b/pkg/utils/schema.go
index 67d28132f..5ac439e36 100644
--- a/pkg/utils/schema.go
+++ b/pkg/utils/schema.go
@@ -1,17 +1,16 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
 package utils
 
 import (
 	"bytes"
-	"context"
 	"encoding/json"
 	"strings"
 
 	"github.com/pingcap/errors"
 	"github.com/pingcap/kvproto/pkg/backup"
 	"github.com/pingcap/parser/model"
-	"github.com/pingcap/tidb/session"
 	"github.com/pingcap/tidb/tablecodec"
-	"github.com/pingcap/tidb/util/sqlexec"
 )
 
 const (
@@ -19,28 +18,31 @@ const (
 	MetaFile = "backupmeta"
 	// MetaJSONFile represents backup meta json file name
 	MetaJSONFile = "backupmeta.json"
+	// SavedMetaFile represents saved meta file name for recovering later
+	SavedMetaFile = "backupmeta.bak"
 )
 
 // Table wraps the schema and files of a table.
 type Table struct {
-	Db         *model.DBInfo
-	Schema     *model.TableInfo
-	Crc64Xor   uint64
-	TotalKvs   uint64
-	TotalBytes uint64
-	Files      []*backup.File
+	Db              *model.DBInfo
+	Info            *model.TableInfo
+	Crc64Xor        uint64
+	TotalKvs        uint64
+	TotalBytes      uint64
+	Files           []*backup.File
+	TiFlashReplicas int
 }
 
 // Database wraps the schema and tables of a database.
 type Database struct {
-	Schema *model.DBInfo
+	Info   *model.DBInfo
 	Tables []*Table
 }
 
 // GetTable returns a table of the database by name.
func (db *Database) GetTable(name string) *Table { for _, table := range db.Tables { - if table.Schema.Name.String() == name { + if table.Info.Name.String() == name { return table } } @@ -61,7 +63,7 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { db, ok := databases[dbInfo.Name.String()] if !ok { db = &Database{ - Schema: dbInfo, + Info: dbInfo, Tables: make([]*Table, 0), } databases[dbInfo.Name.String()] = db @@ -93,12 +95,13 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { } } table := &Table{ - Db: dbInfo, - Schema: tableInfo, - Crc64Xor: schema.Crc64Xor, - TotalKvs: schema.TotalKvs, - TotalBytes: schema.TotalBytes, - Files: tableFiles, + Db: dbInfo, + Info: tableInfo, + Crc64Xor: schema.Crc64Xor, + TotalKvs: schema.TotalKvs, + TotalBytes: schema.TotalBytes, + Files: tableFiles, + TiFlashReplicas: int(schema.TiflashReplicas), } db.Tables = append(db.Tables, table) } @@ -106,36 +109,6 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { return databases, nil } -// ResultSetToStringSlice changes the RecordSet to [][]string. port from tidb -func ResultSetToStringSlice(ctx context.Context, s session.Session, rs sqlexec.RecordSet) ([][]string, error) { - rows, err := session.GetRows4Test(ctx, s, rs) - if err != nil { - return nil, err - } - err = rs.Close() - if err != nil { - return nil, err - } - sRows := make([][]string, len(rows)) - for i := range rows { - row := rows[i] - iRow := make([]string, row.Len()) - for j := 0; j < row.Len(); j++ { - if row.IsNull(j) { - iRow[j] = "" - } else { - d := row.GetDatum(j, &rs.Fields()[j].Column.FieldType) - iRow[j], err = d.ToString() - if err != nil { - return nil, err - } - } - } - sRows[i] = iRow - } - return sRows, nil -} - // EncloseName formats name in sql func EncloseName(name string) string { return "`" + strings.ReplaceAll(name, "`", "``") + "`" diff --git a/pkg/utils/schema_test.go b/pkg/utils/schema_test.go index 336b6d4f8..22456be83 100644 --- a/pkg/utils/schema_test.go +++ b/pkg/utils/schema_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/tso.go b/pkg/utils/tso.go deleted file mode 100644 index a4ca5f5b5..000000000 --- a/pkg/utils/tso.go +++ /dev/null @@ -1,37 +0,0 @@ -package utils - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/pingcap/errors" -) - -const ( - resetTSURL = "/pd/api/v1/admin/reset-ts" -) - -// ResetTS resets the timestamp of PD to a bigger value -func ResetTS(pdAddr string, ts uint64) error { - req, err := json.Marshal(struct { - TSO string `json:"tso,omitempty"` - }{TSO: fmt.Sprintf("%d", ts)}) - if err != nil { - return err - } - // TODO: Support TLS - reqURL := "http://" + pdAddr + resetTSURL - resp, err := http.Post(reqURL, "application/json", strings.NewReader(string(req))) - if err != nil { - return errors.Trace(err) - } - if resp.StatusCode != 200 && resp.StatusCode != 403 { - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(resp.Body) - return errors.Errorf("pd resets TS failed: req=%v, resp=%v, err=%v", string(req), buf.String(), err) - } - return nil -} diff --git a/pkg/utils/unit.go b/pkg/utils/unit.go index a12dcb6c2..253d97eb6 100644 --- a/pkg/utils/unit.go +++ b/pkg/utils/unit.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+
 package utils
 
 // unit of storage
diff --git a/pkg/utils/unit_test.go b/pkg/utils/unit_test.go
index 5b3c00530..6cf89e316 100644
--- a/pkg/utils/unit_test.go
+++ b/pkg/utils/unit_test.go
@@ -1,3 +1,5 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
 package utils
 
 import (
diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go
index f82e28c69..ff8affa7c 100644
--- a/pkg/utils/utils_test.go
+++ b/pkg/utils/utils_test.go
@@ -1,3 +1,5 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
 package utils
 
 import (
diff --git a/pkg/utils/version.go b/pkg/utils/version.go
index 13a3c7a92..e3d46e301 100644
--- a/pkg/utils/version.go
+++ b/pkg/utils/version.go
@@ -1,3 +1,5 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
 package utils
 
 import (
diff --git a/pkg/utils/worker.go b/pkg/utils/worker.go
index a77bae090..2d800ddcd 100644
--- a/pkg/utils/worker.go
+++ b/pkg/utils/worker.go
@@ -1,3 +1,5 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
 package utils
 
 import (
diff --git a/tests/README.md b/tests/README.md
index 9f307a8a6..814241b4a 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -18,6 +18,7 @@ programs.
 
    * `mysql` (the CLI client)
    * `curl`
+   * `s3cmd`
 
 3. The user executing the tests must have permission to create the folder
    `/tmp/backup_restore_test`. All test artifacts will be written into this folder.
@@ -45,4 +46,4 @@ The script should exit with a nonzero error code on failure.
 
 Several convenient commands are provided:
 
-* `run_sql <SQL>` — Executes an SQL query on the TiDB database
\ No newline at end of file
+* `run_sql <SQL>` — Executes an SQL query on the TiDB database
diff --git a/tests/_utils/run_services b/tests/_utils/run_services
new file mode 100644
index 000000000..07fe1a2ad
--- /dev/null
+++ b/tests/_utils/run_services
@@ -0,0 +1,200 @@
+#!/bin/sh
+#
+# Copyright 2019 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.

+set -eu
+
+TEST_DIR=/tmp/backup_restore_test
+
+PD_ADDR="127.0.0.1:2379"
+TIDB_IP="127.0.0.1"
+TIDB_PORT="4000"
+TIDB_ADDR="127.0.0.1:4000"
+TIDB_STATUS_ADDR="127.0.0.1:10080"
+# actual tikv_addr are TIKV_ADDR${i}
+TIKV_ADDR="127.0.0.1:2016"
+TIKV_STATUS_ADDR="127.0.0.1:2018"
+TIKV_COUNT=4
+
+stop_services() {
+    killall -9 tikv-server || true
+    killall -9 pd-server || true
+    killall -9 tidb-server || true
+
+    find "$TEST_DIR" -maxdepth 1 -not -path "$TEST_DIR" -not -name "*.log" | xargs rm -r || true
+}
+
+start_services() {
+    stop_services
+
+    echo "Starting PD..."
+    mkdir -p "$TEST_DIR/pd"
+    bin/pd-server \
+        --client-urls "http://$PD_ADDR" \
+        --log-file "$TEST_DIR/pd.log" \
+        --data-dir "$TEST_DIR/pd" &
+    # wait until PD is online...
+    i=0
+    while ! curl -o /dev/null -sf "http://$PD_ADDR/pd/api/v1/version"; do
+        i=$((i+1))
+        if [ "$i" -gt 20 ]; then
+            echo 'Failed to start PD'
+            exit 1
+        fi
+        sleep 3
+    done
+
+    echo "Starting TiKV..."
+ for i in $(seq $TIKV_COUNT); do + mkdir -p "$TEST_DIR/tikv${i}" + bin/tikv-server \ + --pd "$PD_ADDR" \ + -A "$TIKV_ADDR$i" \ + --status-addr "$TIKV_STATUS_ADDR$i" \ + --log-file "$TEST_DIR/tikv${i}.log" \ + -C "tests/config/tikv.toml" \ + -s "$TEST_DIR/tikv${i}" & + done + + echo "Waiting initializing TiKV..." + while ! curl -sf "http://$PD_ADDR/pd/api/v1/cluster/status" | grep '"is_initialized": true'; do + i=$((i+1)) + if [ "$i" -gt 20 ]; then + echo 'Failed to initialize TiKV cluster' + exit 1 + fi + sleep 3 + done + + echo "Starting TiDB..." + bin/tidb-server \ + -P 4000 \ + --status 10080 \ + --store tikv \ + --path "$PD_ADDR" \ + --config "tests/config/tidb.toml" \ + --log-file "$TEST_DIR/tidb.log" & + + echo "Verifying TiDB is started..." + i=0 + while ! curl -o /dev/null -sf "http://$TIDB_IP:10080/status"; do + i=$((i+1)) + if [ "$i" -gt 50 ]; then + echo 'Failed to start TiDB' + exit 1 + fi + sleep 3 + done + + i=0 + while ! curl "http://$PD_ADDR/pd/api/v1/cluster/status" -sf | grep -q "\"is_initialized\": true"; do + i=$((i+1)) + if [ "$i" -gt 20 ]; then + echo 'Failed to bootstrap cluster' + exit 1 + fi + sleep 3 + done +} + +start_services_withTLS() { + stop_services + + PD_CONFIG="$1/config/pd.toml" + TIDB_CONFIG="$1/config/tidb.toml" + TIKV_CONFIG="$1/config/tikv.toml" + + echo $PD_CONFIG + echo $TIDB_CONFIG + echo $TIKV_CONFIG + + echo "Starting PD..." + bin/pd-server \ + --client-urls "https://$PD_ADDR" \ + --log-file "$TEST_DIR/pd.log" \ + --config "$PD_CONFIG" \ + --data-dir "$TEST_DIR/pd" & + # wait until PD is online... + i=0 + while ! curl --cacert $1/certificates/ca.pem \ + --cert $1/certificates/client.pem \ + --key $1/certificates/client-key.pem \ + -o /dev/null -sf "https://$PD_ADDR/pd/api/v1/version"; do + i=$((i+1)) + if [ "$i" -gt 20 ]; then + echo 'Failed to start PD' + exit 1 + fi + sleep 3 + done + + echo "Starting TiKV..." + for i in $(seq $TIKV_COUNT); do + bin/tikv-server \ + --pd "$PD_ADDR" \ + -A "$TIKV_ADDR$i" \ + --log-file "$TEST_DIR/tikv${i}.log" \ + -C "$TIKV_CONFIG" \ + -s "$TEST_DIR/tikv${i}" & + done + + echo "Waiting initializing TiKV..." + while ! curl --cacert $1/certificates/ca.pem \ + --cert $1/certificates/client.pem \ + --key $1/certificates/client-key.pem \ + -sf "https://$PD_ADDR/pd/api/v1/cluster/status" | grep '"is_initialized": true'; do + i=$((i+1)) + if [ "$i" -gt 20 ]; then + echo 'Failed to initialize TiKV cluster' + exit 1 + fi + sleep 3 + done + + echo "Starting TiDB..." + bin/tidb-server \ + -P 4000 \ + --status 10080 \ + --store tikv \ + --config "$TIDB_CONFIG" \ + --path "$PD_ADDR" \ + --log-file "$TEST_DIR/tidb.log" & + + echo "Verifying TiDB is started..." + i=0 + while ! curl --cacert $1/certificates/ca.pem \ + --cert $1/certificates/client.pem \ + --key $1/certificates/client-key.pem \ + -o /dev/null -sf "https://$TIDB_IP:10080/status"; do + i=$((i+1)) + if [ "$i" -gt 50 ]; then + echo 'Failed to start TiDB' + exit 1 + fi + sleep 3 + done + + i=0 + while ! curl --cacert $1/certificates/ca.pem \ + --cert $1/certificates/client.pem \ + --key $1/certificates/client-key.pem \ + "https://$PD_ADDR/pd/api/v1/cluster/status" -sf | grep -q "\"is_initialized\": true"; do + i=$((i+1)) + if [ "$i" -gt 20 ]; then + echo 'Failed to bootstrap cluster' + exit 1 + fi + sleep 3 + done +} diff --git a/tests/br_db_online/run.sh b/tests/br_db_online/run.sh new file mode 100755 index 000000000..95c3121d4 --- /dev/null +++ b/tests/br_db_online/run.sh @@ -0,0 +1,54 @@ +#!/bin/sh +# +# Copyright 2020 PingCAP, Inc. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.

+set -eu
+DB="$TEST_NAME"
+
+run_sql "CREATE DATABASE $DB;"
+
+run_sql "CREATE TABLE $DB.usertable1 ( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.usertable1 VALUES (\"a\", \"b\");"
+run_sql "INSERT INTO $DB.usertable1 VALUES (\"aa\", \"b\");"
+
+run_sql "CREATE TABLE $DB.usertable2 ( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.usertable2 VALUES (\"c\", \"d\");"
+
+# backup db
+echo "backup start..."
+run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4
+
+run_sql "DROP DATABASE $DB;"
+
+# restore db
+echo "restore start..."
+run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --online
+
+table_count=$(run_sql "use $DB; show tables;" | grep "Tables_in" | wc -l)
+if [ "$table_count" -ne "2" ];then
+    echo "TEST: [$TEST_NAME] failed!"
+    exit 1
+fi
+
+run_sql "DROP DATABASE $DB;"
diff --git a/tests/br_db_online_newkv/run.sh b/tests/br_db_online_newkv/run.sh
new file mode 100755
index 000000000..d8c3f15ff
--- /dev/null
+++ b/tests/br_db_online_newkv/run.sh
@@ -0,0 +1,77 @@
+#!/bin/sh
+#
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.

+set -eu
+DB="$TEST_NAME"
+
+run_sql "CREATE DATABASE $DB;"
+
+run_sql "CREATE TABLE $DB.usertable1 ( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.usertable1 VALUES (\"a\", \"b\");"
+run_sql "INSERT INTO $DB.usertable1 VALUES (\"aa\", \"b\");"
+
+run_sql "CREATE TABLE $DB.usertable2 ( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.usertable2 VALUES (\"c\", \"d\");"
+
+# backup db
+echo "backup start..."
+run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4
+
+run_sql "DROP DATABASE $DB;"
+
+# enable placement rules
+echo "config set enable-placement-rules true" | pd-ctl
+
+# add new tikv for restore
+# actual tikv_addr are TIKV_ADDR${i}
+TIKV_ADDR="127.0.0.1:2017"
+TIKV_STATUS_ADDR="127.0.0.1:2019"
+TIKV_COUNT=3
+
+echo "Starting restore TiKV..."
+for i in $(seq $TIKV_COUNT); do + tikv-server \ + --pd "$PD_ADDR" \ + -A "$TIKV_ADDR$i" \ + --status-addr "$TIKV_STATUS_ADDR$i" \ + --log-file "$TEST_DIR/restore-tikv${i}.log" \ + -C "tests/config/restore-tikv.toml" \ + -s "$TEST_DIR/restore-tikv${i}" & +done +sleep 5 + +# restore db +echo "restore start..." +run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --online + +table_count=$(run_sql "use $DB; show tables;" | grep "Tables_in" | wc -l) +if [ "$table_count" -ne "2" ];then + echo "TEST: [$TEST_NAME] failed!" + exit 1 +fi + +echo "config set enable-placement-rules false" | pd-ctl + +run_sql "DROP DATABASE $DB;" diff --git a/tests/br_debug_meta/run.sh b/tests/br_debug_meta/run.sh index 1dcfccefe..8dc3ef5a3 100644 --- a/tests/br_debug_meta/run.sh +++ b/tests/br_debug_meta/run.sh @@ -15,28 +15,33 @@ set -eu DB="$TEST_NAME" +TABLE="usertable1" run_sql "CREATE DATABASE $DB;" -run_sql "CREATE TABLE $DB.usertable1 ( \ +run_sql "CREATE TABLE $DB.$TABLE( \ YCSB_KEY varchar(64) NOT NULL, \ FIELD0 varchar(1) DEFAULT NULL, \ PRIMARY KEY (YCSB_KEY) \ ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" -run_sql "INSERT INTO $DB.usertable1 VALUES (\"a\", \"b\");" -run_sql "INSERT INTO $DB.usertable1 VALUES (\"aa\", \"b\");" +run_sql "INSERT INTO $DB.$TABLE VALUES (\"a\", \"b\");" +run_sql "INSERT INTO $DB.$TABLE VALUES (\"aa\", \"b\");" + +row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') # backup table echo "backup start..." run_br --pd $PD_ADDR backup table --db $DB --table usertable1 -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 +run_sql "DROP DATABASE $DB;" + # Test validate decode run_br validate decode -s "local://$TEST_DIR/$DB" # should generate backupmeta.json if [ ! -f "$TEST_DIR/$DB/backupmeta.json" ]; then - echo "TEST: [$TEST_NAME] failed!" + echo "TEST: [$TEST_NAME] decode failed!" exit 1 fi @@ -45,14 +50,21 @@ run_br validate encode -s "local://$TEST_DIR/$DB" # should generate backupmeta_from_json if [ ! -f "$TEST_DIR/$DB/backupmeta_from_json" ]; then - echo "TEST: [$TEST_NAME] failed!" + echo "TEST: [$TEST_NAME] encode failed!" exit 1 fi -DIFF=$(diff $TEST_DIR/$DB/backupmeta_from_json $TEST_DIR/$DB/backupmeta) -if [ "$DIFF" != "" ] -then - echo "TEST: [$TEST_NAME] failed!" +# replace backupmeta +mv "$TEST_DIR/$DB/backupmeta_from_json" "$TEST_DIR/$DB/backupmeta" + +# restore table +echo "restore start..." +run_br --pd $PD_ADDR restore table --db $DB --table usertable1 -s "local://$TEST_DIR/$DB" + +row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') + +if [ "${row_count_ori}" != "${row_count_new}" ];then + echo "TEST: [$TEST_NAME] failed!, row count not equal after restore" exit 1 fi diff --git a/tests/br_full_ddl/run.sh b/tests/br_full_ddl/run.sh index 3db1ecd60..e50ef1ecf 100755 --- a/tests/br_full_ddl/run.sh +++ b/tests/br_full_ddl/run.sh @@ -28,7 +28,7 @@ for i in $(seq $DDL_COUNT); do run_sql "USE $DB; ALTER TABLE $TABLE ADD INDEX (FIELD$i);" done -for i in $(sql $DDL_COUNT); do +for i in $(seq $DDL_COUNT); do if (( RANDOM % 2 )); then run_sql "USE $DB; ALTER TABLE $TABLE DROP INDEX FIELD$i;" fi @@ -36,7 +36,7 @@ done # backup full echo "backup start..." 
-br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG checksum_count=$(cat $LOG | grep "fast checksum success" | wc -l | xargs) @@ -50,7 +50,7 @@ run_sql "DROP DATABASE $DB;" # restore full echo "restore start..." -br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR +run_br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') diff --git a/tests/br_incremental/run.sh b/tests/br_incremental/run.sh index bb6a42efb..b6a6061de 100755 --- a/tests/br_incremental/run.sh +++ b/tests/br_incremental/run.sh @@ -20,55 +20,38 @@ TABLE="usertable" run_sql "CREATE DATABASE $DB;" go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB - -row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +row_count_ori_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') # full backup echo "full backup start..." -run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 - -run_sql "DROP TABLE $DB.$TABLE;" - -# full restore -echo "full restore start..." -run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB" --pd $PD_ADDR - -row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') - -if [ "$row_count_ori" -ne "$row_count_new" ];then - echo "TEST: [$TEST_NAME] full br failed!" - exit 1 -fi +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 go-ycsb run mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB -row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') -last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB" | tail -n1) - -# clean up data -rm -rf $TEST_DIR/$DB - # incremental backup echo "incremental backup start..." -run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts - -start_ts=$(br validate decode --field="start-version" -s "local://$TEST_DIR/$DB" | tail -n1) -end_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB" | tail -n1) +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts +row_count_ori_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') -echo "start version: $start_ts, end version: $end_ts" +run_sql "DROP DATABASE $DB;" +# full restore +echo "full restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${row_count_ori_full}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi # incremental restore echo "incremental restore start..." 
-run_br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR
-
-row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
-
-echo "[original] row count: $row_count_ori, [after br] row count: $row_count_new"
-
-if [ "$row_count_ori" -eq "$row_count_new" ];then
-    echo "TEST: [$TEST_NAME] successed!"
-else
-    echo "TEST: [$TEST_NAME] failed!"
+run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR
+row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
+# check incremental restore
+if [ "${row_count_inc}" != "${row_count_ori_inc}" ];then
+    echo "TEST: [$TEST_NAME] incremental restore fail on database $DB"
     exit 1
 fi
diff --git a/tests/br_incremental_ddl/run.sh b/tests/br_incremental_ddl/run.sh
new file mode 100755
index 000000000..d9a88709b
--- /dev/null
+++ b/tests/br_incremental_ddl/run.sh
@@ -0,0 +1,74 @@
+#!/bin/sh
+#
+# Copyright 2019 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+DB="$TEST_NAME"
+TABLE="usertable"
+ROW_COUNT=100
+PATH="tests/$TEST_NAME:bin:$PATH"
+
+echo "load data..."
+# create database
+run_sql "CREATE DATABASE IF NOT EXISTS $DB;"
+# create table
+run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);"
+# insert records
+for i in $(seq $ROW_COUNT); do
+    run_sql "INSERT INTO ${DB}.${TABLE}(c1) VALUES ($i);"
+done
+
+# full backup
+echo "full backup start..."
+run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE --ratelimit 5 --concurrency 4
+# run ddls
+echo "run ddls..."
+run_sql "RENAME TABLE ${DB}.${TABLE} to ${DB}.${TABLE}1;"
+run_sql "DROP TABLE ${DB}.${TABLE}1;"
+run_sql "DROP DATABASE ${DB};"
+run_sql "CREATE DATABASE ${DB};"
+run_sql "CREATE TABLE ${DB}.${TABLE}1 (c2 CHAR(255));"
+run_sql "RENAME TABLE ${DB}.${TABLE}1 to ${DB}.${TABLE};"
+run_sql "TRUNCATE TABLE ${DB}.${TABLE};"
+# insert records
+for i in $(seq $ROW_COUNT); do
+    run_sql "INSERT INTO ${DB}.${TABLE}(c2) VALUES ('$i');"
+done
+# incremental backup
+echo "incremental backup start..."
+last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1)
+run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts
+
+run_sql "DROP DATABASE $DB;"
+# full restore
+echo "full restore start..."
+run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR
+row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
+# check full restore
+if [ "${row_count_full}" != "${ROW_COUNT}" ];then
+    echo "TEST: [$TEST_NAME] full restore fail on database $DB"
+    exit 1
+fi
+# incremental restore
+echo "incremental restore start..."
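+# The incremental restore below must replay the DDLs recorded after
+# last_backup_ts (RENAME/DROP/CREATE/TRUNCATE) on top of the full restore.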
+run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR
+row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
+# check incremental restore
+if [ "${row_count_inc}" != "${ROW_COUNT}" ];then
+    echo "TEST: [$TEST_NAME] incremental restore fail on database $DB"
+    exit 1
+fi
+run_sql "INSERT INTO ${DB}.${TABLE}(c2) VALUES ('1');"
+
+run_sql "DROP DATABASE $DB;"
diff --git a/tests/br_incremental_index/run.sh b/tests/br_incremental_index/run.sh
new file mode 100755
index 000000000..f4b4b9de7
--- /dev/null
+++ b/tests/br_incremental_index/run.sh
@@ -0,0 +1,74 @@
+#!/bin/sh
+#
+# Copyright 2019 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+DB="$TEST_NAME"
+TABLE="usertable"
+ROW_COUNT=100
+PATH="tests/$TEST_NAME:bin:$PATH"
+
+echo "load data..."
+# create database
+run_sql "CREATE DATABASE IF NOT EXISTS $DB;"
+# create table
+run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);"
+# insert records
+for i in $(seq $ROW_COUNT); do
+    run_sql "INSERT INTO ${DB}.${TABLE} VALUES ($i);"
+done
+
+# full backup
+echo "backup full start..."
+run_sql "CREATE INDEX idx_c1 ON ${DB}.${TABLE}(c1)" &
+run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4
+wait
+# run ddls
+echo "run ddls..."
+run_sql "ALTER TABLE ${DB}.${TABLE} ADD COLUMN c2 INT NOT NULL;";
+run_sql "ALTER TABLE ${DB}.${TABLE} ADD COLUMN c3 INT NOT NULL;";
+run_sql "ALTER TABLE ${DB}.${TABLE} DROP COLUMN c3;";
+# incremental backup
+echo "incremental backup start..."
+last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1)
+run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts
+
+run_sql "DROP DATABASE $DB;"
+# full restore
+echo "full restore start..."
+run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR
+row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
+# check full restore
+if [ "${row_count_full}" != "${ROW_COUNT}" ];then
+    echo "TEST: [$TEST_NAME] full restore fail on database $DB"
+    exit 1
+fi
+# incremental restore
+echo "incremental restore start..."
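+# The incremental restore applies the schema changes captured after the full
+# backup (ADD COLUMN c2/c3, DROP COLUMN c3); the two-column INSERT below
+# verifies that column c2 exists afterwards.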
+run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR
+row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
+# check incremental restore
+if [ "${row_count_inc}" != "${ROW_COUNT}" ];then
+    echo "TEST: [$TEST_NAME] incremental restore fail on database $DB"
+    exit 1
+fi
+run_sql "INSERT INTO ${DB}.${TABLE} VALUES (1, 1);"
+row_count_insert=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
+# check insert count
+if [ "${row_count_insert}" != "$(expr $row_count_inc + 1)" ];then
+    echo "TEST: [$TEST_NAME] insert record fail on database $DB"
+    exit 1
+fi
+
+run_sql "DROP DATABASE $DB;"
diff --git a/tests/br_key_locked/codec.go b/tests/br_key_locked/codec.go
index cd02c35d7..39ff110e5 100644
--- a/tests/br_key_locked/codec.go
+++ b/tests/br_key_locked/codec.go
@@ -20,7 +20,7 @@ import (
 
 	"github.com/pingcap/errors"
 	"github.com/pingcap/kvproto/pkg/metapb"
-	pd "github.com/pingcap/pd/client"
+	pd "github.com/pingcap/pd/v4/client"
 	"github.com/pingcap/tidb/util/codec"
 )
 
diff --git a/tests/br_key_locked/locker.go b/tests/br_key_locked/locker.go
index 25f5b526f..9825faff7 100644
--- a/tests/br_key_locked/locker.go
+++ b/tests/br_key_locked/locker.go
@@ -33,7 +33,7 @@ import (
 	"github.com/pingcap/kvproto/pkg/kvrpcpb"
 	"github.com/pingcap/log"
 	"github.com/pingcap/parser/model"
-	pd "github.com/pingcap/pd/client"
+	pd "github.com/pingcap/pd/v4/client"
 	"github.com/pingcap/tidb/store/tikv"
 	"github.com/pingcap/tidb/store/tikv/oracle"
 	"github.com/pingcap/tidb/store/tikv/tikvrpc"
diff --git a/tests/br_rawkv/client.go b/tests/br_rawkv/client.go
new file mode 100644
index 000000000..bd13839f6
--- /dev/null
+++ b/tests/br_rawkv/client.go
@@ -0,0 +1,325 @@
+package main
+
+import (
+	"bytes"
+	"encoding/hex"
+	"flag"
+	"fmt"
+	"hash/crc64"
+	"math/rand"
+	"time"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/tidb/config"
+	"github.com/pingcap/tidb/store/tikv"
+	"github.com/prometheus/common/log"
+)
+
+var (
+	pdAddr      = flag.String("pd", "127.0.0.1:2379", "Address of PD")
+	runMode     = flag.String("mode", "", "Mode. One of 'rand-gen', 'checksum', 'scan' and 'delete'")
+	startKeyStr = flag.String("start-key", "", "Start key in hex")
+	endKeyStr   = flag.String("end-key", "", "End key in hex")
+	keyMaxLen   = flag.Int("key-max-len", 32, "Max length of keys for rand-gen mode")
+	concurrency = flag.Int("concurrency", 32, "Concurrency to run rand-gen")
+	duration    = flag.Int("duration", 10, "Duration (seconds) of rand-gen")
+)
+
+func createClient(addr string) (*tikv.RawKVClient, error) {
+	cli, err := tikv.NewRawKVClient([]string{addr}, config.Security{})
+	return cli, err
+}
+
+func main() {
+	flag.Parse()
+
+	startKey, err := hex.DecodeString(*startKeyStr)
+	if err != nil {
+		log.Fatalf("Invalid startKey: %v, err: %+v", *startKeyStr, err)
+	}
+	endKey, err := hex.DecodeString(*endKeyStr)
+	if err != nil {
+		log.Fatalf("Invalid endKey: %v, err: %+v", *endKeyStr, err)
+	}
+	if len(endKey) == 0 {
+		log.Fatal("Empty endKey is not supported yet")
+	}
+
+	if *runMode == "test-rand-key" {
+		testRandKey(startKey, endKey, *keyMaxLen)
+		return
+	}
+
+	client, err := createClient(*pdAddr)
+	if err != nil {
+		log.Fatalf("Failed to create client to %v, err: %+v", *pdAddr, err)
+	}
+
+	switch *runMode {
+	case "rand-gen":
+		err = randGenWithDuration(client, startKey, endKey, *keyMaxLen, *concurrency, *duration)
+	case "checksum":
+		err = checksum(client, startKey, endKey)
+	case "scan":
+		err = scan(client, startKey, endKey)
+	case "delete":
+		err = deleteRange(client, startKey, endKey)
+	default:
+		// Reject unknown modes instead of exiting silently with no work done.
+		log.Fatalf("Unknown mode: %v", *runMode)
+	}
+
+	if err != nil {
+		log.Fatalf("Error: %+v", err)
+	}
+}
+
+func randGenWithDuration(client *tikv.RawKVClient, startKey, endKey []byte,
+	maxLen int, concurrency int, duration int) error {
+	var err error
+	ok := make(chan struct{})
+	go func() {
+		err = randGen(client, startKey, endKey, maxLen, concurrency)
+		ok <- struct{}{}
+	}()
+	select {
+	case <-time.After(time.Second * time.Duration(duration)):
+	case <-ok:
+	}
+	return err
+}
+
+func randGen(client *tikv.RawKVClient, startKey, endKey []byte, maxLen int, concurrency int) error {
+	log.Infof("Start rand-gen from %v to %v, maxLen %v", hex.EncodeToString(startKey), hex.EncodeToString(endKey), maxLen)
+	log.Infof("Rand-gen will keep running. Press Ctrl+C to stop it manually.")
+
+	// Cannot generate shorter key than commonPrefix
+	commonPrefixLen := 0
+	for ; commonPrefixLen < len(startKey) && commonPrefixLen < len(endKey) &&
+		startKey[commonPrefixLen] == endKey[commonPrefixLen]; commonPrefixLen++ {
+		continue
+	}
+
+	if maxLen < commonPrefixLen {
+		return errors.Errorf("maxLen (%v) < commonPrefixLen (%v)", maxLen, commonPrefixLen)
+	}
+
+	const batchSize = 32
+
+	errCh := make(chan error, concurrency)
+	for i := 0; i < concurrency; i++ {
+		go func() {
+			for {
+				keys := make([][]byte, 0, batchSize)
+				values := make([][]byte, 0, batchSize)
+
+				for i := 0; i < batchSize; i++ {
+					key := randKey(startKey, endKey, maxLen)
+					keys = append(keys, key)
+					value := randValue()
+					values = append(values, value)
+				}
+
+				err := client.BatchPut(keys, values)
+				if err != nil {
+					errCh <- errors.Trace(err)
+				}
+			}
+		}()
+	}
+
+	err := <-errCh
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	return nil
+}
+
+func testRandKey(startKey, endKey []byte, maxLen int) {
+	for {
+		k := randKey(startKey, endKey, maxLen)
+		if bytes.Compare(k, startKey) < 0 || bytes.Compare(k, endKey) >= 0 {
+			panic(hex.EncodeToString(k))
+		}
+	}
+}
+
+func randKey(startKey, endKey []byte, maxLen int) []byte {
+Retry:
+	for { // Regenerate on fail
+		result := make([]byte, 0, maxLen)
+
+		upperUnbounded := false
+		lowerUnbounded := false
+
+		for i := 0; i < maxLen; i++ {
+			upperBound := 256
+			if !upperUnbounded {
+				if i >= len(endKey) {
+					// The generated key is the same as endKey which is invalid. Regenerate it.
+					continue Retry
+				}
+				upperBound = int(endKey[i]) + 1
+			}
+
+			lowerBound := 0
+			if !lowerUnbounded {
+				if i >= len(startKey) {
+					lowerUnbounded = true
+				} else {
+					lowerBound = int(startKey[i])
+				}
+			}
+
+			if lowerUnbounded {
+				if rand.Intn(257) == 0 {
+					return result
+				}
+			}
+
+			value := rand.Intn(upperBound - lowerBound)
+			value += lowerBound
+
+			if value < upperBound-1 {
+				upperUnbounded = true
+			}
+			if value > lowerBound {
+				lowerUnbounded = true
+			}
+
+			result = append(result, uint8(value))
+		}
+
+		return result
+	}
+}
+
+func randValue() []byte {
+	result := make([]byte, 0, 512)
+	for i := 0; i < 512; i++ {
+		value := rand.Intn(257)
+		if value == 256 {
+			// A draw of 256 ends the value early (1/257 chance per byte),
+			// except at i == 0 so the value is never empty.
+			if i > 0 {
+				return result
+			}
+			value--
+		}
+		result = append(result, uint8(value))
+	}
+	return result
+}
+
+func checksum(client *tikv.RawKVClient, startKey, endKey []byte) error {
+	log.Infof("Start checksum on range %v to %v", hex.EncodeToString(startKey), hex.EncodeToString(endKey))
+
+	scanner := newRawKVScanner(client, startKey, endKey)
+	digest := crc64.New(crc64.MakeTable(crc64.ECMA))
+
+	var res uint64
+
+	for {
+		k, v, err := scanner.Next()
+		if err != nil {
+			return errors.Trace(err)
+		}
+		if len(k) == 0 {
+			break
+		}
+		// Fold the rolling CRC into the result after every KV pair.
+		_, _ = digest.Write(k)
+		_, _ = digest.Write(v)
+		res ^= digest.Sum64()
+	}
+
+	fmt.Printf("Checksum result: %016x\n", res)
+	return nil
+}
+
+func deleteRange(client *tikv.RawKVClient, startKey, endKey []byte) error {
+	log.Infof("Start delete data in range %v to %v", hex.EncodeToString(startKey), hex.EncodeToString(endKey))
+	return client.DeleteRange(startKey, endKey)
+}
+
+func scan(client *tikv.RawKVClient, startKey, endKey []byte) error {
+	log.Infof("Start scanning data in range %v to %v", hex.EncodeToString(startKey), hex.EncodeToString(endKey))
+
+	scanner := newRawKVScanner(client, startKey, endKey)
+
+	var key []byte
+	for {
+		k, v, err := scanner.Next()
+		if err != nil {
+			return errors.Trace(err)
+		}
+		if len(k) == 0 {
+			break
+		}
+		fmt.Printf("key: %v, value: %v\n", hex.EncodeToString(k), hex.EncodeToString(v))
+		if bytes.Compare(key, k) >= 0 {
+			log.Errorf("Scan result is not in order. "+
+				"Previous key: %v, Current key: %v",
+				hex.EncodeToString(key), hex.EncodeToString(k))
+		}
+		// Remember the last key so the ordering check compares adjacent keys.
+		key = k
+	}
+
+	log.Infof("Finished scanning.")
+	return nil
+}
+
+const defaultScanBatchSize = 128
+
+type rawKVScanner struct {
+	client    *tikv.RawKVClient
+	batchSize int
+
+	currentKey []byte
+	endKey     []byte
+
+	bufferKeys   [][]byte
+	bufferValues [][]byte
+	bufferCursor int
+	noMore       bool
+}
+
+func newRawKVScanner(client *tikv.RawKVClient, startKey, endKey []byte) *rawKVScanner {
+	return &rawKVScanner{
+		client:    client,
+		batchSize: defaultScanBatchSize,
+
+		currentKey: startKey,
+		endKey:     endKey,
+
+		noMore: false,
+	}
+}
+
+func (s *rawKVScanner) Next() ([]byte, []byte, error) {
+	if s.bufferCursor >= len(s.bufferKeys) {
+		if s.noMore {
+			return nil, nil, nil
+		}
+
+		s.bufferCursor = 0
+
+		batchSize := s.batchSize
+		var err error
+		s.bufferKeys, s.bufferValues, err = s.client.Scan(s.currentKey, s.endKey, batchSize)
+		if err != nil {
+			return nil, nil, errors.Trace(err)
+		}
+
+		if len(s.bufferKeys) < batchSize {
+			s.noMore = true
+		}
+
+		if len(s.bufferKeys) == 0 {
+			return nil, nil, nil
+		}
+
+		bufferKey := s.bufferKeys[len(s.bufferKeys)-1]
+		bufferKey = append(bufferKey, 0)
+		s.currentKey = bufferKey
+	}
+
+	key := s.bufferKeys[s.bufferCursor]
+	value := s.bufferValues[s.bufferCursor]
+	s.bufferCursor++
+	return key, value, nil
+}
diff --git a/tests/br_rawkv/run.sh b/tests/br_rawkv/run.sh
new file mode 100644
index 000000000..f57e76827
--- /dev/null
+++ b/tests/br_rawkv/run.sh
@@ -0,0 +1,85 @@
+#!/bin/sh
+#
+# Copyright 2019 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+
+BACKUP_DIR="raw_backup"
+
+checksum() {
+    bin/rawkv --pd $PD_ADDR --mode checksum --start-key $1 --end-key $2 | grep result | awk '{print $3}'
+}
+
+fail_and_exit() {
+    echo "TEST: [$TEST_NAME] failed!"
+    exit 1
+}
+
+checksum_empty=$(checksum 31 3130303030303030)
+
+# generate raw kv randomly in range[start-key, end-key) in 10s
+bin/rawkv --pd $PD_ADDR --mode rand-gen --start-key 31 --end-key 3130303030303030 --duration 10
+
+checksum_ori=$(checksum 31 3130303030303030)
+checksum_partial=$(checksum 311111 311122)
+
+# backup rawkv
+echo "backup start..."
+run_br --pd $PD_ADDR backup raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4
+
+# delete data in range[start-key, end-key)
+bin/rawkv --pd $PD_ADDR --mode delete --start-key 31 --end-key 3130303030303030
+
+# Ensure the data is deleted
+checksum_new=$(checksum 31 3130303030303030)
+
+if [ "$checksum_new" != "$checksum_empty" ];then
+    echo "failed to delete data in range"
+    fail_and_exit
+fi
+
+# restore rawkv
+echo "restore start..."
+run_br --pd $PD_ADDR restore raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4
+
+checksum_new=$(checksum 31 3130303030303030)
+
+if [ "$checksum_new" != "$checksum_ori" ];then
+    echo "checksum failed after restore"
+    fail_and_exit
+fi
+
+# delete data in range[start-key, end-key)
+bin/rawkv --pd $PD_ADDR --mode delete --start-key 31 --end-key 3130303030303030
+
+# Ensure the data is deleted
+checksum_new=$(checksum 31 3130303030303030)
+
+if [ "$checksum_new" != "$checksum_empty" ];then
+    echo "failed to delete data in range"
+    fail_and_exit
+fi
+
+# FIXME: restore rawkv partially after changing endkey to inclusive
+# echo "restore start..."
+# run_br --pd $PD_ADDR restore raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 311111 --end 311122 --format hex --concurrency 4
+#
+# checksum_new=$(checksum 31 3130303030303030)
+#
+# if [ "$checksum_new" != "$checksum_partial" ];then
+#     echo "checksum failed after restore"
+#     fail_and_exit
+# fi
+
+echo "TEST: [$TEST_NAME] succeeded!"
diff --git a/tests/br_s3/run.sh b/tests/br_s3/run.sh
new file mode 100755
index 000000000..422a1270d
--- /dev/null
+++ b/tests/br_s3/run.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+#
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eux
+DB="$TEST_NAME"
+TABLE="usertable"
+DB_COUNT=3
+
+# start the s3 server
+export MINIO_ACCESS_KEY=brs3accesskey
+export MINIO_SECRET_KEY=brs3secretkey
+export MINIO_BROWSER=off
+export AWS_ACCESS_KEY_ID=$MINIO_ACCESS_KEY
+export AWS_SECRET_ACCESS_KEY=$MINIO_SECRET_KEY
+export S3_ENDPOINT=127.0.0.1:24927
+rm -rf "$TEST_DIR/$DB"
+mkdir -p "$TEST_DIR/$DB"
+bin/minio server --address $S3_ENDPOINT "$TEST_DIR/$DB" &
+MINIO_PID=$!
+i=0
+while ! curl -o /dev/null -v -s "http://$S3_ENDPOINT/"; do
+    i=$(($i+1))
+    if [ $i -gt 7 ]; then
+        echo 'Failed to start minio'
+        exit 1
+    fi
+    sleep 2
+done
+
+stop_minio() {
+    kill -2 $MINIO_PID
+}
+trap stop_minio EXIT
+
+s3cmd --access_key=$MINIO_ACCESS_KEY --secret_key=$MINIO_SECRET_KEY --host=$S3_ENDPOINT --host-bucket=$S3_ENDPOINT --no-ssl mb s3://mybucket
+
+# Fill in the database
+for i in $(seq $DB_COUNT); do
+    run_sql "CREATE DATABASE $DB${i};"
+    go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB${i}
+done
+
+for i in $(seq $DB_COUNT); do
+    row_count_ori[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB${i}.$TABLE;" | awk '/COUNT/{print $2}')
+done
+
+# backup full
+echo "backup start..."
+run_br --pd $PD_ADDR backup full -s "s3://mybucket/$DB" --s3.endpoint="http://$S3_ENDPOINT"
+
+for i in $(seq $DB_COUNT); do
+    run_sql "DROP DATABASE $DB${i};"
+done
+
+# restore full
+echo "restore start..."
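+# Restore reads the backup back out of the minio bucket through the same
+# --s3.endpoint override that the backup used.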
+run_br restore full -s "s3://mybucket/$DB" --pd $PD_ADDR --s3.endpoint="http://$S3_ENDPOINT" + +for i in $(seq $DB_COUNT); do + row_count_new[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB${i}.$TABLE;" | awk '/COUNT/{print $2}') +done + +fail=false +for i in $(seq $DB_COUNT); do + if [ "${row_count_ori[i]}" != "${row_count_new[i]}" ];then + fail=true + echo "TEST: [$TEST_NAME] fail on database $DB${i}" + fi + echo "database $DB${i} [original] row count: ${row_count_ori[i]}, [after br] row count: ${row_count_new[i]}" +done + +if $fail; then + echo "TEST: [$TEST_NAME] failed!" + exit 1 +else + echo "TEST: [$TEST_NAME] successed!" +fi + +for i in $(seq $DB_COUNT); do + run_sql "DROP DATABASE $DB${i};" +done diff --git a/tests/br_s3/workload b/tests/br_s3/workload new file mode 100644 index 000000000..19336335e --- /dev/null +++ b/tests/br_s3/workload @@ -0,0 +1,12 @@ +recordcount=5000 +operationcount=0 +workload=core + +readallfields=true + +readproportion=0 +updateproportion=0 +scanproportion=0 +insertproportion=0 + +requestdistribution=uniform diff --git a/tests/br_table_partition/run.sh b/tests/br_table_partition/run.sh index fe0ce874b..ce7fe1df1 100755 --- a/tests/br_table_partition/run.sh +++ b/tests/br_table_partition/run.sh @@ -30,25 +30,23 @@ done echo "backup start..." run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 -for i in $(seq $DB_COUNT); do - run_sql "DROP DATABASE $DB${i};" -done +run_sql "DROP DATABASE $DB;" # restore full echo "restore start..." run_br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR -for i in $(seq $DB_COUNT); do +for i in $(seq $TABLE_COUNT); do row_count_new[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE${i};" | awk '/COUNT/{print $2}') done fail=false -for i in $(seq $DB_COUNT); do +for i in $(seq $TABLE_COUNT); do if [ "${row_count_ori[i]}" != "${row_count_new[i]}" ];then fail=true - echo "TEST: [$TEST_NAME] fail on database $DB${i}" + echo "TEST: [$TEST_NAME] fail on table $DB.$TABLE${i}" fi - echo "database $DB${i} [original] row count: ${row_count_ori[i]}, [after br] row count: ${row_count_new[i]}" + echo "table $DB.$TABLE${i} [original] row count: ${row_count_ori[i]}, [after br] row count: ${row_count_new[i]}" done if $fail; then diff --git a/tests/br_tls/certificates/ca.pem b/tests/br_tls/certificates/ca.pem new file mode 100644 index 000000000..49098d653 --- /dev/null +++ b/tests/br_tls/certificates/ca.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDgDCCAmigAwIBAgIUHWvlRJydvYTR0ot3b8f6IlSHcGUwDQYJKoZIhvcNAQEL +BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl +aWppbmcxEDAOBgNVBAoTB1BpbmdDQVAxEjAQBgNVBAMTCU15IG93biBDQTAgFw0y +MDAyMTgwNzQxMDBaGA8yMTIwMDEyNTA3NDEwMFowVzELMAkGA1UEBhMCQ04xEDAO +BgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxEDAOBgNVBAoTB1BpbmdD +QVAxEjAQBgNVBAMTCU15IG93biBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOAdNtjanFhPaKJHQjr7+h/Cpps5bLc6S1vmgi/EIi9PKv3eyDgtlW1r +As2sjXRMHjcuZp2hHJ9r9FrMQD1rQQq5vJzQqM+eyWLc2tyZWXNWkZVvpjU4Hy5k +jZFLXoyHgAvps/LGu81F5Lk5CvLHswWTyGQUCFi1l/cYcQg6AExh2pO/WJu4hQhe +1mBBIKsJhZ5b5tWruLeI+YIjD1oo1ADMHYLK1BHON2fUmUHRGbrYKu4yCuyip3wn +rbVlpabn7l1JBMatCUJLHR6VWQ2MNjrOXAEUYm4xGEN+tUYyUOGl5mHFguLl3OIn +wj+1dT3WOr/NraPYlwVOnAd9GNbPJj0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG +MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ0CEqxLwEpI6J2gYJRg15oWZrj/ +MA0GCSqGSIb3DQEBCwUAA4IBAQCf8xRf7q1xAaGrc9HCPvN4OFkxDwz1CifrvrLR +ZgIWGUdCHDW2D1IiWKZQWeJKC1otA5x0hrS5kEGfkLFhADEU4txwp70DQaBArPti +pSgheIEbaT0H3BUTYSgS3VL2HjxN5OVMN6jNG3rWyxnJpNOCsJhhJXPK50CRZ7fk 
+Dcodj6FfEM2bfp2bGkxyVtUch7eepfUVbslXa7jE7Y8M3cr9NoLUcSP6D1RJWkNd +dBQoUsb6Ckq27ozEKOgwuBVv4BrrbFN//+7WHP8Vy6sSMyd+dJLBi6wehJjQhIz6 +vqLWE81rSJuxZqjLpCkFdeEF+9SRjWegU0ZDM4V+YeX53BPC +-----END CERTIFICATE----- diff --git a/tests/br_tls/certificates/client-key.pem b/tests/br_tls/certificates/client-key.pem new file mode 100644 index 000000000..43b021796 --- /dev/null +++ b/tests/br_tls/certificates/client-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA06qb7HABWHrU4CvBUO/2hXGgobi/UlTqTrYGZoJqSrvhKCP6 +HOivZjyWaSMDIfrguN+0C+bd80/5XGMmwjDt8PuZ+Ef3jcJLuB1e+Kms0s5tiTng +6Z028PkSpGvKXjPebWu7zoxVpDcTGM6MmlZQqpOuIgwi+7WX/bIgIu9wooCvJEGq +hScG2wpvK3txnpsVA4eXXdFoH0R5mtqbxVFfhwKMDqga4pRwJStLTDiMrtUz+OKc +rMIrkH4ndhvm2UYTVvhHlkZ3ooeDYsu40NnvedwaE+Ii7EnmcSDF9PaCVrXSK9F/ +KNRXX/x67PMWCVqcNyGtRsCuDe7FnDfGpudVXwIDAQABAoIBAHAzW/v1U4FHe1hp +WUxCJ3eNSAzyFdja0mlu6+2i7B05gpz4lTiFz5RuQXzx5lM43a6iRpqYgsbbed+T +X5RIw5iehnuqCnvGpsSuLQ27Q7VrX30ChUrQ37LVFSC7Usak0B9IoIFYun0WBLV9 +p+KYJqKFLiU2McUj+bGtnoNmUVqRzXQosoQue/pS9OknZ3NU7FxiyI3o4ME8rDvv +9x4vc1zcqbGXTQB224kOT0xoYt8RTmIbHvkR6/yszAtHDBcdzhINVuf38lv9FvaN +FxymwsY4IKPumQZlOEzHvSnpHTrwBMFdXjqpX1VxQb3yznEK+01MHf/tYsiU57IS +WVQMTeECgYEA7Fk0w66LGgdeeWrNTSTBCBPTofDVmR7Tro6k++5XTRt552ZoVz2l +8Lfi/Px5hIyve9gnM7slWrQ72JIQ5xVYZHtic3iwVFuRAD/QVfWU/SNsRsSB/93M +3BEumwJA6vN/qvkZueos3LOxN8kExk6GD0wIl6HjTeJPbbPHqmk6Pr0CgYEA5UQI +skaj8QGpjG8Hc10FeJpYsZiZA7gJaNu4RPqBj+1RHu/eYrL2mouooZdJfIJTmlTz +4NJcfb+Dl6qwbHUQ3mddhauFu1/YRwmaR5QKjwaBdeZbly9ljsRISFpjtosc7IBA +/Bl83xtbCboMdm7cH49X2CgRQ1uVFWraye0MBEsCgYEA43vtHFdYjaIAHa9dkV3J +6aNjtF/gxzNznXSwecfrAU1r5PydezLcEDh94vCDacAbe4EOIm2Dw6zsWUQlvrW9 +0WEs3mWQmnFTvECvnrz0PT2mDus/EO4EKuDi0dG2eC4MeJywVVB/A6J09XOnA9Q6 +lmihcIkiBinIN5etm2kS5aUCgYBCdcRnmZ6woKC7uvvX72FEosmPQgMpVtIzeW4j +YNLqHAtmAnbe+a4PAukxXp/I3ibKGFJSG+j/8uJ8tthJuG3ZavFrbFtqA9C4VwpI +MZwV9fbVbJ+kZfL0veWOQ9Wf9xe9Xzh3XBQcwNtVKH+wXVamN3FpkcPfWM8Q1Fb0 +LilLnQKBgQCq7+YlSnQX0rbmPTXVVb+B12rbqLDnqA0EuaVGrdu9zPPT04e5fpHU +SD33ibaEyeOF+zLg8T53whDbLJ0tURhUk+BlLTjdd99NXtyGMlfDnIsCnAeJhY8f +Iki6LYbbP2uWV4/5IDy9XW7J42Pfl9QyEVXq+PfTyPPjXC9/J4GOuw== +-----END RSA PRIVATE KEY----- diff --git a/tests/br_tls/certificates/client.pem b/tests/br_tls/certificates/client.pem new file mode 100644 index 000000000..7dace2f9d --- /dev/null +++ b/tests/br_tls/certificates/client.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIUaupI14PPUSshx7FmD7lsVPFahwAwDQYJKoZIhvcNAQEL +BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl +aWppbmcxEDAOBgNVBAoTB1BpbmdDQVAxEjAQBgNVBAMTCU15IG93biBDQTAgFw0y +MDAyMTgwNzQ4MDBaGA8yMTIwMDEyNTA3NDgwMFowETEPMA0GA1UEAxMGY2xpZW50 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA06qb7HABWHrU4CvBUO/2 +hXGgobi/UlTqTrYGZoJqSrvhKCP6HOivZjyWaSMDIfrguN+0C+bd80/5XGMmwjDt +8PuZ+Ef3jcJLuB1e+Kms0s5tiTng6Z028PkSpGvKXjPebWu7zoxVpDcTGM6MmlZQ +qpOuIgwi+7WX/bIgIu9wooCvJEGqhScG2wpvK3txnpsVA4eXXdFoH0R5mtqbxVFf +hwKMDqga4pRwJStLTDiMrtUz+OKcrMIrkH4ndhvm2UYTVvhHlkZ3ooeDYsu40Nnv +edwaE+Ii7EnmcSDF9PaCVrXSK9F/KNRXX/x67PMWCVqcNyGtRsCuDe7FnDfGpudV +XwIDAQABo4GDMIGAMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcD +AjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBRqlq/slflqw/cdlE+xNcnmmxZwlTAf +BgNVHSMEGDAWgBSdAhKsS8BKSOidoGCUYNeaFma4/zALBgNVHREEBDACggAwDQYJ +KoZIhvcNAQELBQADggEBAMGC48O77wZHZRRxXIpTQDMUSpGTKks76l+s1N7sMLrG +DCQi/XFVfV8e/Z1qs224IyU1IGXXcdwK0Zfa9owUmVmiHE8lznv7m9m7j4BGOshc +pvnJaeuUluKR/QHzwpMsUKudoEyRjn09e0Jl0/IcsKh13rzgd458XR0ShCjxybo4 +nQ1aZb1wOPLG6tpUYsV+x2Coc6TgnJWJYlDbRfpIuj6y16T1kKuWzpm6VU3kbiJ9 
+/nzDgauuJHIlXEWL9dBZcpzUibFswIQyGsK7c4AJrtY1OGx0/2nZIIjtGY3gtWyX +XGV9c4kM695gl5rJndB4IPl5GQeJBCNyIaVybh7Va70= +-----END CERTIFICATE----- diff --git a/tests/br_tls/certificates/server-key.pem b/tests/br_tls/certificates/server-key.pem new file mode 100644 index 000000000..2779d6ec6 --- /dev/null +++ b/tests/br_tls/certificates/server-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAq9mcQG/nSLREM2r7s2tCKCE/1KJQvV0xmkIglFD2VDDfYW+C +mBME5LNWbYR6L0yCVHU8B7aVnw1FsbiF4TpUY3w/r4mOGl7QbGivMYvRe6Nh2xUO +TvctwFyv2FvrtBX1rZ5/8QLbz1IFHOtTV7QUzLzWq3fSAiF1vhVsS3BUmh6QvWU8 +q9dylpmUQ22otSRXmirwEzFt9K+w3VK9Z6aac7e2XRurVPxbqgQUq2bblUhii8Fc +dCUA8NenlWp+H64mN2TzVaGb5Csr7SNS7AWDEPKfoo7W3H7bzKlmRVcPeRdftwti +SI1jfboxprya/nbTyBPE/yfLU/SYn/b89epziwIDAQABAoIBACPlI08OULgN90Tq +LsLuP3ZUY5nNgaHcKnU3JMj2FE3Hm5ElkpijOF1w3Dep+T+R8pMjnbNavuvnAMy7 +ZzOBVIknNcI7sDPv5AcQ4q8trkbt/I2fW0rBNIw+j/hYUuZdw+BNABpeZ31pe2nr ++Y+TLNkLBKfyMiqBxK88mE81mmZKblyvXCawW0A/iDDJ7fPNqoGF+y9ylTYaNRPk +aJGnaEZobJ4Lm5tSqW4gRX2ft6Hm67RkvVaopPFnlkvfusXUTFUqEVQCURRUqXbf +1ah2chUHxj22UdY9540H5yVNgEP3oR+uS/hbZqxKcJUTznUW5th3CyQPIKMlGlcB +p+zWlTECgYEAxlY4zGJw4QQwGYMKLyWCSHUgKYrKu2Ub2JKJFMTdsoj9H7DI+WHf +lQaO9NCOo2lt0ofYM1MzEpI5Cl/aMrPw+mwquBbxWdMHXK2eSsUQOVo9HtUjgK2t +J2AYFCfsYndo+hCj3ApMHgiY3sghTCXeycvT52bm11VeNVcs3pKxIYMCgYEA3dAJ +PwIfAB8t+6JCP2yYH4ExNjoMNYMdXqhz4vt3UGwgskRqTW6qdd9JvrRQ/JPvGpDy +T375h/+lLw0E4ljsnOPGSzbXNf4bYRHTwPOL+LqVM4Bg90hjclqphElHChxep1di +WcdArB0oae/l4M96z3GjfnXIUVOp8K6BUQCab1kCgYAFFAQUR5j4SfEpVg+WsXEq +hcUzCxixv5785pOX8opynctNWmtq5zSgTjCu2AAu8u4a69t/ROwT16aaO2YM0kqj +Ps3BNOUtFZgkqVVaOL13mnXiKjbkfo3majFzoqoMw13uuSpY4fKc+j9fxOQFXRrd +M9jTHfFfJhJpbzf44uyiHQKBgFIPwzvyVvG+l05/Ky83x9fv/frn4thxV45LmAQj +sHKqbjZFpWZcSOgu4aOSJlwrhsw3T84lVcAAzmXn1STAbVll01jEQz6QciSpacP6 +1pAAx240UqtptpD6BbkROxz8ffA/Hf3E/6Itb2QyAsP3PqI8kpYYkTG1WCvZA7Kq +HHiRAoGAXbUZ25LcrmyuxKWpbty8fck1tjKPvclQB35rOx6vgnfW6pcKMeebYvgq +nJka/QunEReOH/kGxAd/+ymvUBuFQCfFg3Aus+DtAuh9AkBr+cIyPjJqynnIT87J +MbkOw4uEhDJAtGUR9o1j83N1f05bnEwssXiXR0LZPylb9Qzc4tg= +-----END RSA PRIVATE KEY----- diff --git a/tests/br_tls/certificates/server.pem b/tests/br_tls/certificates/server.pem new file mode 100644 index 000000000..ea5ef2d5f --- /dev/null +++ b/tests/br_tls/certificates/server.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDjzCCAnegAwIBAgIUWBTDQm4xOYDxZBTkpCQouREtT8QwDQYJKoZIhvcNAQEL +BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl +aWppbmcxEDAOBgNVBAoTB1BpbmdDQVAxEjAQBgNVBAMTCU15IG93biBDQTAgFw0y +MDAyMTgwOTExMDBaGA8yMTIwMDEyNTA5MTEwMFowFjEUMBIGA1UEAxMLdGlkYi1z +ZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCr2ZxAb+dItEQz +avuza0IoIT/UolC9XTGaQiCUUPZUMN9hb4KYEwTks1ZthHovTIJUdTwHtpWfDUWx +uIXhOlRjfD+viY4aXtBsaK8xi9F7o2HbFQ5O9y3AXK/YW+u0FfWtnn/xAtvPUgUc +61NXtBTMvNard9ICIXW+FWxLcFSaHpC9ZTyr13KWmZRDbai1JFeaKvATMW30r7Dd +Ur1npppzt7ZdG6tU/FuqBBSrZtuVSGKLwVx0JQDw16eVan4friY3ZPNVoZvkKyvt +I1LsBYMQ8p+ijtbcftvMqWZFVw95F1+3C2JIjWN9ujGmvJr+dtPIE8T/J8tT9Jif +9vz16nOLAgMBAAGjgZEwgY4wDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsG +AQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBRVB/Bvdzvh +6WQRWpc9SzcbXLz77zAfBgNVHSMEGDAWgBSdAhKsS8BKSOidoGCUYNeaFma4/zAP +BgNVHREECDAGhwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAAqg5pgGQqORKRSdlY +wzVvzKaulpvjZfVMM6YiOUtmlU0CGWq7E3gLFzkvebpU0KsFlbyZ92h/2Fw5Ay2b +kxkCy18mJ4lGkvF0cU4UD3XheFMvD2QWWRX4WPpAhStofrWOXeyq3Div2+fQjMJd +kyeWUzPU7T467IWUHOWNsFAjfVHNsmG45qLGt+XQckHTvASX5IvN+5tkRUCW30vO +b3BdDQUFglGTUFU2epaZGTti0SYiRiY+9R3zFWX4uBcEBYhk9e/0BU8FqdWW5GjI 
+pFpH9t64CjKIdRQXpIn4cogK/GwyuRuDPV/RkMjrIqOi7pGejXwyDe9avHFVR6re
+oowA
+-----END CERTIFICATE-----
diff --git a/tests/br_tls/config/pd.toml b/tests/br_tls/config/pd.toml
new file mode 100644
index 000000000..69cb94b6f
--- /dev/null
+++ b/tests/br_tls/config/pd.toml
@@ -0,0 +1,9 @@
+# config of pd
+
+[security]
+# Path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty
+cacert-path = "tests/br_tls/certificates/ca.pem"
+# Path of file that contains X509 certificate in PEM format.
+cert-path = "tests/br_tls/certificates/server.pem"
+# Path of file that contains X509 key in PEM format.
+key-path = "tests/br_tls/certificates/server-key.pem"
diff --git a/tests/br_tls/config/tidb.toml b/tests/br_tls/config/tidb.toml
new file mode 100644
index 000000000..48a783332
--- /dev/null
+++ b/tests/br_tls/config/tidb.toml
@@ -0,0 +1,14 @@
+# config of tidb
+
+# Schema lease duration
+# There are a lot of DDLs in the tests; setting this
+# to 360s tests whether BR shuts down gracefully.
+lease = "360s"
+
+[security]
+# Path of file that contains list of trusted SSL CAs for connection with cluster components.
+cluster-ssl-ca = "tests/br_tls/certificates/ca.pem"
+# Path of file that contains X509 certificate in PEM format for connection with cluster components.
+cluster-ssl-cert = "tests/br_tls/certificates/server.pem"
+# Path of file that contains X509 key in PEM format for connection with cluster components.
+cluster-ssl-key = "tests/br_tls/certificates/server-key.pem"
\ No newline at end of file
diff --git a/tests/br_tls/config/tikv.toml b/tests/br_tls/config/tikv.toml
new file mode 100644
index 000000000..b4859a731
--- /dev/null
+++ b/tests/br_tls/config/tikv.toml
@@ -0,0 +1,19 @@
+# config of tikv
+
+[coprocessor]
+region-max-keys = 20
+region-split-keys = 12
+
+[rocksdb]
+max-open-files = 4096
+[raftdb]
+max-open-files = 4096
+[raftstore]
+# true (default value) for high reliability, this can prevent data loss when power failure.
+sync-log = false
+
+[security]
+# set the path for certificates. Empty string means disabling secure connections.
+ca-path = "tests/br_tls/certificates/ca.pem"
+cert-path = "tests/br_tls/certificates/server.pem"
+key-path = "tests/br_tls/certificates/server-key.pem"
diff --git a/tests/br_tls/run.sh b/tests/br_tls/run.sh
new file mode 100755
index 000000000..9c494b700
--- /dev/null
+++ b/tests/br_tls/run.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+#
+# Copyright 2019 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
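+
+# This test restarts the whole cluster with TLS enabled (start_services_withTLS)
+# and passes the client certificate to BR via --ca/--cert/--key.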
+
+set -eu
+
+cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+source $cur/../_utils/run_services
+
+DB="$TEST_NAME"
+TABLE="usertable1"
+TABLE2="usertable2"
+
+echo "Restart cluster with tls"
+start_services_withTLS "$cur"
+
+run_sql "DROP DATABASE IF EXISTS $DB;"
+run_sql "CREATE DATABASE $DB;"
+
+run_sql "CREATE TABLE $DB.$TABLE( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.$TABLE VALUES (\"a\", \"b\");"
+run_sql "INSERT INTO $DB.$TABLE VALUES (\"aa\", \"b\");"
+
+run_sql "CREATE TABLE $DB.$TABLE2( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.$TABLE2 VALUES (\"c\", \"d\");"
+
+# backup db
+echo "backup start..."
+run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --ca $cur/certificates/ca.pem --cert $cur/certificates/client.pem --key $cur/certificates/client-key.pem
+
+run_sql "DROP DATABASE $DB;"
+
+# restore db
+echo "restore start..."
+run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --ca $cur/certificates/ca.pem --cert $cur/certificates/client.pem --key $cur/certificates/client-key.pem
+
+table_count=$(run_sql "use $DB; show tables;" | grep "Tables_in" | wc -l)
+if [ "$table_count" -ne "2" ];then
+    echo "TEST: [$TEST_NAME] failed!"
+    exit 1
+fi
+
+run_sql "DROP DATABASE $DB;"
+
+echo "Restart services without tls"
+start_services
diff --git a/tests/br_z_gc_safepoint/gc.go b/tests/br_z_gc_safepoint/gc.go
index a18367259..d5a30361e 100644
--- a/tests/br_z_gc_safepoint/gc.go
+++ b/tests/br_z_gc_safepoint/gc.go
@@ -21,7 +21,7 @@ import (
 	"time"
 
 	"github.com/pingcap/log"
-	pd "github.com/pingcap/pd/client"
+	pd "github.com/pingcap/pd/v4/client"
 	"github.com/pingcap/tidb/store/tikv/oracle"
 	"go.uber.org/zap"
 )
diff --git a/tests/br_z_gc_safepoint/run.sh b/tests/br_z_gc_safepoint/run.sh
index 916ca1fa8..a76e97501 100755
--- a/tests/br_z_gc_safepoint/run.sh
+++ b/tests/br_z_gc_safepoint/run.sh
@@ -23,6 +23,8 @@ set -eu
 DB="$TEST_NAME"
 TABLE="usertable"
 
+MAX_UINT64=18446744073709551615
+
 run_sql "CREATE DATABASE $DB;"
 
 go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB
@@ -39,7 +41,25 @@ echo "backup start (expect fail)..."
 run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1
 
 if [ "$backup_gc_fail" -ne "1" ];then
-    echo "TEST: [$TEST_NAME] failed!"
+    echo "TEST: [$TEST_NAME] test check backup ts failed!"
+    exit 1
+fi
+
+backup_gc_fail=0
+echo "incremental backup start (expect fail)..."
+run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --lastbackupts 1 --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1
+
+if [ "$backup_gc_fail" -ne "1" ];then
+    echo "TEST: [$TEST_NAME] test check last backup ts failed!"
+    exit 1
+fi
+
+backup_gc_fail=0
+echo "incremental backup with max_uint64 start (expect fail)..."
+run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --lastbackupts $MAX_UINT64 --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1
+
+if [ "$backup_gc_fail" -ne "1" ];then
+    echo "TEST: [$TEST_NAME] test check max backup ts failed!"
exit 1 fi diff --git a/tests/config/restore-tikv.toml b/tests/config/restore-tikv.toml new file mode 100644 index 000000000..010711cd4 --- /dev/null +++ b/tests/config/restore-tikv.toml @@ -0,0 +1,17 @@ +# config of tikv + +[server] +labels = { exclusive = "restore" } + +[coprocessor] +region-max-keys = 20 +region-split-keys = 12 + +[rocksdb] +max-open-files = 4096 +[raftdb] +max-open-files = 4096 +[raftstore] +# true (default value) for high reliability, this can prevent data loss when power failure. +sync-log = false +capacity = "10GB" \ No newline at end of file diff --git a/tests/config/tikv.toml b/tests/config/tikv.toml index e93a16597..73323d878 100644 --- a/tests/config/tikv.toml +++ b/tests/config/tikv.toml @@ -11,3 +11,4 @@ max-open-files = 4096 [raftstore] # true (default value) for high reliability, this can prevent data loss when power failure. sync-log = false +capacity = "10GB" \ No newline at end of file diff --git a/tests/download_tools.sh b/tests/download_tools.sh new file mode 100755 index 000000000..e0689dd61 --- /dev/null +++ b/tests/download_tools.sh @@ -0,0 +1,57 @@ +#!/bin/sh +# +# Copyright 2020 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +# Download tools for running the integration test + +set -eu + +BIN="$(dirname "$0")/../bin" + +if [ "$(uname -s)" != Linux ]; then + echo 'Can only automatically download binaries on Linux.' + exit 1 +fi + +MISSING_TIDB_COMPONENTS= +for COMPONENT in tidb-server pd-server tikv-server pd-ctl; do + if [ ! -e "$BIN/$COMPONENT" ]; then + MISSING_TIDB_COMPONENTS="$MISSING_TIDB_COMPONENTS tidb-latest-linux-amd64/bin/$COMPONENT" + fi +done + +if [ -n "$MISSING_TIDB_COMPONENTS" ]; then + echo "Downloading latest TiDB bundle..." + # TODO: the url is going to change from 'latest' to 'nightly' someday. + curl -L -f -o "$BIN/tidb.tar.gz" "https://download.pingcap.org/tidb-latest-linux-amd64.tar.gz" + tar -x -f "$BIN/tidb.tar.gz" -C "$BIN/" $MISSING_TIDB_COMPONENTS + rm "$BIN/tidb.tar.gz" + mv "$BIN"/tidb-latest-linux-amd64/bin/* "$BIN/" + rmdir "$BIN/tidb-latest-linux-amd64/bin" + rmdir "$BIN/tidb-latest-linux-amd64" +fi + +if [ ! -e "$BIN/go-ycsb" ]; then + # TODO: replace this once there's a public downloadable release. + echo 'go-ycsb is missing. Please build manually following https://github.com/pingcap/go-ycsb#getting-started' + exit 1 +fi + +if [ ! -e "$BIN/minio" ]; then + echo "Downloading minio..." + curl -L -f -o "$BIN/minio" "https://dl.min.io/server/minio/release/linux-amd64/minio" + chmod a+x "$BIN/minio" +fi + +echo "All binaries are now available." diff --git a/tests/run.sh b/tests/run.sh index 3cedc7093..21d6b27ed 100755 --- a/tests/run.sh +++ b/tests/run.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # # Copyright 2019 PingCAP, Inc. # @@ -14,83 +14,11 @@ # limitations under the License. 
set -eu +cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +source $cur/_utils/run_services -TEST_DIR=/tmp/backup_restore_test - -PD_ADDR="127.0.0.1:2379" -TIDB_IP="127.0.0.1" -TIDB_PORT="4000" -TIDB_ADDR="127.0.0.1:4000" -TIDB_STATUS_ADDR="127.0.0.1:10080" -# actaul tikv_addr are TIKV_ADDR${i} -TIKV_ADDR="127.0.0.1:2016" -TIKV_COUNT=4 - -stop_services() { - killall -9 tikv-server || true - killall -9 pd-server || true - killall -9 tidb-server || true - - find "$TEST_DIR" -maxdepth 1 -not -path "$TEST_DIR" -not -name "*.log" | xargs rm -r || true -} - -start_services() { - stop_services - - mkdir -p "$TEST_DIR" - rm -f "$TEST_DIR"/*.log - - echo "Starting PD..." - bin/pd-server \ - --client-urls "http://$PD_ADDR" \ - --log-file "$TEST_DIR/pd.log" \ - --data-dir "$TEST_DIR/pd" & - # wait until PD is online... - while ! curl -o /dev/null -sf "http://$PD_ADDR/pd/api/v1/version"; do - sleep 1 - done - - echo "Starting TiKV..." - for i in $(seq $TIKV_COUNT); do - bin/tikv-server \ - --pd "$PD_ADDR" \ - -A "$TIKV_ADDR$i" \ - --log-file "$TEST_DIR/tikv${i}.log" \ - -C "tests/config/tikv.toml" \ - -s "$TEST_DIR/tikv${i}" & - done - sleep 1 - - echo "Starting TiDB..." - bin/tidb-server \ - -P 4000 \ - --status 10080 \ - --store tikv \ - --path "$PD_ADDR" \ - --config "tests/config/tidb.toml" \ - --log-file "$TEST_DIR/tidb.log" & - - echo "Verifying TiDB is started..." - i=0 - while ! curl -o /dev/null -sf "http://$TIDB_IP:10080/status"; do - i=$((i+1)) - if [ "$i" -gt 10 ]; then - echo 'Failed to start TiDB' - exit 1 - fi - sleep 3 - done - - i=0 - while ! curl "http://$PD_ADDR/pd/api/v1/cluster/status" -sf | grep -q "\"is_initialized\": true"; do - i=$((i+1)) - if [ "$i" -gt 10 ]; then - echo 'Failed to bootstrap cluster' - exit 1 - fi - sleep 3 - done -} +mkdir -p "$TEST_DIR" +rm -f "$TEST_DIR"/*.log trap stop_services EXIT start_services @@ -100,7 +28,7 @@ if [ "${1-}" = '--debug' ]; then read line fi -for script in tests/*/run.sh; do +for script in tests/${TEST_NAME-*}/run.sh; do echo "*===== Running test $script... =====*" TEST_DIR="$TEST_DIR" \ PD_ADDR="$PD_ADDR" \ @@ -111,5 +39,5 @@ for script in tests/*/run.sh; do TIKV_ADDR="$TIKV_ADDR" \ PATH="tests/_utils:bin:$PATH" \ TEST_NAME="$(basename "$(dirname "$script")")" \ - sh "$script" + bash "$script" done diff --git a/tools.json b/tools.json index e3dd19414..2b41d4fce 100644 --- a/tools.json +++ b/tools.json @@ -18,7 +18,7 @@ }, { "Repository": "github.com/golangci/golangci-lint/cmd/golangci-lint", - "Commit": "901cf25e20f86b7e9dc6f73eaba5afbd0cbdc257" + "Commit": "b9eef79121fff235d0d794c176ffa2b3d9bd422f" } ], "RetoolVersion": "1.3.7"
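
A note on the reworked test loop: `tests/${TEST_NAME-*}/run.sh` honors a pre-set TEST_NAME, so one suite can be run in isolation instead of the whole directory. A minimal sketch, assuming the binaries under bin/ (br.test, rawkv, tidb-server, pd-server, tikv-server, minio, go-ycsb) are already built or fetched via tests/download_tools.sh:

```sh
# Run only the br_rawkv suite instead of every tests/*/run.sh
make build_for_integration_test
TEST_NAME=br_rawkv tests/run.sh
```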