diff --git a/BUILD.bazel b/BUILD.bazel
index 00f4ed67a67c6..71ab0b44b41e1 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -4,6 +4,7 @@ load("@bazel_gazelle//:def.bzl", "gazelle")
# gazelle:prefix github.com/pingcap/tidb
# gazelle:exclude tidb-binlog/proto/proto
# gazelle:exclude plugin/conn_ip_example
+# gazelle:exclude build/linter/staticcheck
gazelle(name = "gazelle")
package(default_visibility = ["//visibility:public"])
diff --git a/DEPS.bzl b/DEPS.bzl
index 32f321378fa33..b41d46497f96d 100644
--- a/DEPS.bzl
+++ b/DEPS.bzl
@@ -5,8 +5,9 @@ def go_deps():
name = "co_honnef_go_tools",
build_file_proto_mode = "disable_global",
importpath = "honnef.co/go/tools",
- sum = "h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=",
- version = "v0.0.1-2020.1.4",
+ replace = "honnef.co/go/tools",
+ sum = "h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34=",
+ version = "v0.3.2",
)
go_repository(
name = "com_github_ajg_form",
@@ -174,8 +175,8 @@ def go_deps():
name = "com_github_burntsushi_toml",
build_file_proto_mode = "disable_global",
importpath = "github.com/BurntSushi/toml",
- sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
- version = "v0.3.1",
+ sum = "h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=",
+ version = "v0.4.1",
)
go_repository(
name = "com_github_burntsushi_xgb",
@@ -219,6 +220,14 @@ def go_deps():
sum = "h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=",
version = "v2.1.2",
)
+ go_repository(
+ name = "com_github_charithe_durationcheck",
+ build_file_proto_mode = "disable",
+ importpath = "github.com/charithe/durationcheck",
+ sum = "h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk=",
+ version = "v0.0.9",
+ )
+
go_repository(
name = "com_github_cheggaaa_pb_v3",
build_file_proto_mode = "disable_global",
@@ -918,6 +927,14 @@ def go_deps():
sum = "h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=",
version = "v0.0.4",
)
+ go_repository(
+ name = "com_github_golangci_prealloc",
+ build_file_proto_mode = "disable",
+ importpath = "github.com/golangci/prealloc",
+ sum = "h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us=",
+ version = "v0.0.0-20180630174525-215b22d4de21",
+ )
+
go_repository(
name = "com_github_gomodule_redigo",
build_file_proto_mode = "disable_global",
@@ -2928,6 +2945,14 @@ def go_deps():
sum = "h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=",
version = "v0.0.0-20220426173459-3bcf042a4bf5",
)
+ go_repository(
+ name = "org_golang_x_exp_typeparams",
+ build_file_proto_mode = "disable",
+ importpath = "golang.org/x/exp/typeparams",
+ sum = "h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM=",
+ version = "v0.0.0-20220218215828-6cf2b201936e",
+ )
+
go_repository(
name = "org_golang_x_image",
build_file_proto_mode = "disable_global",
@@ -2953,8 +2978,8 @@ def go_deps():
name = "org_golang_x_mod",
build_file_proto_mode = "disable_global",
importpath = "golang.org/x/mod",
- sum = "h1:LQmS1nU0twXLA96Kt7U9qtHJEbBk3z6Q0V4UXjZkpr4=",
- version = "v0.6.0-dev.0.20211013180041-c96bc1413d57",
+ sum = "h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=",
+ version = "v0.6.0-dev.0.20220419223038-86c51ed26bb4",
)
go_repository(
name = "org_golang_x_net",
@@ -3009,8 +3034,8 @@ def go_deps():
name = "org_golang_x_tools",
build_file_proto_mode = "disable_global",
importpath = "golang.org/x/tools",
- sum = "h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w=",
- version = "v0.1.8",
+ sum = "h1:OKYpQQVE3DKSc3r3zHVzq46vq5YH7x8xpR3/k9ixmUg=",
+ version = "v0.1.11-0.20220513221640-090b14e8501f",
)
go_repository(
name = "org_golang_x_xerrors",
diff --git a/README.md b/README.md
index 11163a7d2edfd..ca96703510e40 100644
--- a/README.md
+++ b/README.md
@@ -60,7 +60,7 @@ The [community repository](https://github.com/pingcap/community) hosts all infor
[](https://github.com/pingcap/tidb-map/blob/master/maps/contribution-map.md#tidb-is-an-open-source-distributed-htap-database-compatible-with-the-mysql-protocol)
-Contributions are welcomed and greatly appreciated. See [Contribution to TiDB](https://pingcap.github.io/tidb-dev-guide/contribute-to-tidb/introduction.html) for details on typical contribution workflows. For more contributing information, click on the contributor icon above.
+Contributions are welcomed and greatly appreciated. All contributors are welcome to claim their rewards by filing this [form](https://forms.pingcap.com/f/tidb-contribution-swag). See [Contribution to TiDB](https://pingcap.github.io/tidb-dev-guide/contribute-to-tidb/introduction.html) for details on typical contribution workflows. For more contributing information, click on the contributor icon above.
## Adopters
diff --git a/bindinfo/bind_cache.go b/bindinfo/bind_cache.go
index f1f58f43dae4a..8ce69deedd840 100644
--- a/bindinfo/bind_cache.go
+++ b/bindinfo/bind_cache.go
@@ -153,6 +153,7 @@ func (c *bindCache) GetAllBindRecords() []*BindRecord {
c.lock.Lock()
defer c.lock.Unlock()
values := c.cache.Values()
+ //nolint: prealloc
var bindRecords []*BindRecord
for _, vals := range values {
bindRecords = append(bindRecords, vals.([]*BindRecord)...)
diff --git a/br/cmd/tidb-lightning-ctl/main.go b/br/cmd/tidb-lightning-ctl/main.go
index 77dd52541470c..08f0c080963f3 100644
--- a/br/cmd/tidb-lightning-ctl/main.go
+++ b/br/cmd/tidb-lightning-ctl/main.go
@@ -262,7 +262,7 @@ func checkpointDump(ctx context.Context, cfg *config.Config, dumpFolder string)
}
func getLocalStoringTables(ctx context.Context, cfg *config.Config) (err2 error) {
- //nolint:prealloc // This is a placeholder.
+ //nolint: prealloc
var tables []string
defer func() {
if err2 == nil {
diff --git a/br/pkg/lightning/BUILD.bazel b/br/pkg/lightning/BUILD.bazel
index 933ad8e1162d4..1cafc9ec5fbef 100644
--- a/br/pkg/lightning/BUILD.bazel
+++ b/br/pkg/lightning/BUILD.bazel
@@ -51,6 +51,7 @@ go_test(
"//br/pkg/lightning/checkpoints",
"//br/pkg/lightning/config",
"//br/pkg/lightning/glue",
+ "//br/pkg/lightning/log",
"//br/pkg/lightning/mydump",
"//br/pkg/lightning/web",
"@com_github_docker_go_units//:go-units",
diff --git a/br/pkg/lightning/backend/backend.go b/br/pkg/lightning/backend/backend.go
index 7de6da020c2a1..fcfbb60a5aec7 100644
--- a/br/pkg/lightning/backend/backend.go
+++ b/br/pkg/lightning/backend/backend.go
@@ -69,8 +69,8 @@ func makeTag(tableName string, engineID int32) string {
return fmt.Sprintf("%s:%d", tableName, engineID)
}
-func makeLogger(tag string, engineUUID uuid.UUID) log.Logger {
- return log.With(
+func makeLogger(logger log.Logger, tag string, engineUUID uuid.UUID) log.Logger {
+ return logger.With(
zap.String("engineTag", tag),
zap.Stringer("engineUUID", engineUUID),
)
@@ -143,7 +143,7 @@ type AbstractBackend interface {
ShouldPostProcess() bool
// NewEncoder creates an encoder of a TiDB table.
- NewEncoder(tbl table.Table, options *kv.SessionOptions) (kv.Encoder, error)
+ NewEncoder(ctx context.Context, tbl table.Table, options *kv.SessionOptions) (kv.Encoder, error)
OpenEngine(ctx context.Context, config *EngineConfig, engineUUID uuid.UUID) error
@@ -260,8 +260,8 @@ func (be Backend) MakeEmptyRows() kv.Rows {
return be.abstract.MakeEmptyRows()
}
-func (be Backend) NewEncoder(tbl table.Table, options *kv.SessionOptions) (kv.Encoder, error) {
- return be.abstract.NewEncoder(tbl, options)
+func (be Backend) NewEncoder(ctx context.Context, tbl table.Table, options *kv.SessionOptions) (kv.Encoder, error) {
+ return be.abstract.NewEncoder(ctx, tbl, options)
}
func (be Backend) ShouldPostProcess() bool {
@@ -321,7 +321,7 @@ func (be Backend) UnsafeImportAndReset(ctx context.Context, engineUUID uuid.UUID
closedEngine := ClosedEngine{
engine: engine{
backend: be.abstract,
- logger: makeLogger("", engineUUID),
+ logger: makeLogger(log.FromContext(ctx), "", engineUUID),
uuid: engineUUID,
},
}
@@ -334,7 +334,7 @@ func (be Backend) UnsafeImportAndReset(ctx context.Context, engineUUID uuid.UUID
// OpenEngine opens an engine with the given table name and engine ID.
func (be Backend) OpenEngine(ctx context.Context, config *EngineConfig, tableName string, engineID int32) (*OpenedEngine, error) {
tag, engineUUID := MakeUUID(tableName, engineID)
- logger := makeLogger(tag, engineUUID)
+ logger := makeLogger(log.FromContext(ctx), tag, engineUUID)
if err := be.abstract.OpenEngine(ctx, config, engineUUID); err != nil {
return nil, err
@@ -437,7 +437,7 @@ func (be Backend) UnsafeCloseEngine(ctx context.Context, cfg *EngineConfig, tabl
func (be Backend) UnsafeCloseEngineWithUUID(ctx context.Context, cfg *EngineConfig, tag string, engineUUID uuid.UUID) (*ClosedEngine, error) {
return engine{
backend: be.abstract,
- logger: makeLogger(tag, engineUUID),
+ logger: makeLogger(log.FromContext(ctx), tag, engineUUID),
uuid: engineUUID,
}.unsafeClose(ctx, cfg)
}
diff --git a/br/pkg/lightning/backend/backend_test.go b/br/pkg/lightning/backend/backend_test.go
index d388e74533833..73adc91deff9f 100644
--- a/br/pkg/lightning/backend/backend_test.go
+++ b/br/pkg/lightning/backend/backend_test.go
@@ -326,9 +326,9 @@ func TestNewEncoder(t *testing.T) {
encoder := mock.NewMockEncoder(s.controller)
options := &kv.SessionOptions{SQLMode: mysql.ModeANSIQuotes, Timestamp: 1234567890}
- s.mockBackend.EXPECT().NewEncoder(nil, options).Return(encoder, nil)
+ s.mockBackend.EXPECT().NewEncoder(nil, nil, options).Return(encoder, nil)
- realEncoder, err := s.mockBackend.NewEncoder(nil, options)
+ realEncoder, err := s.mockBackend.NewEncoder(nil, nil, options)
require.Equal(t, realEncoder, encoder)
require.NoError(t, err)
}
diff --git a/br/pkg/lightning/backend/kv/kv2sql.go b/br/pkg/lightning/backend/kv/kv2sql.go
index 47b9aa5393b2d..1a436bbe822ea 100644
--- a/br/pkg/lightning/backend/kv/kv2sql.go
+++ b/br/pkg/lightning/backend/kv/kv2sql.go
@@ -17,6 +17,7 @@ package kv
import (
"fmt"
+ "github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/table"
@@ -104,8 +105,13 @@ func (t *TableKVDecoder) IterRawIndexKeys(h kv.Handle, rawRow []byte, fn func([]
return nil
}
-func NewTableKVDecoder(tbl table.Table, tableName string, options *SessionOptions) (*TableKVDecoder, error) {
- se := newSession(options)
+func NewTableKVDecoder(
+ tbl table.Table,
+ tableName string,
+ options *SessionOptions,
+ logger log.Logger,
+) (*TableKVDecoder, error) {
+ se := newSession(options, logger)
cols := tbl.Cols()
// Set CommonAddRecordCtx to session to reuse the slices and BufStore in AddRecord
recordCtx := tables.NewCommonAddRecordCtx(len(cols))
diff --git a/br/pkg/lightning/backend/kv/session.go b/br/pkg/lightning/backend/kv/session.go
index 0e2751135c061..c5b623b31fd24 100644
--- a/br/pkg/lightning/backend/kv/session.go
+++ b/br/pkg/lightning/backend/kv/session.go
@@ -246,11 +246,11 @@ type SessionOptions struct {
}
// NewSession creates a new trimmed down Session matching the options.
-func NewSession(options *SessionOptions) sessionctx.Context {
- return newSession(options)
+func NewSession(options *SessionOptions, logger log.Logger) sessionctx.Context {
+ return newSession(options, logger)
}
-func newSession(options *SessionOptions) *session {
+func newSession(options *SessionOptions, logger log.Logger) *session {
sqlMode := options.SQLMode
vars := variable.NewSessionVars()
vars.SkipUTF8Check = true
@@ -265,7 +265,7 @@ func newSession(options *SessionOptions) *session {
if options.SysVars != nil {
for k, v := range options.SysVars {
if err := vars.SetSystemVar(k, v); err != nil {
- log.L().DPanic("new session: failed to set system var",
+ logger.DPanic("new session: failed to set system var",
log.ShortError(err),
zap.String("key", k))
}
@@ -273,7 +273,7 @@ func newSession(options *SessionOptions) *session {
}
vars.StmtCtx.TimeZone = vars.Location()
if err := vars.SetSystemVar("timestamp", strconv.FormatInt(options.Timestamp, 10)); err != nil {
- log.L().Warn("new session: failed to set timestamp",
+ logger.Warn("new session: failed to set timestamp",
log.ShortError(err))
}
vars.TxnCtx = nil
diff --git a/br/pkg/lightning/backend/kv/session_test.go b/br/pkg/lightning/backend/kv/session_test.go
index 9703390afb2ec..a37f48c190ed8 100644
--- a/br/pkg/lightning/backend/kv/session_test.go
+++ b/br/pkg/lightning/backend/kv/session_test.go
@@ -17,12 +17,13 @@ package kv
import (
"testing"
+ "github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/parser/mysql"
"github.com/stretchr/testify/require"
)
func TestSession(t *testing.T) {
- session := newSession(&SessionOptions{SQLMode: mysql.ModeNone, Timestamp: 1234567890})
+ session := newSession(&SessionOptions{SQLMode: mysql.ModeNone, Timestamp: 1234567890}, log.L())
_, err := session.Txn(true)
require.NoError(t, err)
}
diff --git a/br/pkg/lightning/backend/kv/sql2kv.go b/br/pkg/lightning/backend/kv/sql2kv.go
index 54b34979b980e..bd13f27e38954 100644
--- a/br/pkg/lightning/backend/kv/sql2kv.go
+++ b/br/pkg/lightning/backend/kv/sql2kv.go
@@ -66,13 +66,18 @@ type tableKVEncoder struct {
metrics *metric.Metrics
}
-func NewTableKVEncoder(tbl table.Table, options *SessionOptions, metrics *metric.Metrics) (Encoder, error) {
+func NewTableKVEncoder(
+ tbl table.Table,
+ options *SessionOptions,
+ metrics *metric.Metrics,
+ logger log.Logger,
+) (Encoder, error) {
if metrics != nil {
metrics.KvEncoderCounter.WithLabelValues("open").Inc()
}
meta := tbl.Meta()
cols := tbl.Cols()
- se := newSession(options)
+ se := newSession(options, logger)
// Set CommonAddRecordCtx to session to reuse the slices and BufStore in AddRecord
recordCtx := tables.NewCommonAddRecordCtx(len(cols))
tables.SetAddRecordCtx(se, recordCtx)
@@ -267,7 +272,7 @@ func logKVConvertFailed(logger log.Logger, row []types.Datum, j int, colInfo *mo
log.ShortError(err),
)
- log.L().Error("failed to covert kv value", logutil.RedactAny("origVal", original.GetValue()),
+ logger.Error("failed to convert kv value", logutil.RedactAny("origVal", original.GetValue()),
zap.Stringer("fieldType", &colInfo.FieldType), zap.String("column", colInfo.Name.O),
zap.Int("columnID", j+1))
return errors.Annotatef(
@@ -352,7 +357,7 @@ func (kvcodec *tableKVEncoder) Encode(
var value types.Datum
var err error
- //nolint:prealloc // This is a placeholder.
+ //nolint: prealloc
var record []types.Datum
if kvcodec.recordCache != nil {
diff --git a/br/pkg/lightning/backend/kv/sql2kv_test.go b/br/pkg/lightning/backend/kv/sql2kv_test.go
index b7a33e3c6943a..b604942e38756 100644
--- a/br/pkg/lightning/backend/kv/sql2kv_test.go
+++ b/br/pkg/lightning/backend/kv/sql2kv_test.go
@@ -89,7 +89,7 @@ func TestEncode(t *testing.T) {
strictMode, err := NewTableKVEncoder(tbl, &SessionOptions{
SQLMode: mysql.ModeStrictAllTables,
Timestamp: 1234567890,
- }, nil)
+ }, nil, logger)
require.NoError(t, err)
pairs, err := strictMode.Encode(logger, rows, 1, []int{0, 1}, "1.csv", 1234)
require.Regexp(t, "failed to cast value as tinyint\\(4\\) for column `c1` \\(#1\\):.*overflows tinyint", err)
@@ -121,7 +121,7 @@ func TestEncode(t *testing.T) {
mockMode, err := NewTableKVEncoder(mockTbl, &SessionOptions{
SQLMode: mysql.ModeStrictAllTables,
Timestamp: 1234567891,
- }, nil)
+ }, nil, logger)
require.NoError(t, err)
_, err = mockMode.Encode(logger, rowsWithPk2, 2, []int{0, 1}, "1.csv", 1234)
require.EqualError(t, err, "mock error")
@@ -131,7 +131,7 @@ func TestEncode(t *testing.T) {
SQLMode: mysql.ModeNone,
Timestamp: 1234567892,
SysVars: map[string]string{"tidb_row_format_version": "1"},
- }, nil)
+ }, nil, logger)
require.NoError(t, err)
pairs, err = noneMode.Encode(logger, rows, 1, []int{0, 1}, "1.csv", 1234)
require.NoError(t, err)
@@ -153,7 +153,7 @@ func TestDecode(t *testing.T) {
decoder, err := NewTableKVDecoder(tbl, "`test`.`c1`", &SessionOptions{
SQLMode: mysql.ModeStrictAllTables,
Timestamp: 1234567890,
- })
+ }, log.L())
require.NoError(t, err)
require.NotNil(t, decoder)
require.Equal(t, decoder.Name(), "`test`.`c1`")
@@ -208,7 +208,7 @@ func TestDecodeIndex(t *testing.T) {
strictMode, err := NewTableKVEncoder(tbl, &SessionOptions{
SQLMode: mysql.ModeStrictAllTables,
Timestamp: 1234567890,
- }, nil)
+ }, nil, log.L())
require.NoError(t, err)
pairs, err := strictMode.Encode(logger, rows, 1, []int{0, 1, -1}, "1.csv", 123)
data := pairs.(*KvPairs)
@@ -217,7 +217,7 @@ func TestDecodeIndex(t *testing.T) {
decoder, err := NewTableKVDecoder(tbl, "`test`.``", &SessionOptions{
SQLMode: mysql.ModeStrictAllTables,
Timestamp: 1234567890,
- })
+ }, log.L())
require.NoError(t, err)
h1, err := decoder.DecodeHandleFromRowKey(data.pairs[0].Key)
require.NoError(t, err)
@@ -247,7 +247,7 @@ func TestEncodeRowFormatV2(t *testing.T) {
SQLMode: mysql.ModeNone,
Timestamp: 1234567892,
SysVars: map[string]string{"tidb_row_format_version": "2"},
- }, nil)
+ }, nil, log.L())
require.NoError(t, err)
pairs, err := noneMode.Encode(logger, rows, 1, []int{0, 1}, "1.csv", 1234)
require.NoError(t, err)
@@ -295,7 +295,7 @@ func TestEncodeTimestamp(t *testing.T) {
"tidb_row_format_version": "1",
"time_zone": "+08:00",
},
- }, nil)
+ }, nil, log.L())
require.NoError(t, err)
pairs, err := encoder.Encode(logger, nil, 70, []int{-1, 1}, "1.csv", 1234)
require.NoError(t, err)
@@ -320,7 +320,7 @@ func TestEncodeDoubleAutoIncrement(t *testing.T) {
SysVars: map[string]string{
"tidb_row_format_version": "2",
},
- }, nil)
+ }, nil, log.L())
require.NoError(t, err)
strDatumForID := types.NewStringDatum("1")
@@ -386,7 +386,7 @@ func TestEncodeMissingAutoValue(t *testing.T) {
SysVars: map[string]string{
"tidb_row_format_version": "2",
},
- }, nil)
+ }, nil, log.L())
require.NoError(t, err)
realRowID := encoder.(*tableKVEncoder).autoIDFn(rowID)
@@ -447,7 +447,7 @@ func TestDefaultAutoRandoms(t *testing.T) {
Timestamp: 1234567893,
SysVars: map[string]string{"tidb_row_format_version": "2"},
AutoRandomSeed: 456,
- }, nil)
+ }, nil, log.L())
require.NoError(t, err)
logger := log.Logger{Logger: zap.NewNop()}
pairs, err := encoder.Encode(logger, []types.Datum{types.NewStringDatum("")}, 70, []int{-1, 0}, "1.csv", 1234)
@@ -482,7 +482,7 @@ func TestShardRowId(t *testing.T) {
Timestamp: 1234567893,
SysVars: map[string]string{"tidb_row_format_version": "2"},
AutoRandomSeed: 456,
- }, nil)
+ }, nil, log.L())
require.NoError(t, err)
logger := log.Logger{Logger: zap.NewNop()}
keyMap := make(map[int64]struct{}, 16)
@@ -636,7 +636,7 @@ func SetUpTest(b *testing.B) *benchSQL2KVSuite {
// Construct the corresponding KV encoder.
tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tableInfo)
require.NoError(b, err)
- encoder, err := NewTableKVEncoder(tbl, &SessionOptions{SysVars: map[string]string{"tidb_row_format_version": "2"}}, nil)
+ encoder, err := NewTableKVEncoder(tbl, &SessionOptions{SysVars: map[string]string{"tidb_row_format_version": "2"}}, nil, log.L())
require.NoError(b, err)
logger := log.Logger{Logger: zap.NewNop()}
diff --git a/br/pkg/lightning/backend/local/duplicate.go b/br/pkg/lightning/backend/local/duplicate.go
index 983ae33fcfd68..be446f81b9ba4 100644
--- a/br/pkg/lightning/backend/local/duplicate.go
+++ b/br/pkg/lightning/backend/local/duplicate.go
@@ -223,6 +223,7 @@ func tableHandleKeyRanges(tableInfo *model.TableInfo) ([]tidbkv.KeyRange, error)
// tableIndexKeyRanges returns all key ranges associated with the tableInfo and indexInfo.
func tableIndexKeyRanges(tableInfo *model.TableInfo, indexInfo *model.IndexInfo) ([]tidbkv.KeyRange, error) {
tableIDs := physicalTableIDs(tableInfo)
+ //nolint: prealloc
var keyRanges []tidbkv.KeyRange
for _, tid := range tableIDs {
partitionKeysRanges, err := distsql.IndexRangesToKVRanges(nil, tid, indexInfo.ID, ranger.FullRange(), nil)
@@ -411,12 +412,13 @@ func NewDuplicateManager(
sessOpts *kv.SessionOptions,
concurrency int,
hasDupe *atomic.Bool,
+ logger log.Logger,
) (*DuplicateManager, error) {
- decoder, err := kv.NewTableKVDecoder(tbl, tableName, sessOpts)
+ logger = logger.With(zap.String("tableName", tableName))
+ decoder, err := kv.NewTableKVDecoder(tbl, tableName, sessOpts, logger)
if err != nil {
return nil, errors.Trace(err)
}
- logger := log.With(zap.String("tableName", tableName))
return &DuplicateManager{
tbl: tbl,
tableName: tableName,
@@ -605,6 +607,7 @@ func (m *DuplicateManager) buildLocalDupTasks(dupDB *pebble.DB, keyAdapter KeyAd
if err != nil {
return nil, errors.Trace(err)
}
+ //nolint: prealloc
var newTasks []dupTask
for _, task := range tasks {
// FIXME: Do not hardcode sizeLimit and keysLimit.
@@ -705,10 +708,11 @@ func (m *DuplicateManager) processRemoteDupTaskOnce(
regionPool *utils.WorkerPool,
remainKeyRanges *pendingKeyRanges,
) (madeProgress bool, err error) {
- var (
- regions []*restore.RegionInfo
- keyRanges []tidbkv.KeyRange
- )
+ //nolint: prealloc
+ var regions []*restore.RegionInfo
+ //nolint: prealloc
+ var keyRanges []tidbkv.KeyRange
+
for _, kr := range remainKeyRanges.list() {
subRegions, subKeyRanges, err := m.splitKeyRangeByRegions(ctx, kr)
if err != nil {
diff --git a/br/pkg/lightning/backend/local/engine.go b/br/pkg/lightning/backend/local/engine.go
index f82334e0d58d2..04036e57b16ac 100644
--- a/br/pkg/lightning/backend/local/engine.go
+++ b/br/pkg/lightning/backend/local/engine.go
@@ -135,6 +135,8 @@ type Engine struct {
duplicateDetection bool
duplicateDB *pebble.DB
errorMgr *errormanager.ErrorManager
+
+ logger log.Logger
}
func (e *Engine) setError(err error) {
@@ -145,7 +147,7 @@ func (e *Engine) setError(err error) {
}
func (e *Engine) Close() error {
- log.L().Debug("closing local engine", zap.Stringer("engine", e.UUID), zap.Stack("stack"))
+ e.logger.Debug("closing local engine", zap.Stringer("engine", e.UUID), zap.Stack("stack"))
if e.db == nil {
return nil
}
@@ -774,7 +776,7 @@ func (e *Engine) ingestSSTs(metas []*sstMeta) error {
totalCount += m.totalCount
fileSize += m.fileSize
}
- log.L().Info("write data to local DB",
+ e.logger.Info("write data to local DB",
zap.Int64("size", totalSize),
zap.Int64("kvs", totalCount),
zap.Int("files", len(metas)),
@@ -861,7 +863,7 @@ func saveEngineMetaToDB(meta *engineMeta, db *pebble.DB) error {
// saveEngineMeta saves the metadata about the DB into the DB itself.
// This method should be followed by a Flush to ensure the data is actually synchronized
func (e *Engine) saveEngineMeta() error {
- log.L().Debug("save engine meta", zap.Stringer("uuid", e.UUID), zap.Int64("count", e.Length.Load()),
+ e.logger.Debug("save engine meta", zap.Stringer("uuid", e.UUID), zap.Int64("count", e.Length.Load()),
zap.Int64("size", e.TotalSize.Load()))
return errors.Trace(saveEngineMetaToDB(&e.engineMeta, e.db))
}
@@ -870,7 +872,7 @@ func (e *Engine) loadEngineMeta() error {
jsonBytes, closer, err := e.db.Get(engineMetaKey)
if err != nil {
if err == pebble.ErrNotFound {
- log.L().Debug("local db missing engine meta", zap.Stringer("uuid", e.UUID), log.ShortError(err))
+ e.logger.Debug("local db missing engine meta", zap.Stringer("uuid", e.UUID), log.ShortError(err))
return nil
}
return err
@@ -878,10 +880,10 @@ func (e *Engine) loadEngineMeta() error {
defer closer.Close()
if err = json.Unmarshal(jsonBytes, &e.engineMeta); err != nil {
- log.L().Warn("local db failed to deserialize meta", zap.Stringer("uuid", e.UUID), zap.ByteString("content", jsonBytes), zap.Error(err))
+ e.logger.Warn("local db failed to deserialize meta", zap.Stringer("uuid", e.UUID), zap.ByteString("content", jsonBytes), zap.Error(err))
return err
}
- log.L().Debug("load engine meta", zap.Stringer("uuid", e.UUID), zap.Int64("count", e.Length.Load()),
+ e.logger.Debug("load engine meta", zap.Stringer("uuid", e.UUID), zap.Int64("count", e.Length.Load()),
zap.Int64("size", e.TotalSize.Load()))
return nil
}
@@ -961,7 +963,7 @@ func (e *Engine) newKVIter(ctx context.Context, opts *pebble.IterOptions) Iter {
if !e.duplicateDetection {
return pebbleIter{Iterator: e.db.NewIter(opts)}
}
- logger := log.With(
+ logger := log.FromContext(ctx).With(
zap.String("table", common.UniqueTable(e.tableInfo.DB, e.tableInfo.Name)),
zap.Int64("tableID", e.tableInfo.ID),
zap.Stringer("engineUUID", e.UUID))
@@ -1247,7 +1249,7 @@ func (w *Writer) createSSTWriter() (*sstWriter, error) {
if err != nil {
return nil, err
}
- sw := &sstWriter{sstMeta: &sstMeta{path: path}, writer: writer}
+ sw := &sstWriter{sstMeta: &sstMeta{path: path}, writer: writer, logger: w.engine.logger}
return sw, nil
}
@@ -1256,6 +1258,7 @@ var errorUnorderedSSTInsertion = errors.New("inserting KVs into SST without orde
type sstWriter struct {
*sstMeta
writer *sstable.Writer
+ logger log.Logger
}
func newSSTWriter(path string) (*sstable.Writer, error) {
@@ -1289,7 +1292,7 @@ func (sw *sstWriter) writeKVs(kvs []common.KvPair) error {
var lastKey []byte
for _, p := range kvs {
if bytes.Equal(p.Key, lastKey) {
- log.L().Warn("duplicated key found, skip write", logutil.Key("key", p.Key))
+ sw.logger.Warn("duplicated key found, skip write", logutil.Key("key", p.Key))
continue
}
internalKey.UserKey = p.Key
@@ -1467,7 +1470,7 @@ func (i dbSSTIngester) mergeSSTs(metas []*sstMeta, dir string) (*sstMeta, error)
lastKey := make([]byte, 0)
for {
if bytes.Equal(lastKey, key) {
- log.L().Warn("duplicated key found, skipped", zap.Binary("key", lastKey))
+ i.e.logger.Warn("duplicated key found, skipped", zap.Binary("key", lastKey))
newMeta.totalCount--
newMeta.totalSize -= int64(len(key) + len(val))
@@ -1500,7 +1503,7 @@ func (i dbSSTIngester) mergeSSTs(metas []*sstMeta, dir string) (*sstMeta, error)
newMeta.fileSize = int64(meta.Size)
dur := time.Since(start)
- log.L().Info("compact sst", zap.Int("fileCount", len(metas)), zap.Int64("size", newMeta.totalSize),
+ i.e.logger.Info("compact sst", zap.Int("fileCount", len(metas)), zap.Int64("size", newMeta.totalSize),
zap.Int64("count", newMeta.totalCount), zap.Duration("cost", dur), zap.String("file", name))
// async clean raw SSTs.
@@ -1509,7 +1512,7 @@ func (i dbSSTIngester) mergeSSTs(metas []*sstMeta, dir string) (*sstMeta, error)
for _, m := range metas {
totalSize += m.fileSize
if err := os.Remove(m.path); err != nil {
- log.L().Warn("async cleanup sst file failed", zap.Error(err))
+ i.e.logger.Warn("async cleanup sst file failed", zap.Error(err))
}
}
// decrease the pending size after clean up
diff --git a/br/pkg/lightning/backend/local/engine_test.go b/br/pkg/lightning/backend/local/engine_test.go
index cb2dbd6fc3a97..13c890c028297 100644
--- a/br/pkg/lightning/backend/local/engine_test.go
+++ b/br/pkg/lightning/backend/local/engine_test.go
@@ -26,6 +26,7 @@ import (
"github.com/cockroachdb/pebble"
"github.com/cockroachdb/pebble/sstable"
"github.com/google/uuid"
+ "github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/stretchr/testify/require"
"github.com/pingcap/failpoint"
@@ -59,6 +60,7 @@ func TestIngestSSTWithClosedEngine(t *testing.T) {
cancel: cancel,
sstMetasChan: make(chan metaOrFlush, 64),
keyAdapter: noopKeyAdapter{},
+ logger: log.L(),
}
f.sstIngester = dbSSTIngester{e: f}
sstPath := path.Join(tmpPath, uuid.New().String()+".sst")
@@ -97,6 +99,7 @@ func TestAutoSplitSST(t *testing.T) {
engine: &Engine{
sstDir: dir,
keyAdapter: noopKeyAdapter{},
+ logger: log.L(),
},
isKVSorted: true,
isWriteBatchSorted: true,
diff --git a/br/pkg/lightning/backend/local/local.go b/br/pkg/lightning/backend/local/local.go
old mode 100755
new mode 100644
index 13773dc6d2ee4..76bdba192bd99
--- a/br/pkg/lightning/backend/local/local.go
+++ b/br/pkg/lightning/backend/local/local.go
@@ -235,6 +235,7 @@ type local struct {
bufferPool *membuf.Pool
metrics *metric.Metrics
writeLimiter StoreWriteLimiter
+ logger log.Logger
}
func openDuplicateDB(storeDir string) (*pebble.DB, error) {
@@ -342,6 +343,7 @@ func NewLocalBackend(
importClientFactory: importClientFactory,
bufferPool: membuf.NewPool(membuf.WithAllocator(manual.Allocator{})),
writeLimiter: writeLimiter,
+ logger: log.FromContext(ctx),
}
if m, ok := metric.FromContext(ctx); ok {
local.metrics = m
@@ -384,7 +386,7 @@ func (local *local) checkMultiIngestSupport(ctx context.Context) error {
client, err1 := local.getImportClient(ctx, s.Id)
if err1 != nil {
err = err1
- log.L().Warn("get import client failed", zap.Error(err), zap.String("store", s.Address))
+ log.FromContext(ctx).Warn("get import client failed", zap.Error(err), zap.String("store", s.Address))
continue
}
_, err = client.MultiIngest(ctx, &sst.MultiIngestRequest{})
@@ -393,12 +395,12 @@ func (local *local) checkMultiIngestSupport(ctx context.Context) error {
}
if st, ok := status.FromError(err); ok {
if st.Code() == codes.Unimplemented {
- log.L().Info("multi ingest not support", zap.Any("unsupported store", s))
+ log.FromContext(ctx).Info("multi ingest not support", zap.Any("unsupported store", s))
local.supportMultiIngest = false
return nil
}
}
- log.L().Warn("check multi ingest support failed", zap.Error(err), zap.String("store", s.Address),
+ log.FromContext(ctx).Warn("check multi ingest support failed", zap.Error(err), zap.String("store", s.Address),
zap.Int("retry", i))
}
if err != nil {
@@ -407,14 +409,14 @@ func (local *local) checkMultiIngestSupport(ctx context.Context) error {
if hasTiFlash {
return errors.Trace(err)
}
- log.L().Warn("check multi failed all retry, fallback to false", log.ShortError(err))
+ log.FromContext(ctx).Warn("check multi failed all retry, fallback to false", log.ShortError(err))
local.supportMultiIngest = false
return nil
}
}
local.supportMultiIngest = true
- log.L().Info("multi ingest support")
+ log.FromContext(ctx).Info("multi ingest support")
return nil
}
@@ -479,6 +481,7 @@ func (local *local) Close() {
engine.Close()
engine.unlock()
}
+
local.importClientFactory.Close()
local.bufferPool.Destroy()
@@ -488,22 +491,22 @@ func (local *local) Close() {
hasDuplicates := iter.First()
allIsWell := true
if err := iter.Error(); err != nil {
- log.L().Warn("iterate duplicate db failed", zap.Error(err))
+ local.logger.Warn("iterate duplicate db failed", zap.Error(err))
allIsWell = false
}
if err := iter.Close(); err != nil {
- log.L().Warn("close duplicate db iter failed", zap.Error(err))
+ local.logger.Warn("close duplicate db iter failed", zap.Error(err))
allIsWell = false
}
if err := local.duplicateDB.Close(); err != nil {
- log.L().Warn("close duplicate db failed", zap.Error(err))
+ local.logger.Warn("close duplicate db failed", zap.Error(err))
allIsWell = false
}
// If checkpoint is disabled, or we don't detect any duplicate, then this duplicate
// db dir will be useless, so we clean up this dir.
if allIsWell && (!local.checkpointEnabled || !hasDuplicates) {
if err := os.RemoveAll(filepath.Join(local.localStoreDir, duplicateDBName)); err != nil {
- log.L().Warn("remove duplicate db file failed", zap.Error(err))
+ local.logger.Warn("remove duplicate db file failed", zap.Error(err))
}
}
local.duplicateDB = nil
@@ -514,7 +517,7 @@ func (local *local) Close() {
if !local.checkpointEnabled || common.IsEmptyDir(local.localStoreDir) {
err := os.RemoveAll(local.localStoreDir)
if err != nil {
- log.L().Warn("remove local db file failed", zap.Error(err))
+ local.logger.Warn("remove local db file failed", zap.Error(err))
}
}
@@ -626,6 +629,7 @@ func (local *local) OpenEngine(ctx context.Context, cfg *backend.EngineConfig, e
duplicateDB: local.duplicateDB,
errorMgr: local.errorMgr,
keyAdapter: local.keyAdapter,
+ logger: log.FromContext(ctx),
})
engine := e.(*Engine)
engine.db = db
@@ -674,6 +678,7 @@ func (local *local) CloseEngine(ctx context.Context, cfg *backend.EngineConfig,
duplicateDetection: local.duplicateDetection,
duplicateDB: local.duplicateDB,
errorMgr: local.errorMgr,
+ logger: log.FromContext(ctx),
}
engine.sstIngester = dbSSTIngester{e: engine}
if err = engine.loadEngineMeta(); err != nil {
@@ -750,7 +755,7 @@ func (local *local) WriteToTiKV(
break
}
if e != nil {
- log.L().Error("failed to get StoreInfo from pd http api", zap.Error(e))
+ log.FromContext(ctx).Error("failed to get StoreInfo from pd http api", zap.Error(e))
}
}
}
@@ -767,7 +772,7 @@ func (local *local) WriteToTiKV(
return nil, Range{}, stats, errors.Annotate(iter.Error(), "failed to read the first key")
}
if !iter.Valid() {
- log.L().Info("keys within region is empty, skip ingest", logutil.Key("start", start),
+ log.FromContext(ctx).Info("keys within region is empty, skip ingest", logutil.Key("start", start),
logutil.Key("regionStart", region.Region.StartKey), logutil.Key("end", end),
logutil.Key("regionEnd", region.Region.EndKey))
return nil, regionRange, stats, nil
@@ -908,20 +913,20 @@ func (local *local) WriteToTiKV(
}
if leaderID == region.Region.Peers[i].GetId() {
leaderPeerMetas = resp.Metas
- log.L().Debug("get metas after write kv stream to tikv", zap.Reflect("metas", leaderPeerMetas))
+ log.FromContext(ctx).Debug("get metas after write kv stream to tikv", zap.Reflect("metas", leaderPeerMetas))
}
}
// if there is not leader currently, we should directly return an error
if len(leaderPeerMetas) == 0 {
- log.L().Warn("write to tikv no leader", logutil.Region(region.Region), logutil.Leader(region.Leader),
+ log.FromContext(ctx).Warn("write to tikv no leader", logutil.Region(region.Region), logutil.Leader(region.Leader),
zap.Uint64("leader_id", leaderID), logutil.SSTMeta(meta),
zap.Int64("kv_pairs", totalCount), zap.Int64("total_bytes", size))
return nil, Range{}, stats, errors.Errorf("write to tikv with no leader returned, region '%d', leader: %d",
region.Region.Id, leaderID)
}
- log.L().Debug("write to kv", zap.Reflect("region", region), zap.Uint64("leader", leaderID),
+ log.FromContext(ctx).Debug("write to kv", zap.Reflect("region", region), zap.Uint64("leader", leaderID),
zap.Reflect("meta", meta), zap.Reflect("return metas", leaderPeerMetas),
zap.Int64("kv_pairs", totalCount), zap.Int64("total_bytes", size),
zap.Int64("buf_size", bytesBuf.TotalSize()),
@@ -931,7 +936,7 @@ func (local *local) WriteToTiKV(
if iter.Valid() && iter.Next() {
firstKey := append([]byte{}, iter.Key()...)
finishedRange = Range{start: regionRange.start, end: firstKey}
- log.L().Info("write to tikv partial finish", zap.Int64("count", totalCount),
+ log.FromContext(ctx).Info("write to tikv partial finish", zap.Int64("count", totalCount),
zap.Int64("size", size), logutil.Key("startKey", regionRange.start), logutil.Key("endKey", regionRange.end),
logutil.Key("remainStart", firstKey), logutil.Key("remainEnd", regionRange.end),
logutil.Region(region.Region), logutil.Leader(region.Leader))
@@ -1047,7 +1052,7 @@ func (local *local) readAndSplitIntoRange(ctx context.Context, engine *Engine, r
return ranges, nil
}
- logger := log.With(zap.Stringer("engine", engine.UUID))
+ logger := log.FromContext(ctx).With(zap.Stringer("engine", engine.UUID))
sizeProps, err := getSizeProperties(logger, engine.db, local.keyAdapter)
if err != nil {
return nil, errors.Trace(err)
@@ -1084,7 +1089,7 @@ func (local *local) writeAndIngestByRange(
return errors.Annotate(iter.Error(), "failed to read the first key")
}
if !hasKey {
- log.L().Info("There is no pairs in iterator",
+ log.FromContext(ctxt).Info("There is no pairs in iterator",
logutil.Key("start", start),
logutil.Key("end", end))
engine.finishedRanges.add(Range{start: start, end: end})
@@ -1115,14 +1120,14 @@ WriteAndIngest:
endKey := codec.EncodeBytes([]byte{}, nextKey(pairEnd))
regions, err = split.PaginateScanRegion(ctx, local.splitCli, startKey, endKey, scanRegionLimit)
if err != nil || len(regions) == 0 {
- log.L().Warn("scan region failed", log.ShortError(err), zap.Int("region_len", len(regions)),
+ log.FromContext(ctx).Warn("scan region failed", log.ShortError(err), zap.Int("region_len", len(regions)),
logutil.Key("startKey", startKey), logutil.Key("endKey", endKey), zap.Int("retry", retry))
retry++
continue WriteAndIngest
}
for _, region := range regions {
- log.L().Debug("get region", zap.Int("retry", retry), zap.Binary("startKey", startKey),
+ log.FromContext(ctx).Debug("get region", zap.Int("retry", retry), zap.Binary("startKey", startKey),
zap.Binary("endKey", endKey), zap.Uint64("id", region.Region.GetId()),
zap.Stringer("epoch", region.Region.GetRegionEpoch()), zap.Binary("start", region.Region.GetStartKey()),
zap.Binary("end", region.Region.GetEndKey()), zap.Reflect("peers", region.Region.GetPeers()))
@@ -1141,7 +1146,7 @@ WriteAndIngest:
} else {
retry++
}
- log.L().Info("retry write and ingest kv pairs", logutil.Key("startKey", pairStart),
+ log.FromContext(ctx).Info("retry write and ingest kv pairs", logutil.Key("startKey", pairStart),
logutil.Key("endKey", end), log.ShortError(err), zap.Int("retry", retry))
continue WriteAndIngest
}
@@ -1182,7 +1187,7 @@ loopWrite:
return err
}
- log.L().Warn("write to tikv failed", log.ShortError(err), zap.Int("retry", i))
+ log.FromContext(ctx).Warn("write to tikv failed", log.ShortError(err), zap.Int("retry", i))
continue loopWrite
}
@@ -1201,7 +1206,7 @@ loopWrite:
ingestMetas := metas[start:end]
errCnt := 0
for errCnt < maxRetryTimes {
- log.L().Debug("ingest meta", zap.Reflect("meta", ingestMetas))
+ log.FromContext(ctx).Debug("ingest meta", zap.Reflect("meta", ingestMetas))
var resp *sst.IngestResponse
failpoint.Inject("FailIngestMeta", func(val failpoint.Value) {
// only inject the error once
@@ -1235,7 +1240,7 @@ loopWrite:
if common.IsContextCanceledError(err) {
return err
}
- log.L().Warn("ingest failed", log.ShortError(err), logutil.SSTMetas(ingestMetas),
+ log.FromContext(ctx).Warn("ingest failed", log.ShortError(err), logutil.SSTMetas(ingestMetas),
logutil.Region(region.Region), logutil.Leader(region.Leader))
errCnt++
continue
@@ -1253,7 +1258,7 @@ loopWrite:
}
switch retryTy {
case retryNone:
- log.L().Warn("ingest failed noretry", log.ShortError(err), logutil.SSTMetas(ingestMetas),
+ log.FromContext(ctx).Warn("ingest failed noretry", log.ShortError(err), logutil.SSTMetas(ingestMetas),
logutil.Region(region.Region), logutil.Leader(region.Leader))
// met non-retryable error retry whole Write procedure
return err
@@ -1268,7 +1273,7 @@ loopWrite:
}
if err != nil {
- log.L().Warn("write and ingest region, will retry import full range", log.ShortError(err),
+ log.FromContext(ctx).Warn("write and ingest region, will retry import full range", log.ShortError(err),
logutil.Region(region.Region), logutil.Key("start", start),
logutil.Key("end", end))
} else {
@@ -1288,10 +1293,10 @@ loopWrite:
func (local *local) writeAndIngestByRanges(ctx context.Context, engine *Engine, ranges []Range, regionSplitSize int64, regionSplitKeys int64) error {
if engine.Length.Load() == 0 {
// engine is empty, this is likes because it's a index engine but the table contains no index
- log.L().Info("engine contains no data", zap.Stringer("uuid", engine.UUID))
+ log.FromContext(ctx).Info("engine contains no data", zap.Stringer("uuid", engine.UUID))
return nil
}
- log.L().Debug("the ranges Length write to tikv", zap.Int("Length", len(ranges)))
+ log.FromContext(ctx).Debug("the ranges Length write to tikv", zap.Int("Length", len(ranges)))
var allErrLock sync.Mutex
var allErr error
@@ -1324,7 +1329,7 @@ func (local *local) writeAndIngestByRanges(ctx context.Context, engine *Engine,
if !common.IsRetryableError(err) {
break
}
- log.L().Warn("write and ingest by range failed",
+ log.FromContext(ctx).Warn("write and ingest by range failed",
zap.Int("retry time", i+1), log.ShortError(err))
backOffTime *= 2
if backOffTime > maxRetryBackoffTime {
@@ -1366,7 +1371,7 @@ func (local *local) ImportEngine(ctx context.Context, engineUUID uuid.UUID, regi
lfTotalSize := lf.TotalSize.Load()
lfLength := lf.Length.Load()
if lfTotalSize == 0 {
- log.L().Info("engine contains no kv, skip import", zap.Stringer("engine", engineUUID))
+ log.FromContext(ctx).Info("engine contains no kv, skip import", zap.Stringer("engine", engineUUID))
return nil
}
kvRegionSplitSize, kvRegionSplitKeys, err := getRegionSplitSizeKeys(ctx, local.pdCtl.GetPDClient(), local.tls)
@@ -1378,7 +1383,7 @@ func (local *local) ImportEngine(ctx context.Context, engineUUID uuid.UUID, regi
regionSplitKeys = kvRegionSplitKeys
}
} else {
- log.L().Warn("fail to get region split keys and size", zap.Error(err))
+ log.FromContext(ctx).Warn("fail to get region split keys and size", zap.Error(err))
}
// split sorted file into range by 96MB size per file
@@ -1387,14 +1392,14 @@ func (local *local) ImportEngine(ctx context.Context, engineUUID uuid.UUID, regi
return err
}
- log.L().Info("start import engine", zap.Stringer("uuid", engineUUID),
+ log.FromContext(ctx).Info("start import engine", zap.Stringer("uuid", engineUUID),
zap.Int("ranges", len(ranges)), zap.Int64("count", lfLength), zap.Int64("size", lfTotalSize))
for {
unfinishedRanges := lf.unfinishedRanges(ranges)
if len(unfinishedRanges) == 0 {
break
}
- log.L().Info("import engine unfinished ranges", zap.Int("count", len(unfinishedRanges)))
+ log.FromContext(ctx).Info("import engine unfinished ranges", zap.Int("count", len(unfinishedRanges)))
// if all the kv can fit in one region, skip split regions. TiDB will split one region for
// the table when table is created.
@@ -1406,37 +1411,37 @@ func (local *local) ImportEngine(ctx context.Context, engineUUID uuid.UUID, regi
break
}
- log.L().Warn("split and scatter failed in retry", zap.Stringer("uuid", engineUUID),
+ log.FromContext(ctx).Warn("split and scatter failed in retry", zap.Stringer("uuid", engineUUID),
log.ShortError(err), zap.Int("retry", i))
}
if err != nil {
- log.L().Error("split & scatter ranges failed", zap.Stringer("uuid", engineUUID), log.ShortError(err))
+ log.FromContext(ctx).Error("split & scatter ranges failed", zap.Stringer("uuid", engineUUID), log.ShortError(err))
return err
}
// start to write to kv and ingest
err = local.writeAndIngestByRanges(ctx, lf, unfinishedRanges, regionSplitSize, regionSplitKeys)
if err != nil {
- log.L().Error("write and ingest engine failed", log.ShortError(err))
+ log.FromContext(ctx).Error("write and ingest engine failed", log.ShortError(err))
return err
}
}
- log.L().Info("import engine success", zap.Stringer("uuid", engineUUID),
+ log.FromContext(ctx).Info("import engine success", zap.Stringer("uuid", engineUUID),
zap.Int64("size", lfTotalSize), zap.Int64("kvs", lfLength),
zap.Int64("importedSize", lf.importedKVSize.Load()), zap.Int64("importedCount", lf.importedKVCount.Load()))
return nil
}
func (local *local) CollectLocalDuplicateRows(ctx context.Context, tbl table.Table, tableName string, opts *kv.SessionOptions) (hasDupe bool, err error) {
- logger := log.With(zap.String("table", tableName)).Begin(zap.InfoLevel, "[detect-dupe] collect local duplicate keys")
+ logger := log.FromContext(ctx).With(zap.String("table", tableName)).Begin(zap.InfoLevel, "[detect-dupe] collect local duplicate keys")
defer func() {
logger.End(zap.ErrorLevel, err)
}()
atomicHasDupe := atomic.NewBool(false)
duplicateManager, err := NewDuplicateManager(tbl, tableName, local.splitCli, local.tikvCli,
- local.errorMgr, opts, local.dupeConcurrency, atomicHasDupe)
+ local.errorMgr, opts, local.dupeConcurrency, atomicHasDupe, log.FromContext(ctx))
if err != nil {
return false, errors.Trace(err)
}
@@ -1447,14 +1452,14 @@ func (local *local) CollectLocalDuplicateRows(ctx context.Context, tbl table.Tab
}
func (local *local) CollectRemoteDuplicateRows(ctx context.Context, tbl table.Table, tableName string, opts *kv.SessionOptions) (hasDupe bool, err error) {
- logger := log.With(zap.String("table", tableName)).Begin(zap.InfoLevel, "[detect-dupe] collect remote duplicate keys")
+ logger := log.FromContext(ctx).With(zap.String("table", tableName)).Begin(zap.InfoLevel, "[detect-dupe] collect remote duplicate keys")
defer func() {
logger.End(zap.ErrorLevel, err)
}()
atomicHasDupe := atomic.NewBool(false)
duplicateManager, err := NewDuplicateManager(tbl, tableName, local.splitCli, local.tikvCli,
- local.errorMgr, opts, local.dupeConcurrency, atomicHasDupe)
+ local.errorMgr, opts, local.dupeConcurrency, atomicHasDupe, log.FromContext(ctx))
if err != nil {
return false, errors.Trace(err)
}
@@ -1465,7 +1470,7 @@ func (local *local) CollectRemoteDuplicateRows(ctx context.Context, tbl table.Ta
}
func (local *local) ResolveDuplicateRows(ctx context.Context, tbl table.Table, tableName string, algorithm config.DuplicateResolutionAlgorithm) (err error) {
- logger := log.With(zap.String("table", tableName)).Begin(zap.InfoLevel, "[resolve-dupe] resolve duplicate rows")
+ logger := log.FromContext(ctx).With(zap.String("table", tableName)).Begin(zap.InfoLevel, "[resolve-dupe] resolve duplicate rows")
defer func() {
logger.End(zap.ErrorLevel, err)
}()
@@ -1483,7 +1488,7 @@ func (local *local) ResolveDuplicateRows(ctx context.Context, tbl table.Table, t
// TODO: reuse the *kv.SessionOptions from NewEncoder for picking the correct time zone.
decoder, err := kv.NewTableKVDecoder(tbl, tableName, &kv.SessionOptions{
SQLMode: mysql.ModeStrictAllTables,
- })
+ }, log.FromContext(ctx))
if err != nil {
return err
}
@@ -1565,7 +1570,7 @@ func (local *local) ResetEngine(ctx context.Context, engineUUID uuid.UUID) error
// the only way to reset the engine + reclaim the space is to delete and reopen it 🤷
localEngine := local.lockEngine(engineUUID, importMutexStateClose)
if localEngine == nil {
- log.L().Warn("could not find engine in cleanupEngine", zap.Stringer("uuid", engineUUID))
+ log.FromContext(ctx).Warn("could not find engine in cleanupEngine", zap.Stringer("uuid", engineUUID))
return nil
}
defer localEngine.unlock()
@@ -1598,7 +1603,7 @@ func (local *local) CleanupEngine(ctx context.Context, engineUUID uuid.UUID) err
localEngine := local.lockEngine(engineUUID, importMutexStateClose)
// release this engine after import success
if localEngine == nil {
- log.L().Warn("could not find engine in cleanupEngine", zap.Stringer("uuid", engineUUID))
+ log.FromContext(ctx).Warn("could not find engine in cleanupEngine", zap.Stringer("uuid", engineUUID))
return nil
}
defer localEngine.unlock()
@@ -1675,7 +1680,7 @@ func checkTiFlashVersion(ctx context.Context, g glue.Glue, checkCtx *backend.Che
return nil
}
- res, err := g.GetSQLExecutor().QueryStringsWithLog(ctx, tiFlashReplicaQuery, "fetch tiflash replica info", log.L())
+ res, err := g.GetSQLExecutor().QueryStringsWithLog(ctx, tiFlashReplicaQuery, "fetch tiflash replica info", log.FromContext(ctx))
if err != nil {
return errors.Annotate(err, "fetch tiflash replica info failed")
}
@@ -1714,8 +1719,8 @@ func (local *local) MakeEmptyRows() kv.Rows {
return kv.MakeRowsFromKvPairs(nil)
}
-func (local *local) NewEncoder(tbl table.Table, options *kv.SessionOptions) (kv.Encoder, error) {
- return kv.NewTableKVEncoder(tbl, options, local.metrics)
+func (local *local) NewEncoder(ctx context.Context, tbl table.Table, options *kv.SessionOptions) (kv.Encoder, error) {
+ return kv.NewTableKVEncoder(tbl, options, local.metrics, log.FromContext(ctx))
}
func engineSSTDir(storeDir string, engineUUID uuid.UUID) string {
@@ -1767,7 +1772,7 @@ func (local *local) isIngestRetryable(
if newRegion != nil {
return newRegion, nil
}
- log.L().Warn("get region by key return nil, will retry", logutil.Region(region.Region), logutil.Leader(region.Leader),
+ log.FromContext(ctx).Warn("get region by key return nil, will retry", logutil.Region(region.Region), logutil.Leader(region.Leader),
zap.Int("retry", i))
select {
case <-ctx.Done():
@@ -1919,7 +1924,7 @@ func getRegionSplitSizeKeys(ctx context.Context, cli pd.Client, tls *common.TLS)
if err == nil {
return regionSplitSize, regionSplitKeys, nil
}
- log.L().Warn("get region split size and keys failed", zap.Error(err), zap.String("store", serverInfo.StatusAddr))
+ log.FromContext(ctx).Warn("get region split size and keys failed", zap.Error(err), zap.String("store", serverInfo.StatusAddr))
}
return 0, 0, errors.New("get region split size and keys failed")
}
diff --git a/br/pkg/lightning/backend/local/local_test.go b/br/pkg/lightning/backend/local/local_test.go
index 0711bfb1fc463..13112051e59d6 100644
--- a/br/pkg/lightning/backend/local/local_test.go
+++ b/br/pkg/lightning/backend/local/local_test.go
@@ -41,6 +41,7 @@ import (
"github.com/pingcap/tidb/br/pkg/lightning/backend"
"github.com/pingcap/tidb/br/pkg/lightning/backend/kv"
"github.com/pingcap/tidb/br/pkg/lightning/common"
+ "github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/br/pkg/lightning/mydump"
"github.com/pingcap/tidb/br/pkg/membuf"
"github.com/pingcap/tidb/br/pkg/mock"
@@ -331,6 +332,7 @@ func testLocalWriter(t *testing.T, needSort bool, partitialSort bool) {
cancel: cancel,
sstMetasChan: make(chan metaOrFlush, 64),
keyAdapter: noopKeyAdapter{},
+ logger: log.L(),
}
f.sstIngester = dbSSTIngester{e: f}
f.wg.Add(1)
@@ -438,6 +440,7 @@ func (c *mockSplitClient) GetRegion(ctx context.Context, key []byte) (*restore.R
func TestIsIngestRetryable(t *testing.T) {
local := &local{
splitCli: &mockSplitClient{},
+ logger: log.L(),
}
resp := &sst.IngestResponse{
@@ -567,6 +570,7 @@ func TestLocalIngestLoop(t *testing.T) {
CompactThreshold: 100,
CompactConcurrency: 4,
},
+ logger: log.L(),
}
f.sstIngester = testIngester{}
f.wg.Add(1)
@@ -784,6 +788,7 @@ func testMergeSSTs(t *testing.T, kvs [][]common.KvPair, meta *sstMeta) {
CompactThreshold: 100,
CompactConcurrency: 4,
},
+ logger: log.L(),
}
createSSTWriter := func() (*sstWriter, error) {
@@ -1182,6 +1187,7 @@ func TestMultiIngest(t *testing.T) {
return importCli
},
},
+ logger: log.L(),
}
err := local.checkMultiIngestSupport(context.Background())
if err != nil {
diff --git a/br/pkg/lightning/backend/local/localhelper.go b/br/pkg/lightning/backend/local/localhelper.go
index c4aaae30db37b..9839e3592d195 100644
--- a/br/pkg/lightning/backend/local/localhelper.go
+++ b/br/pkg/lightning/backend/local/localhelper.go
@@ -111,7 +111,7 @@ func (local *local) SplitAndScatterRegionByRanges(
waitTime := splitRegionBaseBackOffTime
skippedKeys := 0
for i := 0; i < splitRetryTimes; i++ {
- log.L().Info("split and scatter region",
+ log.FromContext(ctx).Info("split and scatter region",
logutil.Key("minKey", minKey),
logutil.Key("maxKey", maxKey),
zap.Int("retry", i),
@@ -130,15 +130,15 @@ func (local *local) SplitAndScatterRegionByRanges(
}
var regions []*split.RegionInfo
regions, err = split.PaginateScanRegion(ctx, local.splitCli, minKey, maxKey, 128)
- log.L().Info("paginate scan regions", zap.Int("count", len(regions)),
+ log.FromContext(ctx).Info("paginate scan regions", zap.Int("count", len(regions)),
logutil.Key("start", minKey), logutil.Key("end", maxKey))
if err != nil {
- log.L().Warn("paginate scan region failed", logutil.Key("minKey", minKey), logutil.Key("maxKey", maxKey),
+ log.FromContext(ctx).Warn("paginate scan region failed", logutil.Key("minKey", minKey), logutil.Key("maxKey", maxKey),
log.ShortError(err), zap.Int("retry", i))
continue
}
- log.L().Info("paginate scan region finished", logutil.Key("minKey", minKey), logutil.Key("maxKey", maxKey),
+ log.FromContext(ctx).Info("paginate scan region finished", logutil.Key("minKey", minKey), logutil.Key("maxKey", maxKey),
zap.Int("regions", len(regions)))
if !needSplit {
@@ -156,7 +156,7 @@ func (local *local) SplitAndScatterRegionByRanges(
return beforeEnd(startKey, regions[i].Region.EndKey)
})
if idx < 0 || idx >= len(regions) {
- log.L().Error("target region not found", logutil.Key("start_key", startKey),
+ log.FromContext(ctx).Error("target region not found", logutil.Key("start_key", startKey),
logutil.RegionBy("first_region", regions[0].Region),
logutil.RegionBy("last_region", regions[len(regions)-1].Region))
return errors.New("target region not found")
@@ -167,7 +167,7 @@ func (local *local) SplitAndScatterRegionByRanges(
}
ranges = needSplitRanges
if len(ranges) == 0 {
- log.L().Info("no ranges need to be split, skipped.")
+ log.FromContext(ctx).Info("no ranges need to be split, skipped.")
return nil
}
@@ -175,7 +175,7 @@ func (local *local) SplitAndScatterRegionByRanges(
if tableInfo != nil {
tableRegionStats, err = fetchTableRegionSizeStats(ctx, db, tableInfo.ID)
if err != nil {
- log.L().Warn("fetch table region size statistics failed",
+ log.FromContext(ctx).Warn("fetch table region size statistics failed",
zap.String("table", tableInfo.Name), zap.Error(err))
tableRegionStats = make(map[uint64]int64)
}
@@ -191,16 +191,16 @@ func (local *local) SplitAndScatterRegionByRanges(
firstKeyEnc := codec.EncodeBytes([]byte{}, retryKeys[0])
lastKeyEnc := codec.EncodeBytes([]byte{}, retryKeys[len(retryKeys)-1])
if bytes.Compare(firstKeyEnc, regions[0].Region.StartKey) < 0 || !beforeEnd(lastKeyEnc, regions[len(regions)-1].Region.EndKey) {
- log.L().Warn("no valid key for split region",
+ log.FromContext(ctx).Warn("no valid key for split region",
logutil.Key("firstKey", firstKeyEnc), logutil.Key("lastKey", lastKeyEnc),
logutil.Key("firstRegionStart", regions[0].Region.StartKey),
logutil.Key("lastRegionEnd", regions[len(regions)-1].Region.EndKey))
return errors.New("check split keys failed")
}
- splitKeyMap = getSplitKeys(retryKeys, regions)
+ splitKeyMap = getSplitKeys(retryKeys, regions, log.FromContext(ctx))
retryKeys = retryKeys[:0]
} else {
- splitKeyMap = getSplitKeysByRanges(ranges, regions)
+ splitKeyMap = getSplitKeysByRanges(ranges, regions, log.FromContext(ctx))
}
type splitInfo struct {
@@ -233,7 +233,7 @@ func (local *local) SplitAndScatterRegionByRanges(
splitRegionStart := codec.EncodeBytes([]byte{}, keys[startIdx])
splitRegionEnd := codec.EncodeBytes([]byte{}, keys[endIdx-1])
if bytes.Compare(splitRegionStart, splitRegion.Region.StartKey) < 0 || !beforeEnd(splitRegionEnd, splitRegion.Region.EndKey) {
- log.L().Fatal("no valid key in region",
+ log.FromContext(ctx).Fatal("no valid key in region",
logutil.Key("startKey", splitRegionStart), logutil.Key("endKey", splitRegionEnd),
logutil.Key("regionStart", splitRegion.Region.StartKey), logutil.Key("regionEnd", splitRegion.Region.EndKey),
logutil.Region(splitRegion.Region), logutil.Leader(splitRegion.Leader))
@@ -242,7 +242,7 @@ func (local *local) SplitAndScatterRegionByRanges(
if err1 != nil {
if strings.Contains(err1.Error(), "no valid key") {
for _, key := range keys {
- log.L().Warn("no valid key",
+ log.FromContext(ctx).Warn("no valid key",
logutil.Key("startKey", region.Region.StartKey),
logutil.Key("endKey", region.Region.EndKey),
logutil.Key("key", codec.EncodeBytes([]byte{}, key)))
@@ -252,7 +252,7 @@ func (local *local) SplitAndScatterRegionByRanges(
// do not retry on context.Canceled error
return err1
}
- log.L().Warn("split regions", log.ShortError(err1), zap.Int("retry time", i),
+ log.FromContext(ctx).Warn("split regions", log.ShortError(err1), zap.Int("retry time", i),
zap.Uint64("region_id", region.Region.Id))
syncLock.Lock()
@@ -262,7 +262,7 @@ func (local *local) SplitAndScatterRegionByRanges(
syncLock.Unlock()
break
} else {
- log.L().Info("batch split region", zap.Uint64("region_id", splitRegion.Region.Id),
+ log.FromContext(ctx).Info("batch split region", zap.Uint64("region_id", splitRegion.Region.Id),
zap.Int("keys", endIdx-startIdx), zap.Binary("firstKey", keys[startIdx]),
zap.Binary("end", keys[endIdx-1]))
sort.Slice(newRegions, func(i, j int) bool {
@@ -294,7 +294,7 @@ func (local *local) SplitAndScatterRegionByRanges(
// we can skip split it again.
regionSize, ok := tableRegionStats[regionID]
if !ok {
- log.L().Warn("region stats not found", zap.Uint64("region", regionID))
+ log.FromContext(ctx).Warn("region stats not found", zap.Uint64("region", regionID))
}
if len(keys) == 1 && regionSize < regionSplitSize {
skippedKeys++
@@ -341,11 +341,11 @@ func (local *local) SplitAndScatterRegionByRanges(
scatterCount++
}
if scatterCount == len(scatterRegions) {
- log.L().Info("waiting for scattering regions done",
+ log.FromContext(ctx).Info("waiting for scattering regions done",
zap.Int("skipped_keys", skippedKeys),
zap.Int("regions", len(scatterRegions)), zap.Duration("take", time.Since(startTime)))
} else {
- log.L().Info("waiting for scattering regions timeout",
+ log.FromContext(ctx).Info("waiting for scattering regions timeout",
zap.Int("skipped_keys", skippedKeys),
zap.Int("scatterCount", scatterCount),
zap.Int("regions", len(scatterRegions)),
@@ -357,7 +357,7 @@ func (local *local) SplitAndScatterRegionByRanges(
func fetchTableRegionSizeStats(ctx context.Context, db *sql.DB, tableID int64) (map[uint64]int64, error) {
exec := &common.SQLWithRetry{
DB: db,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
}
stats := make(map[uint64]int64)
@@ -406,7 +406,7 @@ func (local *local) BatchSplitRegions(ctx context.Context, region *split.RegionI
}
// the scatter operation likely fails because region replicate not finish yet
// pack them to one log to avoid printing a lot warn logs.
- log.L().Warn("scatter region failed", zap.Int("regionCount", len(newRegions)),
+ log.FromContext(ctx).Warn("scatter region failed", zap.Int("regionCount", len(newRegions)),
zap.Int("failedCount", len(retryRegions)), zap.Error(failedErr), zap.Int("retry", i))
scatterRegions = retryRegions
retryRegions = make([]*split.RegionInfo, 0)
@@ -433,7 +433,7 @@ func (local *local) waitForSplit(ctx context.Context, regionID uint64) {
for i := 0; i < split.SplitCheckMaxRetryTimes; i++ {
ok, err := local.hasRegion(ctx, regionID)
if err != nil {
- log.L().Info("wait for split failed", log.ShortError(err))
+ log.FromContext(ctx).Info("wait for split failed", log.ShortError(err))
return
}
if ok {
@@ -455,10 +455,10 @@ func (local *local) waitForScatterRegion(ctx context.Context, regionInfo *split.
}
if err != nil {
if !common.IsRetryableError(err) {
- log.L().Warn("wait for scatter region encountered non-retryable error", logutil.Region(regionInfo.Region), zap.Error(err))
+ log.FromContext(ctx).Warn("wait for scatter region encountered non-retryable error", logutil.Region(regionInfo.Region), zap.Error(err))
return
}
- log.L().Warn("wait for scatter region encountered error, will retry again", logutil.Region(regionInfo.Region), zap.Error(err))
+ log.FromContext(ctx).Warn("wait for scatter region encountered error, will retry again", logutil.Region(regionInfo.Region), zap.Error(err))
}
select {
case <-time.After(time.Second):
@@ -497,13 +497,13 @@ func (local *local) checkScatterRegionFinishedOrReScatter(ctx context.Context, r
case pdpb.OperatorStatus_SUCCESS:
return true, nil
default:
- log.L().Warn("scatter-region operator status is abnormal, will scatter region again",
+ log.FromContext(ctx).Warn("scatter-region operator status is abnormal, will scatter region again",
logutil.Region(regionInfo.Region), zap.Stringer("status", resp.GetStatus()))
return false, local.splitCli.ScatterRegion(ctx, regionInfo)
}
}
-func getSplitKeysByRanges(ranges []Range, regions []*split.RegionInfo) map[uint64][][]byte {
+func getSplitKeysByRanges(ranges []Range, regions []*split.RegionInfo, logger log.Logger) map[uint64][][]byte {
checkKeys := make([][]byte, 0)
var lastEnd []byte
for _, rg := range ranges {
@@ -513,19 +513,19 @@ func getSplitKeysByRanges(ranges []Range, regions []*split.RegionInfo) map[uint6
checkKeys = append(checkKeys, rg.end)
lastEnd = rg.end
}
- return getSplitKeys(checkKeys, regions)
+ return getSplitKeys(checkKeys, regions, logger)
}
-func getSplitKeys(checkKeys [][]byte, regions []*split.RegionInfo) map[uint64][][]byte {
+func getSplitKeys(checkKeys [][]byte, regions []*split.RegionInfo, logger log.Logger) map[uint64][][]byte {
splitKeyMap := make(map[uint64][][]byte)
for _, key := range checkKeys {
- if region := needSplit(key, regions); region != nil {
+ if region := needSplit(key, regions, logger); region != nil {
splitKeys, ok := splitKeyMap[region.Region.GetId()]
if !ok {
splitKeys = make([][]byte, 0, 1)
}
splitKeyMap[region.Region.GetId()] = append(splitKeys, key)
- log.L().Debug("get key for split region",
+ logger.Debug("get key for split region",
zap.Binary("key", key),
zap.Binary("startKey", region.Region.StartKey),
zap.Binary("endKey", region.Region.EndKey))
@@ -535,7 +535,7 @@ func getSplitKeys(checkKeys [][]byte, regions []*split.RegionInfo) map[uint64][]
}
// needSplit checks whether a key needs to be split; if so, it returns the region to split
-func needSplit(key []byte, regions []*split.RegionInfo) *split.RegionInfo {
+func needSplit(key []byte, regions []*split.RegionInfo, logger log.Logger) *split.RegionInfo {
// If splitKey is the max key.
if len(key) == 0 {
return nil
@@ -548,7 +548,7 @@ func needSplit(key []byte, regions []*split.RegionInfo) *split.RegionInfo {
if idx < len(regions) {
// If splitKey is in a region
if bytes.Compare(splitKey, regions[idx].Region.GetStartKey()) > 0 && beforeEnd(splitKey, regions[idx].Region.GetEndKey()) {
- log.L().Debug("need split",
+ logger.Debug("need split",
zap.Binary("splitKey", key),
zap.Binary("encodedKey", splitKey),
zap.Binary("region start", regions[idx].Region.GetStartKey()),
diff --git a/br/pkg/lightning/backend/local/localhelper_test.go b/br/pkg/lightning/backend/local/localhelper_test.go
index 767829e9c857f..8d3d367443ac8 100644
--- a/br/pkg/lightning/backend/local/localhelper_test.go
+++ b/br/pkg/lightning/backend/local/localhelper_test.go
@@ -29,6 +29,7 @@ import (
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
"github.com/pingcap/tidb/br/pkg/lightning/glue"
+ "github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/br/pkg/restore"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/mysql"
@@ -418,6 +419,7 @@ func doTestBatchSplitRegionByRanges(ctx context.Context, t *testing.T, hook clie
local := &local{
splitCli: client,
g: glue.NewExternalTiDBGlue(nil, mysql.ModeNone),
+ logger: log.L(),
}
// current region ranges: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, )
@@ -586,6 +588,7 @@ func TestSplitAndScatterRegionInBatches(t *testing.T) {
local := &local{
splitCli: client,
g: glue.NewExternalTiDBGlue(nil, mysql.ModeNone),
+ logger: log.L(),
}
ctx, cancel := context.WithCancel(context.Background())
@@ -672,6 +675,7 @@ func doTestBatchSplitByRangesWithClusteredIndex(t *testing.T, hook clientHook) {
local := &local{
splitCli: client,
g: glue.NewExternalTiDBGlue(nil, mysql.ModeNone),
+ logger: log.L(),
}
ctx := context.Background()
@@ -762,7 +766,7 @@ func TestNeedSplit(t *testing.T) {
for hdl, idx := range checkMap {
checkKey := tablecodec.EncodeRowKeyWithHandle(tableID, kv.IntHandle(hdl))
- res := needSplit(checkKey, regions)
+ res := needSplit(checkKey, regions, log.L())
if idx < 0 {
require.Nil(t, res)
} else {
diff --git a/br/pkg/lightning/backend/noop/noop.go b/br/pkg/lightning/backend/noop/noop.go
index 2ac3e2b346dbb..93332e41b7074 100644
--- a/br/pkg/lightning/backend/noop/noop.go
+++ b/br/pkg/lightning/backend/noop/noop.go
@@ -67,7 +67,7 @@ func (b noopBackend) ShouldPostProcess() bool {
}
// NewEncoder creates an encoder of a TiDB table.
-func (b noopBackend) NewEncoder(tbl table.Table, options *kv.SessionOptions) (kv.Encoder, error) {
+func (b noopBackend) NewEncoder(ctx context.Context, tbl table.Table, options *kv.SessionOptions) (kv.Encoder, error) {
return noopEncoder{}, nil
}
diff --git a/br/pkg/lightning/backend/tidb/tidb.go b/br/pkg/lightning/backend/tidb/tidb.go
index 9ae4564c68a37..1a9d100d39bd5 100644
--- a/br/pkg/lightning/backend/tidb/tidb.go
+++ b/br/pkg/lightning/backend/tidb/tidb.go
@@ -100,11 +100,11 @@ type tidbBackend struct {
//
// The backend does not take ownership of `db`. Caller should close `db`
// manually after the backend expires.
-func NewTiDBBackend(db *sql.DB, onDuplicate string, errorMgr *errormanager.ErrorManager) backend.Backend {
+func NewTiDBBackend(ctx context.Context, db *sql.DB, onDuplicate string, errorMgr *errormanager.ErrorManager) backend.Backend {
switch onDuplicate {
case config.ReplaceOnDup, config.IgnoreOnDup, config.ErrorOnDup:
default:
- log.L().Warn("unsupported action on duplicate, overwrite with `replace`")
+ log.FromContext(ctx).Warn("unsupported action on duplicate, overwrite with `replace`")
onDuplicate = config.ReplaceOnDup
}
return backend.MakeBackend(&tidbBackend{db: db, onDuplicate: onDuplicate, errorMgr: errorMgr})
@@ -354,12 +354,12 @@ func (enc *tidbEncoder) Encode(logger log.Logger, row []types.Datum, _ int64, co
}
// EncodeRowForRecord encodes a row to a string compatible with INSERT statements.
-func EncodeRowForRecord(encTable table.Table, sqlMode mysql.SQLMode, row []types.Datum, columnPermutation []int) string {
+func EncodeRowForRecord(ctx context.Context, encTable table.Table, sqlMode mysql.SQLMode, row []types.Datum, columnPermutation []int) string {
enc := tidbEncoder{
tbl: encTable,
mode: sqlMode,
}
- resRow, err := enc.Encode(log.L(), row, 0, columnPermutation, "", 0)
+ resRow, err := enc.Encode(log.FromContext(ctx), row, 0, columnPermutation, "", 0)
if err != nil {
// if encoding can't succeed, fall back to recording the raw input strings
// ignore the error since it can only happen if the datum type is unknown, which can't happen here.
@@ -394,12 +394,12 @@ func (be *tidbBackend) ShouldPostProcess() bool {
}
func (be *tidbBackend) CheckRequirements(ctx context.Context, _ *backend.CheckCtx) error {
- log.L().Info("skipping check requirements for tidb backend")
+ log.FromContext(ctx).Info("skipping check requirements for tidb backend")
return nil
}
-func (be *tidbBackend) NewEncoder(tbl table.Table, options *kv.SessionOptions) (kv.Encoder, error) {
- se := kv.NewSession(options)
+func (be *tidbBackend) NewEncoder(ctx context.Context, tbl table.Table, options *kv.SessionOptions) (kv.Encoder, error) {
+ se := kv.NewSession(options, log.FromContext(ctx))
if options.SQLMode.HasStrictMode() {
se.GetSessionVars().SkipUTF8Check = false
se.GetSessionVars().SkipASCIICheck = false
@@ -554,7 +554,7 @@ func (be *tidbBackend) execStmts(ctx context.Context, stmtTasks []stmtTask, tabl
_, err := be.db.ExecContext(ctx, stmt)
if err != nil {
if !common.IsContextCanceledError(err) {
- log.L().Error("execute statement failed",
+ log.FromContext(ctx).Error("execute statement failed",
zap.Array("rows", stmtTask.rows), zap.String("stmt", redact.String(stmt)), zap.Error(err))
}
// It's batch mode, just return the error.
@@ -566,7 +566,7 @@ func (be *tidbBackend) execStmts(ctx context.Context, stmtTasks []stmtTask, tabl
continue
}
firstRow := stmtTask.rows[0]
- err = be.errorMgr.RecordTypeError(ctx, log.L(), tableName, firstRow.path, firstRow.offset, firstRow.insertStmt, err)
+ err = be.errorMgr.RecordTypeError(ctx, log.FromContext(ctx), tableName, firstRow.path, firstRow.offset, firstRow.insertStmt, err)
if err == nil {
// max-error not yet reached (error consumed by errorMgr), proceed to next stmtTask.
break
@@ -587,7 +587,7 @@ func (be *tidbBackend) execStmts(ctx context.Context, stmtTasks []stmtTask, tabl
func (be *tidbBackend) FetchRemoteTableModels(ctx context.Context, schemaName string) (tables []*model.TableInfo, err error) {
s := common.SQLWithRetry{
DB: be.db,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
}
err = s.Transact(ctx, "fetch table columns", func(c context.Context, tx *sql.Tx) error {
diff --git a/br/pkg/lightning/backend/tidb/tidb_test.go b/br/pkg/lightning/backend/tidb/tidb_test.go
index b7de642a26746..097c51dfdfe5f 100644
--- a/br/pkg/lightning/backend/tidb/tidb_test.go
+++ b/br/pkg/lightning/backend/tidb/tidb_test.go
@@ -63,7 +63,7 @@ func createMysqlSuite(t *testing.T) *mysqlSuite {
tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: model.StatePublic}
tbl, err := tables.TableFromMeta(kv.NewPanickingAllocators(0), tblInfo)
require.NoError(t, err)
- backend := tidb.NewTiDBBackend(db, config.ReplaceOnDup, errormanager.New(nil, config.NewConfig()))
+ backend := tidb.NewTiDBBackend(context.Background(), db, config.ReplaceOnDup, errormanager.New(nil, config.NewConfig(), log.L()))
return &mysqlSuite{dbHandle: db, mockDB: mock, backend: backend, tbl: tbl}
}
@@ -99,7 +99,7 @@ func TestWriteRowsReplaceOnDup(t *testing.T) {
// skip column a,c due to ignore-columns
perms[0] = -1
perms[2] = -1
- encoder, err := s.backend.NewEncoder(s.tbl, &kv.SessionOptions{SQLMode: 0, Timestamp: 1234567890})
+ encoder, err := s.backend.NewEncoder(context.Background(), s.tbl, &kv.SessionOptions{SQLMode: 0, Timestamp: 1234567890})
require.NoError(t, err)
row, err := encoder.Encode(logger, []types.Datum{
types.NewUintDatum(18446744073709551615),
@@ -140,7 +140,7 @@ func TestWriteRowsIgnoreOnDup(t *testing.T) {
ctx := context.Background()
logger := log.L()
- ignoreBackend := tidb.NewTiDBBackend(s.dbHandle, config.IgnoreOnDup, errormanager.New(nil, config.NewConfig()))
+ ignoreBackend := tidb.NewTiDBBackend(ctx, s.dbHandle, config.IgnoreOnDup, errormanager.New(nil, config.NewConfig(), logger))
engine, err := ignoreBackend.OpenEngine(ctx, &backend.EngineConfig{}, "`foo`.`bar`", 1)
require.NoError(t, err)
@@ -149,7 +149,7 @@ func TestWriteRowsIgnoreOnDup(t *testing.T) {
indexRows := ignoreBackend.MakeEmptyRows()
indexChecksum := verification.MakeKVChecksum(0, 0, 0)
- encoder, err := ignoreBackend.NewEncoder(s.tbl, &kv.SessionOptions{})
+ encoder, err := ignoreBackend.NewEncoder(ctx, s.tbl, &kv.SessionOptions{})
require.NoError(t, err)
row, err := encoder.Encode(logger, []types.Datum{
types.NewIntDatum(1),
@@ -165,7 +165,7 @@ func TestWriteRowsIgnoreOnDup(t *testing.T) {
require.NoError(t, err)
// test encode rows with _tidb_rowid
- encoder, err = ignoreBackend.NewEncoder(s.tbl, &kv.SessionOptions{})
+ encoder, err = ignoreBackend.NewEncoder(ctx, s.tbl, &kv.SessionOptions{})
require.NoError(t, err)
rowWithID, err := encoder.Encode(logger, []types.Datum{
types.NewIntDatum(1),
@@ -186,7 +186,7 @@ func TestWriteRowsErrorOnDup(t *testing.T) {
ctx := context.Background()
logger := log.L()
- ignoreBackend := tidb.NewTiDBBackend(s.dbHandle, config.ErrorOnDup, errormanager.New(nil, config.NewConfig()))
+ ignoreBackend := tidb.NewTiDBBackend(ctx, s.dbHandle, config.ErrorOnDup, errormanager.New(nil, config.NewConfig(), logger))
engine, err := ignoreBackend.OpenEngine(ctx, &backend.EngineConfig{}, "`foo`.`bar`", 1)
require.NoError(t, err)
@@ -195,7 +195,7 @@ func TestWriteRowsErrorOnDup(t *testing.T) {
indexRows := ignoreBackend.MakeEmptyRows()
indexChecksum := verification.MakeKVChecksum(0, 0, 0)
- encoder, err := ignoreBackend.NewEncoder(s.tbl, &kv.SessionOptions{})
+ encoder, err := ignoreBackend.NewEncoder(ctx, s.tbl, &kv.SessionOptions{})
require.NoError(t, err)
row, err := encoder.Encode(logger, []types.Datum{
types.NewIntDatum(1),
@@ -228,8 +228,10 @@ func testStrictMode(t *testing.T) {
tbl, err := tables.TableFromMeta(kv.NewPanickingAllocators(0), tblInfo)
require.NoError(t, err)
- bk := tidb.NewTiDBBackend(s.dbHandle, config.ErrorOnDup, errormanager.New(nil, config.NewConfig()))
- encoder, err := bk.NewEncoder(tbl, &kv.SessionOptions{SQLMode: mysql.ModeStrictAllTables})
+ ctx := context.Background()
+
+ bk := tidb.NewTiDBBackend(ctx, s.dbHandle, config.ErrorOnDup, errormanager.New(nil, config.NewConfig(), log.L()))
+ encoder, err := bk.NewEncoder(ctx, tbl, &kv.SessionOptions{SQLMode: mysql.ModeStrictAllTables})
require.NoError(t, err)
logger := log.L()
@@ -245,7 +247,7 @@ func testStrictMode(t *testing.T) {
require.Regexp(t, `incorrect utf8 value .* for column s0$`, err.Error())
// open a new encoder because the column count changed.
- encoder, err = bk.NewEncoder(tbl, &kv.SessionOptions{SQLMode: mysql.ModeStrictAllTables})
+ encoder, err = bk.NewEncoder(ctx, tbl, &kv.SessionOptions{SQLMode: mysql.ModeStrictAllTables})
require.NoError(t, err)
_, err = encoder.Encode(logger, []types.Datum{
types.NewStringDatum(""),
@@ -267,7 +269,7 @@ func TestFetchRemoteTableModels_3_x(t *testing.T) {
AddRow("t", "id", "int(10)", "", "auto_increment"))
s.mockDB.ExpectCommit()
- bk := tidb.NewTiDBBackend(s.dbHandle, config.ErrorOnDup, errormanager.New(nil, config.NewConfig()))
+ bk := tidb.NewTiDBBackend(context.Background(), s.dbHandle, config.ErrorOnDup, errormanager.New(nil, config.NewConfig(), log.L()))
tableInfos, err := bk.FetchRemoteTableModels(context.Background(), "test")
require.NoError(t, err)
ft := types.FieldType{}
@@ -304,7 +306,7 @@ func TestFetchRemoteTableModels_4_0(t *testing.T) {
AddRow("test", "t", "id", int64(1)))
s.mockDB.ExpectCommit()
- bk := tidb.NewTiDBBackend(s.dbHandle, config.ErrorOnDup, errormanager.New(nil, config.NewConfig()))
+ bk := tidb.NewTiDBBackend(context.Background(), s.dbHandle, config.ErrorOnDup, errormanager.New(nil, config.NewConfig(), log.L()))
tableInfos, err := bk.FetchRemoteTableModels(context.Background(), "test")
require.NoError(t, err)
ft := types.FieldType{}
@@ -341,7 +343,7 @@ func TestFetchRemoteTableModels_4_x_auto_increment(t *testing.T) {
AddRow("test", "t", "id", int64(1), "AUTO_INCREMENT"))
s.mockDB.ExpectCommit()
- bk := tidb.NewTiDBBackend(s.dbHandle, config.ErrorOnDup, errormanager.New(nil, config.NewConfig()))
+ bk := tidb.NewTiDBBackend(context.Background(), s.dbHandle, config.ErrorOnDup, errormanager.New(nil, config.NewConfig(), log.L()))
tableInfos, err := bk.FetchRemoteTableModels(context.Background(), "test")
require.NoError(t, err)
ft := types.FieldType{}
@@ -378,7 +380,7 @@ func TestFetchRemoteTableModels_4_x_auto_random(t *testing.T) {
AddRow("test", "t", "id", int64(1), "AUTO_RANDOM"))
s.mockDB.ExpectCommit()
- bk := tidb.NewTiDBBackend(s.dbHandle, config.ErrorOnDup, errormanager.New(nil, config.NewConfig()))
+ bk := tidb.NewTiDBBackend(context.Background(), s.dbHandle, config.ErrorOnDup, errormanager.New(nil, config.NewConfig(), log.L()))
tableInfos, err := bk.FetchRemoteTableModels(context.Background(), "test")
require.NoError(t, err)
ft := types.FieldType{}
@@ -413,8 +415,8 @@ func TestWriteRowsErrorNoRetry(t *testing.T) {
WillReturnError(nonRetryableError)
// disable error record, should not expect retry statements one by one.
- ignoreBackend := tidb.NewTiDBBackend(s.dbHandle, config.ErrorOnDup,
- errormanager.New(s.dbHandle, &config.Config{}),
+ ignoreBackend := tidb.NewTiDBBackend(context.Background(), s.dbHandle, config.ErrorOnDup,
+ errormanager.New(s.dbHandle, &config.Config{}, log.L()),
)
dataRows := encodeRowsTiDB(t, ignoreBackend, s.tbl)
ctx := context.Background()
@@ -473,7 +475,7 @@ func TestWriteRowsErrorDowngradingAll(t *testing.T) {
WillReturnResult(driver.ResultNoRows)
// disable error record, should not expect retry statements one by one.
- ignoreBackend := tidb.NewTiDBBackend(s.dbHandle, config.ErrorOnDup,
+ ignoreBackend := tidb.NewTiDBBackend(context.Background(), s.dbHandle, config.ErrorOnDup,
errormanager.New(s.dbHandle, &config.Config{
App: config.Lightning{
TaskInfoSchemaName: "tidb_lightning_errors",
@@ -481,7 +483,7 @@ func TestWriteRowsErrorDowngradingAll(t *testing.T) {
Type: *atomic.NewInt64(10),
},
},
- }),
+ }, log.L()),
)
dataRows := encodeRowsTiDB(t, ignoreBackend, s.tbl)
ctx := context.Background()
@@ -528,7 +530,7 @@ func TestWriteRowsErrorDowngradingExceedThreshold(t *testing.T) {
ExpectExec("\\QINSERT INTO `foo`.`bar`(`a`) VALUES(4)\\E").
WillReturnError(nonRetryableError)
- ignoreBackend := tidb.NewTiDBBackend(s.dbHandle, config.ErrorOnDup,
+ ignoreBackend := tidb.NewTiDBBackend(context.Background(), s.dbHandle, config.ErrorOnDup,
errormanager.New(s.dbHandle, &config.Config{
App: config.Lightning{
TaskInfoSchemaName: "tidb_lightning_errors",
@@ -536,7 +538,7 @@ func TestWriteRowsErrorDowngradingExceedThreshold(t *testing.T) {
Type: *atomic.NewInt64(3),
},
},
- }),
+ }, log.L()),
)
dataRows := encodeRowsTiDB(t, ignoreBackend, s.tbl)
ctx := context.Background()
@@ -558,7 +560,7 @@ func encodeRowsTiDB(t *testing.T, b backend.Backend, tbl table.Table) kv.Rows {
indexChecksum := verification.MakeKVChecksum(0, 0, 0)
logger := log.L()
- encoder, err := b.NewEncoder(tbl, &kv.SessionOptions{})
+ encoder, err := b.NewEncoder(context.Background(), tbl, &kv.SessionOptions{})
require.NoError(t, err)
row, err := encoder.Encode(logger, []types.Datum{
types.NewIntDatum(1),
@@ -609,7 +611,7 @@ func TestEncodeRowForRecord(t *testing.T) {
s := createMysqlSuite(t)
// for a correct row, this will encode a correct result
- row := tidb.EncodeRowForRecord(s.tbl, mysql.ModeStrictTransTables, []types.Datum{
+ row := tidb.EncodeRowForRecord(context.Background(), s.tbl, mysql.ModeStrictTransTables, []types.Datum{
types.NewIntDatum(5),
types.NewStringDatum("test test"),
types.NewBinaryLiteralDatum(types.NewBinaryLiteralFromUint(0xabcdef, 6)),
@@ -618,7 +620,7 @@ func TestEncodeRowForRecord(t *testing.T) {
// the following row will result in a column count mismatch error, therefore the encode
// result will fall back to a "," separated string list.
- row = tidb.EncodeRowForRecord(s.tbl, mysql.ModeStrictTransTables, []types.Datum{
+ row = tidb.EncodeRowForRecord(context.Background(), s.tbl, mysql.ModeStrictTransTables, []types.Datum{
types.NewIntDatum(5),
types.NewStringDatum("test test"),
types.NewBinaryLiteralDatum(types.NewBinaryLiteralFromUint(0xabcdef, 6)),
diff --git a/br/pkg/lightning/checkpoints/checkpoints.go b/br/pkg/lightning/checkpoints/checkpoints.go
index 36cefed180ae3..30ab72b0298f0 100644
--- a/br/pkg/lightning/checkpoints/checkpoints.go
+++ b/br/pkg/lightning/checkpoints/checkpoints.go
@@ -493,7 +493,7 @@ type DB interface {
// It assumes the entire table has not been imported before and will fill in
// default values for the column permutations and checksums.
InsertEngineCheckpoints(ctx context.Context, tableName string, checkpoints map[int32]*EngineCheckpoint) error
- Update(checkpointDiffs map[string]*TableCheckpointDiff) error
+ Update(taskCtx context.Context, checkpointDiffs map[string]*TableCheckpointDiff) error
RemoveCheckpoint(ctx context.Context, tableName string) error
// MoveCheckpoints renames the checkpoint schema to include a suffix
@@ -608,7 +608,7 @@ func (*NullCheckpointsDB) InsertEngineCheckpoints(_ context.Context, _ string, _
return nil
}
-func (*NullCheckpointsDB) Update(map[string]*TableCheckpointDiff) error {
+func (*NullCheckpointsDB) Update(context.Context, map[string]*TableCheckpointDiff) error {
return nil
}
@@ -621,7 +621,7 @@ func NewMySQLCheckpointsDB(ctx context.Context, db *sql.DB, schemaName string) (
schema := common.EscapeIdentifier(schemaName)
sql := common.SQLWithRetry{
DB: db,
- Logger: log.With(zap.String("schema", schemaName)),
+ Logger: log.FromContext(ctx).With(zap.String("schema", schemaName)),
HideQueryLog: true,
}
err := sql.Exec(ctx, "create checkpoints database", fmt.Sprintf(CreateDBTemplate, schema))
@@ -658,7 +658,7 @@ func NewMySQLCheckpointsDB(ctx context.Context, db *sql.DB, schemaName string) (
func (cpdb *MySQLCheckpointsDB) Initialize(ctx context.Context, cfg *config.Config, dbInfo map[string]*TidbDBInfo) error {
// We can have at most 65535 placeholders https://stackoverflow.com/q/4922345/
// Since this step is not performance critical, we just insert the rows one-by-one.
- s := common.SQLWithRetry{DB: cpdb.db, Logger: log.L()}
+ s := common.SQLWithRetry{DB: cpdb.db, Logger: log.FromContext(ctx)}
err := s.Transact(ctx, "insert checkpoints", func(c context.Context, tx *sql.Tx) error {
taskStmt, err := tx.PrepareContext(c, fmt.Sprintf(InitTaskTemplate, cpdb.schema, CheckpointTableNameTask))
if err != nil {
@@ -706,7 +706,7 @@ func (cpdb *MySQLCheckpointsDB) Initialize(ctx context.Context, cfg *config.Conf
func (cpdb *MySQLCheckpointsDB) TaskCheckpoint(ctx context.Context) (*TaskCheckpoint, error) {
s := common.SQLWithRetry{
DB: cpdb.db,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
}
taskQuery := fmt.Sprintf(ReadTaskTemplate, cpdb.schema, CheckpointTableNameTask)
@@ -735,7 +735,7 @@ func (cpdb *MySQLCheckpointsDB) Get(ctx context.Context, tableName string) (*Tab
s := common.SQLWithRetry{
DB: cpdb.db,
- Logger: log.With(zap.String("table", tableName)),
+ Logger: log.FromContext(ctx).With(zap.String("table", tableName)),
}
err := s.Transact(ctx, "read checkpoint", func(c context.Context, tx *sql.Tx) error {
// 1. Populate the engines.
@@ -824,7 +824,7 @@ func (cpdb *MySQLCheckpointsDB) Get(ctx context.Context, tableName string) (*Tab
func (cpdb *MySQLCheckpointsDB) InsertEngineCheckpoints(ctx context.Context, tableName string, checkpoints map[int32]*EngineCheckpoint) error {
s := common.SQLWithRetry{
DB: cpdb.db,
- Logger: log.With(zap.String("table", tableName)),
+ Logger: log.FromContext(ctx).With(zap.String("table", tableName)),
}
err := s.Transact(ctx, "update engine checkpoints", func(c context.Context, tx *sql.Tx) error {
engineStmt, err := tx.PrepareContext(c, fmt.Sprintf(ReplaceEngineTemplate, cpdb.schema, CheckpointTableNameEngine))
@@ -870,15 +870,15 @@ func (cpdb *MySQLCheckpointsDB) InsertEngineCheckpoints(ctx context.Context, tab
return nil
}
-func (cpdb *MySQLCheckpointsDB) Update(checkpointDiffs map[string]*TableCheckpointDiff) error {
+func (cpdb *MySQLCheckpointsDB) Update(taskCtx context.Context, checkpointDiffs map[string]*TableCheckpointDiff) error {
chunkQuery := fmt.Sprintf(UpdateChunkTemplate, cpdb.schema, CheckpointTableNameChunk)
rebaseQuery := fmt.Sprintf(UpdateTableRebaseTemplate, cpdb.schema, CheckpointTableNameTable)
tableStatusQuery := fmt.Sprintf(UpdateTableStatusTemplate, cpdb.schema, CheckpointTableNameTable)
tableChecksumQuery := fmt.Sprintf(UpdateTableChecksumTemplate, cpdb.schema, CheckpointTableNameTable)
engineStatusQuery := fmt.Sprintf(UpdateEngineTemplate, cpdb.schema, CheckpointTableNameEngine)
- s := common.SQLWithRetry{DB: cpdb.db, Logger: log.L()}
- return s.Transact(context.Background(), "update checkpoints", func(c context.Context, tx *sql.Tx) error {
+ s := common.SQLWithRetry{DB: cpdb.db, Logger: log.FromContext(taskCtx)}
+ return s.Transact(taskCtx, "update checkpoints", func(c context.Context, tx *sql.Tx) error {
chunkStmt, e := tx.PrepareContext(c, chunkQuery)
if e != nil {
return errors.Trace(e)
@@ -981,7 +981,7 @@ func newFileCheckpointsDB(
return nil, errors.Trace(err)
}
if !exist {
- log.L().Info("open checkpoint file failed, going to create a new one",
+ log.FromContext(ctx).Info("open checkpoint file failed, going to create a new one",
zap.String("path", path),
log.ShortError(err),
)
@@ -993,7 +993,7 @@ func newFileCheckpointsDB(
}
err = cpdb.checkpoints.Unmarshal(content)
if err != nil {
- log.L().Error("checkpoint file is broken", zap.String("path", path), zap.Error(err))
+ log.FromContext(ctx).Error("checkpoint file is broken", zap.String("path", path), zap.Error(err))
}
// FIXME: an empty map may need to be initialized manually, because currently
// FIXME: a map of zero size -> marshal -> unmarshal -> becomes nil, see checkpoint_test.go
@@ -1253,7 +1253,7 @@ func (cpdb *FileCheckpointsDB) InsertEngineCheckpoints(_ context.Context, tableN
return errors.Trace(cpdb.save())
}
-func (cpdb *FileCheckpointsDB) Update(checkpointDiffs map[string]*TableCheckpointDiff) error {
+func (cpdb *FileCheckpointsDB) Update(_ context.Context, checkpointDiffs map[string]*TableCheckpointDiff) error {
cpdb.lock.Lock()
defer cpdb.lock.Unlock()
@@ -1330,7 +1330,7 @@ func (*NullCheckpointsDB) DumpChunks(context.Context, io.Writer) error {
func (cpdb *MySQLCheckpointsDB) RemoveCheckpoint(ctx context.Context, tableName string) error {
s := common.SQLWithRetry{
DB: cpdb.db,
- Logger: log.With(zap.String("table", tableName)),
+ Logger: log.FromContext(ctx).With(zap.String("table", tableName)),
}
if tableName == allTables {
@@ -1362,7 +1362,7 @@ func (cpdb *MySQLCheckpointsDB) MoveCheckpoints(ctx context.Context, taskID int6
newSchema := fmt.Sprintf("`%s.%d.bak`", cpdb.schema[1:len(cpdb.schema)-1], taskID)
s := common.SQLWithRetry{
DB: cpdb.db,
- Logger: log.With(zap.Int64("taskID", taskID)),
+ Logger: log.FromContext(ctx).With(zap.Int64("taskID", taskID)),
}
createSchemaQuery := "CREATE SCHEMA IF NOT EXISTS " + newSchema
@@ -1402,7 +1402,7 @@ func (cpdb *MySQLCheckpointsDB) GetLocalStoringTables(ctx context.Context) (map[
CheckpointStatusMaxInvalid, CheckpointStatusIndexImported,
CheckpointStatusMaxInvalid, CheckpointStatusImported)
- err := common.Retry("get local storing tables", log.L(), func() error {
+ err := common.Retry("get local storing tables", log.FromContext(ctx), func() error {
targetTables = make(map[string][]int32)
rows, err := cpdb.db.QueryContext(ctx, query) // #nosec G201
if err != nil {
@@ -1453,7 +1453,7 @@ func (cpdb *MySQLCheckpointsDB) IgnoreErrorCheckpoint(ctx context.Context, table
s := common.SQLWithRetry{
DB: cpdb.db,
- Logger: log.With(zap.String("table", tableName)),
+ Logger: log.FromContext(ctx).With(zap.String("table", tableName)),
}
err := s.Transact(ctx, "ignore error checkpoints", func(c context.Context, tx *sql.Tx) error {
if _, e := tx.ExecContext(c, engineQuery, tableName); e != nil {
@@ -1510,7 +1510,7 @@ func (cpdb *MySQLCheckpointsDB) DestroyErrorCheckpoint(ctx context.Context, tabl
s := common.SQLWithRetry{
DB: cpdb.db,
- Logger: log.With(zap.String("table", tableName)),
+ Logger: log.FromContext(ctx).With(zap.String("table", tableName)),
}
err := s.Transact(ctx, "destroy error checkpoints", func(c context.Context, tx *sql.Tx) error {
// Obtain the list of tables
diff --git a/br/pkg/lightning/checkpoints/checkpoints_file_test.go b/br/pkg/lightning/checkpoints/checkpoints_file_test.go
index 5911b9952a1c2..824434cb78f82 100644
--- a/br/pkg/lightning/checkpoints/checkpoints_file_test.go
+++ b/br/pkg/lightning/checkpoints/checkpoints_file_test.go
@@ -119,7 +119,7 @@ func newFileCheckpointsDB(t *testing.T) (*checkpoints.FileCheckpointsDB, func())
}
ccm.MergeInto(cpd)
- cpdb.Update(map[string]*checkpoints.TableCheckpointDiff{"`db1`.`t2`": cpd})
+ cpdb.Update(ctx, map[string]*checkpoints.TableCheckpointDiff{"`db1`.`t2`": cpd})
return cpdb, func() {
err := cpdb.Close()
require.NoError(t, err)
@@ -135,7 +135,7 @@ func setInvalidStatus(cpdb *checkpoints.FileCheckpointsDB) {
scm.SetInvalid()
scm.MergeInto(cpd)
- cpdb.Update(map[string]*checkpoints.TableCheckpointDiff{
+ cpdb.Update(context.Background(), map[string]*checkpoints.TableCheckpointDiff{
"`db1`.`t2`": cpd,
"`db2`.`t3`": cpd,
})
diff --git a/br/pkg/lightning/checkpoints/checkpoints_sql_test.go b/br/pkg/lightning/checkpoints/checkpoints_sql_test.go
index 85a23f379fec9..a0f59ab771915 100644
--- a/br/pkg/lightning/checkpoints/checkpoints_sql_test.go
+++ b/br/pkg/lightning/checkpoints/checkpoints_sql_test.go
@@ -221,7 +221,7 @@ func TestNormalOperations(t *testing.T) {
s.mock.ExpectCommit()
s.mock.MatchExpectationsInOrder(false)
- cpdb.Update(map[string]*checkpoints.TableCheckpointDiff{"`db1`.`t2`": cpd})
+ cpdb.Update(ctx, map[string]*checkpoints.TableCheckpointDiff{"`db1`.`t2`": cpd})
s.mock.MatchExpectationsInOrder(true)
require.Nil(t, s.mock.ExpectationsWereMet())
diff --git a/br/pkg/lightning/checkpoints/glue_checkpoint.go b/br/pkg/lightning/checkpoints/glue_checkpoint.go
index 30b540426f2a7..b0f5278c7e89a 100644
--- a/br/pkg/lightning/checkpoints/glue_checkpoint.go
+++ b/br/pkg/lightning/checkpoints/glue_checkpoint.go
@@ -59,9 +59,9 @@ var _ DB = (*GlueCheckpointsDB)(nil)
// dropPreparedStmt drops the prepared statement and, when an error occurs,
// prints an error message.
-func dropPreparedStmt(session Session, stmtID uint32) {
+func dropPreparedStmt(ctx context.Context, session Session, stmtID uint32) {
if err := session.DropPreparedStmt(stmtID); err != nil {
- log.L().Error("failed to drop prepared statement", log.ShortError(err))
+ log.FromContext(ctx).Error("failed to drop prepared statement", log.ShortError(err))
}
}
@@ -69,7 +69,7 @@ func NewGlueCheckpointsDB(ctx context.Context, se Session, f func() (Session, er
var escapedSchemaName strings.Builder
common.WriteMySQLIdentifier(&escapedSchemaName, schemaName)
schema := escapedSchemaName.String()
- logger := log.With(zap.String("schema", schemaName))
+ logger := log.FromContext(ctx).With(zap.String("schema", schemaName))
sql := fmt.Sprintf(CreateDBTemplate, schema)
err := common.Retry("create checkpoints database", logger, func() error {
@@ -123,7 +123,7 @@ func NewGlueCheckpointsDB(ctx context.Context, se Session, f func() (Session, er
}
func (g GlueCheckpointsDB) Initialize(ctx context.Context, cfg *config.Config, dbInfo map[string]*TidbDBInfo) error {
- logger := log.L()
+ logger := log.FromContext(ctx)
se, err := g.getSessionFunc()
if err != nil {
return errors.Trace(err)
@@ -135,7 +135,7 @@ func (g GlueCheckpointsDB) Initialize(ctx context.Context, cfg *config.Config, d
if err != nil {
return errors.Trace(err)
}
- defer dropPreparedStmt(s, stmtID)
+ defer dropPreparedStmt(ctx, s, stmtID)
_, err = s.ExecutePreparedStmt(c, stmtID, []types.Datum{
types.NewIntDatum(cfg.TaskID),
types.NewStringDatum(cfg.Mydumper.SourceDir),
@@ -155,7 +155,7 @@ func (g GlueCheckpointsDB) Initialize(ctx context.Context, cfg *config.Config, d
if err != nil {
return errors.Trace(err)
}
- defer dropPreparedStmt(s, stmtID2)
+ defer dropPreparedStmt(ctx, s, stmtID2)
for _, db := range dbInfo {
for _, table := range db.Tables {
@@ -177,7 +177,7 @@ func (g GlueCheckpointsDB) Initialize(ctx context.Context, cfg *config.Config, d
}
func (g GlueCheckpointsDB) TaskCheckpoint(ctx context.Context) (*TaskCheckpoint, error) {
- logger := log.L()
+ logger := log.FromContext(ctx)
sql := fmt.Sprintf(ReadTaskTemplate, g.schema, CheckpointTableNameTask)
se, err := g.getSessionFunc()
if err != nil {
@@ -225,7 +225,7 @@ func (g GlueCheckpointsDB) Get(ctx context.Context, tableName string) (*TableChe
cp := &TableCheckpoint{
Engines: map[int32]*EngineCheckpoint{},
}
- logger := log.With(zap.String("table", tableName))
+ logger := log.FromContext(ctx).With(zap.String("table", tableName))
se, err := g.getSessionFunc()
if err != nil {
return nil, errors.Trace(err)
@@ -351,7 +351,7 @@ func (g GlueCheckpointsDB) Close() error {
}
func (g GlueCheckpointsDB) InsertEngineCheckpoints(ctx context.Context, tableName string, checkpointMap map[int32]*EngineCheckpoint) error {
- logger := log.With(zap.String("table", tableName))
+ logger := log.FromContext(ctx).With(zap.String("table", tableName))
se, err := g.getSessionFunc()
if err != nil {
return errors.Trace(err)
@@ -363,13 +363,13 @@ func (g GlueCheckpointsDB) InsertEngineCheckpoints(ctx context.Context, tableNam
if err != nil {
return errors.Trace(err)
}
- defer dropPreparedStmt(s, engineStmt)
+ defer dropPreparedStmt(ctx, s, engineStmt)
chunkStmt, _, _, err := s.PrepareStmt(fmt.Sprintf(ReplaceChunkTemplate, g.schema, CheckpointTableNameChunk))
if err != nil {
return errors.Trace(err)
}
- defer dropPreparedStmt(s, chunkStmt)
+ defer dropPreparedStmt(ctx, s, chunkStmt)
for engineID, engine := range checkpointMap {
_, err := s.ExecutePreparedStmt(c, engineStmt, []types.Datum{
@@ -411,11 +411,11 @@ func (g GlueCheckpointsDB) InsertEngineCheckpoints(ctx context.Context, tableNam
return errors.Trace(err)
}
-func (g GlueCheckpointsDB) Update(checkpointDiffs map[string]*TableCheckpointDiff) error {
- logger := log.L()
+func (g GlueCheckpointsDB) Update(ctx context.Context, checkpointDiffs map[string]*TableCheckpointDiff) error {
+ logger := log.FromContext(ctx)
se, err := g.getSessionFunc()
if err != nil {
- log.L().Error("can't get a session to update GlueCheckpointsDB", zap.Error(errors.Trace(err)))
+ log.FromContext(ctx).Error("can't get a session to update GlueCheckpointsDB", zap.Error(errors.Trace(err)))
return err
}
defer se.Close()
@@ -429,22 +429,22 @@ func (g GlueCheckpointsDB) Update(checkpointDiffs map[string]*TableCheckpointDif
if err != nil {
return errors.Trace(err)
}
- defer dropPreparedStmt(s, chunkStmt)
+ defer dropPreparedStmt(ctx, s, chunkStmt)
rebaseStmt, _, _, err := s.PrepareStmt(rebaseQuery)
if err != nil {
return errors.Trace(err)
}
- defer dropPreparedStmt(s, rebaseStmt)
+ defer dropPreparedStmt(ctx, s, rebaseStmt)
tableStatusStmt, _, _, err := s.PrepareStmt(tableStatusQuery)
if err != nil {
return errors.Trace(err)
}
- defer dropPreparedStmt(s, tableStatusStmt)
+ defer dropPreparedStmt(ctx, s, tableStatusStmt)
engineStatusStmt, _, _, err := s.PrepareStmt(engineStatusQuery)
if err != nil {
return errors.Trace(err)
}
- defer dropPreparedStmt(s, engineStatusStmt)
+ defer dropPreparedStmt(ctx, s, engineStatusStmt)
for tableName, cpd := range checkpointDiffs {
if cpd.hasStatus {
@@ -504,7 +504,7 @@ func (g GlueCheckpointsDB) Update(checkpointDiffs map[string]*TableCheckpointDif
}
func (g GlueCheckpointsDB) RemoveCheckpoint(ctx context.Context, tableName string) error {
- logger := log.With(zap.String("table", tableName))
+ logger := log.FromContext(ctx).With(zap.String("table", tableName))
se, err := g.getSessionFunc()
if err != nil {
return errors.Trace(err)
@@ -541,7 +541,7 @@ func (g GlueCheckpointsDB) RemoveCheckpoint(ctx context.Context, tableName strin
func (g GlueCheckpointsDB) MoveCheckpoints(ctx context.Context, taskID int64) error {
newSchema := fmt.Sprintf("`%s.%d.bak`", g.schema[1:len(g.schema)-1], taskID)
- logger := log.With(zap.Int64("taskID", taskID))
+ logger := log.FromContext(ctx).With(zap.Int64("taskID", taskID))
se, err := g.getSessionFunc()
if err != nil {
return errors.Trace(err)
@@ -596,7 +596,7 @@ func (g GlueCheckpointsDB) GetLocalStoringTables(ctx context.Context) (map[strin
CheckpointStatusMaxInvalid, CheckpointStatusIndexImported,
CheckpointStatusMaxInvalid, CheckpointStatusImported)
- err = common.Retry("get local storing tables", log.L(), func() error {
+ err = common.Retry("get local storing tables", log.FromContext(ctx), func() error {
targetTables = make(map[string][]int32)
rs, err := se.Execute(ctx, query)
if err != nil {
@@ -622,7 +622,7 @@ func (g GlueCheckpointsDB) GetLocalStoringTables(ctx context.Context) (map[strin
}
func (g GlueCheckpointsDB) IgnoreErrorCheckpoint(ctx context.Context, tableName string) error {
- logger := log.With(zap.String("table", tableName))
+ logger := log.FromContext(ctx).With(zap.String("table", tableName))
se, err := g.getSessionFunc()
if err != nil {
return errors.Trace(err)
@@ -658,7 +658,7 @@ func (g GlueCheckpointsDB) IgnoreErrorCheckpoint(ctx context.Context, tableName
}
func (g GlueCheckpointsDB) DestroyErrorCheckpoint(ctx context.Context, tableName string) ([]DestroyedTableCheckpoint, error) {
- logger := log.With(zap.String("table", tableName))
+ logger := log.FromContext(ctx).With(zap.String("table", tableName))
se, err := g.getSessionFunc()
if err != nil {
return nil, errors.Trace(err)
diff --git a/br/pkg/lightning/common/conn.go b/br/pkg/lightning/common/conn.go
index eb9b598e64c55..fcf2aaa55fbf3 100644
--- a/br/pkg/lightning/common/conn.go
+++ b/br/pkg/lightning/common/conn.go
@@ -34,6 +34,7 @@ type ConnPool struct {
next int
cap int
newConn func(ctx context.Context) (*grpc.ClientConn, error)
+ logger log.Logger
}
func (p *ConnPool) TakeConns() (conns []*grpc.ClientConn) {
@@ -48,7 +49,7 @@ func (p *ConnPool) TakeConns() (conns []*grpc.ClientConn) {
func (p *ConnPool) Close() {
for _, c := range p.TakeConns() {
if err := c.Close(); err != nil {
- log.L().Warn("failed to close clientConn", zap.String("target", c.Target()), log.ShortError(err))
+ p.logger.Warn("failed to close clientConn", zap.String("target", c.Target()), log.ShortError(err))
}
}
}
@@ -72,13 +73,12 @@ func (p *ConnPool) get(ctx context.Context) (*grpc.ClientConn, error) {
}
// NewConnPool creates a new connPool by the specified conn factory function and capacity.
-func NewConnPool(capacity int, newConn func(ctx context.Context) (*grpc.ClientConn, error)) *ConnPool {
+func NewConnPool(capacity int, newConn func(ctx context.Context) (*grpc.ClientConn, error), logger log.Logger) *ConnPool {
return &ConnPool{
cap: capacity,
conns: make([]*grpc.ClientConn, 0, capacity),
newConn: newConn,
-
- mu: sync.Mutex{},
+ logger: logger,
}
}
@@ -100,7 +100,7 @@ func (conns *GRPCConns) GetGrpcConn(ctx context.Context, storeID uint64, tcpConc
conns.mu.Lock()
defer conns.mu.Unlock()
if _, ok := conns.conns[storeID]; !ok {
- conns.conns[storeID] = NewConnPool(tcpConcurrency, newConn)
+ conns.conns[storeID] = NewConnPool(tcpConcurrency, newConn, log.FromContext(ctx))
}
return conns.conns[storeID].get(ctx)
}
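
The ConnPool now carries the logger it was constructed with instead of reading the global one when it logs a failed close. A minimal sketch of the new call shape, assuming a reachable gRPC endpoint; the dial function and address below are placeholders, not part of this patch:

package main

import (
	"context"

	"github.com/pingcap/tidb/br/pkg/lightning/common"
	"github.com/pingcap/tidb/br/pkg/lightning/log"
	"google.golang.org/grpc"
)

func main() {
	ctx := context.Background()
	// Placeholder dial function and address; a real caller dials a TiKV store.
	newConn := func(ctx context.Context) (*grpc.ClientConn, error) {
		return grpc.DialContext(ctx, "127.0.0.1:20160", grpc.WithInsecure())
	}
	// The pool keeps the logger it is constructed with, so the warning emitted
	// when Close() fails to close a connection goes through the task-scoped
	// logger rather than the global log.L().
	pool := common.NewConnPool(4, newConn, log.FromContext(ctx))
	defer pool.Close()
}
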
diff --git a/br/pkg/lightning/config/config_test.go b/br/pkg/lightning/config/config_test.go
index 881f89de3d380..555cbed109f1d 100644
--- a/br/pkg/lightning/config/config_test.go
+++ b/br/pkg/lightning/config/config_test.go
@@ -517,7 +517,7 @@ func TestInvalidTOML(t *testing.T) {
delimiter = '\'
backslash-escape = true
`))
- require.EqualError(t, err, "Near line 0 (last key parsed ''): bare keys cannot contain '['")
+ require.EqualError(t, err, "Near line 2 (last key parsed ''): expected '.' or '=', but got '[' instead")
}
func TestTOMLUnusedKeys(t *testing.T) {
diff --git a/br/pkg/lightning/errormanager/BUILD.bazel b/br/pkg/lightning/errormanager/BUILD.bazel
index e8c61ab00f0b2..7aea8447865e8 100644
--- a/br/pkg/lightning/errormanager/BUILD.bazel
+++ b/br/pkg/lightning/errormanager/BUILD.bazel
@@ -26,6 +26,7 @@ go_test(
embed = [":errormanager"],
deps = [
"//br/pkg/lightning/config",
+ "//br/pkg/lightning/log",
"//br/pkg/utils",
"@com_github_data_dog_go_sqlmock//:go-sqlmock",
"@com_github_stretchr_testify//require",
diff --git a/br/pkg/lightning/errormanager/errormanager.go b/br/pkg/lightning/errormanager/errormanager.go
index 965191373688e..b900d19c3a431 100644
--- a/br/pkg/lightning/errormanager/errormanager.go
+++ b/br/pkg/lightning/errormanager/errormanager.go
@@ -122,6 +122,7 @@ type ErrorManager struct {
configError *config.MaxError
remainingError config.MaxError
dupResolution config.DuplicateResolutionAlgorithm
+ logger log.Logger
}
func (em *ErrorManager) TypeErrorsRemain() int64 {
@@ -129,12 +130,13 @@ func (em *ErrorManager) TypeErrorsRemain() int64 {
}
// New creates a new error manager.
-func New(db *sql.DB, cfg *config.Config) *ErrorManager {
+func New(db *sql.DB, cfg *config.Config, logger log.Logger) *ErrorManager {
em := &ErrorManager{
taskID: cfg.TaskID,
configError: &cfg.App.MaxError,
remainingError: cfg.App.MaxError,
dupResolution: cfg.TikvImporter.DuplicateResolution,
+ logger: logger,
}
if len(cfg.App.TaskInfoSchemaName) != 0 {
em.db = db
@@ -151,7 +153,7 @@ func (em *ErrorManager) Init(ctx context.Context) error {
exec := common.SQLWithRetry{
DB: em.db,
- Logger: log.L(),
+ Logger: em.logger,
}
sqls := make([][2]string, 0)
@@ -351,6 +353,7 @@ func (em *ErrorManager) ResolveAllConflictKeys(
go func() {
//nolint:staticcheck
+ //lint:ignore SA2000
taskWg.Add(1)
taskCh <- [2]int64{0, math.MaxInt64}
taskWg.Wait()
@@ -456,17 +459,17 @@ func (em *ErrorManager) LogErrorDetails() {
cnt, errType, em.fmtTableName(tblName))
}
if errCnt := em.typeErrors(); errCnt > 0 {
- log.L().Warn(fmtErrMsg(errCnt, "data type", typeErrorTableName))
+ em.logger.Warn(fmtErrMsg(errCnt, "data type", typeErrorTableName))
}
if errCnt := em.syntaxError(); errCnt > 0 {
- log.L().Warn(fmtErrMsg(errCnt, "data type", syntaxErrorTableName))
+ em.logger.Warn(fmtErrMsg(errCnt, "data type", syntaxErrorTableName))
}
if errCnt := em.charsetError(); errCnt > 0 {
// TODO: add charset table name
- log.L().Warn(fmtErrMsg(errCnt, "data type", ""))
+ em.logger.Warn(fmtErrMsg(errCnt, "data type", ""))
}
if errCnt := em.conflictError(); errCnt > 0 {
- log.L().Warn(fmtErrMsg(errCnt, "data type", conflictErrorTableName))
+ em.logger.Warn(fmtErrMsg(errCnt, "data type", conflictErrorTableName))
}
}
diff --git a/br/pkg/lightning/errormanager/errormanager_test.go b/br/pkg/lightning/errormanager/errormanager_test.go
index 63446cdd4c573..38f81b51f0299 100644
--- a/br/pkg/lightning/errormanager/errormanager_test.go
+++ b/br/pkg/lightning/errormanager/errormanager_test.go
@@ -25,6 +25,7 @@ import (
"testing"
"github.com/DATA-DOG/go-sqlmock"
+ "github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
@@ -41,7 +42,7 @@ func TestInit(t *testing.T) {
cfg.App.MaxError.Type.Store(10)
cfg.App.TaskInfoSchemaName = "lightning_errors"
- em := New(db, cfg)
+ em := New(db, cfg, log.L())
require.Equal(t, cfg.TikvImporter.DuplicateResolution, em.dupResolution)
require.Equal(t, cfg.App.MaxError.Type.Load(), em.remainingError.Type.Load())
require.Equal(t, cfg.App.MaxError.Conflict.Load(), em.remainingError.Conflict.Load())
@@ -162,7 +163,7 @@ func TestResolveAllConflictKeys(t *testing.T) {
cfg := config.NewConfig()
cfg.TikvImporter.DuplicateResolution = config.DupeResAlgRemove
cfg.App.TaskInfoSchemaName = "lightning_errors"
- em := New(db, cfg)
+ em := New(db, cfg, log.L())
ctx := context.Background()
err = em.Init(ctx)
require.NoError(t, err)
diff --git a/br/pkg/lightning/lightning.go b/br/pkg/lightning/lightning.go
index 5cf3df9389d21..111b7c93b59b4 100644
--- a/br/pkg/lightning/lightning.go
+++ b/br/pkg/lightning/lightning.go
@@ -266,6 +266,7 @@ func (l *Lightning) RunOnce(taskCtx context.Context, taskCfg *config.Config, glu
glue: glue,
promFactory: l.promFactory,
promRegistry: l.promRegistry,
+ logger: log.L(),
}
return l.run(taskCtx, taskCfg, o)
}
@@ -287,6 +288,7 @@ func (l *Lightning) RunServer() error {
o := &options{
promFactory: l.promFactory,
promRegistry: l.promRegistry,
+ logger: log.L(),
}
err = l.run(context.Background(), task, o)
if err != nil && !common.IsContextCanceledError(err) {
@@ -310,6 +312,7 @@ func (l *Lightning) RunOnceWithOptions(taskCtx context.Context, taskCfg *config.
o := &options{
promFactory: l.promFactory,
promRegistry: l.promRegistry,
+ logger: log.L(),
}
for _, opt := range opts {
opt(o)
@@ -357,7 +360,7 @@ var (
func (l *Lightning) run(taskCtx context.Context, taskCfg *config.Config, o *options) (err error) {
build.LogInfo(build.Lightning)
- log.L().Info("cfg", zap.Stringer("cfg", taskCfg))
+ o.logger.Info("cfg", zap.Stringer("cfg", taskCfg))
utils.LogEnvVariables()
@@ -368,6 +371,7 @@ func (l *Lightning) run(taskCtx context.Context, taskCfg *config.Config, o *opti
}()
ctx := metric.NewContext(taskCtx, metrics)
+ ctx = log.NewContext(ctx, o.logger)
ctx, cancel := context.WithCancel(ctx)
l.cancelLock.Lock()
l.cancel = cancel
@@ -457,7 +461,7 @@ func (l *Lightning) run(taskCtx context.Context, taskCfg *config.Config, o *opti
return common.NormalizeOrWrapErr(common.ErrStorageUnknown, walkErr)
}
- loadTask := log.L().Begin(zap.InfoLevel, "load data source")
+ loadTask := o.logger.Begin(zap.InfoLevel, "load data source")
var mdl *mydump.MDLoader
mdl, err = mydump.NewMyDumpLoaderWithStore(ctx, taskCfg, s)
loadTask.End(zap.ErrorLevel, err)
@@ -466,13 +470,13 @@ func (l *Lightning) run(taskCtx context.Context, taskCfg *config.Config, o *opti
}
err = checkSystemRequirement(taskCfg, mdl.GetDatabases())
if err != nil {
- log.L().Error("check system requirements failed", zap.Error(err))
+ o.logger.Error("check system requirements failed", zap.Error(err))
return common.ErrSystemRequirementNotMet.Wrap(err).GenWithStackByArgs()
}
// check table schema conflicts
err = checkSchemaConflict(taskCfg, mdl.GetDatabases())
if err != nil {
- log.L().Error("checkpoint schema conflicts with data files", zap.Error(err))
+ o.logger.Error("checkpoint schema conflicts with data files", zap.Error(err))
return errors.Trace(err)
}
@@ -493,7 +497,7 @@ func (l *Lightning) run(taskCtx context.Context, taskCfg *config.Config, o *opti
procedure, err = restore.NewRestoreController(ctx, taskCfg, param)
if err != nil {
- log.L().Error("restore failed", log.ShortError(err))
+ o.logger.Error("restore failed", log.ShortError(err))
return errors.Trace(err)
}
defer procedure.Close()
@@ -835,7 +839,9 @@ func handleLogLevel(w http.ResponseWriter, req *http.Request) {
return
}
oldLevel := log.SetLevel(zapcore.InfoLevel)
- log.L().Info("changed log level", zap.Stringer("old", oldLevel), zap.Stringer("new", logLevel.Level))
+ log.L().Info("changed log level. No effects if task has specified its logger",
+ zap.Stringer("old", oldLevel),
+ zap.Stringer("new", logLevel.Level))
log.SetLevel(logLevel.Level)
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte("{}"))
@@ -941,7 +947,7 @@ func CleanupMetas(ctx context.Context, cfg *config.Config, tableName string) err
if err != nil || !exist {
return errors.Trace(err)
}
- return errors.Trace(restore.MaybeCleanupAllMetas(ctx, db, cfg.App.MetaSchemaName, tableMetaExist))
+ return errors.Trace(restore.MaybeCleanupAllMetas(ctx, log.L(), db, cfg.App.MetaSchemaName, tableMetaExist))
}
func SwitchMode(ctx context.Context, cfg *config.Config, tls *common.TLS, mode string) error {
diff --git a/br/pkg/lightning/lightning_serial_test.go b/br/pkg/lightning/lightning_serial_test.go
index cf5e43d19435a..398d4e90eec6f 100644
--- a/br/pkg/lightning/lightning_serial_test.go
+++ b/br/pkg/lightning/lightning_serial_test.go
@@ -25,6 +25,7 @@ import (
"github.com/pingcap/tidb/br/pkg/lightning/checkpoints"
"github.com/pingcap/tidb/br/pkg/lightning/config"
"github.com/pingcap/tidb/br/pkg/lightning/glue"
+ "github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/br/pkg/lightning/mydump"
"github.com/stretchr/testify/require"
)
@@ -65,6 +66,7 @@ func TestRun(t *testing.T) {
glue: invalidGlue,
promRegistry: lightning.promRegistry,
promFactory: lightning.promFactory,
+ logger: log.L(),
}
err = lightning.run(ctx, &config.Config{
Mydumper: config.MydumperRuntime{
diff --git a/br/pkg/lightning/log/log.go b/br/pkg/lightning/log/log.go
index aa61023a29cc9..2dab04f925182 100644
--- a/br/pkg/lightning/log/log.go
+++ b/br/pkg/lightning/log/log.go
@@ -230,3 +230,21 @@ func (task *Task) End(level zapcore.Level, err error, extraFields ...zap.Field)
}
return elapsed
}
+
+type ctxKeyType struct{}
+
+var ctxKey ctxKeyType
+
+// NewContext returns a new context with the provided logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, ctxKey, logger)
+}
+
+// FromContext returns the logger stored in the context.
+func FromContext(ctx context.Context) Logger {
+ m, ok := ctx.Value(ctxKey).(Logger)
+ if !ok {
+ return appLogger
+ }
+ return m
+}
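
The NewContext/FromContext pair above is the core of this patch: a task attaches its logger to the context once, and every call site that previously used the global log.L() can read it back. A minimal sketch of the intended flow; the function and field names here are illustrative, not from this patch:

package main

import (
	"context"

	"github.com/pingcap/tidb/br/pkg/lightning/log"
	"go.uber.org/zap"
)

// runTask stands in for any function that only receives a context.
func runTask(ctx context.Context) {
	// FromContext returns the logger stored by NewContext, or falls back to the
	// process-wide logger when the context carries none, so old call sites keep working.
	log.FromContext(ctx).Info("processing", zap.String("step", "example"))
}

func main() {
	// Attach a task-scoped logger once, near the top of the task (as lightning.go
	// now does with o.logger); every callee can then retrieve it from the context.
	taskLogger := log.L().With(zap.Int64("task", 42))
	ctx := log.NewContext(context.Background(), taskLogger)
	runTask(ctx)
}
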
diff --git a/br/pkg/lightning/mydump/csv_parser.go b/br/pkg/lightning/mydump/csv_parser.go
index 758092df960b4..5c538269ebdce 100644
--- a/br/pkg/lightning/mydump/csv_parser.go
+++ b/br/pkg/lightning/mydump/csv_parser.go
@@ -22,6 +22,7 @@ import (
"github.com/pingcap/errors"
"github.com/pingcap/tidb/br/pkg/lightning/config"
+ "github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/br/pkg/lightning/metric"
"github.com/pingcap/tidb/br/pkg/lightning/worker"
"github.com/pingcap/tidb/types"
@@ -123,7 +124,7 @@ func NewCSVParser(
}
metrics, _ := metric.FromContext(ctx)
return &CSVParser{
- blockParser: makeBlockParser(reader, blockBufSize, ioWorkers, metrics),
+ blockParser: makeBlockParser(reader, blockBufSize, ioWorkers, metrics, log.FromContext(ctx)),
cfg: cfg,
charsetConvertor: charsetConvertor,
comma: []byte(separator),
diff --git a/br/pkg/lightning/mydump/loader.go b/br/pkg/lightning/mydump/loader.go
index 09ef6229c45bc..30f4f14c1464c 100644
--- a/br/pkg/lightning/mydump/loader.go
+++ b/br/pkg/lightning/mydump/loader.go
@@ -41,7 +41,7 @@ type MDDatabaseMeta struct {
func (m *MDDatabaseMeta) GetSchema(ctx context.Context, store storage.ExternalStorage) string {
schema, err := ExportStatement(ctx, store, m.SchemaFile, m.charSet)
if err != nil {
- log.L().Warn("failed to extract table schema",
+ log.FromContext(ctx).Warn("failed to extract table schema",
zap.String("Path", m.SchemaFile.FileMeta.Path),
log.ShortError(err),
)
@@ -78,7 +78,7 @@ type SourceFileMeta struct {
func (m *MDTableMeta) GetSchema(ctx context.Context, store storage.ExternalStorage) (string, error) {
schema, err := ExportStatement(ctx, store, m.SchemaFile, m.charSet)
if err != nil {
- log.L().Error("failed to extract table schema",
+ log.FromContext(ctx).Error("failed to extract table schema",
zap.String("Path", m.SchemaFile.FileMeta.Path),
log.ShortError(err),
)
@@ -157,7 +157,7 @@ func NewMyDumpLoaderWithStore(ctx context.Context, cfg *config.Config, store sto
fileRouteRules = append(fileRouteRules, defaultFileRouteRules...)
}
- fileRouter, err := NewFileRouter(fileRouteRules)
+ fileRouter, err := NewFileRouter(fileRouteRules, log.FromContext(ctx))
if err != nil {
return nil, common.ErrInvalidConfig.Wrap(err).GenWithStack("parse file routing rule failed")
}
@@ -300,7 +300,7 @@ func (s *mdLoaderSetup) listFiles(ctx context.Context, store storage.ExternalSto
// meaning the file and chunk orders will be the same every time it is called
// (as long as the source is immutable).
err := store.WalkDir(ctx, &storage.WalkOption{}, func(path string, size int64) error {
- logger := log.With(zap.String("path", path))
+ logger := log.FromContext(ctx).With(zap.String("path", path))
res, err := s.loader.fileRouter.Route(filepath.ToSlash(path))
if err != nil {
diff --git a/br/pkg/lightning/mydump/parquet_parser.go b/br/pkg/lightning/mydump/parquet_parser.go
index 789163c18bb01..50ae2d9d63960 100644
--- a/br/pkg/lightning/mydump/parquet_parser.go
+++ b/br/pkg/lightning/mydump/parquet_parser.go
@@ -208,7 +208,7 @@ func NewParquetParser(
Reader: reader,
columns: columns,
columnMetas: columnMetas,
- logger: log.L(),
+ logger: log.FromContext(ctx),
}, nil
}
@@ -377,7 +377,7 @@ func (pp *ParquetParser) ReadRow() error {
}
for i := 0; i < length; i++ {
pp.lastRow.Length += getDatumLen(v.Field(i))
- if err := setDatumValue(&pp.lastRow.Row[i], v.Field(i), pp.columnMetas[i]); err != nil {
+ if err := setDatumValue(&pp.lastRow.Row[i], v.Field(i), pp.columnMetas[i], pp.logger); err != nil {
return err
}
}
@@ -401,7 +401,7 @@ func getDatumLen(v reflect.Value) int {
// convert a parquet value to Datum
//
// See: https://github.com/apache/parquet-format/blob/master/LogicalTypes.md
-func setDatumValue(d *types.Datum, v reflect.Value, meta *parquet.SchemaElement) error {
+func setDatumValue(d *types.Datum, v reflect.Value, meta *parquet.SchemaElement, logger log.Logger) error {
switch v.Kind() {
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
d.SetUint64(v.Uint())
@@ -417,10 +417,10 @@ func setDatumValue(d *types.Datum, v reflect.Value, meta *parquet.SchemaElement)
if v.IsNil() {
d.SetNull()
} else {
- return setDatumValue(d, v.Elem(), meta)
+ return setDatumValue(d, v.Elem(), meta, logger)
}
default:
- log.L().Error("unknown value", zap.Stringer("kind", v.Kind()),
+ logger.Error("unknown value", zap.Stringer("kind", v.Kind()),
zap.String("type", v.Type().Name()), zap.Reflect("value", v.Interface()))
return errors.Errorf("unknown value: %v", v)
}
diff --git a/br/pkg/lightning/mydump/parser.go b/br/pkg/lightning/mydump/parser.go
index cf303e903be65..5949016dc74b0 100644
--- a/br/pkg/lightning/mydump/parser.go
+++ b/br/pkg/lightning/mydump/parser.go
@@ -60,13 +60,19 @@ type blockParser struct {
metrics *metric.Metrics
}
-func makeBlockParser(reader ReadSeekCloser, blockBufSize int64, ioWorkers *worker.Pool, metrics *metric.Metrics) blockParser {
+func makeBlockParser(
+ reader ReadSeekCloser,
+ blockBufSize int64,
+ ioWorkers *worker.Pool,
+ metrics *metric.Metrics,
+ logger log.Logger,
+) blockParser {
return blockParser{
reader: MakePooledReader(reader, ioWorkers),
blockBuf: make([]byte, blockBufSize*config.BufferSizeScale),
remainBuf: &bytes.Buffer{},
appendBuf: &bytes.Buffer{},
- Logger: log.L(),
+ Logger: logger,
rowPool: &sync.Pool{
New: func() interface{} {
return make([]types.Datum, 0, 16)
@@ -147,7 +153,7 @@ func NewChunkParser(
}
metrics, _ := metric.FromContext(ctx)
return &ChunkParser{
- blockParser: makeBlockParser(reader, blockBufSize, ioWorkers, metrics),
+ blockParser: makeBlockParser(reader, blockBufSize, ioWorkers, metrics, log.FromContext(ctx)),
escFlavor: escFlavor,
}
}
diff --git a/br/pkg/lightning/mydump/reader.go b/br/pkg/lightning/mydump/reader.go
index 7db6bf2bf8ed5..5ba5cf974ef30 100644
--- a/br/pkg/lightning/mydump/reader.go
+++ b/br/pkg/lightning/mydump/reader.go
@@ -107,7 +107,7 @@ func ExportStatement(ctx context.Context, store storage.ExternalStorage, sqlFile
data, err = decodeCharacterSet(data, characterSet)
if err != nil {
- log.L().Error("cannot decode input file, please convert to target encoding manually",
+ log.FromContext(ctx).Error("cannot decode input file, please convert to target encoding manually",
zap.String("encoding", characterSet),
zap.String("Path", sqlFile.FileMeta.Path),
)
diff --git a/br/pkg/lightning/mydump/region.go b/br/pkg/lightning/mydump/region.go
index b347d27bb9ab8..04cc75e5567ae 100644
--- a/br/pkg/lightning/mydump/region.go
+++ b/br/pkg/lightning/mydump/region.go
@@ -170,7 +170,7 @@ func MakeTableRegions(
break
}
if err != nil {
- log.L().Error("make source file region error", zap.Error(err), zap.String("file_path", info.FileMeta.Path))
+ log.FromContext(ctx).Error("make source file region error", zap.Error(err), zap.String("file_path", info.FileMeta.Path))
break
}
}
@@ -240,7 +240,7 @@ func MakeTableRegions(
}
}
- log.L().Info("makeTableRegions", zap.Int("filesCount", len(meta.DataFiles)),
+ log.FromContext(ctx).Info("makeTableRegions", zap.Int("filesCount", len(meta.DataFiles)),
zap.Int64("MaxRegionSize", int64(cfg.Mydumper.MaxRegionSize)),
zap.Int("RegionsCount", len(filesRegions)),
zap.Float64("BatchSize", batchSize),
@@ -274,10 +274,10 @@ func makeSourceFileRegion(
}
sizePerRow, err := GetSampledAvgRowSize(&fi, cfg, ioWorkers, store)
if err == nil && sizePerRow != 0 {
- log.L().Warn("fail to sample file", zap.String("path", fi.FileMeta.Path), zap.Error(err))
+ log.FromContext(ctx).Warn("fail to sample file", zap.String("path", fi.FileMeta.Path), zap.Error(err))
divisor = sizePerRow
}
- log.L().Debug("avg row size", zap.String("path", fi.FileMeta.Path), zap.Int64("size per row", sizePerRow))
+ log.FromContext(ctx).Debug("avg row size", zap.String("path", fi.FileMeta.Path), zap.Int64("size per row", sizePerRow))
// If a csv file is overlarge, we need to split it into multiple regions.
// Note: We can only split a csv file whose format is strict.
// We increase the check threshold by 1/10 of the `max-region-size` because the source file size dumped by tools
@@ -305,7 +305,7 @@ func makeSourceFileRegion(
})
if tableRegion.Size() > tableRegionSizeWarningThreshold {
- log.L().Warn(
+ log.FromContext(ctx).Warn(
"file is too big to be processed efficiently; we suggest splitting it at 256 MB each",
zap.String("file", fi.FileMeta.Path),
zap.Int64("size", dataFileSize))
@@ -467,7 +467,7 @@ func SplitLargeFile(
if !errors.ErrorEqual(err, io.EOF) {
return 0, nil, nil, err
}
- log.L().Warn("file contains no terminator at end",
+ log.FromContext(ctx).Warn("file contains no terminator at end",
zap.String("path", dataFile.FileMeta.Path),
zap.String("terminator", cfg.Mydumper.CSV.Terminator))
pos = dataFile.FileMeta.FileSize
diff --git a/br/pkg/lightning/mydump/router.go b/br/pkg/lightning/mydump/router.go
index 223d82edb90e9..c3a6ff3aae161 100644
--- a/br/pkg/lightning/mydump/router.go
+++ b/br/pkg/lightning/mydump/router.go
@@ -142,11 +142,11 @@ func (c chainRouters) Route(path string) (*RouteResult, error) {
return nil, nil
}
-func NewFileRouter(cfg []*config.FileRouteRule) (FileRouter, error) {
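+ // NewFileRouter builds a FileRouter from the configured rules; the logger is passed down to each parsed rule so its warnings go to the task logger.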
+func NewFileRouter(cfg []*config.FileRouteRule, logger log.Logger) (FileRouter, error) {
res := make([]FileRouter, 0, len(cfg))
p := regexRouterParser{}
for _, c := range cfg {
- rule, err := p.Parse(c)
+ rule, err := p.Parse(c, logger)
if err != nil {
return nil, err
}
@@ -180,7 +180,7 @@ func (r *RegexRouter) Route(path string) (*RouteResult, error) {
type regexRouterParser struct{}
-func (p regexRouterParser) Parse(r *config.FileRouteRule) (*RegexRouter, error) {
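+ // Parse compiles a single FileRouteRule into a RegexRouter, reporting values that fail URL-unescaping through the given logger.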
+func (p regexRouterParser) Parse(r *config.FileRouteRule, logger log.Logger) (*RegexRouter, error) {
rule := &RegexRouter{}
if r.Path == "" && r.Pattern == "" {
return nil, errors.New("`path` and `pattern` must not be both empty in [[mydumper.files]]")
@@ -225,7 +225,7 @@ func (p regexRouterParser) Parse(r *config.FileRouteRule) (*RegexRouter, error)
if unescape {
val, err := url.PathUnescape(value)
if err != nil {
- log.L().Warn("unescape string failed, will be ignored", zap.String("value", value),
+ logger.Warn("unescape string failed, will be ignored", zap.String("value", value),
zap.Error(err))
} else {
value = val
diff --git a/br/pkg/lightning/mydump/router_test.go b/br/pkg/lightning/mydump/router_test.go
index 4df92b74f45c5..7401027cfbd36 100644
--- a/br/pkg/lightning/mydump/router_test.go
+++ b/br/pkg/lightning/mydump/router_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/pingcap/tidb/br/pkg/lightning/config"
+ "github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/util/filter"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -21,7 +22,7 @@ func TestRouteParser(t *testing.T) {
{Pattern: `^(?:[^/]*/)*([^/.]+)\.([^./]+)(?:\.[0-9]+)?\.(csv|sql)`, Schema: "$1-schema", Table: "$1-table", Type: "$2"},
}
for _, r := range rules {
- _, err := NewFileRouter([]*config.FileRouteRule{r})
+ _, err := NewFileRouter([]*config.FileRouteRule{r}, log.L())
assert.NoError(t, err)
}
@@ -32,7 +33,7 @@ func TestRouteParser(t *testing.T) {
{Pattern: `^(?:[^/]*/)*([^/.]+)\.([^./]+)(?:\.[0-9]+)?\.(csv|sql)`, Schema: "$1", Table: "$2", Type: "$3", Key: "$4"},
}
for _, r := range invalidRules {
- _, err := NewFileRouter([]*config.FileRouteRule{r})
+ _, err := NewFileRouter([]*config.FileRouteRule{r}, log.L())
assert.Error(t, err)
}
}
@@ -40,31 +41,31 @@ func TestRouteParser(t *testing.T) {
func TestInvalidRouteRule(t *testing.T) {
rule := &config.FileRouteRule{}
rules := []*config.FileRouteRule{rule}
- _, err := NewFileRouter(rules)
+ _, err := NewFileRouter(rules, log.L())
require.Regexp(t, "`path` and `pattern` must not be both empty in \\[\\[mydumper.files\\]\\]", err.Error())
rule.Pattern = `^(?:[^/]*/)*([^/.]+)\.(?P<table>[^./]+)(?:\.(?P<key>[0-9]+))?\.(?P<type>csv|sql)(?:\.(?P<cp>[A-Za-z0-9]+))?$`
- _, err = NewFileRouter(rules)
+ _, err = NewFileRouter(rules, log.L())
require.Regexp(t, "field 'type' match pattern can't be empty", err.Error())
rule.Type = "$type"
- _, err = NewFileRouter(rules)
+ _, err = NewFileRouter(rules, log.L())
require.Regexp(t, "field 'schema' match pattern can't be empty", err.Error())
rule.Schema = "$schema"
- _, err = NewFileRouter(rules)
+ _, err = NewFileRouter(rules, log.L())
require.Regexp(t, "invalid named capture '\\$schema'", err.Error())
rule.Schema = "$1"
- _, err = NewFileRouter(rules)
+ _, err = NewFileRouter(rules, log.L())
require.Regexp(t, "field 'table' match pattern can't be empty", err.Error())
rule.Table = "$table"
- _, err = NewFileRouter(rules)
+ _, err = NewFileRouter(rules, log.L())
require.NoError(t, err)
rule.Path = "/tmp/1.sql"
- _, err = NewFileRouter(rules)
+ _, err = NewFileRouter(rules, log.L())
require.Regexp(t, "can't set both `path` and `pattern` field in \\[\\[mydumper.files\\]\\]", err.Error())
}
@@ -73,7 +74,7 @@ func TestSingleRouteRule(t *testing.T) {
{Pattern: `^(?:[^/]*/)*([^/.]+)\.(?P<table>[^./]+)(?:\.(?P<key>[0-9]+))?\.(?P<type>csv|sql)(?:\.(?P<cp>[A-Za-z0-9]+))?$`, Schema: "$1", Table: "$table", Type: "$type", Key: "$key", Compression: "$cp"},
}
- r, err := NewFileRouter(rules)
+ r, err := NewFileRouter(rules, log.L())
require.NoError(t, err)
inputOutputMap := map[string][]string{
@@ -107,7 +108,7 @@ func TestSingleRouteRule(t *testing.T) {
}
rule := &config.FileRouteRule{Pattern: `^(?:[^/]*/)*([^/.]+)\.(?P<table>[^./]+)(?:\.(?P<key>[0-9]+))?\.(?P<type>\w+)(?:\.(?P<cp>[A-Za-z0-9]+))?$`, Schema: "$1", Table: "$table", Type: "$type", Key: "$key", Compression: "$cp"}
- r, err = NewFileRouter([]*config.FileRouteRule{rule})
+ r, err = NewFileRouter([]*config.FileRouteRule{rule}, log.L())
require.NoError(t, err)
require.NotNil(t, r)
invalidMatchPaths := []string{
@@ -131,7 +132,7 @@ func TestMultiRouteRule(t *testing.T) {
{Pattern: `^(?:[^/]*/)*(?P<schema>[^/.]+)\.(?P<table>[^./]+)(?:\.(?P<key>[0-9]+))?\.(?P<type>csv|sql)(?:\.(?P<cp>[A-Za-z0-9]+))?$`, Schema: "$schema", Table: "$table", Type: "$type", Key: "$key", Compression: "$cp"},
}
- r, err := NewFileRouter(rules)
+ r, err := NewFileRouter(rules, log.L())
require.NoError(t, err)
inputOutputMap := map[string][]string{
@@ -163,7 +164,7 @@ func TestMultiRouteRule(t *testing.T) {
// add another rule that matches the same pattern as the third rule; the result should be no different
p := &config.FileRouteRule{Pattern: `^(?P<schema>[^/.]+)\.(?P<table>[^./]+)(?:\.(?P<key>[0-9]+))?\.(?P<type>csv|sql)(?:\.(?P<cp>[A-Za-z0-9]+))?$`, Schema: "test_schema", Table: "test_table", Type: "$type", Key: "$key", Compression: "$cp"}
rules = append(rules, p)
- r, err = NewFileRouter(rules)
+ r, err = NewFileRouter(rules, log.L())
require.NoError(t, err)
for path, fields := range inputOutputMap {
res, err := r.Route(path)
@@ -209,7 +210,7 @@ func TestRouteExpanding(t *testing.T) {
for pat, value := range tablePatternResMap {
rule.Table = pat
- router, err := NewFileRouter([]*config.FileRouteRule{rule})
+ router, err := NewFileRouter([]*config.FileRouteRule{rule}, log.L())
assert.NoError(t, err)
res, err := router.Route(path)
assert.NoError(t, err)
@@ -220,7 +221,7 @@ func TestRouteExpanding(t *testing.T) {
invalidPatterns := []string{"$1_$schema", "$schema_$table_name", "$6"}
for _, pat := range invalidPatterns {
rule.Table = pat
- _, err := NewFileRouter([]*config.FileRouteRule{rule})
+ _, err := NewFileRouter([]*config.FileRouteRule{rule}, log.L())
assert.Error(t, err)
}
}
@@ -235,7 +236,7 @@ func TestRouteWithPath(t *testing.T) {
Key: "$key",
}
r := *rule
- router, err := NewFileRouter([]*config.FileRouteRule{&r})
+ router, err := NewFileRouter([]*config.FileRouteRule{&r}, log.L())
require.NoError(t, err)
res, err := router.Route(fileName)
require.NoError(t, err)
diff --git a/br/pkg/lightning/restore/check_info.go b/br/pkg/lightning/restore/check_info.go
index 92ff3d2a5385c..442fae5a3e18b 100644
--- a/br/pkg/lightning/restore/check_info.go
+++ b/br/pkg/lightning/restore/check_info.go
@@ -402,7 +402,7 @@ func (rc *Controller) estimateSourceData(ctx context.Context) (int64, error) {
bigTableCount := 0
tableCount := 0
unSortedTableCount := 0
- errMgr := errormanager.New(nil, rc.cfg)
+ errMgr := errormanager.New(nil, rc.cfg, log.FromContext(ctx))
for _, db := range rc.dbMetas {
info, ok := rc.dbInfos[db.Name]
if !ok {
@@ -455,7 +455,7 @@ func (rc *Controller) estimateSourceData(ctx context.Context) (int64, error) {
}
// localResource checks whether the local node has enough resources for this import when the local backend is enabled.
-func (rc *Controller) localResource(sourceSize int64) error {
+func (rc *Controller) localResource(ctx context.Context, sourceSize int64) error {
if rc.isSourceInLocal() {
sourceDir := strings.TrimPrefix(rc.cfg.Mydumper.SourceDir, storage.LocalURIPrefix)
same, err := common.SameDisk(sourceDir, rc.cfg.TikvImporter.SortedKVDir)
@@ -489,7 +489,7 @@ func (rc *Controller) localResource(sourceSize int64) error {
units.BytesSize(float64(sourceSize)),
units.BytesSize(float64(localAvailable)), units.BytesSize(float64(localAvailable)))
passed = false
- log.L().Error(message)
+ log.FromContext(ctx).Error(message)
default:
message = fmt.Sprintf("local disk space may not enough to finish import, "+
"estimate sorted data size is %s, but local available is %s,"+
@@ -497,7 +497,7 @@ func (rc *Controller) localResource(sourceSize int64) error {
units.BytesSize(float64(sourceSize)),
units.BytesSize(float64(localAvailable)), units.BytesSize(float64(rc.cfg.TikvImporter.DiskQuota)))
passed = true
- log.L().Warn(message)
+ log.FromContext(ctx).Warn(message)
}
rc.checkTemplate.Collect(Critical, passed, message)
return nil
@@ -510,7 +510,7 @@ func (rc *Controller) CheckpointIsValid(ctx context.Context, tableInfo *mydump.M
tableCheckPoint, err := rc.checkpointsDB.Get(ctx, uniqueName)
if err != nil {
// there is no checkpoint
- log.L().Debug("no checkpoint detected", zap.String("table", uniqueName))
+ log.FromContext(ctx).Debug("no checkpoint detected", zap.String("table", uniqueName))
return nil, true
}
// if checkpoint enable and not missing, we skip the check table empty progress.
@@ -570,12 +570,12 @@ func (rc *Controller) CheckpointIsValid(ctx context.Context, tableInfo *mydump.M
}
}
if len(columns) == 0 {
- log.L().Debug("no valid checkpoint detected", zap.String("table", uniqueName))
+ log.FromContext(ctx).Debug("no valid checkpoint detected", zap.String("table", uniqueName))
return nil, false
}
info := rc.dbInfos[tableInfo.DB].Tables[tableInfo.Name]
if info != nil {
- permFromTiDB, err := parseColumnPermutations(info.Core, columns, nil)
+ permFromTiDB, err := parseColumnPermutations(info.Core, columns, nil, log.FromContext(ctx))
if err != nil {
msgs = append(msgs, fmt.Sprintf("failed to calculate columns %s, table %s's info has changed,"+
"consider remove this checkpoint, and start import again.", err.Error(), uniqueName))
@@ -641,7 +641,7 @@ func (rc *Controller) readFirstRow(ctx context.Context, dataFileMeta mydump.Sour
// SchemaIsValid checks the import file and cluster schema is match.
func (rc *Controller) SchemaIsValid(ctx context.Context, tableInfo *mydump.MDTableMeta) ([]string, error) {
if len(tableInfo.DataFiles) == 0 {
- log.L().Info("no data files detected", zap.String("db", tableInfo.DB), zap.String("table", tableInfo.Name))
+ log.FromContext(ctx).Info("no data files detected", zap.String("db", tableInfo.DB), zap.String("table", tableInfo.Name))
return nil, nil
}
@@ -673,7 +673,7 @@ func (rc *Controller) SchemaIsValid(ctx context.Context, tableInfo *mydump.MDTab
// only check the first file of this table.
dataFile := tableInfo.DataFiles[0]
- log.L().Info("datafile to check", zap.String("db", tableInfo.DB),
+ log.FromContext(ctx).Info("datafile to check", zap.String("db", tableInfo.DB),
zap.String("table", tableInfo.Name), zap.String("path", dataFile.FileMeta.Path))
// get columns name from data file.
dataFileMeta := dataFile.FileMeta
@@ -687,7 +687,7 @@ func (rc *Controller) SchemaIsValid(ctx context.Context, tableInfo *mydump.MDTab
return nil, errors.Trace(err)
}
if colsFromDataFile == nil && len(row) == 0 {
- log.L().Info("file contains no data, skip checking against schema validity", zap.String("path", dataFileMeta.Path))
+ log.FromContext(ctx).Info("file contains no data, skip checking against schema validity", zap.String("path", dataFileMeta.Path))
return msgs, nil
}
@@ -898,7 +898,7 @@ outer:
level := Warn
if hasUniqueField && len(rows) > 1 {
level = Critical
- } else if !checkFieldCompatibility(tableInfo.Core, ignoreColsSet, rows[0]) {
+ } else if !checkFieldCompatibility(tableInfo.Core, ignoreColsSet, rows[0], log.FromContext(ctx)) {
// if there is only one csv file or there is no unique key, try to check if all columns are compatible with string values
level = Critical
}
@@ -907,10 +907,15 @@ outer:
return nil
}
-func checkFieldCompatibility(tbl *model.TableInfo, ignoreCols map[string]struct{}, values []types.Datum) bool {
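+ // checkFieldCompatibility casts each sampled value to its column type and returns false on the first incompatible value, logging the failure through the given logger; ignored columns are skipped.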
+func checkFieldCompatibility(
+ tbl *model.TableInfo,
+ ignoreCols map[string]struct{},
+ values []types.Datum,
+ logger log.Logger,
+) bool {
se := kv.NewSession(&kv.SessionOptions{
SQLMode: mysql.ModeStrictTransTables,
- })
+ }, logger)
for i, col := range tbl.Columns {
// do not check ignored columns
if _, ok := ignoreCols[col.Name.L]; ok {
@@ -921,7 +926,7 @@ func checkFieldCompatibility(tbl *model.TableInfo, ignoreCols map[string]struct{
}
_, err := table.CastValue(se, values[i], col, true, false)
if err != nil {
- log.L().Error("field value is not consistent with column type", zap.String("value", values[i].GetString()),
+ logger.Error("field value is not consistent with column type", zap.String("value", values[i].GetString()),
zap.Any("column_info", col), zap.Error(err))
return false
}
@@ -956,7 +961,7 @@ func (rc *Controller) sampleDataFromTable(
if err != nil {
return errors.Trace(err)
}
- kvEncoder, err := rc.backend.NewEncoder(tbl, &kv.SessionOptions{
+ kvEncoder, err := rc.backend.NewEncoder(ctx, tbl, &kv.SessionOptions{
SQLMode: rc.cfg.TiDB.SQLMode,
Timestamp: 0,
SysVars: rc.sysVars,
@@ -991,7 +996,7 @@ func (rc *Controller) sampleDataFromTable(
panic(fmt.Sprintf("file '%s' with unknown source type '%s'", sampleFile.Path, sampleFile.Type.String()))
}
defer parser.Close()
- logTask := log.With(zap.String("table", tableMeta.Name)).Begin(zap.InfoLevel, "sample file")
+ logTask := log.FromContext(ctx).With(zap.String("table", tableMeta.Name)).Begin(zap.InfoLevel, "sample file")
igCols, err := rc.cfg.Mydumper.IgnoreColumns.GetIgnoreColumns(dbName, tableMeta.Name, rc.cfg.Mydumper.CaseSensitive)
if err != nil {
return errors.Trace(err)
@@ -1017,7 +1022,11 @@ outloop:
case nil:
if !initializedColumns {
if len(columnPermutation) == 0 {
- columnPermutation, err = createColumnPermutation(columnNames, igCols.ColumnsMap(), tableInfo)
+ columnPermutation, err = createColumnPermutation(
+ columnNames,
+ igCols.ColumnsMap(),
+ tableInfo,
+ log.FromContext(ctx))
if err != nil {
return errors.Trace(err)
}
@@ -1036,7 +1045,7 @@ outloop:
var dataChecksum, indexChecksum verification.KVChecksum
kvs, encodeErr := kvEncoder.Encode(logTask.Logger, lastRow.Row, lastRow.RowID, columnPermutation, sampleFile.Path, offset)
if encodeErr != nil {
- encodeErr = errMgr.RecordTypeError(ctx, log.L(), tableInfo.Name.O, sampleFile.Path, offset,
+ encodeErr = errMgr.RecordTypeError(ctx, log.FromContext(ctx), tableInfo.Name.O, sampleFile.Path, offset,
"" /* use a empty string here because we don't actually record */, encodeErr)
if encodeErr != nil {
return errors.Annotatef(encodeErr, "in file at offset %d", offset)
@@ -1075,7 +1084,7 @@ outloop:
if rowSize > 0 && kvSize > rowSize {
tableMeta.IndexRatio = float64(kvSize) / float64(rowSize)
}
- log.L().Info("Sample source data", zap.String("table", tableMeta.Name), zap.Float64("IndexRatio", tableMeta.IndexRatio), zap.Bool("IsSourceOrder", tableMeta.IsRowOrdered))
+ log.FromContext(ctx).Info("Sample source data", zap.String("table", tableMeta.Name), zap.Float64("IndexRatio", tableMeta.IndexRatio), zap.Bool("IsSourceOrder", tableMeta.IsRowOrdered))
return nil
}
@@ -1159,7 +1168,7 @@ func tableContainsData(ctx context.Context, db utils.DBExecutor, tableName strin
query := "select 1 from " + tableName + " limit 1"
exec := common.SQLWithRetry{
DB: db,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
}
var dump int
err := exec.QueryRow(ctx, "check table empty", query, &dump)
diff --git a/br/pkg/lightning/restore/check_info_test.go b/br/pkg/lightning/restore/check_info_test.go
index 42772b64ea6ab..abdfcf232f0a9 100644
--- a/br/pkg/lightning/restore/check_info_test.go
+++ b/br/pkg/lightning/restore/check_info_test.go
@@ -583,9 +583,11 @@ func TestLocalResource(t *testing.T) {
ioWorkers: worker.NewPool(context.Background(), 1, "io"),
}
+ ctx := context.Background()
+
// 1. source-size is smaller than disk-size, won't trigger error information
rc.checkTemplate = NewSimpleTemplate()
- err = rc.localResource(1000)
+ err = rc.localResource(ctx, 1000)
require.NoError(t, err)
tmpl := rc.checkTemplate.(*SimpleTemplate)
require.Equal(t, 1, tmpl.warnFailedCount)
@@ -594,7 +596,7 @@ func TestLocalResource(t *testing.T) {
// 2. source-size is bigger than disk-size, with default disk-quota will trigger a critical error
rc.checkTemplate = NewSimpleTemplate()
- err = rc.localResource(4096)
+ err = rc.localResource(ctx, 4096)
require.NoError(t, err)
tmpl = rc.checkTemplate.(*SimpleTemplate)
require.Equal(t, 1, tmpl.warnFailedCount)
@@ -604,7 +606,7 @@ func TestLocalResource(t *testing.T) {
// 3. source-size is bigger than disk-size, with a valid disk-quota will trigger a warning
rc.checkTemplate = NewSimpleTemplate()
rc.cfg.TikvImporter.DiskQuota = config.ByteSize(1024)
- err = rc.localResource(4096)
+ err = rc.localResource(ctx, 4096)
require.NoError(t, err)
tmpl = rc.checkTemplate.(*SimpleTemplate)
require.Equal(t, 1, tmpl.warnFailedCount)
diff --git a/br/pkg/lightning/restore/checksum.go b/br/pkg/lightning/restore/checksum.go
index 4a537c10f626a..20fc77462c196 100644
--- a/br/pkg/lightning/restore/checksum.go
+++ b/br/pkg/lightning/restore/checksum.go
@@ -138,7 +138,7 @@ func (e *tidbChecksumExecutor) Checksum(ctx context.Context, tableInfo *checkpoi
tableName := common.UniqueTable(tableInfo.DB, tableInfo.Name)
- task := log.With(zap.String("table", tableName)).Begin(zap.InfoLevel, "remote checksum")
+ task := log.FromContext(ctx).With(zap.String("table", tableName)).Begin(zap.InfoLevel, "remote checksum")
// ADMIN CHECKSUM TABLE <table>,<table> example.
// mysql> admin checksum table test.t;
@@ -171,7 +171,7 @@ func DoChecksum(ctx context.Context, table *checkpoints.TidbTableInfo) (*RemoteC
return nil, errors.New("No gcLifeTimeManager found in context, check context initialization")
}
- task := log.With(zap.String("table", table.Name)).Begin(zap.InfoLevel, "remote checksum")
+ task := log.FromContext(ctx).With(zap.String("table", table.Name)).Begin(zap.InfoLevel, "remote checksum")
cs, err := manager.Checksum(ctx, table)
dur := task.End(zap.ErrorLevel, err)
@@ -232,7 +232,7 @@ func (m *gcLifeTimeManager) removeOneJob(ctx context.Context, db *sql.DB) {
"UPDATE mysql.tidb SET VARIABLE_VALUE = '%s' WHERE VARIABLE_NAME = 'tikv_gc_life_time'",
m.oriGCLifeTime,
)
- log.L().Warn("revert GC lifetime failed, please reset the GC lifetime manually after Lightning completed",
+ log.FromContext(ctx).Warn("revert GC lifetime failed, please reset the GC lifetime manually after Lightning completed",
zap.String("query", query),
log.ShortError(err),
)
@@ -309,7 +309,7 @@ func (e *tikvChecksumManager) checksumDB(ctx context.Context, tableInfo *checkpo
}, nil
}
- log.L().Warn("remote checksum failed", zap.String("db", tableInfo.DB),
+ log.FromContext(ctx).Warn("remote checksum failed", zap.String("db", tableInfo.DB),
zap.String("table", tableInfo.Name), zap.Error(err),
zap.Int("concurrency", distSQLScanConcurrency), zap.Int("retry", i))
@@ -442,7 +442,7 @@ func (m *gcTTLManager) updateGCTTL(ctx context.Context) error {
}
func (m *gcTTLManager) doUpdateGCTTL(ctx context.Context, ts uint64) error {
- log.L().Debug("update PD safePoint limit with TTL",
+ log.FromContext(ctx).Debug("update PD safePoint limit with TTL",
zap.Uint64("currnet_ts", ts))
var err error
if ts > 0 {
@@ -460,7 +460,7 @@ func (m *gcTTLManager) start(ctx context.Context) {
updateGCTTL := func() {
if err := m.updateGCTTL(ctx); err != nil {
- log.L().Warn("failed to update service safe point, checksum may fail if gc triggered", zap.Error(err))
+ log.FromContext(ctx).Warn("failed to update service safe point, checksum may fail if gc triggered", zap.Error(err))
}
}
@@ -471,7 +471,7 @@ func (m *gcTTLManager) start(ctx context.Context) {
for {
select {
case <-ctx.Done():
- log.L().Info("service safe point keeper exited")
+ log.FromContext(ctx).Info("service safe point keeper exited")
return
case <-updateTick.C:
updateGCTTL()
diff --git a/br/pkg/lightning/restore/chunk_restore_test.go b/br/pkg/lightning/restore/chunk_restore_test.go
index 84b0560544896..59d083d85561c 100644
--- a/br/pkg/lightning/restore/chunk_restore_test.go
+++ b/br/pkg/lightning/restore/chunk_restore_test.go
@@ -253,7 +253,7 @@ func (s *chunkRestoreSuite) TestEncodeLoop() {
kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
SQLMode: s.cfg.TiDB.SQLMode,
Timestamp: 1234567895,
- }, nil)
+ }, nil, log.L())
require.NoError(s.T(), err)
cfg := config.NewConfig()
rc := &Controller{pauser: DeliverPauser, cfg: cfg}
@@ -280,7 +280,7 @@ func (s *chunkRestoreSuite) TestEncodeLoopCanceled() {
kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
SQLMode: s.cfg.TiDB.SQLMode,
Timestamp: 1234567896,
- }, nil)
+ }, nil, log.L())
require.NoError(s.T(), err)
go cancel()
@@ -298,7 +298,7 @@ func (s *chunkRestoreSuite) TestEncodeLoopForcedError() {
kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
SQLMode: s.cfg.TiDB.SQLMode,
Timestamp: 1234567897,
- }, nil)
+ }, nil, log.L())
require.NoError(s.T(), err)
// close the chunk so reading it will result in the "file already closed" error.
@@ -318,7 +318,7 @@ func (s *chunkRestoreSuite) TestEncodeLoopDeliverLimit() {
kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
SQLMode: s.cfg.TiDB.SQLMode,
Timestamp: 1234567898,
- }, nil)
+ }, nil, log.L())
require.NoError(s.T(), err)
dir := s.T().TempDir()
@@ -375,7 +375,7 @@ func (s *chunkRestoreSuite) TestEncodeLoopDeliverErrored() {
kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
SQLMode: s.cfg.TiDB.SQLMode,
Timestamp: 1234567898,
- }, nil)
+ }, nil, log.L())
require.NoError(s.T(), err)
go func() {
@@ -402,7 +402,8 @@ func (s *chunkRestoreSuite) TestEncodeLoopColumnsMismatch() {
ctx := context.Background()
cfg := config.NewConfig()
- errorMgr := errormanager.New(nil, cfg)
+ logger := log.L()
+ errorMgr := errormanager.New(nil, cfg, logger)
rc := &Controller{pauser: DeliverPauser, cfg: cfg, errorMgr: errorMgr}
reader, err := store.Open(ctx, fileName)
@@ -417,7 +418,8 @@ func (s *chunkRestoreSuite) TestEncodeLoopColumnsMismatch() {
kvsCh := make(chan []deliveredKVs, 2)
deliverCompleteCh := make(chan deliverResult)
- kvEncoder, err := tidb.NewTiDBBackend(nil, config.ReplaceOnDup, errorMgr).NewEncoder(
+ kvEncoder, err := tidb.NewTiDBBackend(ctx, nil, config.ReplaceOnDup, errorMgr).NewEncoder(
+ ctx,
s.tr.encTable,
&kv.SessionOptions{
SQLMode: s.cfg.TiDB.SQLMode,
@@ -511,7 +513,13 @@ func (s *chunkRestoreSuite) testEncodeLoopIgnoreColumnsCSV(
kvsCh := make(chan []deliveredKVs, 2)
deliverCompleteCh := make(chan deliverResult)
- kvEncoder, err := tidb.NewTiDBBackend(nil, config.ReplaceOnDup, errormanager.New(nil, config.NewConfig())).NewEncoder(
+ kvEncoder, err := tidb.NewTiDBBackend(
+ ctx,
+ nil,
+ config.ReplaceOnDup,
+ errormanager.New(nil, config.NewConfig(), log.L()),
+ ).NewEncoder(
+ ctx,
s.tr.encTable,
&kv.SessionOptions{
SQLMode: s.cfg.TiDB.SQLMode,
@@ -558,7 +566,7 @@ func (s *chunkRestoreSuite) TestRestore() {
mockBackend.EXPECT().MakeEmptyRows().Return(kv.MakeRowsFromKvPairs(nil)).Times(1)
mockWriter := mock.NewMockEngineWriter(controller)
mockBackend.EXPECT().LocalWriter(ctx, gomock.Any(), gomock.Any()).Return(mockWriter, nil).AnyTimes()
- mockBackend.EXPECT().NewEncoder(gomock.Any(), gomock.Any()).Return(mockEncoder{}, nil).Times(1)
+ mockBackend.EXPECT().NewEncoder(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockEncoder{}, nil).Times(1)
mockWriter.EXPECT().IsSynced().Return(true).AnyTimes()
mockWriter.EXPECT().AppendRows(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
diff --git a/br/pkg/lightning/restore/meta_manager.go b/br/pkg/lightning/restore/meta_manager.go
index 3476eace1d292..0af04e69feedb 100644
--- a/br/pkg/lightning/restore/meta_manager.go
+++ b/br/pkg/lightning/restore/meta_manager.go
@@ -37,7 +37,7 @@ type dbMetaMgrBuilder struct {
func (b *dbMetaMgrBuilder) Init(ctx context.Context) error {
exec := common.SQLWithRetry{
DB: b.db,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
HideQueryLog: redact.NeedRedact(),
}
metaDBSQL := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", common.EscapeIdentifier(b.schema))
@@ -382,10 +382,10 @@ func (m *dbTableMetaMgr) AllocTableRowIDs(ctx context.Context, rawRowIDMax int64
ck := verify.MakeKVChecksum(baseTotalBytes, baseTotalKvs, baseChecksum)
checksum = &ck
}
- log.L().Info("allocate table row_id base", zap.String("table", m.tr.tableName),
+ log.FromContext(ctx).Info("allocate table row_id base", zap.String("table", m.tr.tableName),
zap.Int64("row_id_base", newRowIDBase))
if checksum != nil {
- log.L().Info("checksum base", zap.Any("checksum", checksum))
+ log.FromContext(ctx).Info("checksum base", zap.Any("checksum", checksum))
}
return checksum, newRowIDBase, nil
}
@@ -519,7 +519,7 @@ func (m *dbTableMetaMgr) CheckAndUpdateLocalChecksum(ctx context.Context, checks
ck := verify.MakeKVChecksum(totalBytes, totalKvs, totalChecksum)
baseTotalChecksum = &ck
}
- log.L().Info("check table checksum", zap.String("table", m.tr.tableName),
+ log.FromContext(ctx).Info("check table checksum", zap.String("table", m.tr.tableName),
zap.Bool("checksum", needChecksum), zap.String("new_status", newStatus.String()))
return
}
@@ -536,7 +536,7 @@ func (m *dbTableMetaMgr) FinishTable(ctx context.Context) error {
func RemoveTableMetaByTableName(ctx context.Context, db *sql.DB, metaTable, tableName string) error {
exec := &common.SQLWithRetry{
DB: db,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
}
query := fmt.Sprintf("DELETE FROM %s", metaTable)
var args []interface{}
@@ -636,7 +636,7 @@ type storedCfgs struct {
func (m *dbTaskMetaMgr) InitTask(ctx context.Context, source int64) error {
exec := &common.SQLWithRetry{
DB: m.session,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
}
// avoid overriding the existing metadata if it has already been inserted.
stmt := fmt.Sprintf(`INSERT INTO %s (task_id, status, source_bytes) values (?, ?, ?) ON DUPLICATE KEY UPDATE state = ?`, m.tableName)
@@ -647,7 +647,7 @@ func (m *dbTaskMetaMgr) InitTask(ctx context.Context, source int64) error {
func (m *dbTaskMetaMgr) CheckTaskExist(ctx context.Context) (bool, error) {
exec := &common.SQLWithRetry{
DB: m.session,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
}
// check whether the meta of this task has already been inserted.
exist := false
@@ -689,7 +689,7 @@ func (m *dbTaskMetaMgr) CheckTasksExclusively(ctx context.Context, action func(t
defer conn.Close()
exec := &common.SQLWithRetry{
DB: m.session,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
}
err = exec.Exec(ctx, "enable pessimistic transaction", "SET SESSION tidb_txn_mode = 'pessimistic';")
if err != nil {
@@ -747,7 +747,7 @@ func (m *dbTaskMetaMgr) CheckAndPausePdSchedulers(ctx context.Context) (pdutil.U
defer conn.Close()
exec := &common.SQLWithRetry{
DB: m.session,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
}
err = exec.Exec(ctx, "enable pessimistic transaction", "SET SESSION tidb_txn_mode = 'pessimistic';")
if err != nil {
@@ -830,7 +830,7 @@ func (m *dbTaskMetaMgr) CheckAndPausePdSchedulers(ctx context.Context) (pdutil.U
// try to rollback the stopped schedulers
cancelFunc := m.pd.MakeUndoFunctionByConfig(pausedCfg.RestoreCfg)
if err1 := cancelFunc(ctx); err1 != nil {
- log.L().Warn("undo remove schedulers failed", zap.Error(err1))
+ log.FromContext(ctx).Warn("undo remove schedulers failed", zap.Error(err1))
}
return errors.Trace(err)
}
@@ -878,7 +878,7 @@ func (m *dbTaskMetaMgr) CheckAndFinishRestore(ctx context.Context, finished bool
defer conn.Close()
exec := &common.SQLWithRetry{
DB: m.session,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
}
err = exec.Exec(ctx, "enable pessimistic transaction", "SET SESSION tidb_txn_mode = 'pessimistic';")
if err != nil {
@@ -923,7 +923,7 @@ func (m *dbTaskMetaMgr) CheckAndFinishRestore(ctx context.Context, finished bool
allFinished = false
// check if other tasks are still running
if state == taskStateNormal {
- log.L().Info("unfinished task found", zap.Int64("task_id", taskID),
+ log.FromContext(ctx).Info("unfinished task found", zap.Int64("task_id", taskID),
zap.Stringer("status", status))
switchBack = false
}
@@ -957,7 +957,7 @@ func (m *dbTaskMetaMgr) CheckAndFinishRestore(ctx context.Context, finished bool
return nil
})
- log.L().Info("check all task finish status", zap.Bool("task_finished", finished),
+ log.FromContext(ctx).Info("check all task finish status", zap.Bool("task_finished", finished),
zap.Bool("all_finished", allFinished), zap.Bool("switch_back", switchBack))
return switchBack, allFinished, err
@@ -966,7 +966,7 @@ func (m *dbTaskMetaMgr) CheckAndFinishRestore(ctx context.Context, finished bool
func (m *dbTaskMetaMgr) Cleanup(ctx context.Context) error {
exec := &common.SQLWithRetry{
DB: m.session,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
}
// drop the whole task meta table to clean up all task metadata.
stmt := fmt.Sprintf("DROP TABLE %s;", m.tableName)
@@ -979,7 +979,7 @@ func (m *dbTaskMetaMgr) Cleanup(ctx context.Context) error {
func (m *dbTaskMetaMgr) CleanupTask(ctx context.Context) error {
exec := &common.SQLWithRetry{
DB: m.session,
- Logger: log.L(),
+ Logger: log.FromContext(ctx),
}
stmt := fmt.Sprintf("DELETE FROM %s WHERE task_id = %d;", m.tableName, m.taskID)
err := exec.Exec(ctx, "clean up task", stmt)
@@ -991,14 +991,20 @@ func (m *dbTaskMetaMgr) Close() {
}
func (m *dbTaskMetaMgr) CleanupAllMetas(ctx context.Context) error {
- return MaybeCleanupAllMetas(ctx, m.session, m.schemaName, true)
+ return MaybeCleanupAllMetas(ctx, log.FromContext(ctx), m.session, m.schemaName, true)
}
// MaybeCleanupAllMetas removes the meta schema if there are no unfinished tables.
-func MaybeCleanupAllMetas(ctx context.Context, db *sql.DB, schemaName string, tableMetaExist bool) error {
+func MaybeCleanupAllMetas(
+ ctx context.Context,
+ logger log.Logger,
+ db *sql.DB,
+ schemaName string,
+ tableMetaExist bool,
+) error {
exec := &common.SQLWithRetry{
DB: db,
- Logger: log.L(),
+ Logger: logger,
}
// check if all tables are finished
@@ -1009,7 +1015,7 @@ func MaybeCleanupAllMetas(ctx context.Context, db *sql.DB, schemaName string, ta
return errors.Trace(err)
}
if cnt > 0 {
- log.L().Warn("there are unfinished table in table meta table, cleanup skipped.")
+ logger.Warn("there are unfinished table in table meta table, cleanup skipped.")
return nil
}
}
diff --git a/br/pkg/lightning/restore/restore.go b/br/pkg/lightning/restore/restore.go
index 9778c5d4be352..31a48c620846a 100644
--- a/br/pkg/lightning/restore/restore.go
+++ b/br/pkg/lightning/restore/restore.go
@@ -187,6 +187,7 @@ const (
)
type Controller struct {
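+ // taskCtx is the context of the whole import task; listenCheckpointUpdates passes it to checkpoint updates, which otherwise have no request-scoped context.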
+ taskCtx context.Context
cfg *config.Config
dbMetas []*mydump.MDDatabaseMeta
dbInfos map[string]*checkpoints.TidbDBInfo
@@ -301,7 +302,7 @@ func NewRestoreControllerWithPauser(
if err != nil {
return nil, errors.Trace(err)
}
- errorMgr := errormanager.New(db, cfg)
+ errorMgr := errormanager.New(db, cfg, log.FromContext(ctx))
if err := errorMgr.Init(ctx); err != nil {
return nil, common.ErrInitErrManager.Wrap(err).GenWithStackByArgs()
}
@@ -309,7 +310,7 @@ func NewRestoreControllerWithPauser(
var backend backend.Backend
switch cfg.TikvImporter.Backend {
case config.BackendTiDB:
- backend = tidb.NewTiDBBackend(db, cfg.TikvImporter.OnDuplicate, errorMgr)
+ backend = tidb.NewTiDBBackend(ctx, db, cfg.TikvImporter.OnDuplicate, errorMgr)
case config.BackendLocal:
var rLimit local.Rlim_t
rLimit, err = local.GetSystemRLimit()
@@ -325,7 +326,7 @@ func NewRestoreControllerWithPauser(
if cfg.TikvImporter.DuplicateResolution != config.DupeResAlgNone {
if err := tikv.CheckTiKVVersion(ctx, tls, cfg.TiDB.PdAddr, minTiKVVersionForDuplicateResolution, maxTiKVVersionForDuplicateResolution); err != nil {
if berrors.Is(err, berrors.ErrVersionMismatch) {
- log.L().Warn("TiKV version doesn't support duplicate resolution. The resolution algorithm will fall back to 'none'", zap.Error(err))
+ log.FromContext(ctx).Warn("TiKV version doesn't support duplicate resolution. The resolution algorithm will fall back to 'none'", zap.Error(err))
cfg.TikvImporter.DuplicateResolution = config.DupeResAlgNone
} else {
return nil, common.ErrCheckKVVersion.Wrap(err).GenWithStackByArgs()
@@ -364,6 +365,7 @@ func NewRestoreControllerWithPauser(
}
rc := &Controller{
+ taskCtx: ctx,
cfg: cfg,
dbMetas: p.DBMetas,
tableWorkers: nil,
@@ -378,7 +380,7 @@ func NewRestoreControllerWithPauser(
tls: tls,
checkTemplate: NewSimpleTemplate(),
- errorSummaries: makeErrorSummaries(log.L()),
+ errorSummaries: makeErrorSummaries(log.FromContext(ctx)),
checkpointsDB: cpdb,
saveCpCh: make(chan saveCp),
closedEngineLimit: worker.NewPool(ctx, cfg.App.TableConcurrency*2, "closed-engine"),
@@ -410,7 +412,7 @@ func (rc *Controller) Run(ctx context.Context) error {
rc.cleanCheckpoints,
}
- task := log.L().Begin(zap.InfoLevel, "the whole procedure")
+ task := log.FromContext(ctx).Begin(zap.InfoLevel, "the whole procedure")
var err error
finished := false
@@ -474,13 +476,14 @@ type schemaJob struct {
}
type restoreSchemaWorker struct {
- ctx context.Context
- quit context.CancelFunc
- jobCh chan *schemaJob
- errCh chan error
- wg sync.WaitGroup
- glue glue.Glue
- store storage.ExternalStorage
+ ctx context.Context
+ quit context.CancelFunc
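+ // logger is injected from the task context (see restoreSchema) instead of relying on the global logger.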
+ logger log.Logger
+ jobCh chan *schemaJob
+ errCh chan error
+ wg sync.WaitGroup
+ glue glue.Glue
+ store storage.ExternalStorage
}
func (worker *restoreSchemaWorker) addJob(sqlStr string, job *schemaJob) error {
@@ -617,9 +620,9 @@ loop:
break loop
}
}
- logger := log.With(zap.String("db", job.dbName), zap.String("table", job.tblName))
+ logger := worker.logger.With(zap.String("db", job.dbName), zap.String("table", job.tblName))
sqlWithRetry := common.SQLWithRetry{
- Logger: log.L(),
+ Logger: worker.logger,
DB: session,
}
for _, stmt := range job.stmts {
@@ -694,16 +697,17 @@ func (rc *Controller) restoreSchema(ctx context.Context) error {
// create table with schema file
// we can handle duplicated creation with the createIfNotExist statement
// and we will later check in DataCheck that the schema in TiDB is valid for the data files.
- logTask := log.L().Begin(zap.InfoLevel, "restore all schema")
+ logTask := log.FromContext(ctx).Begin(zap.InfoLevel, "restore all schema")
concurrency := mathutil.Min(rc.cfg.App.RegionConcurrency, 8)
childCtx, cancel := context.WithCancel(ctx)
worker := restoreSchemaWorker{
- ctx: childCtx,
- quit: cancel,
- jobCh: make(chan *schemaJob, concurrency),
- errCh: make(chan error),
- glue: rc.tidbGlue,
- store: rc.store,
+ ctx: childCtx,
+ quit: cancel,
+ logger: log.FromContext(ctx),
+ jobCh: make(chan *schemaJob, concurrency),
+ errCh: make(chan error),
+ glue: rc.tidbGlue,
+ store: rc.store,
}
for i := 0; i < concurrency; i++ {
go worker.doJob()
@@ -740,12 +744,12 @@ func (rc *Controller) initCheckpoint(ctx context.Context) error {
return common.ErrInitCheckpoint.Wrap(err).GenWithStackByArgs()
}
failpoint.Inject("InitializeCheckpointExit", func() {
- log.L().Warn("exit triggered", zap.String("failpoint", "InitializeCheckpointExit"))
+ log.FromContext(ctx).Warn("exit triggered", zap.String("failpoint", "InitializeCheckpointExit"))
os.Exit(0)
})
rc.checkpointsWg.Add(1) // checkpointsWg will be done in `rc.listenCheckpointUpdates`
- go rc.listenCheckpointUpdates()
+ go rc.listenCheckpointUpdates(log.FromContext(ctx))
// Estimate the number of chunks for progress reporting
return rc.estimateChunkCountIntoMetrics(ctx)
@@ -815,7 +819,7 @@ func verifyLocalFile(ctx context.Context, cpdb checkpoints.DB, dir string) error
file := local.Engine{UUID: eID}
err := file.Exist(dir)
if err != nil {
- log.L().Error("can't find local file",
+ log.FromContext(ctx).Error("can't find local file",
zap.String("table name", tableName),
zap.Int32("engine ID", engineID))
if os.IsNotExist(err) {
@@ -906,7 +910,7 @@ func firstErr(errors ...error) error {
func (rc *Controller) saveStatusCheckpoint(ctx context.Context, tableName string, engineID int32, err error, statusIfSucceed checkpoints.CheckpointStatus) error {
merger := &checkpoints.StatusCheckpointMerger{Status: statusIfSucceed, EngineID: engineID}
- logger := log.L().With(zap.String("table", tableName), zap.Int32("engine_id", engineID),
+ logger := log.FromContext(ctx).With(zap.String("table", tableName), zap.Int32("engine_id", engineID),
zap.String("new_status", statusIfSucceed.MetricName()), zap.Error(err))
logger.Debug("update checkpoint")
@@ -946,7 +950,7 @@ func (rc *Controller) saveStatusCheckpoint(ctx context.Context, tableName string
}
// listenCheckpointUpdates will combine several checkpoints together to reduce database load.
-func (rc *Controller) listenCheckpointUpdates() {
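+ // The logger is passed in explicitly because this goroutine has no context parameter of its own.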
+func (rc *Controller) listenCheckpointUpdates(logger log.Logger) {
var lock sync.Mutex
coalesed := make(map[string]*checkpoints.TableCheckpointDiff)
var waiters []chan<- error
@@ -967,7 +971,7 @@ func (rc *Controller) listenCheckpointUpdates() {
failpoint.Inject("SlowDownCheckpointUpdate", func() {})
if len(cpd) > 0 {
- err := rc.checkpointsDB.Update(cpd)
+ err := rc.checkpointsDB.Update(rc.taskCtx, cpd)
for _, w := range ws {
w <- common.NormalizeOrWrapErr(common.ErrUpdateCheckpoint, err)
}
@@ -1031,7 +1035,7 @@ func (rc *Controller) listenCheckpointUpdates() {
rc.checkpointsWg.Done()
rc.checkpointsWg.Wait()
if err := common.KillMySelf(); err != nil {
- log.L().Warn("KillMySelf() failed to kill itself", log.ShortError(err))
+ logger.Warn("KillMySelf() failed to kill itself", log.ShortError(err))
}
for scp := range rc.saveCpCh {
if scp.waitCh != nil {
@@ -1100,10 +1104,10 @@ func (rc *Controller) buildRunPeriodicActionAndCancelFunc(ctx context.Context, s
for {
select {
case <-ctx.Done():
- log.L().Warn("stopping periodic actions", log.ShortError(ctx.Err()))
+ log.FromContext(ctx).Warn("stopping periodic actions", log.ShortError(ctx.Err()))
return
case <-stop:
- log.L().Info("everything imported, stopping periodic actions")
+ log.FromContext(ctx).Info("everything imported, stopping periodic actions")
return
case <-switchModeChan:
@@ -1113,7 +1117,7 @@ func (rc *Controller) buildRunPeriodicActionAndCancelFunc(ctx context.Context, s
case <-logProgressChan:
metrics, ok := metric.FromContext(ctx)
if !ok {
- log.L().Warn("couldn't find metrics from context, skip log progress")
+ log.FromContext(ctx).Warn("couldn't find metrics from context, skip log progress")
continue
}
// log the current progress periodically, so OPS will know that we're still working
@@ -1216,7 +1220,7 @@ func (rc *Controller) buildRunPeriodicActionAndCancelFunc(ctx context.Context, s
}
// Note: a speed of 28 MiB/s roughly corresponds to 100 GiB/hour.
- log.L().Info("progress",
+ log.FromContext(ctx).Info("progress",
zap.String("total", fmt.Sprintf("%.1f%%", totalPercent*100)),
// zap.String("files", fmt.Sprintf("%.0f/%.0f (%.1f%%)", finished, estimated, finished/estimated*100)),
zap.String("tables", fmt.Sprintf("%.0f/%.0f%s", completedTables, totalTables, formatPercent(completedTables, totalTables))),
@@ -1241,7 +1245,7 @@ func (rc *Controller) buildRunPeriodicActionAndCancelFunc(ctx context.Context, s
}
}
}, func(do bool) {
- log.L().Info("cancel periodic actions", zap.Bool("do", do))
+ log.FromContext(ctx).Info("cancel periodic actions", zap.Bool("do", do))
for _, f := range cancelFuncs {
f(do)
}
@@ -1289,7 +1293,7 @@ func (rc *Controller) keepPauseGCForDupeRes(ctx context.Context) (<-chan struct{
paused = true
break
}
- log.L().Warn(
+ log.FromContext(ctx).Warn(
"Failed to register GC safe point because the current minimum safe point is newer"+
" than what we assume, will retry newMinSafePoint next time",
zap.Uint64("minSafePoint", minSafePoint),
@@ -1312,11 +1316,11 @@ func (rc *Controller) keepPauseGCForDupeRes(ctx context.Context) (<-chan struct{
case <-ticker.C:
minSafePoint, err := pdCli.UpdateServiceGCSafePoint(ctx, serviceID, ttl, safePoint)
if err != nil {
- log.L().Warn("Failed to register GC safe point", zap.Error(err))
+ log.FromContext(ctx).Warn("Failed to register GC safe point", zap.Error(err))
continue
}
if minSafePoint > safePoint {
- log.L().Warn("The current minimum safe point is newer than what we hold, duplicate records are at"+
+ log.FromContext(ctx).Warn("The current minimum safe point is newer than what we hold, duplicate records are at"+
"risk of being GC and not detectable",
zap.Uint64("safePoint", safePoint),
zap.Uint64("minSafePoint", minSafePoint),
@@ -1326,7 +1330,7 @@ func (rc *Controller) keepPauseGCForDupeRes(ctx context.Context) (<-chan struct{
case <-ctx.Done():
stopCtx, cancelFunc := context.WithTimeout(context.Background(), time.Second*5)
if _, err := pdCli.UpdateServiceGCSafePoint(stopCtx, serviceID, 0, safePoint); err != nil {
- log.L().Warn("Failed to reset safe point ttl to zero", zap.Error(err))
+ log.FromContext(ctx).Warn("Failed to reset safe point ttl to zero", zap.Error(err))
}
// just make compiler happy
cancelFunc()
@@ -1354,7 +1358,7 @@ func (rc *Controller) restoreTables(ctx context.Context) (finalErr error) {
}()
}
- logTask := log.L().Begin(zap.InfoLevel, "restore all tables data")
+ logTask := log.FromContext(ctx).Begin(zap.InfoLevel, "restore all tables data")
if rc.tableWorkers == nil {
rc.tableWorkers = worker.NewPool(ctx, rc.cfg.App.TableConcurrency, "table")
}
@@ -1498,7 +1502,7 @@ func (rc *Controller) restoreTables(ctx context.Context) (finalErr error) {
if err != nil {
return errors.Trace(err)
}
- tr, err := NewTableRestore(tableName, tableMeta, dbInfo, tableInfo, cp, igCols.ColumnsMap())
+ tr, err := NewTableRestore(tableName, tableMeta, dbInfo, tableInfo, cp, igCols.ColumnsMap(), log.FromContext(ctx))
if err != nil {
return errors.Trace(err)
}
@@ -1684,12 +1688,12 @@ func (rc *Controller) outpuErrorSummary() {
// do full compaction for the whole data.
func (rc *Controller) fullCompact(ctx context.Context) error {
if !rc.cfg.PostRestore.Compact {
- log.L().Info("skip full compaction")
+ log.FromContext(ctx).Info("skip full compaction")
return nil
}
// wait until any existing level-1 compact to complete first.
- task := log.L().Begin(zap.InfoLevel, "wait for completion of existing level 1 compaction")
+ task := log.FromContext(ctx).Begin(zap.InfoLevel, "wait for completion of existing level 1 compaction")
for !rc.compactState.CAS(compactStateIdle, compactStateDoing) {
time.Sleep(100 * time.Millisecond)
}
@@ -1724,7 +1728,7 @@ func (rc *Controller) switchTiKVMode(ctx context.Context, mode sstpb.SwitchMode)
return
}
- log.L().Info("switch import mode", zap.Stringer("mode", mode))
+ log.FromContext(ctx).Info("switch import mode", zap.Stringer("mode", mode))
// It is fine if we miss some stores which did not switch to Import mode,
// since we're running it periodically, we exclude disconnected stores.
@@ -1789,7 +1793,7 @@ func (rc *Controller) enforceDiskQuota(ctx context.Context) {
m.LocalStorageUsageBytesGauge.WithLabelValues("mem").Set(float64(totalMemSize))
}
- logger := log.With(
+ logger := log.FromContext(ctx).With(
zap.Int64("diskSize", totalDiskSize),
zap.Int64("memSize", totalMemSize),
zap.Int64("quota", quota),
@@ -1850,7 +1854,7 @@ func (rc *Controller) setGlobalVariables(ctx context.Context) error {
// we should enable/disable new collation here since in server mode, tidb config
// may be different in different tasks
collate.SetNewCollationEnabledForTest(enabled)
- log.L().Info("new_collation_enabled", zap.Bool("enabled", enabled))
+ log.FromContext(ctx).Info("new_collation_enabled", zap.Bool("enabled", enabled))
return nil
}
@@ -1868,7 +1872,7 @@ func (rc *Controller) cleanCheckpoints(ctx context.Context) error {
return nil
}
- logger := log.With(
+ logger := log.FromContext(ctx).With(
zap.Stringer("keepAfterSuccess", rc.cfg.Checkpoint.KeepAfterSuccess),
zap.Int64("taskID", rc.cfg.TaskID),
)
@@ -1958,13 +1962,13 @@ func (rc *Controller) preCheckRequirements(ctx context.Context) error {
needCheck = taskCheckpoints == nil
}
if needCheck {
- err = rc.localResource(source)
+ err = rc.localResource(ctx, source)
if err != nil {
return common.ErrCheckLocalResource.Wrap(err).GenWithStackByArgs()
}
if err := rc.clusterResource(ctx, source); err != nil {
if err1 := rc.taskMgr.CleanupTask(ctx); err1 != nil {
- log.L().Warn("cleanup task failed", zap.Error(err1))
+ log.FromContext(ctx).Warn("cleanup task failed", zap.Error(err1))
return common.ErrMetaMgrUnknown.Wrap(err).GenWithStackByArgs()
}
}
@@ -1982,7 +1986,7 @@ func (rc *Controller) preCheckRequirements(ctx context.Context) error {
if !taskExist && rc.taskMgr != nil {
err := rc.taskMgr.CleanupTask(ctx)
if err != nil {
- log.L().Warn("cleanup task failed", zap.Error(err))
+ log.FromContext(ctx).Warn("cleanup task failed", zap.Error(err))
}
}
return common.ErrPreCheckFailed.GenWithStackByArgs(rc.checkTemplate.FailedMsg())
@@ -2509,7 +2513,7 @@ func (cr *chunkRestore) encodeLoop(
hasIgnoredEncodeErr := false
if encodeErr != nil {
- rowText := tidb.EncodeRowForRecord(t.encTable, rc.cfg.TiDB.SQLMode, lastRow.Row, cr.chunk.ColumnPermutation)
+ rowText := tidb.EncodeRowForRecord(ctx, t.encTable, rc.cfg.TiDB.SQLMode, lastRow.Row, cr.chunk.ColumnPermutation)
encodeErr = rc.errorMgr.RecordTypeError(ctx, logger, t.tableName, cr.chunk.Key.Path, newOffset, rowText, encodeErr)
if encodeErr != nil {
err = common.ErrEncodeKV.Wrap(encodeErr).GenWithStackByArgs(&cr.chunk.Key, newOffset)
@@ -2570,7 +2574,7 @@ func (cr *chunkRestore) restore(
rc *Controller,
) error {
// Create the encoder.
- kvEncoder, err := rc.backend.NewEncoder(t.encTable, &kv.SessionOptions{
+ kvEncoder, err := rc.backend.NewEncoder(ctx, t.encTable, &kv.SessionOptions{
SQLMode: rc.cfg.TiDB.SQLMode,
Timestamp: cr.chunk.Timestamp,
SysVars: rc.sysVars,
diff --git a/br/pkg/lightning/restore/restore_test.go b/br/pkg/lightning/restore/restore_test.go
index 852a3a4831e73..73996b08d3987 100644
--- a/br/pkg/lightning/restore/restore_test.go
+++ b/br/pkg/lightning/restore/restore_test.go
@@ -69,7 +69,7 @@ func TestNewTableRestore(t *testing.T) {
for _, tc := range testCases {
tableInfo := dbInfo.Tables[tc.name]
tableName := common.UniqueTable("mockdb", tableInfo.Name)
- tr, err := NewTableRestore(tableName, nil, dbInfo, tableInfo, &checkpoints.TableCheckpoint{}, nil)
+ tr, err := NewTableRestore(tableName, nil, dbInfo, tableInfo, &checkpoints.TableCheckpoint{}, nil, log.L())
require.NotNil(t, tr)
require.NoError(t, err)
}
@@ -85,7 +85,7 @@ func TestNewTableRestoreFailure(t *testing.T) {
}}
tableName := common.UniqueTable("mockdb", "failure")
- _, err := NewTableRestore(tableName, nil, dbInfo, tableInfo, &checkpoints.TableCheckpoint{}, nil)
+ _, err := NewTableRestore(tableName, nil, dbInfo, tableInfo, &checkpoints.TableCheckpoint{}, nil, log.L())
require.Regexp(t, `failed to tables\.TableFromMeta.*`, err.Error())
}
@@ -218,7 +218,7 @@ func TestPreCheckFailed(t *testing.T) {
metaMgrBuilder: failMetaMgrBuilder{},
checkTemplate: NewSimpleTemplate(),
tidbGlue: g,
- errorMgr: errormanager.New(nil, cfg),
+ errorMgr: errormanager.New(nil, cfg, log.L()),
}
mock.ExpectBegin()
diff --git a/br/pkg/lightning/restore/table_restore.go b/br/pkg/lightning/restore/table_restore.go
index ef3245bbcd7fb..491a59fa1c33b 100644
--- a/br/pkg/lightning/restore/table_restore.go
+++ b/br/pkg/lightning/restore/table_restore.go
@@ -67,6 +67,7 @@ func NewTableRestore(
tableInfo *checkpoints.TidbTableInfo,
cp *checkpoints.TableCheckpoint,
ignoreColumns map[string]struct{},
+ logger log.Logger,
) (*TableRestore, error) {
idAlloc := kv.NewPanickingAllocators(cp.AllocBase)
tbl, err := tables.TableFromMeta(idAlloc, tableInfo.Core)
@@ -81,7 +82,7 @@ func NewTableRestore(
tableMeta: tableMeta,
encTable: tbl,
alloc: idAlloc,
- logger: log.With(zap.String("table", tableName)),
+ logger: logger.With(zap.String("table", tableName)),
ignoreColumns: ignoreColumns,
}, nil
}
@@ -118,7 +119,11 @@ func (tr *TableRestore) populateChunks(ctx context.Context, rc *Controller, cp *
Timestamp: timestamp,
}
if len(chunk.Chunk.Columns) > 0 {
- perms, err := parseColumnPermutations(tr.tableInfo.Core, chunk.Chunk.Columns, tr.ignoreColumns)
+ perms, err := parseColumnPermutations(
+ tr.tableInfo.Core,
+ chunk.Chunk.Columns,
+ tr.ignoreColumns,
+ log.FromContext(ctx))
if err != nil {
return errors.Trace(err)
}
@@ -166,7 +171,7 @@ func (tr *TableRestore) RebaseChunkRowIDs(cp *checkpoints.TableCheckpoint, rowID
//
// The argument `columns` _must_ be in lower case.
func (tr *TableRestore) initializeColumns(columns []string, ccp *checkpoints.ChunkCheckpoint) error {
- colPerm, err := createColumnPermutation(columns, tr.ignoreColumns, tr.tableInfo.Core)
+ colPerm, err := createColumnPermutation(columns, tr.ignoreColumns, tr.tableInfo.Core, tr.logger)
if err != nil {
return err
}
@@ -174,7 +179,12 @@ func (tr *TableRestore) initializeColumns(columns []string, ccp *checkpoints.Chu
return nil
}
-func createColumnPermutation(columns []string, ignoreColumns map[string]struct{}, tableInfo *model.TableInfo) ([]int, error) {
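+ // createColumnPermutation returns the column permutation for a chunk: with no column names it falls back to the table's column order, otherwise it delegates to parseColumnPermutations with the supplied logger.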
+func createColumnPermutation(
+ columns []string,
+ ignoreColumns map[string]struct{},
+ tableInfo *model.TableInfo,
+ logger log.Logger,
+) ([]int, error) {
var colPerm []int
if len(columns) == 0 {
colPerm = make([]int, 0, len(tableInfo.Columns)+1)
@@ -195,7 +205,7 @@ func createColumnPermutation(columns []string, ignoreColumns map[string]struct{}
}
} else {
var err error
- colPerm, err = parseColumnPermutations(tableInfo, columns, ignoreColumns)
+ colPerm, err = parseColumnPermutations(tableInfo, columns, ignoreColumns, logger)
if err != nil {
return nil, errors.Trace(err)
}
@@ -620,11 +630,11 @@ func (tr *TableRestore) restoreEngine(
if rc.isLocalBackend() && common.IsContextCanceledError(err) {
// ctx is canceled, so to avoid Close engine failed, we use `context.Background()` here
if _, err2 := dataEngine.Close(context.Background(), dataEngineCfg); err2 != nil {
- log.L().Warn("flush all chunk checkpoints failed before manually exits", zap.Error(err2))
+ log.FromContext(ctx).Warn("flush all chunk checkpoints failed before manually exits", zap.Error(err2))
return nil, errors.Trace(err)
}
if err2 := trySavePendingChunks(context.Background()); err2 != nil {
- log.L().Warn("flush all chunk checkpoints failed before manually exits", zap.Error(err2))
+ log.FromContext(ctx).Warn("flush all chunk checkpoints failed before manually exits", zap.Error(err2))
}
}
return nil, errors.Trace(err)
@@ -864,7 +874,12 @@ func (tr *TableRestore) postProcess(
return true, nil
}
-func parseColumnPermutations(tableInfo *model.TableInfo, columns []string, ignoreColumns map[string]struct{}) ([]int, error) {
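+ // parseColumnPermutations maps each column in the data file to its index in the table schema, logging columns ignored by the user or missing from the data file.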
+func parseColumnPermutations(
+ tableInfo *model.TableInfo,
+ columns []string,
+ ignoreColumns map[string]struct{},
+ logger log.Logger,
+) ([]int, error) {
colPerm := make([]int, 0, len(tableInfo.Columns)+1)
columnMap := make(map[string]int)
@@ -896,7 +911,7 @@ func parseColumnPermutations(tableInfo *model.TableInfo, columns []string, ignor
if _, ignore := ignoreColumns[colInfo.Name.L]; !ignore {
colPerm = append(colPerm, i)
} else {
- log.L().Debug("column ignored by user requirements",
+ logger.Debug("column ignored by user requirements",
zap.Stringer("table", tableInfo.Name),
zap.String("colName", colInfo.Name.O),
zap.Stringer("colType", &colInfo.FieldType),
@@ -905,7 +920,7 @@ func parseColumnPermutations(tableInfo *model.TableInfo, columns []string, ignor
}
} else {
if len(colInfo.GeneratedExprString) == 0 {
- log.L().Warn("column missing from data file, going to fill with default value",
+ logger.Warn("column missing from data file, going to fill with default value",
zap.Stringer("table", tableInfo.Name),
zap.String("colName", colInfo.Name.O),
zap.Stringer("colType", &colInfo.FieldType),
diff --git a/br/pkg/lightning/restore/table_restore_test.go b/br/pkg/lightning/restore/table_restore_test.go
index 1e32846babf77..87aa389c7167b 100644
--- a/br/pkg/lightning/restore/table_restore_test.go
+++ b/br/pkg/lightning/restore/table_restore_test.go
@@ -163,7 +163,7 @@ func (s *tableRestoreSuiteBase) setupSuite(t *testing.T) {
func (s *tableRestoreSuiteBase) setupTest(t *testing.T) {
// Collect into the test TableRestore structure
var err error
- s.tr, err = NewTableRestore("`db`.`table`", s.tableMeta, s.dbInfo, s.tableInfo, &checkpoints.TableCheckpoint{}, nil)
+ s.tr, err = NewTableRestore("`db`.`table`", s.tableMeta, s.dbInfo, s.tableInfo, &checkpoints.TableCheckpoint{}, nil, log.L())
require.NoError(t, err)
s.cfg = config.NewConfig()
@@ -358,12 +358,12 @@ func (s *tableRestoreSuite) TestRestoreEngineFailed() {
require.NoError(s.T(), err)
_, indexUUID := backend.MakeUUID("`db`.`table`", -1)
_, dataUUID := backend.MakeUUID("`db`.`table`", 0)
- realBackend := tidb.NewTiDBBackend(nil, "replace", nil)
+ realBackend := tidb.NewTiDBBackend(ctx, nil, "replace", nil)
mockBackend.EXPECT().OpenEngine(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
mockBackend.EXPECT().OpenEngine(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
mockBackend.EXPECT().CloseEngine(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
- mockBackend.EXPECT().NewEncoder(gomock.Any(), gomock.Any()).
- Return(realBackend.NewEncoder(tbl, &kv.SessionOptions{})).
+ mockBackend.EXPECT().NewEncoder(gomock.Any(), gomock.Any(), gomock.Any()).
+ Return(realBackend.NewEncoder(ctx, tbl, &kv.SessionOptions{})).
AnyTimes()
mockBackend.EXPECT().MakeEmptyRows().Return(realBackend.MakeEmptyRows()).AnyTimes()
mockBackend.EXPECT().LocalWriter(gomock.Any(), gomock.Any(), dataUUID).Return(noop.Writer{}, nil)
@@ -455,7 +455,7 @@ func (s *tableRestoreSuite) TestPopulateChunksCSVHeader() {
cfg.Mydumper.StrictFormat = true
rc := &Controller{cfg: cfg, ioWorkers: worker.NewPool(context.Background(), 1, "io"), store: store}
- tr, err := NewTableRestore("`db`.`table`", tableMeta, s.dbInfo, s.tableInfo, &checkpoints.TableCheckpoint{}, nil)
+ tr, err := NewTableRestore("`db`.`table`", tableMeta, s.dbInfo, s.tableInfo, &checkpoints.TableCheckpoint{}, nil, log.L())
require.NoError(s.T(), err)
require.NoError(s.T(), tr.populateChunks(context.Background(), rc, cp))
@@ -720,7 +720,7 @@ func (s *tableRestoreSuite) TestInitializeColumnsGenerated() {
require.NoError(s.T(), err)
core.State = model.StatePublic
tableInfo := &checkpoints.TidbTableInfo{Name: "table", DB: "db", Core: core}
- s.tr, err = NewTableRestore("`db`.`table`", s.tableMeta, s.dbInfo, tableInfo, &checkpoints.TableCheckpoint{}, nil)
+ s.tr, err = NewTableRestore("`db`.`table`", s.tableMeta, s.dbInfo, tableInfo, &checkpoints.TableCheckpoint{}, nil, log.L())
require.NoError(s.T(), err)
ccp := &checkpoints.ChunkCheckpoint{}
@@ -938,7 +938,7 @@ func (s *tableRestoreSuite) TestTableRestoreMetrics() {
closedEngineLimit: worker.NewPool(ctx, 1, "closed_engine"),
store: s.store,
metaMgrBuilder: noopMetaMgrBuilder{},
- errorMgr: errormanager.New(nil, cfg),
+ errorMgr: errormanager.New(nil, cfg, log.L()),
taskMgr: noopTaskMetaMgr{},
}
go func() {
@@ -990,7 +990,7 @@ func (s *tableRestoreSuite) TestSaveStatusCheckpoint() {
checkpointsDB: checkpoints.NewNullCheckpointsDB(),
}
rc.checkpointsWg.Add(1)
- go rc.listenCheckpointUpdates()
+ go rc.listenCheckpointUpdates(log.L())
rc.errorSummaries = makeErrorSummaries(log.L())
@@ -1327,11 +1327,11 @@ func (s *tableRestoreSuite) TestEstimate() {
require.NoError(s.T(), err)
mockBackend.EXPECT().MakeEmptyRows().Return(kv.MakeRowsFromKvPairs(nil)).AnyTimes()
- mockBackend.EXPECT().NewEncoder(gomock.Any(), gomock.Any()).Return(kv.NewTableKVEncoder(tbl, &kv.SessionOptions{
+ mockBackend.EXPECT().NewEncoder(gomock.Any(), gomock.Any(), gomock.Any()).Return(kv.NewTableKVEncoder(tbl, &kv.SessionOptions{
SQLMode: s.cfg.TiDB.SQLMode,
Timestamp: 0,
AutoRandomSeed: 0,
- }, nil)).AnyTimes()
+ }, nil, log.L())).AnyTimes()
importer := backend.MakeBackend(mockBackend)
s.cfg.TikvImporter.Backend = config.BackendLocal
diff --git a/br/pkg/lightning/restore/tidb.go b/br/pkg/lightning/restore/tidb.go
index 7bdbf09c665a7..9cf278a67d1cc 100644
--- a/br/pkg/lightning/restore/tidb.go
+++ b/br/pkg/lightning/restore/tidb.go
@@ -104,7 +104,7 @@ func DBFromConfig(ctx context.Context, dsn config.DBStore) (*sql.DB, error) {
for k, v := range vars {
q := fmt.Sprintf("SET SESSION %s = '%s';", k, v)
if _, err1 := db.ExecContext(ctx, q); err1 != nil {
- log.L().Warn("set session variable failed, will skip this query", zap.String("query", q),
+ log.FromContext(ctx).Warn("set session variable failed, will skip this query", zap.String("query", q),
zap.Error(err1))
delete(vars, k)
}
@@ -142,7 +142,7 @@ func (timgr *TiDBManager) Close() {
}
func InitSchema(ctx context.Context, g glue.Glue, database string, tablesSchema map[string]string) error {
- logger := log.With(zap.String("db", database))
+ logger := log.FromContext(ctx).With(zap.String("db", database))
sqlExecutor := g.GetSQLExecutor()
var createDatabase strings.Builder
@@ -223,7 +223,7 @@ func createIfNotExistsStmt(p *parser.Parser, createTable, dbName, tblName string
func (timgr *TiDBManager) DropTable(ctx context.Context, tableName string) error {
sql := common.SQLWithRetry{
DB: timgr.db,
- Logger: log.With(zap.String("table", tableName)),
+ Logger: log.FromContext(ctx).With(zap.String("table", tableName)),
}
return sql.Exec(ctx, "drop table", "DROP TABLE "+tableName)
}
@@ -287,7 +287,7 @@ func LoadSchemaInfo(
func ObtainGCLifeTime(ctx context.Context, db *sql.DB) (string, error) {
var gcLifeTime string
- err := common.SQLWithRetry{DB: db, Logger: log.L()}.QueryRow(
+ err := common.SQLWithRetry{DB: db, Logger: log.FromContext(ctx)}.QueryRow(
ctx,
"obtain GC lifetime",
"SELECT VARIABLE_VALUE FROM mysql.tidb WHERE VARIABLE_NAME = 'tikv_gc_life_time'",
@@ -299,7 +299,7 @@ func ObtainGCLifeTime(ctx context.Context, db *sql.DB) (string, error) {
func UpdateGCLifeTime(ctx context.Context, db *sql.DB, gcLifeTime string) error {
sql := common.SQLWithRetry{
DB: db,
- Logger: log.With(zap.String("gcLifeTime", gcLifeTime)),
+ Logger: log.FromContext(ctx).With(zap.String("gcLifeTime", gcLifeTime)),
}
return sql.Exec(ctx, "update GC lifetime",
"UPDATE mysql.tidb SET VARIABLE_VALUE = ? WHERE VARIABLE_NAME = 'tikv_gc_life_time'",
@@ -326,10 +326,10 @@ func ObtainImportantVariables(ctx context.Context, g glue.SQLExecutor, needTiDBV
}
}
query.WriteString("')")
- kvs, err := g.QueryStringsWithLog(ctx, query.String(), "obtain system variables", log.L())
+ kvs, err := g.QueryStringsWithLog(ctx, query.String(), "obtain system variables", log.FromContext(ctx))
if err != nil {
// error is not fatal
- log.L().Warn("obtain system variables failed, use default variables instead", log.ShortError(err))
+ log.FromContext(ctx).Warn("obtain system variables failed, use default variables instead", log.ShortError(err))
}
// convert result into a map. fill in any missing variables with default values.
@@ -359,7 +359,7 @@ func ObtainNewCollationEnabled(ctx context.Context, g glue.SQLExecutor) (bool, e
ctx,
"SELECT variable_value FROM mysql.tidb WHERE variable_name = 'new_collation_enabled'",
"obtain new collation enabled",
- log.L(),
+ log.FromContext(ctx),
)
if err == nil && newCollationVal == "True" {
newCollationEnabled = true
@@ -379,7 +379,7 @@ func ObtainNewCollationEnabled(ctx context.Context, g glue.SQLExecutor) (bool, e
// See: https://github.com/pingcap/tidb/blob/64698ef9a3358bfd0fdc323996bb7928a56cadca/ddl/ddl_api.go#L2528-L2533
func AlterAutoIncrement(ctx context.Context, g glue.SQLExecutor, tableName string, incr uint64) error {
var query string
- logger := log.With(zap.String("table", tableName), zap.Uint64("auto_increment", incr))
+ logger := log.FromContext(ctx).With(zap.String("table", tableName), zap.Uint64("auto_increment", incr))
if incr > math.MaxInt64 {
// automatically set max value
logger.Warn("auto_increment out of the maximum value TiDB supports, automatically set to the max", zap.Uint64("auto_increment", incr))
@@ -401,7 +401,7 @@ func AlterAutoIncrement(ctx context.Context, g glue.SQLExecutor, tableName strin
}
func AlterAutoRandom(ctx context.Context, g glue.SQLExecutor, tableName string, randomBase uint64, maxAutoRandom uint64) error {
- logger := log.With(zap.String("table", tableName), zap.Uint64("auto_random", randomBase))
+ logger := log.FromContext(ctx).With(zap.String("table", tableName), zap.Uint64("auto_random", randomBase))
if randomBase == maxAutoRandom+1 {
// insert a tuple with key maxAutoRandom
randomBase = maxAutoRandom
diff --git a/br/pkg/lightning/run_options.go b/br/pkg/lightning/run_options.go
index 2665faddeb40a..a7b5b90770c02 100644
--- a/br/pkg/lightning/run_options.go
+++ b/br/pkg/lightning/run_options.go
@@ -16,8 +16,10 @@ package lightning
import (
"github.com/pingcap/tidb/br/pkg/lightning/glue"
+ "github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/br/pkg/storage"
"github.com/pingcap/tidb/util/promutil"
+ "go.uber.org/zap"
)
type options struct {
@@ -27,6 +29,7 @@ type options struct {
checkpointName string
promFactory promutil.Factory
promRegistry promutil.Registry
+ logger log.Logger
}
type Option func(*options)
@@ -71,3 +74,10 @@ func WithPromRegistry(r promutil.Registry) Option {
o.promRegistry = r
}
}
+
+// WithLogger sets the logger to a lightning task.
+func WithLogger(logger *zap.Logger) Option {
+ return func(o *options) {
+ o.logger = log.Logger{Logger: logger}
+ }
+}
diff --git a/br/pkg/mock/backend.go b/br/pkg/mock/backend.go
index 7eba5180694ac..04896d4a8efd1 100644
--- a/br/pkg/mock/backend.go
+++ b/br/pkg/mock/backend.go
@@ -1,8 +1,6 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/pingcap/tidb/br/pkg/lightning/backend (interfaces: AbstractBackend,EngineWriter)
-// $ mockgen -package mock -mock_names 'AbstractBackend=MockBackend' github.com/pingcap/tidb/br/pkg/lightning/backend AbstractBackend,EngineWriter
-
// Package mock is a generated GoMock package.
package mock
@@ -228,18 +226,18 @@ func (mr *MockBackendMockRecorder) MakeEmptyRows() *gomock.Call {
}
// NewEncoder mocks base method.
-func (m *MockBackend) NewEncoder(arg0 table.Table, arg1 *kv.SessionOptions) (kv.Encoder, error) {
+func (m *MockBackend) NewEncoder(arg0 context.Context, arg1 table.Table, arg2 *kv.SessionOptions) (kv.Encoder, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "NewEncoder", arg0, arg1)
+ ret := m.ctrl.Call(m, "NewEncoder", arg0, arg1, arg2)
ret0, _ := ret[0].(kv.Encoder)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NewEncoder indicates an expected call of NewEncoder.
-func (mr *MockBackendMockRecorder) NewEncoder(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockBackendMockRecorder) NewEncoder(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewEncoder", reflect.TypeOf((*MockBackend)(nil).NewEncoder), arg0, arg1)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewEncoder", reflect.TypeOf((*MockBackend)(nil).NewEncoder), arg0, arg1, arg2)
}
// OpenEngine mocks base method.
diff --git a/build/BUILD.bazel b/build/BUILD.bazel
index 294fe5b131748..7bf1a9778f709 100644
--- a/build/BUILD.bazel
+++ b/build/BUILD.bazel
@@ -1,6 +1,42 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel_rules_go//go:def.bzl", "nogo")
+load("//build/linter/staticcheck:def.bzl", "staticcheck_analyzers")
+
+STATICHECK_ANALYZERS = [
+ "S1002",
+ "S1004",
+ "S1007",
+ "S1009",
+ "S1010",
+ "S1012",
+ "S1019",
+ "S1020",
+ "S1021",
+ "S1024",
+ "S1030",
+ "SA2000",
+ "SA2001",
+ "SA2003",
+ "SA3000",
+ "SA3001",
+ "SA4009",
+ "SA5000",
+ "SA5001",
+ "SA5002",
+ "SA5003",
+ "SA5004",
+ "SA5005",
+ "SA5007",
+ "SA5008",
+ "SA5009",
+ "SA5010",
+ #"SA5011",
+ "SA5012",
+ "SA6000",
+ "SA6001",
+ "SA6005",
+]
nogo(
name = "tidb_nogo",
@@ -45,5 +81,7 @@ nogo(
"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library",
- ],
+ "//build/linter/durationcheck:durationcheck",
+ "//build/linter/prealloc:prealloc",
+ ] + staticcheck_analyzers(STATICHECK_ANALYZERS),
)
diff --git a/build/linter/durationcheck/BUILD.bazel b/build/linter/durationcheck/BUILD.bazel
new file mode 100644
index 0000000000000..556720ba0609c
--- /dev/null
+++ b/build/linter/durationcheck/BUILD.bazel
@@ -0,0 +1,12 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "durationcheck",
+ srcs = ["analyzer.go"],
+ importpath = "github.com/pingcap/tidb/build/linter/durationcheck",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//build/linter/util",
+ "@com_github_charithe_durationcheck//:durationcheck",
+ ],
+)
diff --git a/build/linter/durationcheck/analyzer.go b/build/linter/durationcheck/analyzer.go
new file mode 100644
index 0000000000000..1ebee429959fd
--- /dev/null
+++ b/build/linter/durationcheck/analyzer.go
@@ -0,0 +1,27 @@
+// Copyright 2022 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package durationcheck
+
+import (
+ "github.com/charithe/durationcheck"
+ "github.com/pingcap/tidb/build/linter/util"
+)
+
+// Analyzer is the analyzer struct of durationcheck.
+var Analyzer = durationcheck.Analyzer
+
+func init() {
+ util.SkipAnalyzer(Analyzer)
+}
diff --git a/build/linter/prealloc/BUILD.bazel b/build/linter/prealloc/BUILD.bazel
new file mode 100644
index 0000000000000..452108a450df5
--- /dev/null
+++ b/build/linter/prealloc/BUILD.bazel
@@ -0,0 +1,13 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "prealloc",
+ srcs = ["analyzer.go"],
+ importpath = "github.com/pingcap/tidb/build/linter/prealloc",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//build/linter/util",
+ "@com_github_golangci_prealloc//:prealloc",
+ "@org_golang_x_tools//go/analysis",
+ ],
+)
diff --git a/build/linter/prealloc/analyzer.go b/build/linter/prealloc/analyzer.go
new file mode 100644
index 0000000000000..5e9d38aa8daa9
--- /dev/null
+++ b/build/linter/prealloc/analyzer.go
@@ -0,0 +1,60 @@
+// Copyright 2022 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prealloc
+
+import (
+ "go/ast"
+
+ "github.com/golangci/prealloc"
+ "github.com/pingcap/tidb/build/linter/util"
+ "golang.org/x/tools/go/analysis"
+)
+
+// Settings is the settings for preallocation.
+type Settings struct {
+ Simple bool
+ RangeLoops bool `mapstructure:"range-loops"`
+ ForLoops bool `mapstructure:"for-loops"`
+}
+
+// Name is the name of the analyzer.
+const Name = "prealloc"
+
+// Analyzer is the analyzer struct of prealloc.
+var Analyzer = &analysis.Analyzer{
+ Name: Name,
+ Doc: "Finds slice declarations that could potentially be preallocated",
+ Run: run,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ s := &Settings{
+ Simple: true,
+ RangeLoops: true,
+ ForLoops: false,
+ }
+ for _, f := range pass.Files {
+ hints := prealloc.Check([]*ast.File{f}, s.Simple, s.RangeLoops, s.ForLoops)
+ for _, hint := range hints {
+ pass.Reportf(hint.Pos, "[%s] Consider preallocating %s", Name, util.FormatCode(hint.DeclaredSliceName))
+ }
+ }
+
+ return nil, nil
+}
+
+func init() {
+ util.SkipAnalyzer(Analyzer)
+}
diff --git a/build/linter/staticcheck/BUILD.bazel b/build/linter/staticcheck/BUILD.bazel
new file mode 100644
index 0000000000000..6525dc6be346a
--- /dev/null
+++ b/build/linter/staticcheck/BUILD.bazel
@@ -0,0 +1,26 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("//build/linter/staticcheck:def.bzl", "ANALYZERS")
+
+[
+ go_library(
+ name = analyzer,
+ srcs = [
+ "analyzer.go",
+ "util.go",
+ ],
+ importpath = "github.com/pingcap/tidb/build/linter/staticcheck/" + analyzer,
+ visibility = ["//visibility:public"],
+ x_defs = {"name": analyzer},
+ deps = [
+ "//build/linter/util",
+ "@co_honnef_go_tools//analysis/lint",
+ "@co_honnef_go_tools//quickfix",
+ "@co_honnef_go_tools//simple",
+ "@co_honnef_go_tools//staticcheck",
+ "@co_honnef_go_tools//stylecheck",
+ "@co_honnef_go_tools//unused",
+ "@org_golang_x_tools//go/analysis",
+ ],
+ )
+ for analyzer in ANALYZERS
+]
diff --git a/build/linter/staticcheck/analyzer.go b/build/linter/staticcheck/analyzer.go
new file mode 100644
index 0000000000000..cb13969093aa9
--- /dev/null
+++ b/build/linter/staticcheck/analyzer.go
@@ -0,0 +1,33 @@
+// Copyright 2022 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package staticcheck
+
+import (
+ "github.com/pingcap/tidb/build/linter/util"
+ "golang.org/x/tools/go/analysis"
+)
+
+var (
+ // Value to be added during stamping
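+ // (The x_defs attribute in build/linter/staticcheck/BUILD.bazel overwrites this value at link time with the analyzer name, e.g. "S1002".)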
+ name = "dummy value please replace using x_defs"
+
+ // Analyzer is an analyzer from staticcheck.
+ Analyzer *analysis.Analyzer
+)
+
+func init() {
+ Analyzer = FindAnalyzerByName(name)
+ util.SkipAnalyzer(Analyzer)
+}
diff --git a/build/linter/staticcheck/def.bzl b/build/linter/staticcheck/def.bzl
new file mode 100644
index 0000000000000..2e375d543b710
--- /dev/null
+++ b/build/linter/staticcheck/def.bzl
@@ -0,0 +1,167 @@
+# All analyzers in staticcheck.
+#
+# Generate this list by running:
+#
+# > bazel run //staticcheck/cmd/list_analyzers
+#
+ANALYZERS = [
+ "QF1001",
+ "QF1002",
+ "QF1003",
+ "QF1004",
+ "QF1005",
+ "QF1006",
+ "QF1007",
+ "QF1008",
+ "QF1009",
+ "QF1010",
+ "QF1011",
+ "QF1012",
+ "S1000",
+ "S1001",
+ "S1002",
+ "S1003",
+ "S1004",
+ "S1005",
+ "S1006",
+ "S1007",
+ "S1008",
+ "S1009",
+ "S1010",
+ "S1011",
+ "S1012",
+ "S1016",
+ "S1017",
+ "S1018",
+ "S1019",
+ "S1020",
+ "S1021",
+ "S1023",
+ "S1024",
+ "S1025",
+ "S1028",
+ "S1029",
+ "S1030",
+ "S1031",
+ "S1032",
+ "S1033",
+ "S1034",
+ "S1035",
+ "S1036",
+ "S1037",
+ "S1038",
+ "S1039",
+ "S1040",
+ "SA1000",
+ "SA1001",
+ "SA1002",
+ "SA1003",
+ "SA1004",
+ "SA1005",
+ "SA1006",
+ "SA1007",
+ "SA1008",
+ "SA1010",
+ "SA1011",
+ "SA1012",
+ "SA1013",
+ "SA1014",
+ "SA1015",
+ "SA1016",
+ "SA1017",
+ "SA1018",
+ "SA1019",
+ "SA1020",
+ "SA1021",
+ "SA1023",
+ "SA1024",
+ "SA1025",
+ "SA1026",
+ "SA1027",
+ "SA1028",
+ "SA1029",
+ "SA1030",
+ "SA2000",
+ "SA2001",
+ "SA2002",
+ "SA2003",
+ "SA3000",
+ "SA3001",
+ "SA4000",
+ "SA4001",
+ "SA4003",
+ "SA4004",
+ "SA4005",
+ "SA4006",
+ "SA4008",
+ "SA4009",
+ "SA4010",
+ "SA4011",
+ "SA4012",
+ "SA4013",
+ "SA4014",
+ "SA4015",
+ "SA4016",
+ "SA4017",
+ "SA4018",
+ "SA4019",
+ "SA4020",
+ "SA4021",
+ "SA4022",
+ "SA4023",
+ "SA4024",
+ "SA4025",
+ "SA4026",
+ "SA4027",
+ "SA4028",
+ "SA4029",
+ "SA4030",
+ "SA4031",
+ "SA5000",
+ "SA5001",
+ "SA5002",
+ "SA5003",
+ "SA5004",
+ "SA5005",
+ "SA5007",
+ "SA5008",
+ "SA5009",
+ "SA5010",
+ "SA5011",
+ "SA5012",
+ "SA6000",
+ "SA6001",
+ "SA6002",
+ "SA6003",
+ "SA6005",
+ "SA9001",
+ "SA9002",
+ "SA9003",
+ "SA9004",
+ "SA9005",
+ "SA9006",
+ "SA9007",
+ "SA9008",
+ "ST1000",
+ "ST1001",
+ "ST1003",
+ "ST1005",
+ "ST1006",
+ "ST1008",
+ "ST1011",
+ "ST1012",
+ "ST1013",
+ "ST1015",
+ "ST1016",
+ "ST1017",
+ "ST1018",
+ "ST1019",
+ "ST1020",
+ "ST1021",
+ "ST1022",
+ "ST1023",
+ "U1000",
+]
+
+def staticcheck_analyzers(analyzers, prefix_path = "//build/linter/staticcheck"):
+ return [prefix_path + ":" + a for a in analyzers]
diff --git a/build/linter/staticcheck/util.go b/build/linter/staticcheck/util.go
new file mode 100644
index 0000000000000..0c1928d564268
--- /dev/null
+++ b/build/linter/staticcheck/util.go
@@ -0,0 +1,55 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package staticcheck
+
+import (
+ "fmt"
+
+ "golang.org/x/tools/go/analysis"
+ "honnef.co/go/tools/analysis/lint"
+ "honnef.co/go/tools/quickfix"
+ "honnef.co/go/tools/simple"
+ "honnef.co/go/tools/staticcheck"
+ "honnef.co/go/tools/stylecheck"
+ "honnef.co/go/tools/unused"
+)
+
+// Analyzers is the analyzers of staticcheck.
+var Analyzers = func() map[string]*analysis.Analyzer {
+ resMap := make(map[string]*analysis.Analyzer)
+
+ for _, analyzers := range [][]*lint.Analyzer{
+ quickfix.Analyzers,
+ simple.Analyzers,
+ staticcheck.Analyzers,
+ stylecheck.Analyzers,
+ {unused.Analyzer},
+ } {
+ for _, a := range analyzers {
+ resMap[a.Analyzer.Name] = a.Analyzer
+ }
+ }
+
+ return resMap
+}()
+
+// FindAnalyzerByName finds the analyzer with the given name.
+func FindAnalyzerByName(name string) *analysis.Analyzer {
+ if a, ok := Analyzers[name]; ok {
+ return a
+ }
+
+ panic(fmt.Sprintf("not a valid staticcheck analyzer: %s", name))
+}
diff --git a/build/linter/util/BUILD.bazel b/build/linter/util/BUILD.bazel
new file mode 100644
index 0000000000000..4ac3fec064d07
--- /dev/null
+++ b/build/linter/util/BUILD.bazel
@@ -0,0 +1,12 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "util",
+ srcs = ["util.go"],
+ importpath = "github.com/pingcap/tidb/build/linter/util",
+ visibility = ["//visibility:public"],
+ deps = [
+ "@co_honnef_go_tools//analysis/report",
+ "@org_golang_x_tools//go/analysis",
+ ],
+)
diff --git a/build/linter/util/util.go b/build/linter/util/util.go
new file mode 100644
index 0000000000000..d476173a973a0
--- /dev/null
+++ b/build/linter/util/util.go
@@ -0,0 +1,149 @@
+// Copyright 2022 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "reflect"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "honnef.co/go/tools/analysis/report"
+)
+
+// skipType describes the kind of skip a lint directive requests.
+type skipType int
+
+const (
+ skipNone skipType = iota
+ skipLinter
+ skipFile
+)
+
+// Directive is a comment of the form "//lint:<command> [arguments...]" or "//nolint:<linters>".
+// It represents an instruction to the static analysis tool.
+type Directive struct {
+ Command skipType
+ Linters []string
+ Directive *ast.Comment
+ Node ast.Node
+}
+
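+// parseDirective parses a "//lint:" or "//nolint:" comment and returns the skip command together with its arguments.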
+func parseDirective(s string) (cmd skipType, args []string) {
+ if strings.HasPrefix(s, "//lint:") {
+ s = strings.TrimPrefix(s, "//lint:")
+ fields := strings.Split(s, " ")
+ switch fields[0] {
+ case "ignore":
+ return skipLinter, fields[1:]
+ case "file-ignore":
+ return skipFile, fields[1:]
+ }
+ return skipNone, nil
+ }
+ s = strings.TrimPrefix(s, "//nolint:")
+ return skipLinter, []string{s}
+}
+
+// ParseDirectives extracts all directives from a list of Go files.
+func ParseDirectives(files []*ast.File, fset *token.FileSet) []Directive {
+ var dirs []Directive
+ for _, f := range files {
+ cm := ast.NewCommentMap(fset, f, f.Comments)
+ for node, cgs := range cm {
+ for _, cg := range cgs {
+ for _, c := range cg.List {
+ if !strings.HasPrefix(c.Text, "//lint:") && !strings.HasPrefix(c.Text, "//nolint:") {
+ continue
+ }
+ cmd, args := parseDirective(c.Text)
+ d := Directive{
+ Command: cmd,
+ Linters: args,
+ Directive: c,
+ Node: node,
+ }
+ dirs = append(dirs, d)
+ }
+ }
+ }
+ }
+ return dirs
+}
+
+func doDirectives(pass *analysis.Pass) (interface{}, error) {
+ return ParseDirectives(pass.Files, pass.Fset), nil
+}
+
+// Directives is a fact that contains a list of directives.
+var Directives = &analysis.Analyzer{
+ Name: "directives",
+ Doc: "extracts linter directives",
+ Run: doDirectives,
+ RunDespiteErrors: true,
+ ResultType: reflect.TypeOf([]Directive{}),
+}
+
+// SkipAnalyzer wraps an analyzer from staticcheck or golangci-lint so that it works under nogo
+// and honors the "lint:ignore" and "nolint" comments that ask the analyzer to ignore the code.
+func SkipAnalyzer(analyzer *analysis.Analyzer) {
+ analyzer.Requires = append(analyzer.Requires, Directives)
+ oldRun := analyzer.Run
+ analyzer.Run = func(p *analysis.Pass) (interface{}, error) {
+ pass := *p
+ oldReport := p.Report
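+ // Wrap Report: a diagnostic is dropped if a lint:ignore/nolint directive on the same line names this analyzer, or if a file-ignore directive covers the whole file.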
+ pass.Report = func(diag analysis.Diagnostic) {
+ dirs := pass.ResultOf[Directives].([]Directive)
+ for _, dir := range dirs {
+ cmd := dir.Command
+ linters := dir.Linters
+ switch cmd {
+ case skipLinter:
+ ignorePos := report.DisplayPosition(pass.Fset, dir.Node.Pos())
+ nodePos := report.DisplayPosition(pass.Fset, diag.Pos)
+ if ignorePos.Filename != nodePos.Filename || ignorePos.Line != nodePos.Line {
+ continue
+ }
+ for _, check := range strings.Split(linters[0], ",") {
+ if strings.TrimSpace(check) == analyzer.Name {
+ return
+ }
+ }
+ case skipFile:
+ ignorePos := report.DisplayPosition(pass.Fset, dir.Node.Pos())
+ nodePos := report.DisplayPosition(pass.Fset, diag.Pos)
+ if ignorePos.Filename == nodePos.Filename {
+ return
+ }
+ default:
+ continue
+ }
+ }
+ oldReport(diag)
+ }
+ return oldRun(&pass)
+ }
+}
+
+// FormatCode is to format code for nogo.
+func FormatCode(code string) string {
+ if strings.Contains(code, "`") {
+ return code // TODO: properly escape or remove
+ }
+
+ return fmt.Sprintf("`%s`", code)
+}
diff --git a/build/nogo_config.json b/build/nogo_config.json
index c462883ab232e..5fe822cce2963 100644
--- a/build/nogo_config.json
+++ b/build/nogo_config.json
@@ -80,6 +80,12 @@
".*_generated\\.go$": "ignore generated code"
}
},
+ "durationcheck": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
"errorsas": {
"exclude_files": {
"/external/": "no need to vet third party code",
@@ -201,5 +207,207 @@
".*_generated\\.go$": "ignore generated code",
"parser/digester_test.go": "ignore code"
}
+ },
+ "S1002": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "S1004": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "S1007": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "S1009": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "S1010": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "S1012": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "S1019": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code",
+ "parser/parser.go": "ignore code"
+ }
+ },
+ "S1020": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "S1021": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code",
+ "tools/check/ut.go": "ignore code"
+ }
+ },
+ "S1024": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "S1030": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA2000": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA2001": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA2003": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA3000": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA3001": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA4009": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA5000": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA5001": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA5002": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA5003": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA5004": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA5005": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA5007": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA5008": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA5009": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA5010": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA5011": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA5012": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA6000": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA6001": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "SA6005": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code"
+ }
+ },
+ "prealloc": {
+ "exclude_files": {
+ "/external/": "no need to vet third party code",
+ ".*_generated\\.go$": "ignore generated code",
+ "parser/yy_parser.go": "ignore generated code",
+ "/cgo/": "no need to vet third party code for cgo"
+ }
}
}
diff --git a/cmd/importer/db.go b/cmd/importer/db.go
index 49f3d0ec67ad5..8b0d7353b9adf 100644
--- a/cmd/importer/db.go
+++ b/cmd/importer/db.go
@@ -117,7 +117,7 @@ func genRowDatas(table *table, count int) ([]string, error) {
}
func genRowData(table *table) (string, error) {
- var values []byte // nolint: prealloc
+ var values []byte //nolint: prealloc
for _, column := range table.columns {
data, err := genColumnData(table, column)
if err != nil {
diff --git a/config/config.go b/config/config.go
index a701b1501bf15..0dc6cabc75727 100644
--- a/config/config.go
+++ b/config/config.go
@@ -117,6 +117,7 @@ var (
map[string]string{
"check-mb4-value-in-utf8": "tidb_check_mb4_value_in_utf8",
"enable-collect-execution-info": "tidb_enable_collect_execution_info",
+ "max-server-connections": "max_connections",
},
},
{
@@ -255,7 +256,8 @@ type Config struct {
// BallastObjectSize set the initial size of the ballast object, the unit is byte.
BallastObjectSize int `toml:"ballast-object-size" json:"ballast-object-size"`
// EnableGlobalKill indicates whether to enable global kill.
- EnableGlobalKill bool `toml:"enable-global-kill" json:"enable-global-kill"`
+ EnableGlobalKill bool `toml:"enable-global-kill" json:"enable-global-kill"`
+ TrxSummary TrxSummary `toml:"transaction-summary" json:"transaction-summary"`
// The following items are deprecated. We need to keep them here temporarily
// to support the upgrade process. They can be removed in future.
@@ -473,6 +475,7 @@ type Instance struct {
EnableCollectExecutionInfo bool `toml:"tidb_enable_collect_execution_info" json:"tidb_enable_collect_execution_info"`
PluginDir string `toml:"plugin_dir" json:"plugin_dir"`
PluginLoad string `toml:"plugin_load" json:"plugin_load"`
+ MaxConnections uint32 `toml:"max_connections" json:"max_connections"`
}
func (l *Log) getDisableTimestamp() bool {
@@ -721,6 +724,22 @@ type PessimisticTxn struct {
PessimisticAutoCommit AtomicBool `toml:"pessimistic-auto-commit" json:"pessimistic-auto-commit"`
}
+// TrxSummary is the config for transaction summary collecting.
+type TrxSummary struct {
+ // how many transaction summary in `transaction_summary` each TiDB node should keep.
+ TransactionSummaryCapacity uint `toml:"transaction-summary-capacity" json:"transaction-summary-capacity"`
+ // how long a transaction should be executed to make it be recorded in `transaction_id_digest`.
+ TransactionIDDigestMinDuration uint `toml:"transaction-id-digest-min-duration" json:"transaction-id-digest-min-duration"`
+}
+
+// Valid validates the TrxSummary config.
+func (config *TrxSummary) Valid() error {
+ if config.TransactionSummaryCapacity > 5000 {
+ return errors.New("transaction-summary.transaction-summary-capacity should not be larger than 5000")
+ }
+ return nil
+}
+
// DefaultPessimisticTxn returns the default configuration for PessimisticTxn
func DefaultPessimisticTxn() PessimisticTxn {
return PessimisticTxn{
@@ -731,6 +750,15 @@ func DefaultPessimisticTxn() PessimisticTxn {
}
}
+// DefaultTrxSummary returns the default configuration for TrxSummary collector
+func DefaultTrxSummary() TrxSummary {
+ // TrxSummary is not enabled by default before GA
+ return TrxSummary{
+ TransactionSummaryCapacity: 500,
+ TransactionIDDigestMinDuration: 2147483647,
+ }
+}
+
// Plugin is the config for plugin
type Plugin struct {
Dir string `toml:"dir" json:"dir"`
@@ -824,6 +852,7 @@ var defaultConf = Config{
EnableCollectExecutionInfo: true,
PluginDir: "/data/deploy/plugin",
PluginLoad: "",
+ MaxConnections: 0,
},
Status: Status{
ReportStatus: true,
@@ -916,6 +945,7 @@ var defaultConf = Config{
EnableForwarding: defTiKVCfg.EnableForwarding,
NewCollationsEnabledOnFirstBootstrap: true,
EnableGlobalKill: true,
+ TrxSummary: DefaultTrxSummary(),
}
var (
@@ -1183,6 +1213,9 @@ func (c *Config) Valid() error {
if err := c.TiKVClient.Valid(); err != nil {
return err
}
+ if err := c.TrxSummary.Valid(); err != nil {
+ return err
+ }
if c.Performance.TxnTotalSizeLimit > 1<<40 {
return fmt.Errorf("txn-total-size-limit should be less than %d", 1<<40)
diff --git a/config/config.toml.example b/config/config.toml.example
index 1e1fc7aae73f5..afc97e60e74f8 100644
--- a/config/config.toml.example
+++ b/config/config.toml.example
@@ -89,9 +89,6 @@ repair-mode = false
# In repair mode, repairing table which is not in repair list will get wrong database or wrong table error.
repair-table-list = []
-# The maximum permitted number of simultaneous client connections. When the value is 0, the number of connections is unlimited.
-max-server-connections = 0
-
# Whether new collations are enabled, as indicated by its name, this configuration entry take effect ONLY when a TiDB cluster bootstraps for the first time.
new_collations_enabled_on_first_bootstrap = true
@@ -468,3 +465,6 @@ tidb_slow_log_threshold = 300
# tidb_record_plan_in_slow_log is used to enable record query plan in slow log.
# 0 is disable. 1 is enable.
tidb_record_plan_in_slow_log = 1
+
+# The maximum permitted number of simultaneous client connections. When the value is 0, the number of connections is unlimited.
+max_connections = 0
diff --git a/config/config_test.go b/config/config_test.go
index 391bd874d3942..2e044062bd4e8 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -282,9 +282,6 @@ repair-mode = false
# In repair mode, repairing table which is not in repair list will get wrong database or wrong table error.
repair-table-list = []
-# The maximum permitted number of simultaneous client connections. When the value is 0, the number of connections is unlimited.
-max-server-connections = 0
-
# Whether new collations are enabled, as indicated by its name, this configuration entry take effect ONLY when a TiDB cluster bootstraps for the first time.
new_collations_enabled_on_first_bootstrap = true
@@ -309,6 +306,11 @@ deprecate-integer-display-length = false
# See https://dev.mysql.com/doc/refman/8.0/en/string-type-syntax.html for more details.
enable-enum-length-limit = true
+[instance]
+
+# The maximum permitted number of simultaneous client connections. When the value is 0, the number of connections is unlimited.
+max_connections = 0
+
[log]
# Log level: debug, info, warn, error, fatal.
level = "info"
@@ -707,7 +709,7 @@ unrecognized-option-test = true
match, err := regexp.Match("(?:.|\n)*invalid configuration option(?:.|\n)*", []byte(err.Error()))
require.NoError(t, err)
require.True(t, match)
- require.Equal(t, uint32(0), conf.MaxServerConnections)
+ require.Equal(t, uint32(0), conf.Instance.MaxConnections)
err = f.Truncate(0)
require.NoError(t, err)
@@ -722,7 +724,6 @@ delay-clean-table-lock = 5
split-region-max-num=10000
server-version = "test_version"
repair-mode = true
-max-server-connections = 200
max-index-length = 3080
index-limit = 70
table-column-count-limit = 4000
@@ -768,6 +769,8 @@ grpc-keepalive-timeout = 10
grpc-concurrent-streams = 2048
grpc-initial-window-size = 10240
grpc-max-send-msg-size = 40960
+[instance]
+max_connections = 200
`)
require.NoError(t, err)
@@ -797,7 +800,7 @@ grpc-max-send-msg-size = 40960
require.Equal(t, uint64(10000), conf.SplitRegionMaxNum)
require.True(t, conf.RepairMode)
require.Equal(t, uint64(16), conf.TiKVClient.ResolveLockLiteThreshold)
- require.Equal(t, uint32(200), conf.MaxServerConnections)
+ require.Equal(t, uint32(200), conf.Instance.MaxConnections)
require.Equal(t, []string{"tiflash"}, conf.IsolationRead.Engines)
require.Equal(t, 3080, conf.MaxIndexLength)
require.Equal(t, 70, conf.IndexLimit)
diff --git a/docs/design/2021-03-30-small-table-caching.md b/docs/design/2021-03-30-small-table-caching.md
new file mode 100644
index 0000000000000..2745b8e1026df
--- /dev/null
+++ b/docs/design/2021-03-30-small-table-caching.md
@@ -0,0 +1,174 @@
+# Proposal: Caching a table in memory
+
+- Author(@tiancaiamao)
+- Discussion PR: https://github.com/pingcap/tidb/pull/23673
+- Tracking Issue: https://github.com/pingcap/tidb/issues/25293
+
+## Introduction
+
+For a small, frequently visited, and rarely changed table, caching the whole table in memory in the TiDB server can improve performance.
+
+## Motivation or Background
+
+A table that is small enough lives in just one region; that region can become a hotspot, and such hotspots cause performance bottlenecks. By caching the data of such small tables directly in the TiDB layer, the hotspot issue can be avoided.
+
+We already cache tables in some scenarios, even if we do not usually think of it that way. For example, consider our handling of global variables. To prevent row-at-a-time loading of each system variable from TiKV, we needed to implement the [sysvar cache](https://github.com/pingcap/tidb/pull/24359). This helps reduce `show variables like ..` latency, but the data set is small and the problem is very generic. It also doesn't help in cases where system variables are read from `mysql.tidb` instead, which does not have a cache. The data of privilege-related tables is also cached.
+
+Caching can also improve the performance of joins: the cached table can be used as the inner table, cutting down the network cost of loading data into TiDB. An example is the ITEM table in the TPC-C test. It stores the information of all the goods sold by the sales company, including the name and price of each item. During the "order creation" transaction, the data in this table is used to determine the price of the order. Such a table is a typical scenario that this feature can optimize.
+
+Last but not least, this feature has been requested by our customers. They hit the hotspot region problem, but they consider it too risky to enable follower read or the coprocessor cache, since those may affect the whole cluster (in their business model they offer a storage service to their own customers on a shared cluster, so they try to avoid a single change affecting all users).
+
+## Detailed Design
+
+If the update operation were not supported at all, the use case would be too limited. So even though we trade write ability for read performance, we should at least support updating.
+
+### Key algorithm
+
+There is a conceptual "read-write" lock to implement the caching operation.
+
+Before caching the data, the table needs to hold a "read lock", so that the data will not be modified afterward. This "read lock" is a record providing some meta-information. All modifications must check the lock meta-information first. If the record is in the read-locked state, modifications are forbidden.
+
+The "read" lock needs to maintain a lease for a while, and the lease should be renewed continuously. The underlying data will be safe as long as the read lock lease hold.
+
+When performing a modification, the meta-information should be checked first. If there is a "read" lock, it needs to be upgraded to a "write intend" lock; this step prevents the read lock from renewing its lease. After the lease expires, the meta status is changed to a "write" lock, and the write operation can then be performed. After the write operation finishes, the "write" lock should be cleared, so that a subsequent "read" lock can be added and the data can be cached again.
+
+The "write" lock also needs a TTL(time to live) to handle abnormal situations. For example, if the TiDB server crashes after the write lock succeeds, TTL gives us a hint of how to clean the orphan write lock.
+
+Putting it all together, we have this interface:
+
+```
+type StateRemote interface {
+ Load()
+ LockForRead()
+ LockForWrite()
+ RenewLease()
+ WriteAndUnlock()
+}
+```
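+
+A slightly more concrete Go sketch of these operations, with illustrative signatures (the parameter and return types here are assumptions for readability, not the final API), might look like:
+
+```go
+// StateRemote maintains the lock meta information of a cached table in a remote store.
+type StateRemote interface {
+	// Load reads the current lock type and lease of the table.
+	Load(ctx context.Context, tid int64) (lockType string, lease uint64, err error)
+	// LockForRead adds a read lock with the given lease so the data can be cached.
+	LockForRead(ctx context.Context, tid int64, lease uint64) (bool, error)
+	// LockForWrite upgrades the lock to a write lock, waiting for the old read lease to expire.
+	LockForWrite(ctx context.Context, tid int64) error
+	// RenewLease extends the lease of an existing read lock.
+	RenewLease(ctx context.Context, tid int64, newLease uint64) (bool, error)
+	// WriteAndUnlock clears the write lock after the write operation finishes.
+	WriteAndUnlock(ctx context.Context, tid int64) error
+}
+```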
+
+### Management of lock meta information
+
+What is the granularity of the lock? One option is to implement the meta lock at the key-value level. The advantage is finer granularity and better update performance: if we cache key-value records, modifying one key does not invalidate the cache of other keys. The disadvantage is that this would invade the current transaction layer implementation and make our transactions more complex. Considering that the transaction implementation is already very complex, it is not wise to do so.
+
+The other option is to implement the meta lock at the table level, where each table corresponds to one meta lock record. Whenever any key of the table is modified, the whole table cache becomes invalid. Assuming that we only deal with small, infrequently updated tables, this should be acceptable.
+
+To support update operations, the `StateRemote` interface needs to be implemented. The rest of this section describes the management of the lock meta information: where it is stored and how it is used.
+
+Such a table could be used to store the meta-information:
+
+```
+CREATE TABLE mysql.table_cache_meta (
+	tid bigint primary key,
+	lock_type enum('NONE','READ','INTEND','WRITE'),
+	lease bigint unsigned,
+	oldReadLease bigint unsigned
+);
+```
+
+This table contains the `tid`, `lock_type` and `lease` information.
+
+- `tid` records the ID of the table being cached
+- `lock_type` indicates whether the current state is read locked, write locked, and so on
+- `lease` is used for read lock lease renewal, and also for cleaning up write locks
+- `oldReadLease` is needed when `lock_type` is 'INTEND'; it stores the old read lease
+
+LockForRead() corresponds to this operation:
+
+```
+update table_cache_meta set lock_type = 'READ', lease = %? where tid = %? and lock_type != 'WRITE'
+```
+
+RenewLease() corresponds to this operation:
+
+```
+update table_cache_meta set lease = %? where tid = %? and lock_type = 'READ'
+```
+
+LockForWrite() corresponds to this operation:
+
+```
+update table_cache_meta set lock_type = 'INTEND', oldReadLease = %?, lease = %? where tid = %?
+sleep()  -- wait until the old read lease expires
+update table_cache_meta set lock_type = 'WRITE', lease = %? where tid = %?
+```
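+
+As a rough sketch (not the final implementation), `LockForWrite` runs the two statements above with a wait in between; the `exec` helper and the lease parameters below are assumptions made for illustration:
+
+```go
+// lockForWrite upgrades a READ lock to a WRITE lock in two steps: first record the
+// write intention so the read lease can no longer be renewed, then wait for the old
+// read lease to expire and take the WRITE lock.
+func lockForWrite(ctx context.Context, exec func(context.Context, string, ...interface{}) error,
+	tid int64, oldReadLease, writeLease uint64, readLeaseRemaining time.Duration) error {
+	// Step 1: mark the intention and remember the old read lease.
+	if err := exec(ctx,
+		"update mysql.table_cache_meta set lock_type = 'INTEND', oldReadLease = %?, lease = %? where tid = %?",
+		oldReadLease, writeLease, tid); err != nil {
+		return err
+	}
+	// Step 2: wait until the old read lease has expired, then switch to WRITE.
+	time.Sleep(readLeaseRemaining)
+	return exec(ctx,
+		"update mysql.table_cache_meta set lock_type = 'WRITE', lease = %? where tid = %?",
+		writeLease, tid)
+}
+```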
+
+### Caching and writing process
+
+The in-memory cache of the table data is stored in the same way as for the [temporary table](2021-04-20-temporary-table.md).
+In short, the KV data of the table is loaded from TiKV and stored in a MemBuffer, and an extra UnionScan executor is used to read that data.
+
+
+A `CachedTable` struct will be introduced. It embeds the `Table` interface and overrides some of its methods. `loadSchema` periodically reloads the table information on schema change; for a cached table, the `CachedTable` will be reconstructed.
+
+`CachedTable` implements the `Table` interface and overrides the `AddRecord` method. When `AddRecord` is called, the lock meta information needs to be checked first. The operation needs to acquire the "write" lock, that is, set the `lock_type` field of the meta table to 'WRITE'. After confirming that the lease of the previous 'READ' lock has expired, the write can be executed.
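+
+A minimal sketch of the overridden `AddRecord`, reusing the `StateRemote` sketch above (the struct fields and wiring here are illustrative assumptions, not the actual implementation):
+
+```go
+// CachedTable wraps a normal table and keeps an in-memory copy of its data.
+type CachedTable struct {
+	table.Table              // the underlying table implementation
+	handle      StateRemote  // remote lock meta information for this table
+}
+
+// AddRecord acquires the WRITE lock before delegating to the underlying table,
+// so the write cannot proceed while any read lease is still valid.
+func (c *CachedTable) AddRecord(sctx sessionctx.Context, r []types.Datum, opts ...table.AddRecordOption) (kv.Handle, error) {
+	if err := c.handle.LockForWrite(context.Background(), c.Meta().ID); err != nil {
+		return nil, err
+	}
+	return c.Table.AddRecord(sctx, r, opts...)
+}
+```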
+
+The read operation flow diagram:
+
+![](./imgs/cached-table-read.png)
+
+
+The write operation flow diagram:
+
+![](./imgs/cached-table-write.png)
+
+
+### How to enable this feature
+
+For OceanBase's replicated table, the syntax is to add the `DUPLICATE_SCOPE` option to the CREATE TABLE statement.
+
+I propose to use "ALTER TABLE t [CACHE|NOCACHE]" as the syntax. It is a switch that can be turned on or off, and users can decide for themselves whether to use it, since write performance becomes very poor once a table is cached.
+
+"ALTER TABLE t CACHE" is a DDL operation. Assuming that all the TiDB instances know that a table is not cached, there is no correctness problem; Assuming that all tidb instances make a consensus that a table is cached, the correctness is guaranteed by the locking algorithm mentioned before. However, during the DDL operation, if some TiDB instances think that the cache is enabled, while some other instances think that the cache is not enabled, there would be a correctness issue.
+
+To address that problem, an intermediate `Switching` state is introduced; the schema change process is similar to the implementation of the LOCK TABLE statement:
+
+> Disabled => Switching => Enabled
+
+- In the Disabled state, all TiDB instances know that there is no cache for the table; it can be read and written normally.
+- In the Enabled state, all TiDB instances know that the table is cached, and the "lock" meta information should be checked before reading and writing.
+- In the Switching state, all read operations go to the original table, and write operations need to check the "write" lock first.
+
+The cluster may have Disabled and Switching, or Switching and Enabled, on different TiDB instances at the same time, but Disabled and Enabled never coexist.
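+
+A minimal sketch of the state and the behavior it implies on each TiDB instance (the names here are illustrative, not the actual schema representation):
+
+```go
+// CacheState is the cached-table state carried in the table's schema information.
+type CacheState int
+
+const (
+	CacheDisabled  CacheState = iota // no cache: read and write normally
+	CacheSwitching                   // reads go to the original table; writes must check the "write" lock
+	CacheEnabled                     // reads may use the cache; reads and writes must check the lock meta
+)
+
+// mustCheckLock reports whether an operation has to consult the lock meta information.
+func mustCheckLock(state CacheState, isWrite bool) bool {
+	switch state {
+	case CacheDisabled:
+		return false
+	case CacheSwitching:
+		return isWrite
+	default: // CacheEnabled
+		return true
+	}
+}
+```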
+
+### Compatibility
+
+The entry point of the feature is the 'ALTER TABLE' statement. If the user does not explicitly use it, there is no impact.
+
+Metadata management introduces persistent data. Old TiDB versions do not access or use this metadata, so there should not be any problem when upgrading or downgrading.
+
+It is unsafe to mix old and new versions of TiDB when using cached tables. Rolling updates are safe, but there is a risk that if an old version of a TiDB server re-joins after a table is in the cached 'Enabled' state, it could modify the contents in an unsafe manner. Currently _downgrade_ is not technically supported by TiDB, but there is no technical mechanism that prevents an older versioned binary from joining a newer versioned cluster. We need to solve this issue to perform metadata upgrades, but it is currently blocked in the requirements phase, because we have not decided from which versions upgrade/downgrade will be supported.
+
+## Investigation & Alternatives
+
+### OceanBase's replicated table
+
+> "In order to cope with the high and low frequency of application access and the low-frequency access to small tables that can always access the latest data, and at the same time to ensure data consistency, currently we can only choose the solution of strong consistent read access to Leader data. However, due to the high access frequency, Leader is easy to become a performance bottleneck. In order to solve the problem of "small table broadcast" demand scenario, OceanBase 2.x version combines its own architecture to provide a copy table function, copying the copy of the related small table to all the OBServers of the tenant of the table. The table says to replicate the table, these copies are called replicas. The update transaction of the replicated table guarantees that the data is synchronized to all full-function replicas and replicas when committing, ensuring that the update transaction can be read on any OBServer of the tenant after the successful commit of the update transaction. Transaction modified data."
+
+Refer to https://zhuanlan.zhihu.com/p/78402011
+
+The biggest challenge of replicating data to multiple machines is that a modification must take effect on all machines in real time; otherwise some machines read the new data while others read the old data, which clearly does not meet users' needs. OceanBase uses a special broadcast protocol to guarantee the consistency of the replicated table's copies: when the replicated table is modified, all the copies are modified at the same time, and the modification only takes effect when the copies on all machines have been modified successfully.
+
+In a distributed system, another thorny problem arises: if a machine fails while the replicated table is being modified and the copy on the failed machine cannot be updated, can the replicated table no longer be modified at all? If this problem is not solved, the replicated table would drag down the user's operations whenever a machine fails. OceanBase uses a lease mechanism to solve this: each valid copy of the replicated table holds a lease, and a modification must be synchronized to all leased copies. When a machine fails, the lease of its copy becomes invalid; the invalid copy no longer receives new modifications, so subsequent modifications of the replicated table are not blocked. The invalid copy also refuses read operations to ensure that stale data is not read. When the failed copy recovers, it catches up on the missing data and is granted a lease again once it reaches the latest state.
+Reference https://developer.aliyun.com/article/750271
+
+OceanBase optimizes for broadcasting small tables used in joins. The commit agreement is raised from a majority of the copies to all of them, sacrificing write performance in exchange for read performance and consistency. At the implementation level, viewed from another perspective, OceanBase does table-level synchronization, which is roughly equivalent to adding a learner role at the Raft layer for the replicated table.
+
+### Reference Tables of SingleStore (formerly MemSQL)
+
+> "Reference tables are relatively small tables that do not need to be distributed and are present on every node in the cluster. Reference tables are implemented via primary-secondary replication to every node in the cluster from the master aggregator."
+
+Reference https://docs.singlestore.com/v7.3/key-concepts-and-features/physical-schema-design/other-schema-concepts/reference-tables/
+
+### Collocated tables of YugaByteDB
+
+They put related tables on the same node, which makes operations such as joins cheaper by reducing network overhead. These tables could be split into multiple tablets, but colocated tables are deliberately not split. This is not a caching concept, but it is another way to optimize the broadcast of small join tables.
+
+> "The assumptions behind tables that are collocated is that their data need not be automatically shared and distributed across nodes"
+
+Reference https://docs.yugabyte.com/latest/architecture/docdb-sharding/colocated-tables/
+https://github.com/yugabyte/yugabyte-db/blob/master/architecture/design/ysql-colocated-tables.md
+
+### Oracle also has a similar feature
+
+https://logicalread.com/oracle-11g-caching-table-in-memory-mc02/#.YFvijK_7QuU
+
diff --git a/docs/design/2021-04-26-lock-view.md b/docs/design/2021-04-26-lock-view.md
index 3db4cb6dd8666..09b6e8a114854 100644
--- a/docs/design/2021-04-26-lock-view.md
+++ b/docs/design/2021-04-26-lock-view.md
@@ -220,11 +220,11 @@ Default: 10000
#### TiDB Config File `transaction-summary.transaction-id-digest-min-duration`
-Specifies how long a transaction should be executed to make it be recorded in `transaction_id_digest`.
+Specifies how long a transaction must run before it is recorded in `transaction_id_digest` and considered when calculating `trx_summary`.
Dynamically changeable via HTTP API.
-Value: 0 to 60000
+Value: 0 to 2147483647
Unit: ms
@@ -232,7 +232,7 @@ Default: 1000
#### TiDB Config File `transaction-summary.transaction-summary-capacity`
-Specifies how many transaction summary in `transaction_summary` each TiDB node should keep.
+Specifies how many transaction summaries each TiDB node should keep in `trx_summary`.
Dynamically changeable via HTTP API.
diff --git a/docs/design/imgs/cached-table-read.png b/docs/design/imgs/cached-table-read.png
new file mode 100644
index 0000000000000..d78309d71510d
Binary files /dev/null and b/docs/design/imgs/cached-table-read.png differ
diff --git a/docs/design/imgs/cached-table-write.png b/docs/design/imgs/cached-table-write.png
new file mode 100644
index 0000000000000..2df7445042135
Binary files /dev/null and b/docs/design/imgs/cached-table-write.png differ
diff --git a/docs/tidb_http_api.md b/docs/tidb_http_api.md
index c9d63f5ea4e6b..e820b23324fc2 100644
--- a/docs/tidb_http_api.md
+++ b/docs/tidb_http_api.md
@@ -557,3 +557,30 @@ timezone.*
# reset the size of the ballast object (2GB in this example)
curl -v -X POST -d "2147483648" http://{TiDBIP}:10080/debug/ballast-object-sz
```
+
+
+1. Set deadlock history table capacity
+
+ ```shell
+ curl -X POST -d "deadlock_history_capacity={number}" http://{TiDBIP}:10080/settings
+ ```
+
+1. Set whether deadlock history (`DEADLOCKS`) collect retryable deadlocks
+
+ ```shell
+ curl -X POST -d "deadlock_history_collect_retryable={bool_val}" http://{TiDBIP}:10080/settings
+ ```
+
+1. Set the minimum duration threshold for the transaction_id-to-digest mapping; only transactions that last longer than this threshold are collected into `TRX_SUMMARY`.
+
+ ```shell
+ curl -X POST -d "transaction_id_digest_min_duration={number}" http://{TiDBIP}:10080/settings
+ ```
+
+   The duration here is in milliseconds (ms).
+
+1. Set transaction summary table (`TRX_SUMMARY`) capacity
+
+ ```shell
+ curl -X POST -d "transaction_summary_capacity={number}" http://{TiDBIP}:10080/settings
+ ```
diff --git a/dumpling/README.md b/dumpling/README.md
index 00715370731c6..d90afa808cbf9 100644
--- a/dumpling/README.md
+++ b/dumpling/README.md
@@ -6,7 +6,6 @@
[![API Docs](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white)](https://pkg.go.dev/github.com/pingcap/dumpling)
[![Go Report Card](https://goreportcard.com/badge/github.com/pingcap/dumpling)](https://goreportcard.com/report/github.com/pingcap/dumpling)
[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fpingcap%2Fdumpling.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fpingcap%2Fdumpling?ref=badge_shield)
-[![Discuss in Slack](https://img.shields.io/badge/slack-sig--migrate-4A154B?logo=slack)](https://slack.tidb.io/invite?team=tidb-community&channel=sig-migrate&ref=github_sig)
**Dumpling** is a tool and a Go library for creating SQL dump from a MySQL-compatible database.
It is intended to replace `mysqldump` and `mydumper` when targeting TiDB.
@@ -24,7 +23,7 @@ Features
- [ ] Write to cloud storage (S3, GCS) natively
- [x] Advanced table filtering
-Any questions? Let's discuss in [#sig-migrate in Slack](https://slack.tidb.io/invite?team=tidb-community&channel=sig-migrate&ref=github_sig)!
+Any questions? Let's discuss on [TiDB Internals forum](https://internals.tidb.io/)!
Building
--------
@@ -51,4 +50,4 @@ License
Dumpling is under the Apache 2.0 license. See the [LICENSE](./LICENSE) file for details.
-[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fpingcap%2Fdumpling.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fpingcap%2Fdumpling?ref=badge_large)
\ No newline at end of file
+[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fpingcap%2Fdumpling.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fpingcap%2Fdumpling?ref=badge_large)
diff --git a/dumpling/export/dump.go b/dumpling/export/dump.go
index 7f27759211a44..43d801430e6e8 100755
--- a/dumpling/export/dump.go
+++ b/dumpling/export/dump.go
@@ -81,10 +81,17 @@ func NewDumper(ctx context.Context, conf *Config) (*Dumper, error) {
selectTiDBTableRegionFunc: selectTiDBTableRegion,
}
+ var err error
+
d.metrics = newMetrics(conf.PromFactory, conf.Labels)
d.metrics.registerTo(conf.PromRegistry)
+ defer func() {
+ if err != nil {
+ d.metrics.unregisterFrom(conf.PromRegistry)
+ }
+ }()
- err := adjustConfig(conf,
+ err = adjustConfig(conf,
registerTLSConfig,
validateSpecifiedSQL,
adjustFileFormat)
diff --git a/dumpling/export/dump_test.go b/dumpling/export/dump_test.go
index 204826f8e7716..b059a1ae28ac2 100644
--- a/dumpling/export/dump_test.go
+++ b/dumpling/export/dump_test.go
@@ -9,6 +9,7 @@ import (
"time"
"github.com/DATA-DOG/go-sqlmock"
+ "github.com/pingcap/tidb/util/promutil"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
@@ -207,5 +208,20 @@ func TestAdjustTableCollation(t *testing.T) {
require.NoError(t, err)
require.Equal(t, expectedSQLs[i], newSQL)
}
+}
+
+func TestUnregisterMetrics(t *testing.T) {
+ ctx := context.Background()
+ conf := &Config{
+ SQL: "not empty",
+ Where: "not empty",
+ PromFactory: promutil.NewDefaultFactory(),
+ PromRegistry: promutil.NewDefaultRegistry(),
+ }
+ _, err := NewDumper(ctx, conf)
+ require.Error(t, err)
+ _, err = NewDumper(ctx, conf)
+ // should not panic
+ require.Error(t, err)
}
diff --git a/executor/analyze_test.go b/executor/analyze_test.go
index 32c2ec30bbd14..055591b8a9a7c 100644
--- a/executor/analyze_test.go
+++ b/executor/analyze_test.go
@@ -80,7 +80,7 @@ func TestAnalyzeFastSample(t *testing.T) {
}
handleCols := core.BuildHandleColsForAnalyze(tk.Session(), tblInfo, true, nil)
- var colsInfo []*model.ColumnInfo // nolint: prealloc
+ var colsInfo []*model.ColumnInfo //nolint: prealloc
var indicesInfo []*model.IndexInfo
for _, col := range tblInfo.Columns {
if mysql.HasPriKeyFlag(col.GetFlag()) {
diff --git a/executor/analyzetest/analyze_test.go b/executor/analyzetest/analyze_test.go
index aa2c586bb46ae..b37376132ec5b 100644
--- a/executor/analyzetest/analyze_test.go
+++ b/executor/analyzetest/analyze_test.go
@@ -732,6 +732,9 @@ func testAnalyzeIncremental(tk *testkit.TestKit, t *testing.T, dom *domain.Domai
tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0"))
// Test analyze incremental with feedback.
+ // paging is not compatible with feedback.
+ tk.MustExec("set @@tidb_enable_paging = off")
+
tk.MustExec("insert into t values (3,3)")
oriProbability := statistics.FeedbackProbability.Load()
oriMinLogCount := handle.MinLogScanCount.Load()
diff --git a/executor/builder.go b/executor/builder.go
index d86200b573058..8a44c09aaf033 100644
--- a/executor/builder.go
+++ b/executor/builder.go
@@ -1768,7 +1768,9 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo
strings.ToLower(infoschema.TableClientErrorsSummaryByUser),
strings.ToLower(infoschema.TableClientErrorsSummaryByHost),
strings.ToLower(infoschema.TableAttributes),
- strings.ToLower(infoschema.TablePlacementPolicies):
+ strings.ToLower(infoschema.TablePlacementPolicies),
+ strings.ToLower(infoschema.TableTrxSummary),
+ strings.ToLower(infoschema.ClusterTableTrxSummary):
return &MemTableReaderExec{
baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
table: v.Table,
@@ -2435,6 +2437,15 @@ func (b *executorBuilder) buildAnalyzeSamplingPushdown(task plannercore.AnalyzeC
}
// getAdjustedSampleRate calculate the sample rate by the table size. If we cannot get the table size. We use the 0.001 as the default sample rate.
+// From the paper "Random sampling for histogram construction: how much is enough?"'s Corollary 1 to Theorem 5,
+// for a table size n, histogram size k, maximum relative error in bin size f, and error probability gamma,
+// the minimum random sample size is
+// r = 4 * k * ln(2*n/gamma) / f^2
+// If we take f = 0.5, gamma = 0.01, n = 1e6, we would get r = 305.82 * k.
+// Since there's a log function over the table size n, r grows slowly as n increases.
+// If we take n = 1e12, a 300*k sample still gives <= 0.66 bin size error with probability 0.99.
+// So if we don't consider the top-n values, we can keep the sample size at 300*256.
+// But we may take some top-n before building the histogram, so we increase the sample a little.
func (b *executorBuilder) getAdjustedSampleRate(sctx sessionctx.Context, task plannercore.AnalyzeColumnsTask) float64 {
statsHandle := domain.GetDomain(sctx).StatsHandle()
defaultRate := 0.001
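
The sample-size formula quoted in the comment above is easy to sanity-check numerically. The sketch below simply evaluates r = 4·k·ln(2n/γ)/f² for the values mentioned (k = 256, f = 0.5, γ = 0.01) and, with the sample fixed at 300·k rows, solves for the relative error at n = 1e12; it is illustrative only:

```go
package main

import (
	"fmt"
	"math"
)

// sampleSize evaluates r = 4*k*ln(2*n/gamma) / f^2 from the comment above.
func sampleSize(k, n, f, gamma float64) float64 {
	return 4 * k * math.Log(2*n/gamma) / (f * f)
}

func main() {
	const k, f, gamma = 256.0, 0.5, 0.01
	r := sampleSize(k, 1e6, f, gamma)
	fmt.Printf("n=1e6:  r = %.0f rows (= %.2f * k)\n", r, r/k)

	// With the sample fixed at 300*k rows, solve the formula for f at n = 1e12.
	f12 := math.Sqrt(4 * math.Log(2*1e12/gamma) / 300)
	fmt.Printf("n=1e12, r=300*k: f = %.2f\n", f12)
}
```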
@@ -4099,6 +4110,7 @@ func (h kvRangeBuilderFromRangeAndPartition) buildKeyRangeSeparately(ranges []*r
}
func (h kvRangeBuilderFromRangeAndPartition) buildKeyRange(ranges []*ranger.Range) ([]kv.KeyRange, error) {
+ //nolint: prealloc
var ret []kv.KeyRange
for _, p := range h.partitions {
pid := p.GetPhysicalID()
@@ -4509,6 +4521,7 @@ func (b *executorBuilder) buildShuffle(v *plannercore.PhysicalShuffle) *ShuffleE
concurrency: v.Concurrency,
}
+ // 1. initialize the splitters
splitters := make([]partitionSplitter, len(v.ByItemArrays))
switch v.SplitterType {
case plannercore.PartitionHashSplitterType:
@@ -4524,6 +4537,7 @@ func (b *executorBuilder) buildShuffle(v *plannercore.PhysicalShuffle) *ShuffleE
}
shuffle.splitters = splitters
+ // 2. initialize the data sources (build the data sources from physical plan to executors)
shuffle.dataSources = make([]Executor, len(v.DataSources))
for i, dataSource := range v.DataSources {
shuffle.dataSources[i] = b.build(dataSource)
@@ -4532,13 +4546,24 @@ func (b *executorBuilder) buildShuffle(v *plannercore.PhysicalShuffle) *ShuffleE
}
}
+ // 3. initialize the workers
head := v.Children()[0]
+	// A `PhysicalShuffleReceiverStub` for every worker has the same `DataSource` but a different `Receiver`.
+ // We preallocate `PhysicalShuffleReceiverStub`s here and reuse them below.
+ stubs := make([]*plannercore.PhysicalShuffleReceiverStub, 0, len(v.DataSources))
+ for _, dataSource := range v.DataSources {
+ stub := plannercore.PhysicalShuffleReceiverStub{
+ DataSource: dataSource,
+ }.Init(b.ctx, dataSource.Stats(), dataSource.SelectBlockOffset(), nil)
+ stub.SetSchema(dataSource.Schema())
+ stubs = append(stubs, stub)
+ }
shuffle.workers = make([]*shuffleWorker, shuffle.concurrency)
for i := range shuffle.workers {
receivers := make([]*shuffleReceiver, len(v.DataSources))
for j, dataSource := range v.DataSources {
receivers[j] = &shuffleReceiver{
- baseExecutor: newBaseExecutor(b.ctx, dataSource.Schema(), dataSource.ID()),
+ baseExecutor: newBaseExecutor(b.ctx, dataSource.Schema(), stubs[j].ID()),
}
}
@@ -4546,12 +4571,9 @@ func (b *executorBuilder) buildShuffle(v *plannercore.PhysicalShuffle) *ShuffleE
receivers: receivers,
}
- for j, dataSource := range v.DataSources {
- stub := plannercore.PhysicalShuffleReceiverStub{
- Receiver: (unsafe.Pointer)(receivers[j]),
- DataSource: dataSource,
- }.Init(b.ctx, dataSource.Stats(), dataSource.SelectBlockOffset(), nil)
- stub.SetSchema(dataSource.Schema())
+ for j := range v.DataSources {
+ stub := stubs[j]
+ stub.Receiver = (unsafe.Pointer)(receivers[j])
v.Tails[j].SetChildren(stub)
}
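
A minimal sketch of the restructuring in this hunk: the stubs, which are identical for every worker, are now built once before the worker loop, and the inner loop only wires in the worker-specific receiver. All names below are illustrative, not the actual planner types:

```go
package main

import "fmt"

type receiver struct{ worker, source int }

type receiverStub struct {
	dataSource string
	recv       *receiver
}

func main() {
	dataSources := []string{"ds0", "ds1"}
	const concurrency = 3

	// 1. preallocate one stub per data source, outside the worker loop
	stubs := make([]*receiverStub, 0, len(dataSources))
	for _, ds := range dataSources {
		stubs = append(stubs, &receiverStub{dataSource: ds})
	}

	// 2. per worker, create receivers and point the shared stubs at them
	for w := 0; w < concurrency; w++ {
		for j := range dataSources {
			stubs[j].recv = &receiver{worker: w, source: j}
		}
	}
	fmt.Println(len(stubs), stubs[0].dataSource, stubs[0].recv.worker)
}
```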
diff --git a/executor/delete.go b/executor/delete.go
index d72eae827d9e2..5e759e12db25c 100644
--- a/executor/delete.go
+++ b/executor/delete.go
@@ -158,20 +158,26 @@ func (e *DeleteExec) doBatchDelete(ctx context.Context) error {
}
func (e *DeleteExec) composeTblRowMap(tblRowMap tableRowMapType, colPosInfos []plannercore.TblColPosInfo, joinedRow []types.Datum) error {
- // iterate all the joined tables, and got the copresonding rows in joinedRow.
+ // iterate all the joined tables, and got the corresponding rows in joinedRow.
for _, info := range colPosInfos {
if unmatchedOuterRow(info, joinedRow) {
continue
}
if tblRowMap[info.TblID] == nil {
- tblRowMap[info.TblID] = kv.NewHandleMap()
+ tblRowMap[info.TblID] = kv.NewMemAwareHandleMap[[]types.Datum]()
}
handle, err := info.HandleCols.BuildHandleByDatums(joinedRow)
if err != nil {
return err
}
// tblRowMap[info.TblID][handle] hold the row datas binding to this table and this handle.
- tblRowMap[info.TblID].Set(handle, joinedRow[info.Start:info.End])
+ _, exist := tblRowMap[info.TblID].Get(handle)
+ memDelta := tblRowMap[info.TblID].Set(handle, joinedRow[info.Start:info.End])
+ if !exist {
+ memDelta += types.EstimatedMemUsage(joinedRow, 1)
+ memDelta += int64(handle.ExtraMemSize())
+ }
+ e.memTracker.Consume(memDelta)
}
return nil
}
@@ -240,6 +246,7 @@ func (e *DeleteExec) removeRow(ctx sessionctx.Context, t table.Table, h kv.Handl
// Close implements the Executor Close interface.
func (e *DeleteExec) Close() error {
+ defer e.memTracker.ReplaceBytesUsed(0)
return e.children[0].Close()
}
@@ -254,4 +261,4 @@ func (e *DeleteExec) Open(ctx context.Context) error {
// tableRowMapType is a map for unique (Table, Row) pair. key is the tableID.
// the key in map[int64]Row is the joined table handle, which represent a unique reference row.
// the value in map[int64]Row is the deleting row.
-type tableRowMapType map[int64]*kv.HandleMap
+type tableRowMapType map[int64]*kv.MemAwareHandleMap[[]types.Datum]
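
The `DeleteExec` changes above follow a consistent accounting pattern: `Set` on the memory-aware map reports how much the map itself grew, the caller adds an estimate for the newly stored row and handle, the total is fed to the executor's memory tracker, and `Close` resets the tracker. A toy stand-in (not the real `kv.MemAwareHandleMap`) for that pattern:

```go
package main

import "fmt"

// tracker stands in for memory.Tracker.
type tracker struct{ bytes int64 }

func (t *tracker) Consume(delta int64)      { t.bytes += delta }
func (t *tracker) ReplaceBytesUsed(b int64) { t.bytes = b }

// memAwareMap is generic over the value type, like kv.MemAwareHandleMap[V].
type memAwareMap[V any] struct{ m map[int64]V }

func newMemAwareMap[V any]() *memAwareMap[V] { return &memAwareMap[V]{m: make(map[int64]V)} }

func (s *memAwareMap[V]) Get(k int64) (V, bool) { v, ok := s.m[k]; return v, ok }

// Set returns a rough growth estimate: a fixed per-entry cost for new keys, 0 for overwrites.
func (s *memAwareMap[V]) Set(k int64, v V) (memDelta int64) {
	if _, ok := s.m[k]; !ok {
		memDelta = 48 // illustrative per-entry overhead
	}
	s.m[k] = v
	return memDelta
}

func main() {
	t := &tracker{}
	rows := newMemAwareMap[[]string]()

	row := []string{"a", "b"}
	_, exist := rows.Get(1)
	memDelta := rows.Set(1, row)
	if !exist {
		memDelta += int64(len(row)) * 16 // stands in for types.EstimatedMemUsage + handle.ExtraMemSize
	}
	t.Consume(memDelta)
	fmt.Println("tracked:", t.bytes)

	t.ReplaceBytesUsed(0) // what DeleteExec.Close and UpdateExec.Close now defer
	fmt.Println("after close:", t.bytes)
}
```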
diff --git a/executor/explain_test.go b/executor/explain_test.go
index 01cd918d83a9e..9d0545fb5a6b0 100644
--- a/executor/explain_test.go
+++ b/executor/explain_test.go
@@ -310,6 +310,7 @@ func TestCheckActRowsWithUnistore(t *testing.T) {
tk.MustExec("create table t_unistore_act_rows(a int, b int, index(a, b))")
tk.MustExec("insert into t_unistore_act_rows values (1, 0), (1, 0), (2, 0), (2, 1)")
tk.MustExec("analyze table t_unistore_act_rows")
+ tk.MustExec("set @@tidb_merge_join_concurrency= 5;")
type testStruct struct {
sql string
@@ -353,6 +354,14 @@ func TestCheckActRowsWithUnistore(t *testing.T) {
sql: "with cte(a) as (select a from t_unistore_act_rows) select (select 1 from cte limit 1) from cte;",
expected: []string{"4", "4", "4", "4", "4"},
},
+ {
+ sql: "select a, row_number() over (partition by b) from t_unistore_act_rows;",
+ expected: []string{"4", "4", "4", "4", "4", "4", "4"},
+ },
+ {
+ sql: "select /*+ merge_join(t1, t2) */ * from t_unistore_act_rows t1 join t_unistore_act_rows t2 on t1.b = t2.b;",
+ expected: []string{"10", "10", "4", "4", "4", "4", "4", "4", "4", "4", "4", "4"},
+ },
}
// Default RPC encoding may cause statistics explain result differ and then the test unstable.
diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go
index b26687757942a..7f7766eda2b23 100644
--- a/executor/infoschema_reader.go
+++ b/executor/infoschema_reader.go
@@ -168,6 +168,10 @@ func (e *memtableRetriever) retrieve(ctx context.Context, sctx sessionctx.Contex
err = e.setDataForAttributes(sctx, is)
case infoschema.TablePlacementPolicies:
err = e.setDataFromPlacementPolicies(sctx)
+ case infoschema.TableTrxSummary:
+ err = e.setDataForTrxSummary(sctx)
+ case infoschema.ClusterTableTrxSummary:
+ err = e.setDataForClusterTrxSummary(sctx)
}
if err != nil {
return nil, err
@@ -2181,6 +2185,29 @@ func (e *memtableRetriever) setDataForClientErrorsSummary(ctx sessionctx.Context
return nil
}
+func (e *memtableRetriever) setDataForTrxSummary(ctx sessionctx.Context) error {
+ hasProcessPriv := hasPriv(ctx, mysql.ProcessPriv)
+ if !hasProcessPriv {
+ return nil
+ }
+ rows := txninfo.Recorder.DumpTrxSummary()
+ e.rows = rows
+ return nil
+}
+
+func (e *memtableRetriever) setDataForClusterTrxSummary(ctx sessionctx.Context) error {
+ err := e.setDataForTrxSummary(ctx)
+ if err != nil {
+ return err
+ }
+ rows, err := infoschema.AppendHostInfoToRows(ctx, e.rows)
+ if err != nil {
+ return err
+ }
+ e.rows = rows
+ return nil
+}
+
type stmtSummaryTableRetriever struct {
dummyCloser
table *model.TableInfo
@@ -2844,7 +2871,7 @@ func (e *TiFlashSystemTableRetriever) initialize(sctx sessionctx.Context, tiflas
}
func (e *TiFlashSystemTableRetriever) dataForTiFlashSystemTables(ctx sessionctx.Context, tidbDatabases string, tidbTables string) ([][]types.Datum, error) {
- var columnNames []string // nolint: prealloc
+ var columnNames []string //nolint: prealloc
for _, c := range e.outputCols {
if c.Name.O == "TIFLASH_INSTANCE" {
continue
diff --git a/executor/inspection_result.go b/executor/inspection_result.go
index debcf723a3e64..741508c5cf88f 100644
--- a/executor/inspection_result.go
+++ b/executor/inspection_result.go
@@ -727,6 +727,7 @@ func (c thresholdCheckInspection) inspect(ctx context.Context, sctx sessionctx.C
c.inspectThreshold3,
c.inspectForLeaderDrop,
}
+ //nolint: prealloc
var results []inspectionResult
for _, inspect := range inspects {
re := inspect(ctx, sctx, filter)
diff --git a/executor/memtable_reader.go b/executor/memtable_reader.go
index 64729e3705dcb..080a7d5d83a4d 100644
--- a/executor/memtable_reader.go
+++ b/executor/memtable_reader.go
@@ -178,7 +178,7 @@ func fetchClusterConfig(sctx sessionctx.Context, nodeTypes, nodeAddrs set.String
return nil, err
}
serversInfo = filterClusterServerInfo(serversInfo, nodeTypes, nodeAddrs)
-
+ //nolint: prealloc
var finalRows [][]types.Datum
wg := sync.WaitGroup{}
ch := make(chan result, len(serversInfo))
@@ -271,7 +271,7 @@ func fetchClusterConfig(sctx sessionctx.Context, nodeTypes, nodeAddrs set.String
close(ch)
// Keep the original order to make the result more stable
- var results []result // nolint: prealloc
+ var results []result //nolint: prealloc
for result := range ch {
if result.err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(result.err)
@@ -349,7 +349,7 @@ func (e *clusterServerInfoRetriever) retrieve(ctx context.Context, sctx sessionc
wg.Wait()
close(ch)
// Keep the original order to make the result more stable
- var results []result // nolint: prealloc
+ var results []result //nolint: prealloc
for result := range ch {
if result.err != nil {
sctx.GetSessionVars().StmtCtx.AppendWarning(result.err)
@@ -565,7 +565,7 @@ func (e *clusterLogRetriever) startRetrieving(
// The retrieve progress may be abort
ctx, e.cancel = context.WithCancel(ctx)
- var results []chan logStreamResult // nolint: prealloc
+ var results []chan logStreamResult //nolint: prealloc
for _, srv := range serversInfo {
typ := srv.ServerType
address := srv.Address
@@ -1070,6 +1070,7 @@ func (e *tikvRegionPeersRetriever) isUnexpectedStoreID(storeID int64, storeMap m
func (e *tikvRegionPeersRetriever) packTiKVRegionPeersRows(
regionsInfo []helper.RegionInfo, storeMap map[int64]struct{}) ([][]types.Datum, error) {
+ //nolint: prealloc
var rows [][]types.Datum
for _, region := range regionsInfo {
records := make([][]types.Datum, 0, len(region.Peers))
diff --git a/executor/point_get_test.go b/executor/point_get_test.go
index e366c99d1e37c..95b2a4dbe9e4a 100644
--- a/executor/point_get_test.go
+++ b/executor/point_get_test.go
@@ -18,7 +18,6 @@ import (
"context"
"fmt"
"strings"
- "sync"
"testing"
"time"
@@ -783,7 +782,6 @@ func TestPointGetLockExistKey(t *testing.T) {
))
}
- var wg sync.WaitGroup
for i, one := range []struct {
rc bool
key string
@@ -793,14 +791,12 @@ func TestPointGetLockExistKey(t *testing.T) {
{rc: true, key: "primary key"},
{rc: true, key: "unique key"},
} {
- wg.Add(1)
+
tableName := fmt.Sprintf("t_%d", i)
- go func(rc bool, key string, tableName string) {
- defer wg.Done()
+ func(rc bool, key string, tableName string) {
testLock(rc, key, tableName)
}(one.rc, one.key, tableName)
}
- wg.Wait()
}
func TestWithTiDBSnapshot(t *testing.T) {
diff --git a/executor/set_test.go b/executor/set_test.go
index eb171e872d8c4..9a7213571fddc 100644
--- a/executor/set_test.go
+++ b/executor/set_test.go
@@ -1021,16 +1021,17 @@ func TestValidateSetVar(t *testing.T) {
result.Check(testkit.Rows("SYSTEM"))
// The following cases test value out of range and illegal type when setting system variables.
- // See https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html for more details.
+ // See https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html for more details.
tk.MustExec("set @@global.max_connections=100001")
tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Warning|1292|Truncated incorrect max_connections value: '100001'"))
result = tk.MustQuery("select @@global.max_connections;")
result.Check(testkit.Rows("100000"))
+ // "max_connections == 0" means there is no limitation on the number of connections.
tk.MustExec("set @@global.max_connections=-1")
tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Warning|1292|Truncated incorrect max_connections value: '-1'"))
result = tk.MustQuery("select @@global.max_connections;")
- result.Check(testkit.Rows("1"))
+ result.Check(testkit.Rows("0"))
err = tk.ExecToErr("set @@global.max_connections='hello'")
require.True(t, terror.ErrorEqual(err, variable.ErrWrongTypeForVar))
@@ -1077,7 +1078,7 @@ func TestValidateSetVar(t *testing.T) {
tk.MustExec("set @@global.max_connections=-1")
tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Warning|1292|Truncated incorrect max_connections value: '-1'"))
result = tk.MustQuery("select @@global.max_connections;")
- result.Check(testkit.Rows("1"))
+ result.Check(testkit.Rows("0"))
err = tk.ExecToErr("set @@global.max_connections='hello'")
require.True(t, terror.ErrorEqual(err, variable.ErrWrongTypeForVar))
@@ -1333,15 +1334,15 @@ func TestSelectGlobalVar(t *testing.T) {
defer clean()
tk := testkit.NewTestKit(t, store)
- tk.MustQuery("select @@global.max_connections;").Check(testkit.Rows("151"))
- tk.MustQuery("select @@max_connections;").Check(testkit.Rows("151"))
+ tk.MustQuery("select @@global.max_connections;").Check(testkit.Rows("0"))
+ tk.MustQuery("select @@max_connections;").Check(testkit.Rows("0"))
tk.MustExec("set @@global.max_connections=100;")
tk.MustQuery("select @@global.max_connections;").Check(testkit.Rows("100"))
tk.MustQuery("select @@max_connections;").Check(testkit.Rows("100"))
- tk.MustExec("set @@global.max_connections=151;")
+ tk.MustExec("set @@global.max_connections=0;")
// test for unknown variable.
err := tk.ExecToErr("select @@invalid")
diff --git a/executor/show.go b/executor/show.go
index 9075444fd53f4..d1424244bdb47 100644
--- a/executor/show.go
+++ b/executor/show.go
@@ -29,6 +29,7 @@ import (
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
+ "github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
@@ -99,6 +100,12 @@ type ShowExec struct {
Extended bool // Used for `show extended columns from ...`
}
+type showTableRegionRowItem struct {
+ regionMeta
+ schedulingConstraints string
+ schedulingState string
+}
+
// Next implements the Executor Next interface.
func (e *ShowExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
@@ -219,7 +226,7 @@ func (e *ShowExec) fetchAll(ctx context.Context) error {
case ast.ShowAnalyzeStatus:
return e.fetchShowAnalyzeStatus()
case ast.ShowRegions:
- return e.fetchShowTableRegions()
+ return e.fetchShowTableRegions(ctx)
case ast.ShowBuiltins:
return e.fetchShowBuiltins()
case ast.ShowBackups:
@@ -1818,7 +1825,7 @@ func (e *ShowExec) appendRow(row []interface{}) {
}
}
-func (e *ShowExec) fetchShowTableRegions() error {
+func (e *ShowExec) fetchShowTableRegions(ctx context.Context) error {
store := e.ctx.GetStore()
tikvStore, ok := store.(helper.Storage)
if !ok {
@@ -1858,22 +1865,82 @@ func (e *ShowExec) fetchShowTableRegions() error {
// Get table regions from from pd, not from regionCache, because the region cache maybe outdated.
var regions []regionMeta
if len(e.IndexName.L) != 0 {
+ // show table * index * region
indexInfo := tb.Meta().FindIndexByName(e.IndexName.L)
if indexInfo == nil {
return plannercore.ErrKeyDoesNotExist.GenWithStackByArgs(e.IndexName, tb.Meta().Name)
}
regions, err = getTableIndexRegions(indexInfo, physicalIDs, tikvStore, splitStore)
} else {
+ // show table * region
regions, err = getTableRegions(tb, physicalIDs, tikvStore, splitStore)
}
+ if err != nil {
+ return err
+ }
+ regionRowItem, err := e.fetchSchedulingInfo(ctx, regions, tb.Meta())
if err != nil {
return err
}
- e.fillRegionsToChunk(regions)
+
+ e.fillRegionsToChunk(regionRowItem)
return nil
}
+func (e *ShowExec) fetchSchedulingInfo(ctx context.Context, regions []regionMeta, tbInfo *model.TableInfo) ([]showTableRegionRowItem, error) {
+ scheduleState := make(map[int64]infosync.PlacementScheduleState)
+ schedulingConstraints := make(map[int64]*model.PlacementSettings)
+ regionRowItem := make([]showTableRegionRowItem, 0)
+ tblPlacement, err := e.getTablePlacement(tbInfo)
+ if err != nil {
+ return nil, err
+ }
+
+ if tbInfo.GetPartitionInfo() != nil {
+ // partitioned table
+ for _, part := range tbInfo.GetPartitionInfo().Definitions {
+ _, err = fetchScheduleState(ctx, scheduleState, part.ID)
+ if err != nil {
+ return nil, err
+ }
+ placement, err := e.getPolicyPlacement(part.PlacementPolicyRef)
+ if err != nil {
+ return nil, err
+ }
+ if placement == nil {
+ schedulingConstraints[part.ID] = tblPlacement
+ } else {
+ schedulingConstraints[part.ID] = placement
+ }
+ }
+ } else {
+ // un-partitioned table or index
+ schedulingConstraints[tbInfo.ID] = tblPlacement
+ _, err = fetchScheduleState(ctx, scheduleState, tbInfo.ID)
+ if err != nil {
+ return nil, err
+ }
+ }
+ var constraintStr string
+ var scheduleStateStr string
+ for i := range regions {
+ if constraint, ok := schedulingConstraints[regions[i].physicalID]; ok && constraint != nil {
+ constraintStr = constraint.String()
+ scheduleStateStr = scheduleState[regions[i].physicalID].String()
+ } else {
+ constraintStr = ""
+ scheduleStateStr = ""
+ }
+ regionRowItem = append(regionRowItem, showTableRegionRowItem{
+ regionMeta: regions[i],
+ schedulingConstraints: constraintStr,
+ schedulingState: scheduleStateStr,
+ })
+ }
+ return regionRowItem, nil
+}
+
func getTableRegions(tb table.Table, physicalIDs []int64, tikvStore helper.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) {
regions := make([]regionMeta, 0, len(physicalIDs))
uniqueRegionMap := make(map[uint64]struct{})
@@ -1900,7 +1967,7 @@ func getTableIndexRegions(indexInfo *model.IndexInfo, physicalIDs []int64, tikvS
return regions, nil
}
-func (e *ShowExec) fillRegionsToChunk(regions []regionMeta) {
+func (e *ShowExec) fillRegionsToChunk(regions []showTableRegionRowItem) {
for i := range regions {
e.result.AppendUint64(0, regions[i].region.Id)
e.result.AppendString(1, regions[i].start)
@@ -1926,6 +1993,8 @@ func (e *ShowExec) fillRegionsToChunk(regions []regionMeta) {
e.result.AppendUint64(8, regions[i].readBytes)
e.result.AppendInt64(9, regions[i].approximateSize)
e.result.AppendInt64(10, regions[i].approximateKeys)
+ e.result.AppendString(11, regions[i].schedulingConstraints)
+ e.result.AppendString(12, regions[i].schedulingState)
}
}
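
A small sketch of the fallback rule `fetchSchedulingInfo` applies above: a partition uses its own placement policy when it has one and otherwise inherits the table-level placement, keyed by physical ID so each region row can look up its constraints. Types and IDs below are illustrative:

```go
package main

import "fmt"

type placement struct{ spec string }

// constraintFor mirrors the fallback in fetchSchedulingInfo: partition policy
// if present, otherwise the table-level placement.
func constraintFor(partPlacement, tblPlacement *placement) *placement {
	if partPlacement != nil {
		return partPlacement
	}
	return tblPlacement
}

func main() {
	tbl := &placement{`PRIMARY_REGION="cn-east-1" REGIONS="cn-east-1,cn-east-2" SCHEDULE="EVEN"`}
	p0 := &placement{`LEADER_CONSTRAINTS="[+region=us-east-1]" FOLLOWERS=3`}

	byPhysicalID := map[int64]*placement{
		100: constraintFor(p0, tbl),  // partition p0 has its own policy
		101: constraintFor(nil, tbl), // p1 inherits the table policy
	}
	for id, c := range byPhysicalID {
		fmt.Println(id, c.spec)
	}
}
```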
diff --git a/executor/split.go b/executor/split.go
index e31f894849685..52dba35747c43 100644
--- a/executor/split.go
+++ b/executor/split.go
@@ -620,6 +620,9 @@ type regionMeta struct {
readBytes uint64
approximateSize int64
approximateKeys int64
+
+	// physicalID is used to propagate scheduling info for this region.
+ physicalID int64
}
func getPhysicalTableRegions(physicalTableID int64, tableInfo *model.TableInfo, tikvStore helper.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) {
@@ -784,12 +787,15 @@ func getRegionMeta(tikvStore helper.Storage, regionMetas []*tikv.Region, uniqueR
continue
}
uniqueRegionMap[r.GetID()] = struct{}{}
- regions = append(regions, regionMeta{
- region: r.GetMeta(),
- leaderID: r.GetLeaderPeerID(),
- storeID: r.GetLeaderStoreID(),
- })
+ regions = append(regions,
+ regionMeta{
+ region: r.GetMeta(),
+ leaderID: r.GetLeaderPeerID(),
+ storeID: r.GetLeaderStoreID(),
+ physicalID: physicalTableID,
+ })
}
+
regions, err := getRegionInfo(tikvStore, regions)
if err != nil {
return regions, err
diff --git a/executor/splittest/split_table_test.go b/executor/splittest/split_table_test.go
index d578b53638c5c..9f5ceec8a01ea 100644
--- a/executor/splittest/split_table_test.go
+++ b/executor/splittest/split_table_test.go
@@ -20,6 +20,7 @@ import (
"testing"
"github.com/pingcap/tidb/ddl"
+ "github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
@@ -266,7 +267,7 @@ func TestShowTableRegion(t *testing.T) {
// 4 regions to store record data.
// 1 region to store index data.
require.Len(t, rows, 5)
- require.Len(t, rows[0], 11)
+ require.Len(t, rows[0], 13)
tbl := external.GetTableByName(t, tk, "test", "t_regions")
// Check the region start key.
require.Equal(t, fmt.Sprintf("t_%d_r", tbl.Meta().ID), rows[0][1])
@@ -274,6 +275,11 @@ func TestShowTableRegion(t *testing.T) {
require.Equal(t, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID), rows[2][1])
require.Equal(t, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID), rows[3][1])
require.Equal(t, fmt.Sprintf("t_%d_r", tbl.Meta().ID), rows[4][2])
+ // Check scheduling constraint and scheduling state default value
+ for i := range rows {
+ require.Equal(t, "", rows[i][11])
+ require.Equal(t, "", rows[i][12])
+ }
// Test show table index regions.
tk.MustQuery(`split table t_regions index idx between (-1000) and (1000) regions 4;`).Check(testkit.Rows("4 1"))
@@ -281,11 +287,17 @@ func TestShowTableRegion(t *testing.T) {
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
require.Len(t, rows, 4)
+ require.Len(t, rows[0], 13)
// Check the region start key.
require.Regexp(t, fmt.Sprintf("t_%d.*", tbl.Meta().ID), rows[0][1])
require.Regexp(t, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID), rows[1][1])
require.Regexp(t, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID), rows[2][1])
require.Regexp(t, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID), rows[3][1])
+ // Check scheduling constraint and scheduling state default value
+ for i := range rows {
+ require.Equal(t, "", rows[i][11])
+ require.Equal(t, "", rows[i][12])
+ }
re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
@@ -593,4 +605,113 @@ func TestShowTableRegion(t *testing.T) {
// Test show table partition region on non-partition table.
err = tk.QueryToErr("show table t partition (p3,p4) index idx regions")
require.True(t, terror.ErrorEqual(err, plannercore.ErrPartitionClauseOnNonpartitioned))
+
+ // Test scheduling info for un-partitioned table with placement policy
+ tk.MustExec("drop table if exists t1_scheduling")
+	tk.MustExec("drop placement policy if exists p1")
+ tk.MustExec("create placement policy p1 " +
+ "PRIMARY_REGION=\"cn-east-1\" " +
+ "REGIONS=\"cn-east-1,cn-east-2\"" +
+ "SCHEDULE=\"EVEN\"")
+ tk.MustExec("create table t1_scheduling (id int) placement policy p1")
+ re = tk.MustQuery("show table t1_scheduling regions")
+ rows = re.Rows()
+ require.Len(t, rows, 1)
+ require.Len(t, rows[0], 13)
+ tbl = external.GetTableByName(t, tk, "test", "t1_scheduling")
+ require.Equal(t, fmt.Sprintf("t_%d_", tbl.Meta().ID), rows[0][1])
+ require.Equal(t, "PRIMARY_REGION=\"cn-east-1\" REGIONS=\"cn-east-1,cn-east-2\" SCHEDULE=\"EVEN\"", rows[0][11])
+ require.Equal(t, infosync.PlacementScheduleStatePending.String(), rows[0][12])
+
+ // Test scheduling info for partitioned table with placement policy
+ tk.MustExec("drop table if exists t2_scheduling")
+ tk.MustExec("drop placement policy if exists p2")
+ tk.MustExec("create placement policy p2 " +
+ "LEADER_CONSTRAINTS=\"[+region=us-east-1]\" " +
+ "FOLLOWER_CONSTRAINTS=\"[+region=us-east-2]\" " +
+ "FOLLOWERS=3")
+ tk.MustExec("create table t2_scheduling (id INT) placement policy p1 partition by range (id) (" +
+ "partition p0 values less than (100) placement policy p2," +
+ "partition p1 values less than (1000)," +
+ "partition p2 values less than (10000)" +
+ ")")
+ re = tk.MustQuery("show table t2_scheduling regions")
+ rows = re.Rows()
+ require.Len(t, rows, 3)
+ require.Len(t, rows[0], 13)
+ tbl = external.GetTableByName(t, tk, "test", "t2_scheduling")
+ require.Equal(t, "LEADER_CONSTRAINTS=\"[+region=us-east-1]\" FOLLOWERS=3 FOLLOWER_CONSTRAINTS=\"[+region=us-east-2]\"", rows[0][11])
+ require.Equal(t, "PRIMARY_REGION=\"cn-east-1\" REGIONS=\"cn-east-1,cn-east-2\" SCHEDULE=\"EVEN\"", rows[1][11])
+ require.Equal(t, "PRIMARY_REGION=\"cn-east-1\" REGIONS=\"cn-east-1,cn-east-2\" SCHEDULE=\"EVEN\"", rows[2][11])
+ require.Equal(t, infosync.PlacementScheduleStatePending.String(), rows[0][12])
+ require.Equal(t, infosync.PlacementScheduleStatePending.String(), rows[1][12])
+ require.Equal(t, infosync.PlacementScheduleStatePending.String(), rows[2][12])
+
+ // Test scheduling info for partitioned table after split to regions
+ tk.MustExec("drop table if exists t3_scheduling")
+ tk.MustExec("create table t3_scheduling (id INT) placement policy p1 partition by range (id) (" +
+ "partition p0 values less than (100) placement policy p2," +
+ "partition p1 values less than (1000)," +
+ "partition p2 values less than (10000)" +
+ ")")
+ tk.MustQuery("split partition table t3_scheduling between (0) and (10000) regions 4")
+ re = tk.MustQuery("show table t3_scheduling regions")
+ rows = re.Rows()
+ require.Len(t, rows, 12)
+ require.Len(t, rows[0], 13)
+ for i := range rows {
+ if i < 4 {
+ require.Equal(t, "LEADER_CONSTRAINTS=\"[+region=us-east-1]\" FOLLOWERS=3 FOLLOWER_CONSTRAINTS=\"[+region=us-east-2]\"", rows[i][11])
+ } else {
+ require.Equal(t, "PRIMARY_REGION=\"cn-east-1\" REGIONS=\"cn-east-1,cn-east-2\" SCHEDULE=\"EVEN\"", rows[i][11])
+ }
+ require.Equal(t, infosync.PlacementScheduleStatePending.String(), rows[i][12])
+ }
+
+ // Test scheduling info for un-partitioned table after split index to regions
+ tk.MustExec("drop table if exists t4_scheduling")
+ tk.MustExec("create table t4_scheduling (id INT, val INT, index idx1(val)) placement policy p1")
+ tk.MustQuery("split table t4_scheduling index idx1 between (0) and (12345) regions 3")
+ re = tk.MustQuery("show table t4_scheduling regions")
+ rows = re.Rows()
+ require.Len(t, rows, 4)
+ require.Len(t, rows[0], 13)
+ for i := range rows {
+ require.Equal(t, "PRIMARY_REGION=\"cn-east-1\" REGIONS=\"cn-east-1,cn-east-2\" SCHEDULE=\"EVEN\"", rows[i][11])
+ require.Equal(t, infosync.PlacementScheduleStatePending.String(), rows[i][12])
+ }
+
+ // Test scheduling info for partitioned table after split index to regions
+ tk.MustExec("drop table if exists t5_scheduling")
+ tk.MustExec("create table t5_scheduling (id INT, val INT, index idx1(val)) placement policy p1 partition by range (id) (" +
+ "partition p0 values less than (100) placement policy p2," +
+ "partition p1 values less than (1000)," +
+ "partition p2 values less than (10000)" +
+ ")")
+ tk.MustQuery("split table t5_scheduling index idx1 between (0) and (12345) regions 3")
+ re = tk.MustQuery("show table t5_scheduling regions")
+ rows = re.Rows()
+ require.Len(t, rows, 12)
+ require.Len(t, rows[0], 13)
+ for i := range rows {
+ if i < 4 {
+ require.Equal(t, "LEADER_CONSTRAINTS=\"[+region=us-east-1]\" FOLLOWERS=3 FOLLOWER_CONSTRAINTS=\"[+region=us-east-2]\"", rows[i][11])
+ } else {
+ require.Equal(t, "PRIMARY_REGION=\"cn-east-1\" REGIONS=\"cn-east-1,cn-east-2\" SCHEDULE=\"EVEN\"", rows[i][11])
+ }
+ require.Equal(t, infosync.PlacementScheduleStatePending.String(), rows[i][12])
+ }
+ re = tk.MustQuery("show table t5_scheduling index idx1 regions")
+ rows = re.Rows()
+ require.Len(t, rows, 9)
+ require.Len(t, rows[0], 13)
+ for i := range rows {
+ if i < 3 {
+ require.Equal(t, "LEADER_CONSTRAINTS=\"[+region=us-east-1]\" FOLLOWERS=3 FOLLOWER_CONSTRAINTS=\"[+region=us-east-2]\"", rows[i][11])
+ } else {
+ require.Equal(t, "PRIMARY_REGION=\"cn-east-1\" REGIONS=\"cn-east-1,cn-east-2\" SCHEDULE=\"EVEN\"", rows[i][11])
+ }
+ require.Equal(t, infosync.PlacementScheduleStatePending.String(), rows[i][12])
+ }
+
}
diff --git a/executor/update.go b/executor/update.go
index faf5b1d15e1bf..196f737aa057f 100644
--- a/executor/update.go
+++ b/executor/update.go
@@ -42,11 +42,11 @@ type UpdateExec struct {
// updatedRowKeys is a map for unique (TableAlias, handle) pair.
// The value is true if the row is changed, or false otherwise
- updatedRowKeys map[int]*kv.HandleMap
+ updatedRowKeys map[int]*kv.MemAwareHandleMap[bool]
tblID2table map[int64]table.Table
// mergedRowData is a map for unique (Table, handle) pair.
// The value is cached table row
- mergedRowData map[int64]*kv.HandleMap
+ mergedRowData map[int64]*kv.MemAwareHandleMap[[]types.Datum]
multiUpdateOnSameTable map[int64]bool
matched uint64 // a counter of matched rows during update
@@ -71,7 +71,7 @@ type UpdateExec struct {
// prepare `handles`, `tableUpdatable`, `changed` to avoid re-computations.
func (e *UpdateExec) prepare(row []types.Datum) (err error) {
if e.updatedRowKeys == nil {
- e.updatedRowKeys = make(map[int]*kv.HandleMap)
+ e.updatedRowKeys = make(map[int]*kv.MemAwareHandleMap[bool])
}
e.handles = e.handles[:0]
e.tableUpdatable = e.tableUpdatable[:0]
@@ -79,7 +79,7 @@ func (e *UpdateExec) prepare(row []types.Datum) (err error) {
e.matches = e.matches[:0]
for _, content := range e.tblColPosInfos {
if e.updatedRowKeys[content.Start] == nil {
- e.updatedRowKeys[content.Start] = kv.NewHandleMap()
+ e.updatedRowKeys[content.Start] = kv.NewMemAwareHandleMap[bool]()
}
handle, err := content.HandleCols.BuildHandleByDatums(row)
if err != nil {
@@ -102,7 +102,7 @@ func (e *UpdateExec) prepare(row []types.Datum) (err error) {
changed, ok := e.updatedRowKeys[content.Start].Get(handle)
if ok {
- e.changed = append(e.changed, changed.(bool))
+ e.changed = append(e.changed, changed)
e.matches = append(e.matches, false)
} else {
e.changed = append(e.changed, false)
@@ -114,7 +114,7 @@ func (e *UpdateExec) prepare(row []types.Datum) (err error) {
func (e *UpdateExec) merge(row, newData []types.Datum, mergeGenerated bool) error {
if e.mergedRowData == nil {
- e.mergedRowData = make(map[int64]*kv.HandleMap)
+ e.mergedRowData = make(map[int64]*kv.MemAwareHandleMap[[]types.Datum])
}
var mergedData []types.Datum
// merge updates from and into mergedRowData
@@ -135,13 +135,13 @@ func (e *UpdateExec) merge(row, newData []types.Datum, mergeGenerated bool) erro
flags := e.assignFlag[content.Start:content.End]
if e.mergedRowData[content.TblID] == nil {
- e.mergedRowData[content.TblID] = kv.NewHandleMap()
+ e.mergedRowData[content.TblID] = kv.NewMemAwareHandleMap[[]types.Datum]()
}
tbl := e.tblID2table[content.TblID]
oldData := row[content.Start:content.End]
newTableData := newData[content.Start:content.End]
if v, ok := e.mergedRowData[content.TblID].Get(handle); ok {
- mergedData = v.([]types.Datum)
+ mergedData = v
for i, flag := range flags {
if tbl.WritableCols()[i].IsGenerated() != mergeGenerated {
continue
@@ -156,7 +156,10 @@ func (e *UpdateExec) merge(row, newData []types.Datum, mergeGenerated bool) erro
} else {
mergedData = append([]types.Datum{}, newTableData...)
}
- e.mergedRowData[content.TblID].Set(handle, mergedData)
+
+ memDelta := e.mergedRowData[content.TblID].Set(handle, mergedData)
+ memDelta += types.EstimatedMemUsage(mergedData, 1) + int64(handle.ExtraMemSize())
+ e.memTracker.Consume(memDelta)
}
return nil
}
@@ -190,7 +193,12 @@ func (e *UpdateExec) exec(ctx context.Context, schema *expression.Schema, row, n
// Update row
changed, err1 := updateRecord(ctx, e.ctx, handle, oldData, newTableData, flags, tbl, false, e.memTracker)
if err1 == nil {
- e.updatedRowKeys[content.Start].Set(handle, changed)
+ _, exist := e.updatedRowKeys[content.Start].Get(handle)
+ memDelta := e.updatedRowKeys[content.Start].Set(handle, changed)
+ if !exist {
+ memDelta += int64(handle.ExtraMemSize())
+ }
+ e.memTracker.Consume(memDelta)
continue
}
@@ -426,6 +434,7 @@ func (e *UpdateExec) Close() error {
txn.GetSnapshot().SetOption(kv.CollectRuntimeStats, nil)
}
}
+ defer e.memTracker.ReplaceBytesUsed(0)
return e.children[0].Close()
}
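
As in `DeleteExec`, the switch from `kv.HandleMap` to the generic `kv.MemAwareHandleMap[V]` is what removes the `changed.(bool)` and `v.([]types.Datum)` assertions above: the value type is carried in the map's type instead of behind `interface{}`. An illustrative comparison (toy types, not the real `kv` package):

```go
package main

import "fmt"

type handleMap struct{ m map[int64]any } // old style: values stored as interface{}

type typedMap[V any] struct{ m map[int64]V } // new style: values keep their type

func (h handleMap) Get(k int64) (any, bool) { v, ok := h.m[k]; return v, ok }

func (t typedMap[V]) Get(k int64) (V, bool) { v, ok := t.m[k]; return v, ok }

func main() {
	old := handleMap{m: map[int64]any{1: true}}
	if v, ok := old.Get(1); ok {
		changed := v.(bool) // assertion required; panics if the stored type ever drifts
		fmt.Println(changed)
	}

	fresh := typedMap[bool]{m: map[int64]bool{1: true}}
	if changed, ok := fresh.Get(1); ok {
		fmt.Println(changed) // already a bool, no assertion
	}
}
```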
diff --git a/expression/builtin_cast.go b/expression/builtin_cast.go
index c281e2de80302..cc7b9d3683b71 100644
--- a/expression/builtin_cast.go
+++ b/expression/builtin_cast.go
@@ -917,7 +917,7 @@ func (b *builtinCastRealAsDurationSig) evalDuration(row chunk.Row) (res types.Du
if isNull || err != nil {
return res, isNull, err
}
- res, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, strconv.FormatFloat(val, 'f', -1, 64), b.tp.GetDecimal())
+ res, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, strconv.FormatFloat(val, 'f', -1, 64), b.tp.GetDecimal())
if err != nil {
if types.ErrTruncatedWrongVal.Equal(err) {
err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err)
@@ -1095,7 +1095,7 @@ func (b *builtinCastDecimalAsDurationSig) evalDuration(row chunk.Row) (res types
if isNull || err != nil {
return res, true, err
}
- res, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, string(val.ToString()), b.tp.GetDecimal())
+ res, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, string(val.ToString()), b.tp.GetDecimal())
if types.ErrTruncatedWrongVal.Equal(err) {
err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err)
// ErrTruncatedWrongVal needs to be considered NULL.
@@ -1318,16 +1318,12 @@ func (b *builtinCastStringAsDurationSig) evalDuration(row chunk.Row) (res types.
if isNull || err != nil {
return res, isNull, err
}
- res, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, val, b.tp.GetDecimal())
+ res, isNull, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, val, b.tp.GetDecimal())
if types.ErrTruncatedWrongVal.Equal(err) {
sc := b.ctx.GetSessionVars().StmtCtx
err = sc.HandleTruncate(err)
- // ZeroDuration of error ErrTruncatedWrongVal needs to be considered NULL.
- if res == types.ZeroDuration {
- return res, true, err
- }
}
- return res, false, err
+ return res, isNull, err
}
type builtinCastTimeAsTimeSig struct {
@@ -1765,7 +1761,7 @@ func (b *builtinCastJSONAsDurationSig) evalDuration(row chunk.Row) (res types.Du
if err != nil {
return res, false, err
}
- res, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, s, b.tp.GetDecimal())
+ res, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, s, b.tp.GetDecimal())
if types.ErrTruncatedWrongVal.Equal(err) {
sc := b.ctx.GetSessionVars().StmtCtx
err = sc.HandleTruncate(err)
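
`types.ParseDuration` now returns `(Duration, isNull, error)`, so callers no longer have to infer NULL from `res == types.ZeroDuration` as the removed comment did; a genuine zero duration and a NULL result are now distinguishable. A toy stand-in for that calling convention (not TiDB's actual parser):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errTruncated = errors.New("truncated wrong value")

// parseDuration mimics the new three-value convention: isNull is reported
// explicitly instead of being encoded as a zero duration.
func parseDuration(s string) (d time.Duration, isNull bool, err error) {
	if s == "" {
		return 0, true, errTruncated // unparsable input: NULL plus a truncation error
	}
	if s == "00:00:00" {
		return 0, false, nil // a genuine zero duration is not NULL
	}
	d, err = time.ParseDuration(s)
	return d, false, err
}

func main() {
	for _, in := range []string{"", "00:00:00", "1h30m"} {
		d, isNull, err := parseDuration(in)
		fmt.Printf("%q -> dur=%v null=%v err=%v\n", in, d, isNull, err)
	}
}
```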
diff --git a/expression/builtin_cast_vec.go b/expression/builtin_cast_vec.go
index ee29a768dd702..141ff49c26f13 100644
--- a/expression/builtin_cast_vec.go
+++ b/expression/builtin_cast_vec.go
@@ -939,7 +939,7 @@ func (b *builtinCastStringAsDurationSig) vecEvalDuration(input *chunk.Chunk, res
if result.IsNull(i) {
continue
}
- dur, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, buf.GetString(i), b.tp.GetDecimal())
+ dur, isNull, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, buf.GetString(i), b.tp.GetDecimal())
if err != nil {
if types.ErrTruncatedWrongVal.Equal(err) {
err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err)
@@ -947,7 +947,7 @@ func (b *builtinCastStringAsDurationSig) vecEvalDuration(input *chunk.Chunk, res
if err != nil {
return err
}
- if dur == types.ZeroDuration {
+ if isNull {
result.SetNull(i, true)
continue
}
@@ -1213,7 +1213,7 @@ func (b *builtinCastRealAsDurationSig) vecEvalDuration(input *chunk.Chunk, resul
if result.IsNull(i) {
continue
}
- dur, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, strconv.FormatFloat(f64s[i], 'f', -1, 64), b.tp.GetDecimal())
+ dur, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, strconv.FormatFloat(f64s[i], 'f', -1, 64), b.tp.GetDecimal())
if err != nil {
if types.ErrTruncatedWrongVal.Equal(err) {
err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err)
@@ -1793,7 +1793,7 @@ func (b *builtinCastDecimalAsDurationSig) vecEvalDuration(input *chunk.Chunk, re
if result.IsNull(i) {
continue
}
- dur, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, string(args[i].ToString()), b.tp.GetDecimal())
+ dur, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, string(args[i].ToString()), b.tp.GetDecimal())
if err != nil {
if types.ErrTruncatedWrongVal.Equal(err) {
err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err)
@@ -1880,7 +1880,7 @@ func (b *builtinCastJSONAsDurationSig) vecEvalDuration(input *chunk.Chunk, resul
if err != nil {
return nil
}
- dur, err = types.ParseDuration(ctx, s, b.tp.GetDecimal())
+ dur, _, err = types.ParseDuration(ctx, s, b.tp.GetDecimal())
if types.ErrTruncatedWrongVal.Equal(err) {
err = ctx.HandleTruncate(err)
}
diff --git a/expression/builtin_string.go b/expression/builtin_string.go
index e46490e1cf31d..55ab3d15c7486 100644
--- a/expression/builtin_string.go
+++ b/expression/builtin_string.go
@@ -324,6 +324,7 @@ func (b *builtinConcatSig) Clone() builtinFunc {
// evalString evals a builtinConcatSig
// See https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_concat
func (b *builtinConcatSig) evalString(row chunk.Row) (d string, isNull bool, err error) {
+ //nolint: prealloc
var s []byte
for _, a := range b.getArgs() {
d, isNull, err = a.EvalString(b.ctx, row)
diff --git a/expression/builtin_time.go b/expression/builtin_time.go
index 0150af13191a9..d5d17bd4a6ecb 100644
--- a/expression/builtin_time.go
+++ b/expression/builtin_time.go
@@ -2158,7 +2158,7 @@ func (b *builtinCurrentTime0ArgSig) evalDuration(row chunk.Row) (types.Duration,
return types.Duration{}, true, err
}
dur := nowTs.In(tz).Format(types.TimeFormat)
- res, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, dur, types.MinFsp)
+ res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, dur, types.MinFsp)
if err != nil {
return types.Duration{}, true, err
}
@@ -2186,7 +2186,7 @@ func (b *builtinCurrentTime1ArgSig) evalDuration(row chunk.Row) (types.Duration,
return types.Duration{}, true, err
}
dur := nowTs.In(tz).Format(types.TimeFSPFormat)
- res, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, dur, int(fsp))
+ res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, dur, int(fsp))
if err != nil {
return types.Duration{}, true, err
}
@@ -2246,7 +2246,7 @@ func (b *builtinTimeSig) evalDuration(row chunk.Row) (res types.Duration, isNull
fsp = tmpFsp
sc := b.ctx.GetSessionVars().StmtCtx
- res, err = types.ParseDuration(sc, expr, fsp)
+ res, _, err = types.ParseDuration(sc, expr, fsp)
if types.ErrTruncatedWrongVal.Equal(err) {
err = sc.HandleTruncate(err)
}
@@ -2273,7 +2273,7 @@ func (c *timeLiteralFunctionClass) getFunction(ctx sessionctx.Context, args []Ex
if !isDuration(str) {
return nil, types.ErrWrongValue.GenWithStackByArgs(types.TimeStr, str)
}
- duration, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx, str, types.GetFsp(str))
+ duration, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx, str, types.GetFsp(str))
if err != nil {
return nil, err
}
@@ -2639,7 +2639,7 @@ func (b *builtinExtractDatetimeFromStringSig) evalInt(row chunk.Row) (int64, boo
sc := b.ctx.GetSessionVars().StmtCtx
switch strings.ToUpper(unit) {
case "DAY_MICROSECOND", "DAY_SECOND", "DAY_MINUTE", "DAY_HOUR":
- dur, err := types.ParseDuration(sc, dtStr, types.GetFsp(dtStr))
+ dur, _, err := types.ParseDuration(sc, dtStr, types.GetFsp(dtStr))
if err != nil {
return 0, true, err
}
@@ -4129,9 +4129,13 @@ func (c *unixTimestampFunctionClass) getFunction(ctx sessionctx.Context, args []
// goTimeToMysqlUnixTimestamp converts go time into MySQL's Unix timestamp.
// MySQL's Unix timestamp ranges in int32. Values out of range should be rewritten to 0.
+// https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_unix-timestamp
func goTimeToMysqlUnixTimestamp(t time.Time, decimal int) (*types.MyDecimal, error) {
nanoSeconds := t.UnixNano()
- if nanoSeconds < 0 || (nanoSeconds/1e3) >= (math.MaxInt32+1)*1e6 {
+ // Prior to MySQL 8.0.28, the valid range of argument values is the same as for the TIMESTAMP data type:
+ // '1970-01-01 00:00:01.000000' UTC to '2038-01-19 03:14:07.999999' UTC.
+ // This is also the case in MySQL 8.0.28 and later for 32-bit platforms.
+ if nanoSeconds < 1e9 || (nanoSeconds/1e3) >= (math.MaxInt32+1)*1e6 {
return new(types.MyDecimal), nil
}
dec := new(types.MyDecimal)
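
The new lower bound `nanoSeconds < 1e9` matches the documented TIMESTAMP range quoted in the comment: anything before 1970-01-01 00:00:01 UTC or at or after 2038-01-19 03:14:08 UTC maps to 0, which is exactly what the `UNIX_TIMESTAMP` cases added in `integration_serial_test.go` assert. A small sketch of the same range check:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// inUnixTimestampRange mirrors the check added above: timestamps below
// 1970-01-01 00:00:01 UTC or at/after 2038-01-19 03:14:08 UTC map to 0.
func inUnixTimestampRange(t time.Time) bool {
	ns := t.UnixNano()
	return ns >= 1e9 && (ns/1e3) < (math.MaxInt32+1)*1e6
}

func main() {
	utc := time.UTC
	fmt.Println(inUnixTimestampRange(time.Date(1970, 1, 1, 0, 0, 0, 999999000, utc)))   // false -> 0.000000
	fmt.Println(inUnixTimestampRange(time.Date(1970, 1, 1, 0, 0, 1, 0, utc)))           // true  -> 1.000000
	fmt.Println(inUnixTimestampRange(time.Date(2038, 1, 19, 3, 14, 7, 999999000, utc))) // true  -> 2147483647.999999
	fmt.Println(inUnixTimestampRange(time.Date(2038, 1, 19, 3, 14, 8, 0, utc)))         // false -> 0.000000
}
```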
@@ -4369,7 +4373,7 @@ func (b *builtinTimestamp2ArgsSig) evalTime(row chunk.Row) (types.Time, bool, er
if !isDuration(arg1) {
return types.ZeroTime, true, nil
}
- duration, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
+ duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
if err != nil {
return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err)
}
@@ -4531,7 +4535,7 @@ func strDatetimeAddDuration(sc *stmtctx.StatementContext, d string, arg1 types.D
// strDurationAddDuration adds duration to duration string, returns a string value.
func strDurationAddDuration(sc *stmtctx.StatementContext, d string, arg1 types.Duration) (string, error) {
- arg0, err := types.ParseDuration(sc, d, types.MaxFsp)
+ arg0, _, err := types.ParseDuration(sc, d, types.MaxFsp)
if err != nil {
return "", err
}
@@ -4568,7 +4572,7 @@ func strDatetimeSubDuration(sc *stmtctx.StatementContext, d string, arg1 types.D
// strDurationSubDuration subtracts duration from duration string, returns a string value.
func strDurationSubDuration(sc *stmtctx.StatementContext, d string, arg1 types.Duration) (string, error) {
- arg0, err := types.ParseDuration(sc, d, types.MaxFsp)
+ arg0, _, err := types.ParseDuration(sc, d, types.MaxFsp)
if err != nil {
return "", err
}
@@ -4717,7 +4721,7 @@ func (b *builtinAddDatetimeAndStringSig) evalTime(row chunk.Row) (types.Time, bo
return types.ZeroDatetime, true, nil
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1, err := types.ParseDuration(sc, s, types.GetFsp(s))
+ arg1, _, err := types.ParseDuration(sc, s, types.GetFsp(s))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -4798,7 +4802,7 @@ func (b *builtinAddDurationAndStringSig) evalDuration(row chunk.Row) (types.Dura
return types.ZeroDuration, true, nil
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1, err := types.ParseDuration(sc, s, types.GetFsp(s))
+ arg1, _, err := types.ParseDuration(sc, s, types.GetFsp(s))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -4900,7 +4904,7 @@ func (b *builtinAddStringAndStringSig) evalString(row chunk.Row) (result string,
return "", isNull, err
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1, err = types.ParseDuration(sc, arg1Str, getFsp4TimeAddSub(arg1Str))
+ arg1, _, err = types.ParseDuration(sc, arg1Str, getFsp4TimeAddSub(arg1Str))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -4983,7 +4987,7 @@ func (b *builtinAddDateAndStringSig) evalString(row chunk.Row) (string, bool, er
return "", true, nil
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1, err := types.ParseDuration(sc, s, getFsp4TimeAddSub(s))
+ arg1, _, err := types.ParseDuration(sc, s, getFsp4TimeAddSub(s))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -5249,7 +5253,8 @@ func (b *builtinMakeTimeSig) makeTime(hour int64, minute int64, second float64,
second = 59
}
fsp := b.tp.GetDecimal()
- return types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, fmt.Sprintf("%02d:%02d:%v", hour, minute, second), fsp)
+ d, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, fmt.Sprintf("%02d:%02d:%v", hour, minute, second), fsp)
+ return d, err
}
// evalDuration evals a builtinMakeTimeIntSig.
@@ -5543,7 +5548,7 @@ func (b *builtinSecToTimeSig) evalDuration(row chunk.Row) (types.Duration, bool,
secondDemical = float64(second) + demical
var dur types.Duration
- dur, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, fmt.Sprintf("%s%02d:%02d:%s", negative, hour, minute, strconv.FormatFloat(secondDemical, 'f', -1, 64)), b.tp.GetDecimal())
+ dur, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, fmt.Sprintf("%s%02d:%02d:%s", negative, hour, minute, strconv.FormatFloat(secondDemical, 'f', -1, 64)), b.tp.GetDecimal())
if err != nil {
return types.Duration{}, err != nil, err
}
@@ -5672,7 +5677,7 @@ func (b *builtinSubDatetimeAndStringSig) evalTime(row chunk.Row) (types.Time, bo
return types.ZeroDatetime, true, nil
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1, err := types.ParseDuration(sc, s, types.GetFsp(s))
+ arg1, _, err := types.ParseDuration(sc, s, types.GetFsp(s))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -5771,7 +5776,7 @@ func (b *builtinSubStringAndStringSig) evalString(row chunk.Row) (result string,
return "", isNull, err
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1, err = types.ParseDuration(sc, s, getFsp4TimeAddSub(s))
+ arg1, _, err = types.ParseDuration(sc, s, getFsp4TimeAddSub(s))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -5863,7 +5868,7 @@ func (b *builtinSubDurationAndStringSig) evalDuration(row chunk.Row) (types.Dura
return types.ZeroDuration, true, nil
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1, err := types.ParseDuration(sc, s, types.GetFsp(s))
+ arg1, _, err := types.ParseDuration(sc, s, types.GetFsp(s))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -5941,7 +5946,7 @@ func (b *builtinSubDateAndStringSig) evalString(row chunk.Row) (string, bool, er
return "", true, nil
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1, err := types.ParseDuration(sc, s, getFsp4TimeAddSub(s))
+ arg1, _, err := types.ParseDuration(sc, s, getFsp4TimeAddSub(s))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -6306,7 +6311,7 @@ func (b *builtinUTCTimeWithoutArgSig) evalDuration(row chunk.Row) (types.Duratio
if err != nil {
return types.Duration{}, true, err
}
- v, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, nowTs.UTC().Format(types.TimeFormat), 0)
+ v, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, nowTs.UTC().Format(types.TimeFormat), 0)
return v, false, err
}
@@ -6337,7 +6342,7 @@ func (b *builtinUTCTimeWithArgSig) evalDuration(row chunk.Row) (types.Duration,
if err != nil {
return types.Duration{}, true, err
}
- v, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, nowTs.UTC().Format(types.TimeFSPFormat), int(fsp))
+ v, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, nowTs.UTC().Format(types.TimeFSPFormat), int(fsp))
return v, false, err
}
diff --git a/expression/builtin_time_test.go b/expression/builtin_time_test.go
index c081b11f0f0f8..fb8387eb022fb 100644
--- a/expression/builtin_time_test.go
+++ b/expression/builtin_time_test.go
@@ -962,7 +962,7 @@ func TestAddTimeSig(t *testing.T) {
{"-110:00:00", "1 02:00:00", "-84:00:00"},
}
for _, c := range tbl {
- dur, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx, c.Input, types.GetFsp(c.Input))
+ dur, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx, c.Input, types.GetFsp(c.Input))
require.NoError(t, err)
tmpInput := types.NewDurationDatum(dur)
tmpInputDuration := types.NewStringDatum(c.InputDuration)
@@ -1063,7 +1063,7 @@ func TestSubTimeSig(t *testing.T) {
{"235959", "00:00:01", "23:59:58"},
}
for _, c := range tbl {
- dur, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx, c.Input, types.GetFsp(c.Input))
+ dur, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx, c.Input, types.GetFsp(c.Input))
require.NoError(t, err)
tmpInput := types.NewDurationDatum(dur)
tmpInputDuration := types.NewStringDatum(c.InputDuration)
diff --git a/expression/builtin_time_vec.go b/expression/builtin_time_vec.go
index b292164813dcc..0d6b4321095f5 100644
--- a/expression/builtin_time_vec.go
+++ b/expression/builtin_time_vec.go
@@ -421,7 +421,7 @@ func (b *builtinUTCTimeWithArgSig) vecEvalDuration(input *chunk.Chunk, result *c
if fsp < int64(types.MinFsp) {
return errors.Errorf("Invalid negative %d specified, must in [0, 6]", fsp)
}
- res, err := types.ParseDuration(stmtCtx, utc, int(fsp))
+ res, _, err := types.ParseDuration(stmtCtx, utc, int(fsp))
if err != nil {
return err
}
@@ -1954,7 +1954,7 @@ func (b *builtinSecToTimeSig) vecEvalDuration(input *chunk.Chunk, result *chunk.
second = seconds % 60
}
secondDemical := float64(second) + demical
- duration, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, fmt.Sprintf("%s%02d:%02d:%s", negative, hour, minute, strconv.FormatFloat(secondDemical, 'f', -1, 64)), b.tp.GetDecimal())
+ duration, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, fmt.Sprintf("%s%02d:%02d:%s", negative, hour, minute, strconv.FormatFloat(secondDemical, 'f', -1, 64)), b.tp.GetDecimal())
if err != nil {
return err
}
@@ -1975,7 +1975,7 @@ func (b *builtinUTCTimeWithoutArgSig) vecEvalDuration(input *chunk.Chunk, result
if err != nil {
return err
}
- res, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, nowTs.UTC().Format(types.TimeFormat), types.DefaultFsp)
+ res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, nowTs.UTC().Format(types.TimeFormat), types.DefaultFsp)
if err != nil {
return err
}
@@ -2378,7 +2378,7 @@ func (b *builtinCurrentTime0ArgSig) vecEvalDuration(input *chunk.Chunk, result *
}
tz := b.ctx.GetSessionVars().Location()
dur := nowTs.In(tz).Format(types.TimeFormat)
- res, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, dur, types.MinFsp)
+ res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, dur, types.MinFsp)
if err != nil {
return err
}
@@ -2426,7 +2426,7 @@ func (b *builtinTimeSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Colum
}
fsp = tmpFsp
- res, err := types.ParseDuration(sc, expr, fsp)
+ res, _, err := types.ParseDuration(sc, expr, fsp)
if types.ErrTruncatedWrongVal.Equal(err) {
err = sc.HandleTruncate(err)
}
@@ -2572,7 +2572,7 @@ func (b *builtinCurrentTime1ArgSig) vecEvalDuration(input *chunk.Chunk, result *
result.ResizeGoDuration(n, false)
durations := result.GoDurations()
for i := 0; i < n; i++ {
- res, err := types.ParseDuration(stmtCtx, dur, int(i64s[i]))
+ res, _, err := types.ParseDuration(stmtCtx, dur, int(i64s[i]))
if err != nil {
return err
}
@@ -2751,7 +2751,7 @@ func (b *builtinTimestamp2ArgsSig) vecEvalTime(input *chunk.Chunk, result *chunk
continue
}
- duration, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
+ duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
if err != nil {
if err = handleInvalidTimeError(b.ctx, err); err != nil {
return err
diff --git a/expression/builtin_time_vec_generated.go b/expression/builtin_time_vec_generated.go
index 7c18af6e81ddf..4e897affc66d9 100644
--- a/expression/builtin_time_vec_generated.go
+++ b/expression/builtin_time_vec_generated.go
@@ -122,7 +122,7 @@ func (b *builtinAddDatetimeAndStringSig) vecEvalTime(input *chunk.Chunk, result
continue
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1Duration, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
+ arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -248,7 +248,7 @@ func (b *builtinAddDurationAndStringSig) vecEvalDuration(input *chunk.Chunk, res
continue
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1Duration, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
+ arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -406,7 +406,7 @@ func (b *builtinAddStringAndStringSig) vecEvalString(input *chunk.Chunk, result
// calculate
sc := b.ctx.GetSessionVars().StmtCtx
- arg1Duration, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1))
+ arg1Duration, _, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -566,7 +566,7 @@ func (b *builtinAddDateAndStringSig) vecEvalString(input *chunk.Chunk, result *c
continue
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1Duration, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1))
+ arg1Duration, _, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -737,7 +737,7 @@ func (b *builtinSubDatetimeAndStringSig) vecEvalTime(input *chunk.Chunk, result
continue
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1Duration, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
+ arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -862,7 +862,7 @@ func (b *builtinSubDurationAndStringSig) vecEvalDuration(input *chunk.Chunk, res
continue
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1Duration, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
+ arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -1020,7 +1020,7 @@ func (b *builtinSubStringAndStringSig) vecEvalString(input *chunk.Chunk, result
// calculate
sc := b.ctx.GetSessionVars().StmtCtx
- arg1Duration, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1))
+ arg1Duration, _, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -1180,7 +1180,7 @@ func (b *builtinSubDateAndStringSig) vecEvalString(input *chunk.Chunk, result *c
continue
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1Duration, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1))
+ arg1Duration, _, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
diff --git a/expression/generator/time_vec.go b/expression/generator/time_vec.go
index 3bf16f0076594..73067419f8cfc 100644
--- a/expression/generator/time_vec.go
+++ b/expression/generator/time_vec.go
@@ -63,7 +63,7 @@ import (
continue
}{{ end }}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1Duration, err := types.ParseDuration(sc, arg1, {{if eq .Output.TypeName "String"}}getFsp4TimeAddSub{{else}}types.GetFsp{{end}}(arg1))
+ arg1Duration, _, err := types.ParseDuration(sc, arg1, {{if eq .Output.TypeName "String"}}getFsp4TimeAddSub{{else}}types.GetFsp{{end}}(arg1))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
@@ -191,7 +191,7 @@ func (b *{{.SigName}}) vecEval{{ .Output.TypeName }}(input *chunk.Chunk, result
continue
}
sc := b.ctx.GetSessionVars().StmtCtx
- arg1Duration, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
+ arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1))
if err != nil {
if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
sc.AppendWarning(err)
diff --git a/expression/integration_serial_test.go b/expression/integration_serial_test.go
index 0b7b9a58f9054..fc4ecda462208 100644
--- a/expression/integration_serial_test.go
+++ b/expression/integration_serial_test.go
@@ -2387,6 +2387,12 @@ func TestTimeBuiltin(t *testing.T) {
// for unix_timestamp
tk.MustExec("SET time_zone = '+00:00';")
+ tk.MustQuery("SELECT UNIX_TIMESTAMP('1970-01-01 00:00:00.000001');").Check(testkit.Rows("0.000000"))
+ tk.MustQuery("SELECT UNIX_TIMESTAMP('1970-01-01 00:00:00.999999');").Check(testkit.Rows("0.000000"))
+ tk.MustQuery("SELECT UNIX_TIMESTAMP('1970-01-01 00:00:01.000000');").Check(testkit.Rows("1.000000"))
+ tk.MustQuery("SELECT UNIX_TIMESTAMP('2038-01-19 03:14:07.999999');").Check(testkit.Rows("2147483647.999999"))
+ tk.MustQuery("SELECT UNIX_TIMESTAMP('2038-01-19 03:14:08.000000');").Check(testkit.Rows("0.000000"))
+
result = tk.MustQuery("SELECT UNIX_TIMESTAMP(151113);")
result.Check(testkit.Rows("1447372800"))
result = tk.MustQuery("SELECT UNIX_TIMESTAMP(20151113);")
diff --git a/expression/integration_test.go b/expression/integration_test.go
index 8c10fc96b5d30..b3dc43fe084d1 100644
--- a/expression/integration_test.go
+++ b/expression/integration_test.go
@@ -6233,11 +6233,11 @@ func TestGlobalCacheCorrectness(t *testing.T) {
defer clean()
tk := testkit.NewTestKit(t, store)
- tk.MustQuery("SHOW VARIABLES LIKE 'max_connections'").Check(testkit.Rows("max_connections 151"))
+ tk.MustQuery("SHOW VARIABLES LIKE 'max_connections'").Check(testkit.Rows("max_connections 0"))
tk.MustExec("SET GLOBAL max_connections=1234")
tk.MustQuery("SHOW VARIABLES LIKE 'max_connections'").Check(testkit.Rows("max_connections 1234"))
// restore
- tk.MustExec("SET GLOBAL max_connections=151")
+ tk.MustExec("SET GLOBAL max_connections=0")
}
func TestRedundantColumnResolve(t *testing.T) {
diff --git a/go.mod b/go.mod
index bfd038156bf93..8af187bf5e607 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,7 @@ require (
cloud.google.com/go/storage v1.21.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.12.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.2.0
- github.com/BurntSushi/toml v0.3.1
+ github.com/BurntSushi/toml v0.4.1
github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/Jeffail/gabs/v2 v2.5.1
github.com/Shopify/sarama v1.29.0
@@ -87,7 +87,7 @@ require (
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
golang.org/x/text v0.3.7
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65
- golang.org/x/tools v0.1.8
+ golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f
google.golang.org/api v0.69.0
google.golang.org/grpc v1.44.0
gopkg.in/yaml.v2 v2.4.0
@@ -95,7 +95,11 @@ require (
sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67
)
-require github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581
+require (
+ github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581
+ github.com/charithe/durationcheck v0.0.9
+ honnef.co/go/tools v0.0.1-2020.1.4
+)
require (
cloud.google.com/go v0.100.2 // indirect
@@ -128,6 +132,7 @@ require (
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/golang/glog v1.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21
github.com/google/go-cmp v0.5.7 // indirect
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
github.com/gorilla/handlers v1.5.1 // indirect
@@ -193,6 +198,8 @@ require (
go.opentelemetry.io/otel/trace v0.20.0 // indirect
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
+ golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect
+ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8 // indirect
@@ -206,3 +213,5 @@ replace github.com/pingcap/tidb/parser => ./parser
// fix potential security issue(CVE-2020-26160) introduced by indirect dependency.
replace github.com/dgrijalva/jwt-go => github.com/form3tech-oss/jwt-go v3.2.6-0.20210809144907-32ab6a8243d7+incompatible
+
+replace honnef.co/go/tools => honnef.co/go/tools v0.3.2
diff --git a/go.sum b/go.sum
index 3a6e62f536235..77de5a0c0284c 100644
--- a/go.sum
+++ b/go.sum
@@ -64,8 +64,9 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.1 h1:BUYIbDf/mMZ8945v3QkG3Ou
github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.1/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.2.0 h1:62Ew5xXg5UCGIXDOM7+y4IL5/6mQJq1nenhBCJAeGX8=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.2.0/go.mod h1:eHWhQKXc1Gv1DvWH//UzgWjWFEo0Pp4pH2vBzjBw8Fc=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
+github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw=
github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w=
@@ -130,6 +131,8 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk=
+github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
github.com/cheggaaa/pb/v3 v3.0.8 h1:bC8oemdChbke2FHIIGy9mn4DPJ2caZYQnfbRqwmdCoA=
github.com/cheggaaa/pb/v3 v3.0.8/go.mod h1:UICbiLec/XO6Hw6k+BHEtHeQFzzBH4i2/qk/ow1EJTA=
github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54=
@@ -340,6 +343,8 @@ github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us=
+github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -384,7 +389,6 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20211122183932-1daafda22083 h1:c8EUapQFi+kjzedr4c6WqbwMdmB95+oDBWZ5XFHFYxY=
github.com/google/pprof v0.0.0-20211122183932-1daafda22083/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@@ -700,7 +704,6 @@ github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -809,6 +812,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
@@ -905,6 +909,7 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -924,6 +929,8 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk
golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
+golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM=
+golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -942,7 +949,6 @@ golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhp
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
@@ -951,6 +957,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1004,6 +1012,7 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1114,6 +1123,7 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1157,7 +1167,6 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1171,7 +1180,6 @@ golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -1207,8 +1215,8 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w=
-golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f h1:OKYpQQVE3DKSc3r3zHVzq46vq5YH7x8xpR3/k9ixmUg=
+golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1416,13 +1424,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.3.2 h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34=
+honnef.co/go/tools v0.3.2/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw=
modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8=
modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254=
modernc.org/lex v1.0.0/go.mod h1:G6rxMTy3cH2iA0iXL/HRRv4Znu8MK4higxph/lE7ypk=
diff --git a/infoschema/cluster.go b/infoschema/cluster.go
index 8ba852de0989b..d1d4f3c5a7fac 100644
--- a/infoschema/cluster.go
+++ b/infoschema/cluster.go
@@ -44,6 +44,8 @@ const (
ClusterTableTiDBTrx = "CLUSTER_TIDB_TRX"
// ClusterTableDeadlocks is the string constant of cluster dead lock table.
ClusterTableDeadlocks = "CLUSTER_DEADLOCKS"
+ // ClusterTableTrxSummary is the string constant of cluster transaction summary table.
+ ClusterTableTrxSummary = "CLUSTER_TRX_SUMMARY"
)
// memTableToClusterTables means add memory table to cluster table.
@@ -55,6 +57,7 @@ var memTableToClusterTables = map[string]string{
TableStatementsSummaryEvicted: ClusterTableStatementsSummaryEvicted,
TableTiDBTrx: ClusterTableTiDBTrx,
TableDeadlocks: ClusterTableDeadlocks,
+ TableTrxSummary: ClusterTableTrxSummary,
}
func init() {
diff --git a/infoschema/infoschema_test.go b/infoschema/infoschema_test.go
index 710914ad41159..e414f97d02906 100644
--- a/infoschema/infoschema_test.go
+++ b/infoschema/infoschema_test.go
@@ -294,6 +294,7 @@ func TestInfoTables(t *testing.T) {
"TIDB_TRX",
"DEADLOCKS",
"PLACEMENT_POLICIES",
+ "TRX_SUMMARY",
}
for _, tbl := range infoTables {
tb, err1 := is.TableByName(util.InformationSchemaName, model.NewCIStr(tbl))
diff --git a/infoschema/perfschema/tables.go b/infoschema/perfschema/tables.go
index 1beea67e64a1b..c0006f9f7413a 100644
--- a/infoschema/perfschema/tables.go
+++ b/infoschema/perfschema/tables.go
@@ -382,7 +382,7 @@ func dataForRemoteProfile(ctx sessionctx.Context, nodeType, uri string, isGorout
close(ch)
// Keep the original order to make the result more stable
- var results []result // nolint: prealloc
+ var results []result //nolint: prealloc
for result := range ch {
if result.err != nil {
ctx.GetSessionVars().StmtCtx.AppendWarning(result.err)
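The only change in this hunk is dropping the space inside the lint directive ("// nolint" becomes "//nolint"), a normalization repeated across the patch. A minimal sketch of the resulting pattern, assuming the analyzers being enabled here only recognize the directive when it sits immediately after the comment marker (package and function names are illustrative, not from the patch):

package demo

// collect appends into an unsized slice, which prealloc would normally flag;
// the directive form below mirrors the one this patch standardizes on.
func collect(in []int) []int {
	//nolint: prealloc
	var out []int
	for _, v := range in {
		out = append(out, v)
	}
	return out
}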
diff --git a/infoschema/tables.go b/infoschema/tables.go
index d1deae5c6739a..622eb1ef9452c 100644
--- a/infoschema/tables.go
+++ b/infoschema/tables.go
@@ -180,6 +180,8 @@ const (
TableAttributes = "ATTRIBUTES"
// TablePlacementPolicies is the string constant of placement policies table.
TablePlacementPolicies = "PLACEMENT_POLICIES"
+ // TableTrxSummary is the string constant of transaction summary table.
+ TableTrxSummary = "TRX_SUMMARY"
)
const (
@@ -278,6 +280,8 @@ var tableIDMap = map[string]int64{
TableAttributes: autoid.InformationSchemaDBID + 77,
TableTiDBHotRegionsHistory: autoid.InformationSchemaDBID + 78,
TablePlacementPolicies: autoid.InformationSchemaDBID + 79,
+ TableTrxSummary: autoid.InformationSchemaDBID + 80,
+ ClusterTableTrxSummary: autoid.InformationSchemaDBID + 81,
}
// columnInfo represents the basic column information of all kinds of INFORMATION_SCHEMA tables
@@ -1463,6 +1467,11 @@ var tableAttributesCols = []columnInfo{
{name: "RANGES", tp: mysql.TypeBlob, size: types.UnspecifiedLength},
}
+var tableTrxSummaryCols = []columnInfo{
+ {name: "DIGEST", tp: mysql.TypeVarchar, size: 16, flag: mysql.NotNullFlag, comment: "Digest of a transaction"},
+ {name: txninfo.AllSQLDigestsStr, tp: mysql.TypeBlob, size: types.UnspecifiedLength, comment: "A list of the digests of SQL statements that the transaction has executed"},
+}
+
var tablePlacementPoliciesCols = []columnInfo{
{name: "POLICY_ID", tp: mysql.TypeLonglong, size: 64, flag: mysql.NotNullFlag},
{name: "CATALOG_NAME", tp: mysql.TypeVarchar, size: 512, flag: mysql.NotNullFlag},
@@ -1580,6 +1589,7 @@ func GetClusterServerInfo(ctx sessionctx.Context) ([]ServerInfo, error) {
})
type retriever func(ctx sessionctx.Context) ([]ServerInfo, error)
+ //nolint: prealloc
var servers []ServerInfo
for _, r := range []retriever{GetTiDBServerInfo, GetPDServerInfo, GetStoreServerInfo} {
nodes, err := r(ctx)
@@ -1879,6 +1889,7 @@ var tableNameToColumns = map[string][]columnInfo{
TableDataLockWaits: tableDataLockWaitsCols,
TableAttributes: tableAttributesCols,
TablePlacementPolicies: tablePlacementPoliciesCols,
+ TableTrxSummary: tableTrxSummaryCols,
}
func createInfoSchemaTable(_ autoid.Allocators, meta *model.TableInfo) (table.Table, error) {
diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go
index 962a8db8e0d13..cf9075ee21f29 100644
--- a/infoschema/tables_test.go
+++ b/infoschema/tables_test.go
@@ -1479,6 +1479,37 @@ func TestTiDBTrx(t *testing.T) {
"[null,null,\"update `test_tidb_trx` set `i` = `i` + ?\"]"))
}
+func TestTiDBTrxSummary(t *testing.T) {
+ store, clean := testkit.CreateMockStore(t)
+ defer clean()
+
+ tk := newTestKitWithRoot(t, store)
+ tk.MustExec("drop table if exists test_tidb_trx")
+ tk.MustExec("create table test_tidb_trx(i int)")
+ _, beginDigest := parser.NormalizeDigest("begin")
+ _, digest := parser.NormalizeDigest("update test_tidb_trx set i = i + 1")
+ _, commitDigest := parser.NormalizeDigest("commit")
+ txninfo.Recorder.Clean()
+ txninfo.Recorder.SetMinDuration(500 * time.Millisecond)
+ defer txninfo.Recorder.SetMinDuration(2147483647)
+ txninfo.Recorder.ResizeSummaries(128)
+ defer txninfo.Recorder.ResizeSummaries(0)
+ tk.MustExec("begin")
+ tk.MustExec("update test_tidb_trx set i = i + 1")
+ time.Sleep(1 * time.Second)
+ tk.MustExec("update test_tidb_trx set i = i + 1")
+ tk.MustExec("commit")
+ // It is possible for TRX_SUMMARY to contain other rows (due to parallel execution of tests),
+ for _, row := range tk.MustQuery("select * from information_schema.TRX_SUMMARY;").Rows() {
+ // so we only look for the specific row this test expects.
+ if row[0] == "1bb679108d0012a8" {
+ require.Equal(t, strings.TrimSpace(row[1].(string)), "[\""+beginDigest.String()+"\",\""+digest.String()+"\",\""+digest.String()+"\",\""+commitDigest.String()+"\"]")
+ return
+ }
+ }
+ t.Fatal("cannot find the expected row")
+}
+
func TestInfoSchemaDeadlockPrivilege(t *testing.T) {
store, clean := testkit.CreateMockStore(t)
defer clean()
diff --git a/kv/BUILD.bazel b/kv/BUILD.bazel
index 600cec889528f..fed476f803dfc 100644
--- a/kv/BUILD.bazel
+++ b/kv/BUILD.bazel
@@ -32,6 +32,7 @@ go_library(
"//util/dbterror",
"//util/logutil",
"//util/memory",
+ "//util/set",
"//util/trxevents",
"@com_github_coocood_freecache//:freecache",
"@com_github_pingcap_errors//:errors",
diff --git a/kv/key.go b/kv/key.go
index 3e68b5fc80dd7..561cc2a03fd78 100644
--- a/kv/key.go
+++ b/kv/key.go
@@ -23,6 +23,7 @@ import (
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
+ "github.com/pingcap/tidb/util/set"
)
// Key represents high-level Key type.
@@ -158,9 +159,15 @@ type Handle interface {
// String implements the fmt.Stringer interface.
String() string
// MemUsage returns the memory usage of a handle.
- MemUsage() int64
+ MemUsage() uint64
+ // ExtraMemSize returns the memory usage of objects that are pointed to by the Handle.
+ ExtraMemSize() uint64
}
+var _ Handle = IntHandle(0)
+var _ Handle = &CommonHandle{}
+var _ Handle = PartitionHandle{}
+
// IntHandle implement the Handle interface for int64 type handle.
type IntHandle int64
@@ -231,10 +238,15 @@ func (ih IntHandle) String() string {
}
// MemUsage implements the Handle interface.
-func (ih IntHandle) MemUsage() int64 {
+func (ih IntHandle) MemUsage() uint64 {
return 8
}
+// ExtraMemSize implements the Handle interface.
+func (ih IntHandle) ExtraMemSize() uint64 {
+ return 0
+}
+
// CommonHandle implements the Handle interface for non-int64 type handle.
type CommonHandle struct {
encoded []byte
@@ -355,8 +367,15 @@ func (ch *CommonHandle) String() string {
}
// MemUsage implements the Handle interface.
-func (ch *CommonHandle) MemUsage() int64 {
- return int64(cap(ch.encoded)) + int64(cap(ch.colEndOffsets))*2
+func (ch *CommonHandle) MemUsage() uint64 {
+ // 48 bytes are taken by the two slice headers (24 bytes each on 64-bit platforms).
+ return 48 + ch.ExtraMemSize()
+}
+
+// ExtraMemSize implements the Handle interface.
+func (ch *CommonHandle) ExtraMemSize() uint64 {
+ // colEndOffsets is a slice of uint16.
+ return uint64(cap(ch.encoded) + cap(ch.colEndOffsets)*2)
}
// HandleMap is the map for Handle.
@@ -431,6 +450,65 @@ func (m *HandleMap) Range(fn func(h Handle, val interface{}) bool) {
}
}
+// MemAwareHandleMap is similar to HandleMap, but it's aware of its memory usage and doesn't support delete.
+// It only tracks the actual sizes. Objects that are pointed to by the key or value are not tracked.
+// Those should be tracked by the caller.
+type MemAwareHandleMap[V any] struct {
+ ints set.MemAwareMap[int64, V]
+ strs set.MemAwareMap[string, strHandleValue[V]]
+}
+
+type strHandleValue[V any] struct {
+ h Handle
+ val V
+}
+
+// NewMemAwareHandleMap creates a new map for handle.
+func NewMemAwareHandleMap[V any]() *MemAwareHandleMap[V] {
+ // Initialize the two maps to avoid checking nil.
+ return &MemAwareHandleMap[V]{
+ ints: set.NewMemAwareMap[int64, V](),
+ strs: set.NewMemAwareMap[string, strHandleValue[V]](),
+ }
+}
+
+// Get gets a value by a Handle.
+func (m *MemAwareHandleMap[V]) Get(h Handle) (v V, ok bool) {
+ if h.IsInt() {
+ v, ok = m.ints.Get(h.IntValue())
+ } else {
+ var strVal strHandleValue[V]
+ strVal, ok = m.strs.Get(string(h.Encoded()))
+ v = strVal.val
+ }
+ return
+}
+
+// Set sets a value with a Handle.
+func (m *MemAwareHandleMap[V]) Set(h Handle, val V) int64 {
+ if h.IsInt() {
+ return m.ints.Set(h.IntValue(), val)
+ }
+ return m.strs.Set(string(h.Encoded()), strHandleValue[V]{
+ h: h,
+ val: val,
+ })
+}
+
+// Range iterates the MemAwareHandleMap with fn, the fn returns true to continue, returns false to stop.
+func (m *MemAwareHandleMap[V]) Range(fn func(h Handle, val interface{}) bool) {
+ for h, val := range m.ints.M {
+ if !fn(IntHandle(h), val) {
+ return
+ }
+ }
+ for _, strVal := range m.strs.M {
+ if !fn(strVal.h, strVal.val) {
+ return
+ }
+ }
+}
+
// PartitionHandle combines a handle and a PartitionID, used to location a row in partitioned table.
// Now only used in global index.
// TODO: support PartitionHandle in HandleMap.
@@ -470,6 +548,11 @@ func (ph PartitionHandle) Compare(h Handle) int {
}
// MemUsage implements the Handle interface.
-func (ph PartitionHandle) MemUsage() int64 {
+func (ph PartitionHandle) MemUsage() uint64 {
return ph.Handle.MemUsage() + 8
}
+
+// ExtraMemSize implements the Handle interface.
+func (ph PartitionHandle) ExtraMemSize() uint64 {
+ return ph.Handle.ExtraMemSize()
+}
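A minimal usage sketch for the new MemAwareHandleMap, not part of the patch: it reads Set's int64 return value as the growth of the map's own footprint, matching the doc comment's note that objects referenced through keys or values are left for the caller to track.

package main

import (
	"fmt"

	"github.com/pingcap/tidb/kv"
)

func main() {
	m := kv.NewMemAwareHandleMap[int]()
	var trackedBytes int64
	for i := int64(0); i < 3; i++ {
		// Each Set reports how much the map itself grew; a caller would
		// typically feed these deltas into its own memory tracker.
		trackedBytes += m.Set(kv.IntHandle(i), int(i))
	}
	if v, ok := m.Get(kv.IntHandle(1)); ok {
		fmt.Println("value:", v, "tracked bytes:", trackedBytes)
	}
}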
diff --git a/kv/key_test.go b/kv/key_test.go
index 3d3ee3ce5fb1a..af45999d5e5bc 100644
--- a/kv/key_test.go
+++ b/kv/key_test.go
@@ -17,6 +17,7 @@ package kv_test
import (
"bytes"
"errors"
+ "strconv"
"testing"
"time"
@@ -221,3 +222,83 @@ func BenchmarkIsPoint(b *testing.B) {
kr.IsPoint()
}
}
+
+var result int
+
+var inputs = []struct {
+ input int
+}{
+ {input: 1},
+ {input: 100},
+ {input: 10000},
+ {input: 1000000},
+}
+
+func memAwareIntMap(size int, handles []Handle) int {
+ var x int
+ m := NewMemAwareHandleMap[int]()
+ for j := 0; j < size; j++ {
+ m.Set(handles[j], j)
+ }
+ for j := 0; j < size; j++ {
+ x, _ = m.Get(handles[j])
+ }
+ return x
+}
+
+func nativeIntMap(size int, handles []Handle) int {
+ var x int
+ m := make(map[Handle]int)
+ for j := 0; j < size; j++ {
+ m[handles[j]] = j
+ }
+
+ for j := 0; j < size; j++ {
+ x = m[handles[j]]
+ }
+ return x
+}
+
+func BenchmarkMemAwareHandleMap(b *testing.B) {
+ var sc stmtctx.StatementContext
+ for _, s := range inputs {
+ handles := make([]Handle, s.input)
+ for i := 0; i < s.input; i++ {
+ if i%2 == 0 {
+ handles[i] = IntHandle(i)
+ } else {
+ handleBytes, _ := codec.EncodeKey(&sc, nil, types.NewIntDatum(int64(i)))
+ handles[i], _ = NewCommonHandle(handleBytes)
+ }
+ }
+ b.Run("MemAwareIntMap_"+strconv.Itoa(s.input), func(b *testing.B) {
+ var x int
+ for i := 0; i < b.N; i++ {
+ x = memAwareIntMap(s.input, handles)
+ }
+ result = x
+ })
+ }
+}
+
+func BenchmarkNativeHandleMap(b *testing.B) {
+ var sc stmtctx.StatementContext
+ for _, s := range inputs {
+ handles := make([]Handle, s.input)
+ for i := 0; i < s.input; i++ {
+ if i%2 == 0 {
+ handles[i] = IntHandle(i)
+ } else {
+ handleBytes, _ := codec.EncodeKey(&sc, nil, types.NewIntDatum(int64(i)))
+ handles[i], _ = NewCommonHandle(handleBytes)
+ }
+ }
+ b.Run("NativeIntMap_"+strconv.Itoa(s.input), func(b *testing.B) {
+ var x int
+ for i := 0; i < b.N; i++ {
+ x = nativeIntMap(s.input, handles)
+ }
+ result = x
+ })
+ }
+}
diff --git a/parser/consistent_test.go b/parser/consistent_test.go
index 17163378b5439..e78b7f31ddddd 100644
--- a/parser/consistent_test.go
+++ b/parser/consistent_test.go
@@ -73,6 +73,7 @@ func extractMiddle(str, startMarker, endMarker string) string {
}
func extractQuotedWords(strs []string) []string {
+ //nolint: prealloc
var words []string
for _, str := range strs {
word := extractMiddle(str, "\"", "\"")
diff --git a/parser/model/model.go b/parser/model/model.go
index eb22b4bee39bd..43e3e4bc5bcfb 100644
--- a/parser/model/model.go
+++ b/parser/model/model.go
@@ -1426,8 +1426,7 @@ type PolicyInfo struct {
}
func (p *PolicyInfo) Clone() *PolicyInfo {
- var cloned PolicyInfo
- cloned = *p
+ cloned := *p
cloned.PlacementSettings = p.PlacementSettings.Clone()
return &cloned
}
@@ -1489,8 +1488,7 @@ func (p *PlacementSettings) String() string {
}
func (p *PlacementSettings) Clone() *PlacementSettings {
- var cloned PlacementSettings
- cloned = *p
+ cloned := *p
return &cloned
}
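The `cloned := *p` simplifications above keep the existing semantics: copying the struct value still shares any pointer fields, which is why PolicyInfo.Clone goes on to deep-copy PlacementSettings. A small self-contained sketch of that pattern, using illustrative (non-TiDB) types:

package demo

type settings struct{ replicas int }

type policy struct {
	name     string
	settings *settings
}

// clone copies the struct value, then replaces the shared pointer with a fresh
// copy so that mutating the clone cannot leak into the original.
func clone(p *policy) *policy {
	cloned := *p
	cloned.settings = &settings{replicas: p.settings.replicas}
	return &cloned
}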
diff --git a/parser/parser_test.go b/parser/parser_test.go
index f908cfa8d7b6e..3ab44e1a232c0 100644
--- a/parser/parser_test.go
+++ b/parser/parser_test.go
@@ -5735,6 +5735,7 @@ func TestNotExistsSubquery(t *testing.T) {
}
func TestWindowFunctionIdentifier(t *testing.T) {
+ //nolint: prealloc
var table []testCase
for key := range parser.WindowFuncTokenMapForTest {
table = append(table, testCase{fmt.Sprintf("select 1 %s", key), false, fmt.Sprintf("SELECT 1 AS `%s`", key)})
diff --git a/parser/yy_parser.go b/parser/yy_parser.go
index 808b4a216ebeb..221dd7a26e790 100644
--- a/parser/yy_parser.go
+++ b/parser/yy_parser.go
@@ -148,8 +148,7 @@ func (parser *Parser) ParseSQL(sql string, params ...ParseParam) (stmt []ast.Stm
parser.src = sql
parser.result = parser.result[:0]
- var l yyLexer
- l = &parser.lexer
+ var l yyLexer = &parser.lexer
yyParse(l, parser)
warns, errs := l.Errors()
diff --git a/planner/core/explain.go b/planner/core/explain.go
index 218450bd2bc5e..6d8512d5dd94a 100644
--- a/planner/core/explain.go
+++ b/planner/core/explain.go
@@ -466,12 +466,6 @@ func (p *PhysicalIndexLookUpReader) ExplainInfo() string {
str.WriteString(strconv.FormatUint(p.PushedLimit.Count, 10))
str.WriteString(")")
}
- if p.Paging {
- if p.PushedLimit != nil {
- str.WriteString(", ")
- }
- str.WriteString("paging:true")
- }
return str.String()
}
diff --git a/planner/core/plan_cost.go b/planner/core/plan_cost.go
index ee0af71c51149..c0598e74f2801 100644
--- a/planner/core/plan_cost.go
+++ b/planner/core/plan_cost.go
@@ -735,7 +735,7 @@ func (p *PhysicalApply) GetPlanCost(taskType property.TaskType, costFlag uint64)
}
// GetCost computes cost of merge join operator itself.
-func (p *PhysicalMergeJoin) GetCost(lCnt, rCnt float64) float64 {
+func (p *PhysicalMergeJoin) GetCost(lCnt, rCnt float64, costFlag uint64) float64 {
outerCnt := lCnt
innerCnt := rCnt
innerKeys := p.RightJoinKeys
@@ -766,6 +766,9 @@ func (p *PhysicalMergeJoin) GetCost(lCnt, rCnt float64) float64 {
numPairs = 0
}
}
+ if hasCostFlag(costFlag, CostFlagUseTrueCardinality) {
+ numPairs = getOperatorActRows(p)
+ }
sessVars := p.ctx.GetSessionVars()
probeCost := numPairs * sessVars.GetCPUFactor()
// Cost of evaluating outer filters.
@@ -795,13 +798,13 @@ func (p *PhysicalMergeJoin) GetPlanCost(taskType property.TaskType, costFlag uin
}
p.planCost += childCost
}
- p.planCost += p.GetCost(getCardinality(p.children[0], costFlag), getCardinality(p.children[1], costFlag))
+ p.planCost += p.GetCost(getCardinality(p.children[0], costFlag), getCardinality(p.children[1], costFlag), costFlag)
p.planCostInit = true
return p.planCost, nil
}
// GetCost computes cost of hash join operator itself.
-func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64) float64 {
+func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64, isMPP bool, costFlag uint64) float64 {
buildCnt, probeCnt := lCnt, rCnt
build := p.children[0]
// Taking the right as the inner for right join or using the outer to build a hash table.
@@ -815,7 +818,11 @@ func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64) float64 {
rowSize := getAvgRowSize(build.statsInfo(), build.Schema())
spill := oomUseTmpStorage && memQuota > 0 && rowSize*buildCnt > float64(memQuota) && p.storeTp != kv.TiFlash
// Cost of building hash table.
- cpuCost := buildCnt * sessVars.GetCPUFactor()
+ cpuFactor := sessVars.GetCPUFactor()
+ if isMPP && p.ctx.GetSessionVars().CostModelVersion == modelVer2 {
+ cpuFactor = sessVars.GetTiFlashCPUFactor() // use the dedicated TiFlash CPU Factor on modelVer2
+ }
+ cpuCost := buildCnt * cpuFactor
memoryCost := buildCnt * sessVars.GetMemoryFactor()
diskCost := buildCnt * sessVars.GetDiskFactor() * rowSize
// Number of matched row pairs regarding the equal join conditions.
@@ -845,16 +852,19 @@ func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64) float64 {
numPairs = 0
}
}
+ if hasCostFlag(costFlag, CostFlagUseTrueCardinality) {
+ numPairs = getOperatorActRows(p)
+ }
// Cost of querying hash table is cheap actually, so we just compute the cost of
// evaluating `OtherConditions` and joining row pairs.
- probeCost := numPairs * sessVars.GetCPUFactor()
+ probeCost := numPairs * cpuFactor
probeDiskCost := numPairs * sessVars.GetDiskFactor() * rowSize
// Cost of evaluating outer filter.
if len(p.LeftConditions)+len(p.RightConditions) > 0 {
// Input outer count for the above compution should be adjusted by SelectionFactor.
probeCost *= SelectionFactor
probeDiskCost *= SelectionFactor
- probeCost += probeCnt * sessVars.GetCPUFactor()
+ probeCost += probeCnt * cpuFactor
}
diskCost += probeDiskCost
probeCost /= float64(p.Concurrency)
@@ -864,9 +874,9 @@ func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64) float64 {
if p.UseOuterToBuild {
if spill {
// It runs in sequence when build data is on disk. See handleUnmatchedRowsFromHashTableInDisk
- cpuCost += buildCnt * sessVars.GetCPUFactor()
+ cpuCost += buildCnt * cpuFactor
} else {
- cpuCost += buildCnt * sessVars.GetCPUFactor() / float64(p.Concurrency)
+ cpuCost += buildCnt * cpuFactor / float64(p.Concurrency)
}
diskCost += buildCnt * sessVars.GetDiskFactor() * rowSize
}
@@ -892,7 +902,7 @@ func (p *PhysicalHashJoin) GetPlanCost(taskType property.TaskType, costFlag uint
}
p.planCost += childCost
}
- p.planCost += p.GetCost(getCardinality(p.children[0], costFlag), getCardinality(p.children[1], costFlag))
+ p.planCost += p.GetCost(getCardinality(p.children[0], costFlag), getCardinality(p.children[1], costFlag), taskType == property.MppTaskType, costFlag)
p.planCostInit = true
return p.planCost, nil
}
diff --git a/planner/core/plan_cost_test.go b/planner/core/plan_cost_test.go
index 6a37e4252977b..d8bf5833412ce 100644
--- a/planner/core/plan_cost_test.go
+++ b/planner/core/plan_cost_test.go
@@ -460,6 +460,7 @@ func TestNewCostInterfaceRandGen(t *testing.T) {
tk.MustExec("use test")
tk.MustExec(`create table t (a int primary key, b int, c int, d int, k int, key b(b), key cd(c, d), unique key(k))`)
+ tk.MustExec(`set @@tidb_enable_paging = off`)
queries := []string{
`SELECT a FROM t WHERE a is null AND d in (5307, 15677, 57970)`,
diff --git a/planner/core/plan_test.go b/planner/core/plan_test.go
index f0435241adece..ed05bcce2c429 100644
--- a/planner/core/plan_test.go
+++ b/planner/core/plan_test.go
@@ -678,7 +678,7 @@ func TestCopPaging(t *testing.T) {
for i := 0; i < 10; i++ {
tk.MustQuery("explain format='brief' select * from t force index(i) where id <= 1024 and c1 >= 0 and c1 <= 1024 and c2 in (2, 4, 6, 8) order by c1 limit 960").Check(testkit.Rows(
"Limit 4.00 root offset:0, count:960",
- "└─IndexLookUp 4.00 root paging:true",
+ "└─IndexLookUp 4.00 root ",
" ├─Selection(Build) 1024.00 cop[tikv] le(test.t.id, 1024)",
" │ └─IndexRangeScan 1024.00 cop[tikv] table:t, index:i(c1) range:[0,1024], keep order:true",
" └─Selection(Probe) 4.00 cop[tikv] in(test.t.c2, 2, 4, 6, 8)",
@@ -689,7 +689,7 @@ func TestCopPaging(t *testing.T) {
for i := 0; i < 10; i++ {
tk.MustQuery("explain format='brief' select * from t force index(i) where mod(id, 2) > 0 and id <= 1024 and c1 >= 0 and c1 <= 1024 and c2 in (2, 4, 6, 8) order by c1 limit 960").Check(testkit.Rows(
"Limit 3.20 root offset:0, count:960",
- "└─IndexLookUp 3.20 root paging:true",
+ "└─IndexLookUp 3.20 root ",
" ├─Selection(Build) 819.20 cop[tikv] gt(mod(test.t.id, 2), 0), le(test.t.id, 1024)",
" │ └─IndexRangeScan 1024.00 cop[tikv] table:t, index:i(c1) range:[0,1024], keep order:true",
" └─Selection(Probe) 3.20 cop[tikv] in(test.t.c2, 2, 4, 6, 8)",
diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go
index e4f60984a75b3..ad8388f15a8f4 100644
--- a/planner/core/planbuilder.go
+++ b/planner/core/planbuilder.go
@@ -2769,7 +2769,7 @@ func buildShowDDLJobsFields() (*expression.Schema, types.NameSlice) {
}
func buildTableRegionsSchema() (*expression.Schema, types.NameSlice) {
- schema := newColumnsWithNames(11)
+ schema := newColumnsWithNames(13)
schema.Append(buildColumnWithName("", "REGION_ID", mysql.TypeLonglong, 4))
schema.Append(buildColumnWithName("", "START_KEY", mysql.TypeVarchar, 64))
schema.Append(buildColumnWithName("", "END_KEY", mysql.TypeVarchar, 64))
@@ -2781,6 +2781,8 @@ func buildTableRegionsSchema() (*expression.Schema, types.NameSlice) {
schema.Append(buildColumnWithName("", "READ_BYTES", mysql.TypeLonglong, 4))
schema.Append(buildColumnWithName("", "APPROXIMATE_SIZE(MB)", mysql.TypeLonglong, 4))
schema.Append(buildColumnWithName("", "APPROXIMATE_KEYS", mysql.TypeLonglong, 4))
+ schema.Append(buildColumnWithName("", "SCHEDULING_CONSTRAINTS", mysql.TypeVarchar, 256))
+ schema.Append(buildColumnWithName("", "SCHEDULING_STATE", mysql.TypeVarchar, 16))
return schema.col2Schema(), schema.names
}
diff --git a/planner/core/rule_column_pruning.go b/planner/core/rule_column_pruning.go
index 5dc6506f3d2b3..b8df243bd2299 100644
--- a/planner/core/rule_column_pruning.go
+++ b/planner/core/rule_column_pruning.go
@@ -115,6 +115,7 @@ func (la *LogicalAggregation) PruneColumns(parentUsedCols []*expression.Column,
}
appendColumnPruneTraceStep(la, prunedColumns, opt)
appendFunctionPruneTraceStep(la, prunedFunctions, opt)
+ //nolint: prealloc
var selfUsedCols []*expression.Column
for _, aggrFunc := range la.AggFuncs {
selfUsedCols = expression.ExtractColumnsFromExpressions(selfUsedCols, aggrFunc.Args, nil)
diff --git a/planner/core/rule_join_reorder.go b/planner/core/rule_join_reorder.go
index 68d1fa3bf16cb..0294da8bac852 100644
--- a/planner/core/rule_join_reorder.go
+++ b/planner/core/rule_join_reorder.go
@@ -563,6 +563,7 @@ func findRoots(t *tracing.PlanTrace) []*tracing.PlanTrace {
if t.TP == plancodec.TypeJoin || t.TP == plancodec.TypeDataSource {
return []*tracing.PlanTrace{t}
}
+ //nolint: prealloc
var r []*tracing.PlanTrace
for _, child := range t.Children {
r = append(r, findRoots(child)...)
diff --git a/planner/core/rule_join_reorder_dp.go b/planner/core/rule_join_reorder_dp.go
index c91d74e1b7c28..c7c0f45cf2274 100644
--- a/planner/core/rule_join_reorder_dp.go
+++ b/planner/core/rule_join_reorder_dp.go
@@ -212,10 +212,11 @@ func (s *joinReorderDPSolver) dpGraph(visitID2NodeID, nodeID2VisitID []int, join
func (s *joinReorderDPSolver) nodesAreConnected(leftMask, rightMask uint, oldPos2NewPos []int,
totalEqEdges []joinGroupEqEdge, totalNonEqEdges []joinGroupNonEqEdge) ([]joinGroupEqEdge, []expression.Expression) {
- var ( // nolint: prealloc
- usedEqEdges []joinGroupEqEdge
- otherConds []expression.Expression
- )
+ //nolint: prealloc
+ var usedEqEdges []joinGroupEqEdge
+ //nolint: prealloc
+ var otherConds []expression.Expression
+
for _, edge := range totalEqEdges {
lIdx := uint(oldPos2NewPos[edge.nodeIDs[0]])
rIdx := uint(oldPos2NewPos[edge.nodeIDs[1]])
diff --git a/planner/core/task.go b/planner/core/task.go
index fd6cac675f4c6..4ecc67fc67a34 100644
--- a/planner/core/task.go
+++ b/planner/core/task.go
@@ -326,7 +326,7 @@ func (p *PhysicalHashJoin) attach2Task(tasks ...task) task {
p.SetChildren(lTask.plan(), rTask.plan())
task := &rootTask{
p: p,
- cst: lTask.cost() + rTask.cost() + p.GetCost(lTask.count(), rTask.count()),
+ cst: lTask.cost() + rTask.cost() + p.GetCost(lTask.count(), rTask.count(), false, 0),
}
p.cost = task.cost()
return task
@@ -547,7 +547,7 @@ func (p *PhysicalHashJoin) attach2TaskForMpp(tasks ...task) task {
outerTask = rTask
}
task := &mppTask{
- cst: lCost + rCost + p.GetCost(lTask.count(), rTask.count()),
+ cst: lCost + rCost + p.GetCost(lTask.count(), rTask.count(), false, 0),
p: p,
partTp: outerTask.partTp,
hashCols: outerTask.hashCols,
@@ -578,7 +578,7 @@ func (p *PhysicalHashJoin) attach2TaskForTiFlash(tasks ...task) task {
tblColHists: rTask.tblColHists,
indexPlanFinished: true,
tablePlan: p,
- cst: lCost + rCost + p.GetCost(lTask.count(), rTask.count()),
+ cst: lCost + rCost + p.GetCost(lTask.count(), rTask.count(), false, 0),
}
p.cost = task.cst
return task
@@ -590,7 +590,7 @@ func (p *PhysicalMergeJoin) attach2Task(tasks ...task) task {
p.SetChildren(lTask.plan(), rTask.plan())
t := &rootTask{
p: p,
- cst: lTask.cost() + rTask.cost() + p.GetCost(lTask.count(), rTask.count()),
+ cst: lTask.cost() + rTask.cost() + p.GetCost(lTask.count(), rTask.count(), 0),
}
p.cost = t.cost()
return t
diff --git a/planner/implementation/join.go b/planner/implementation/join.go
index f24791e9a987e..4b247353c58bf 100644
--- a/planner/implementation/join.go
+++ b/planner/implementation/join.go
@@ -29,7 +29,7 @@ func (impl *HashJoinImpl) CalcCost(outCount float64, children ...memo.Implementa
hashJoin := impl.plan.(*plannercore.PhysicalHashJoin)
// The children here are only used to calculate the cost.
hashJoin.SetChildren(children[0].GetPlan(), children[1].GetPlan())
- selfCost := hashJoin.GetCost(children[0].GetPlan().StatsCount(), children[1].GetPlan().StatsCount())
+ selfCost := hashJoin.GetCost(children[0].GetPlan().StatsCount(), children[1].GetPlan().StatsCount(), false, 0)
impl.cost = selfCost + children[0].GetCost() + children[1].GetCost()
return impl.cost
}
@@ -56,7 +56,7 @@ func (impl *MergeJoinImpl) CalcCost(outCount float64, children ...memo.Implement
mergeJoin := impl.plan.(*plannercore.PhysicalMergeJoin)
// The children here are only used to calculate the cost.
mergeJoin.SetChildren(children[0].GetPlan(), children[1].GetPlan())
- selfCost := mergeJoin.GetCost(children[0].GetPlan().StatsCount(), children[1].GetPlan().StatsCount())
+ selfCost := mergeJoin.GetCost(children[0].GetPlan().StatsCount(), children[1].GetPlan().StatsCount(), 0)
impl.cost = selfCost + children[0].GetCost() + children[1].GetCost()
return impl.cost
}
diff --git a/privilege/privileges/cache.go b/privilege/privileges/cache.go
index 9e8198650d25b..d1e85b3d45091 100644
--- a/privilege/privileges/cache.go
+++ b/privilege/privileges/cache.go
@@ -1168,7 +1168,7 @@ func (p *MySQLPrivilege) DBIsVisible(user, host, db string) bool {
}
func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentity) []string {
- var gs []string // nolint: prealloc
+ var gs []string //nolint: prealloc
var sortFromIdx int
var hasGlobalGrant = false
// Some privileges may granted from role inheritance.
diff --git a/server/conn.go b/server/conn.go
index 85b227b3f3bdf..e5289e4fec5af 100644
--- a/server/conn.go
+++ b/server/conn.go
@@ -1936,8 +1936,8 @@ func (cc *clientConn) prefetchPointPlanKeys(ctx context.Context, stmts []ast.Stm
}
}
pointPlans := make([]plannercore.Plan, len(stmts))
- var idxKeys []kv.Key // nolint: prealloc
- var rowKeys []kv.Key // nolint: prealloc
+ var idxKeys []kv.Key //nolint: prealloc
+ var rowKeys []kv.Key //nolint: prealloc
sc := vars.StmtCtx
for i, stmt := range stmts {
switch stmt.(type) {
diff --git a/server/http_handler.go b/server/http_handler.go
index 4ef260eed12dd..5db67bcd3be0c 100644
--- a/server/http_handler.go
+++ b/server/http_handler.go
@@ -47,6 +47,7 @@ import (
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/session"
+ "github.com/pingcap/tidb/session/txninfo"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/binloginfo"
"github.com/pingcap/tidb/sessionctx/stmtctx"
@@ -755,6 +756,34 @@ func (h settingsHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
return
}
}
+ if transactionSummaryCapacity := req.Form.Get("transaction_summary_capacity"); transactionSummaryCapacity != "" {
+ capacity, err := strconv.Atoi(transactionSummaryCapacity)
+ if err != nil {
+ writeError(w, errors.New("illegal argument"))
+ return
+ } else if capacity < 0 || capacity > 5000 {
+ writeError(w, errors.New("transaction_summary_capacity out of range, should be in 0 to 5000"))
+ return
+ }
+ cfg := config.GetGlobalConfig()
+ cfg.TrxSummary.TransactionSummaryCapacity = uint(capacity)
+ config.StoreGlobalConfig(cfg)
+ txninfo.Recorder.ResizeSummaries(uint(capacity))
+ }
+ if transactionIDDigestMinDuration := req.Form.Get("transaction_id_digest_min_duration"); transactionIDDigestMinDuration != "" {
+ duration, err := strconv.Atoi(transactionIDDigestMinDuration)
+ if err != nil {
+ writeError(w, errors.New("illegal argument"))
+ return
+ } else if duration < 0 || duration > 2147483647 {
+ writeError(w, errors.New("transaction_id_digest_min_duration out of range, should be in 0 to 2147483647"))
+ return
+ }
+ cfg := config.GetGlobalConfig()
+ cfg.TrxSummary.TransactionIDDigestMinDuration = uint(duration)
+ config.StoreGlobalConfig(cfg)
+ txninfo.Recorder.SetMinDuration(time.Duration(duration) * time.Millisecond)
+ }
} else {
writeData(w, config.GetGlobalConfig())
}
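A hedged sketch of exercising the two new form fields, not part of the patch: it assumes the settings handler is mounted at /settings on the status port of a locally running tidb-server (address, port, and values are illustrative).

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	resp, err := http.PostForm("http://127.0.0.1:10080/settings", url.Values{
		"transaction_summary_capacity":       {"500"},  // accepted range per the handler: 0 to 5000
		"transaction_id_digest_min_duration": {"1000"}, // milliseconds, 0 to 2147483647
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}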
diff --git a/server/server.go b/server/server.go
index 90b4a7e27101b..13ed052391f59 100644
--- a/server/server.go
+++ b/server/server.go
@@ -352,7 +352,7 @@ func setTxnScope() {
// Export config-related metrics
func (s *Server) reportConfig() {
metrics.ConfigStatus.WithLabelValues("token-limit").Set(float64(s.cfg.TokenLimit))
- metrics.ConfigStatus.WithLabelValues("max-server-connections").Set(float64(s.cfg.MaxServerConnections))
+ metrics.ConfigStatus.WithLabelValues("max_connections").Set(float64(s.cfg.Instance.MaxConnections))
}
// Run runs the server.
@@ -514,11 +514,18 @@ func (s *Server) onConn(conn *clientConn) {
})
terror.Log(err)
}
- if errors.Cause(err) == io.EOF {
+ switch errors.Cause(err) {
+ case io.EOF:
// `EOF` means the connection is closed normally, we do not treat it as a noticeable error and log it in 'DEBUG' level.
logutil.BgLogger().With(zap.Uint64("conn", conn.connectionID)).
Debug("EOF", zap.String("remote addr", conn.bufReadConn.RemoteAddr().String()))
- } else {
+ case errConCount:
+ if err := conn.writeError(ctx, err); err != nil {
+ logutil.BgLogger().With(zap.Uint64("conn", conn.connectionID)).
+ Warn("error in writing errConCount", zap.Error(err),
+ zap.String("remote addr", conn.bufReadConn.RemoteAddr().String()))
+ }
+ default:
metrics.HandShakeErrorCounter.Inc()
logutil.BgLogger().With(zap.Uint64("conn", conn.connectionID)).
Warn("Server.onConn handshake", zap.Error(err),
@@ -605,8 +612,8 @@ func (cc *clientConn) connectInfo() *variable.ConnectionInfo {
}
func (s *Server) checkConnectionCount() error {
- // When the value of MaxServerConnections is 0, the number of connections is unlimited.
- if int(s.cfg.MaxServerConnections) == 0 {
+ // When the value of Instance.MaxConnections is 0, the number of connections is unlimited.
+ if int(s.cfg.Instance.MaxConnections) == 0 {
return nil
}
@@ -614,9 +621,9 @@ func (s *Server) checkConnectionCount() error {
conns := len(s.clients)
s.rwlock.RUnlock()
- if conns >= int(s.cfg.MaxServerConnections) {
+ if conns >= int(s.cfg.Instance.MaxConnections) {
logutil.BgLogger().Error("too many connections",
- zap.Uint32("max connections", s.cfg.MaxServerConnections), zap.Error(errConCount))
+ zap.Uint32("max connections", s.cfg.Instance.MaxConnections), zap.Error(errConCount))
return errConCount
}
return nil
diff --git a/server/util_test.go b/server/util_test.go
index 889c2512eacad..7a5040b43cd3c 100644
--- a/server/util_test.go
+++ b/server/util_test.go
@@ -70,7 +70,7 @@ func TestDumpBinaryTime(t *testing.T) {
d = dumpBinaryDateTime(nil, parsedTime)
require.Equal(t, []byte{0}, d)
- myDuration, err := types.ParseDuration(sc, "0000-00-00 00:00:00.000000", 6)
+ myDuration, _, err := types.ParseDuration(sc, "0000-00-00 00:00:00.000000", 6)
require.NoError(t, err)
d = dumpBinaryTime(myDuration.Duration)
require.Equal(t, []byte{0}, d)
@@ -194,7 +194,7 @@ func TestDumpTextValue(t *testing.T) {
require.NoError(t, err)
require.Equal(t, "2017-01-06 00:00:00", mustDecodeStr(t, bs))
- duration, err := types.ParseDuration(sc, "11:30:45", 0)
+ duration, _, err := types.ParseDuration(sc, "11:30:45", 0)
require.NoError(t, err)
d.SetMysqlDuration(duration)
columns[0].Type = mysql.TypeDuration
diff --git a/session/BUILD.bazel b/session/BUILD.bazel
index e6cfe0f898682..a48f8238cc5ca 100644
--- a/session/BUILD.bazel
+++ b/session/BUILD.bazel
@@ -75,6 +75,7 @@ go_library(
"//util/memory",
"//util/parser",
"//util/rowcodec",
+ "//util/sem",
"//util/sli",
"//util/sqlexec",
"//util/tableutil",
diff --git a/session/bootstrap.go b/session/bootstrap.go
index 68e97c84d1dd2..d63181829993d 100644
--- a/session/bootstrap.go
+++ b/session/bootstrap.go
@@ -2043,6 +2043,8 @@ func doDMLWorks(s Session) {
vVal = variable.AssertionFastStr
case variable.TiDBEnableMutationChecker:
vVal = variable.On
+ case variable.TiDBEnablePaging:
+ vVal = variable.BoolToOnOff(variable.DefTiDBEnablePaging)
}
value := fmt.Sprintf(`("%s", "%s")`, strings.ToLower(k), vVal)
values = append(values, value)
diff --git a/session/bootstrap_test.go b/session/bootstrap_test.go
index d5438998cc506..6b56501d2b2eb 100644
--- a/session/bootstrap_test.go
+++ b/session/bootstrap_test.go
@@ -1025,3 +1025,31 @@ func TestUpgradeToVer85(t *testing.T) {
require.NoError(t, r.Close())
mustExec(t, se, "delete from mysql.bind_info where default_db = 'test'")
}
+
+func TestTiDBEnablePagingVariable(t *testing.T) {
+ store, dom := createStoreAndBootstrap(t)
+ se := createSessionAndSetID(t, store)
+ defer func() { require.NoError(t, store.Close()) }()
+ defer dom.Close()
+
+ for _, sql := range []string{
+ "select @@global.tidb_enable_paging",
+ "select @@session.tidb_enable_paging",
+ } {
+ r := mustExec(t, se, sql)
+ require.NotNil(t, r)
+
+ req := r.NewChunk(nil)
+ err := r.Next(context.Background(), req)
+ require.NoError(t, err)
+ require.NotEqual(t, 0, req.NumRows())
+
+ rows := statistics.RowToDatums(req.GetRow(0), r.Fields())
+ if variable.DefTiDBEnablePaging {
+ match(t, rows, "1")
+ } else {
+ match(t, rows, "0")
+ }
+ r.Close()
+ }
+}
diff --git a/session/session.go b/session/session.go
index 3f163146e8796..c5c1ead4c65b4 100644
--- a/session/session.go
+++ b/session/session.go
@@ -534,9 +534,22 @@ func (s *session) doCommit(ctx context.Context) error {
s.sessionVars.SetInTxn(false)
s.ClearDiskFullOpt()
}()
+ // check if the transaction is read-only
if s.txn.IsReadOnly() {
return nil
}
+ // check if the cluster is read-only
+ if !s.sessionVars.InRestrictedSQL && variable.RestrictedReadOnly.Load() || variable.VarTiDBSuperReadOnly.Load() {
+ // The statement is not internal SQL, and the cluster has RestrictedReadOnly or SuperReadOnly enabled.
+ // We need to check privileges again: a privilege check occurred during planning, but we must
+ // prevent the case where a long-running auto-commit statement is only now trying to commit.
+ pm := privilege.GetPrivilegeManager(s)
+ roles := s.sessionVars.ActiveRoles
+ if pm != nil && !pm.HasExplicitlyGrantedDynamicPrivilege(roles, "RESTRICTED_REPLICA_WRITER_ADMIN", false) {
+ s.RollbackTxn(ctx)
+ return plannercore.ErrSQLInReadOnlyMode
+ }
+ }
err := s.checkPlacementPolicyBeforeCommit()
if err != nil {
return err
@@ -2056,17 +2069,20 @@ func runStmt(ctx context.Context, se *session, s sqlexec.Statement) (rs sqlexec.
sessVars := se.sessionVars
// Record diagnostic information for DML statements
- if _, ok := s.(*executor.ExecStmt).StmtNode.(ast.DMLNode); ok {
- defer func() {
- sessVars.LastQueryInfo = variable.QueryInfo{
- TxnScope: sessVars.CheckAndGetTxnScope(),
- StartTS: sessVars.TxnCtx.StartTS,
- ForUpdateTS: sessVars.TxnCtx.GetForUpdateTS(),
- }
- if err != nil {
- sessVars.LastQueryInfo.ErrMsg = err.Error()
- }
- }()
+ if stmt, ok := s.(*executor.ExecStmt).StmtNode.(ast.DMLNode); ok {
+ // Keep the previous queryInfo for `show session_states` because the statement needs to encode it.
+ if showStmt, ok := stmt.(*ast.ShowStmt); !ok || showStmt.Tp != ast.ShowSessionStates {
+ defer func() {
+ sessVars.LastQueryInfo = sessionstates.QueryInfo{
+ TxnScope: sessVars.CheckAndGetTxnScope(),
+ StartTS: sessVars.TxnCtx.StartTS,
+ ForUpdateTS: sessVars.TxnCtx.GetForUpdateTS(),
+ }
+ if err != nil {
+ sessVars.LastQueryInfo.ErrMsg = err.Error()
+ }
+ }()
+ }
}
// Save origTxnCtx here to avoid it reset in the transaction retry.
diff --git a/session/session_test/BUILD.bazel b/session/session_test/BUILD.bazel
index 47c330e409de7..57900ac9d97cc 100644
--- a/session/session_test/BUILD.bazel
+++ b/session/session_test/BUILD.bazel
@@ -12,7 +12,9 @@ go_test(
"//config",
"//domain",
"//kv",
+ "//parser/auth",
"//parser/terror",
+ "//planner/core",
"//session",
"//store/mockstore",
"//testkit",
diff --git a/session/session_test/session_test.go b/session/session_test/session_test.go
index 83f4472ded39a..2a0f95fcbc473 100644
--- a/session/session_test/session_test.go
+++ b/session/session_test/session_test.go
@@ -17,6 +17,7 @@ package session_test
import (
"context"
"fmt"
+ "sync"
"sync/atomic"
"testing"
"time"
@@ -25,7 +26,9 @@ import (
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
+ "github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/parser/terror"
+ plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
@@ -302,6 +305,48 @@ func TestDisableTxnAutoRetry(t *testing.T) {
tk2.MustQuery("select * from no_retry").Check(testkit.Rows("13"))
}
+// The read-only flags are checked in the planning stage of queries,
+// but this test verifies that they are checked again at commit time.
+// The main use case for this is a long-running auto-commit statement.
+func TestAutoCommitRespectsReadOnly(t *testing.T) {
+ store, clean := createMockStoreForSchemaTest(t)
+ defer clean()
+ var wg sync.WaitGroup
+ tk1 := testkit.NewTestKit(t, store)
+ tk2 := testkit.NewTestKit(t, store)
+ require.True(t, tk1.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil))
+ require.True(t, tk2.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil))
+
+ tk1.MustExec("create table test.auto_commit_test (a int)")
+ wg.Add(1)
+ go func() {
+ err := tk1.ExecToErr("INSERT INTO test.auto_commit_test VALUES (SLEEP(1))")
+ require.True(t, terror.ErrorEqual(err, plannercore.ErrSQLInReadOnlyMode), fmt.Sprintf("err %v", err))
+ wg.Done()
+ }()
+ tk2.MustExec("SET GLOBAL tidb_restricted_read_only = 1")
+ err := tk2.ExecToErr("INSERT INTO test.auto_commit_test VALUES (0)") // should also be an error
+ require.True(t, terror.ErrorEqual(err, plannercore.ErrSQLInReadOnlyMode), fmt.Sprintf("err %v", err))
+ // Reset the flags, then check that the granted privilege lets us ignore the read-only flag and continue to insert.
+ wg.Wait()
+ tk1.MustExec("SET GLOBAL tidb_restricted_read_only = 0")
+ tk1.MustExec("SET GLOBAL tidb_super_read_only = 0")
+ tk1.MustExec("GRANT RESTRICTED_REPLICA_WRITER_ADMIN on *.* to 'root'")
+
+ wg.Add(1)
+ go func() {
+ tk1.MustExec("INSERT INTO test.auto_commit_test VALUES (SLEEP(1))")
+ wg.Done()
+ }()
+ tk2.MustExec("SET GLOBAL tidb_restricted_read_only = 1")
+ tk2.MustExec("INSERT INTO test.auto_commit_test VALUES (0)")
+
+ // wait for the goroutines to finish
+ wg.Wait()
+ tk1.MustExec("SET GLOBAL tidb_restricted_read_only = 0")
+ tk1.MustExec("SET GLOBAL tidb_super_read_only = 0")
+}
+
func TestLoadSchemaFailed(t *testing.T) {
originalRetryTime := domain.SchemaOutOfDateRetryTimes.Load()
originalRetryInterval := domain.SchemaOutOfDateRetryInterval.Load()
diff --git a/session/txn.go b/session/txn.go
index b867d8a5530c1..5cb87948bd6ce 100644
--- a/session/txn.go
+++ b/session/txn.go
@@ -136,6 +136,9 @@ func (txn *LazyTxn) resetTxnInfo(
currentSQLDigest string,
allSQLDigests []string,
) {
+ if txn.mu.TxnInfo.StartTS != 0 {
+ txninfo.Recorder.OnTrxEnd(&txn.mu.TxnInfo)
+ }
txn.mu.TxnInfo = txninfo.TxnInfo{}
txn.mu.TxnInfo.StartTS = startTS
txn.mu.TxnInfo.State = state
@@ -270,6 +273,9 @@ func (txn *LazyTxn) changeToInvalid() {
txn.mu.Lock()
defer txn.mu.Unlock()
+ if txn.mu.TxnInfo.StartTS != 0 {
+ txninfo.Recorder.OnTrxEnd(&txn.mu.TxnInfo)
+ }
txn.mu.TxnInfo = txninfo.TxnInfo{}
}
diff --git a/session/txninfo/summary.go b/session/txninfo/summary.go
new file mode 100644
index 0000000000000..e26b2c534cb6a
--- /dev/null
+++ b/session/txninfo/summary.go
@@ -0,0 +1,162 @@
+// Copyright 2021 PingCAP, Inc.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package txninfo
+
+import (
+ "container/list"
+ "encoding/json"
+ "fmt"
+ "hash/fnv"
+ "sync"
+ "time"
+
+ "github.com/pingcap/tidb/types"
+ "github.com/tikv/client-go/v2/oracle"
+)
+
+func digest(digests []string) uint64 {
+ // We use the FNV-1a hash to generate the 64-bit digest, since a 64-bit digest uses less memory
+ // and FNV-1a is faster than most other hash algorithms.
+ // See https://softwareengineering.stackexchange.com/questions/49550/which-hashing-algorithm-is-best-for-uniqueness-and-speed
+ hash := fnv.New64a()
+ for _, digest := range digests {
+ hash.Write([]byte(digest))
+ }
+ return hash.Sum64()
+}
+
+type trxSummaryEntry struct {
+ trxDigest uint64
+ digests []string
+}
+
+type trxSummaries struct {
+ capacity uint
+
+ // lru cache for digest -> trxSummaryEntry
+ elements map[uint64]*list.Element
+ cache *list.List
+}
+
+func newTrxSummaries(capacity uint) trxSummaries {
+ return trxSummaries{
+ capacity: capacity,
+ cache: list.New(),
+ elements: make(map[uint64]*list.Element),
+ }
+}
+
+func (s *trxSummaries) onTrxEnd(digests []string) {
+ key := digest(digests)
+ element, exists := s.elements[key]
+ if exists {
+ s.cache.MoveToFront(element)
+ return
+ }
+ e := trxSummaryEntry{
+ trxDigest: key,
+ digests: digests,
+ }
+ s.elements[key] = s.cache.PushFront(e)
+ if uint(s.cache.Len()) > s.capacity {
+ last := s.cache.Back()
+ delete(s.elements, last.Value.(trxSummaryEntry).trxDigest)
+ s.cache.Remove(last)
+ }
+}
+
+func (s *trxSummaries) dumpTrxSummary() [][]types.Datum {
+ var result [][]types.Datum
+ for element := s.cache.Front(); element != nil; element = element.Next() {
+ sqls := element.Value.(trxSummaryEntry).digests
+ // render the 64-bit digest as a hex string for display in the `TRX_SUMMARY` table
+ digest := fmt.Sprintf("%x", element.Value.(trxSummaryEntry).trxDigest)
+
+ res, err := json.Marshal(sqls)
+ if err != nil {
+ panic(err)
+ }
+
+ result = append(result, []types.Datum{
+ types.NewDatum(digest),
+ types.NewDatum(string(res)),
+ })
+ }
+ return result
+}
+
+func (s *trxSummaries) resize(capacity uint) {
+ s.capacity = capacity
+ for uint(s.cache.Len()) > s.capacity {
+ last := s.cache.Back()
+ delete(s.elements, last.Value.(trxSummaryEntry).trxDigest)
+ s.cache.Remove(last)
+ }
+}
+
+// TrxHistoryRecorder is a history recorder for transaction.
+type TrxHistoryRecorder struct {
+ mu sync.Mutex
+ minDuration time.Duration
+ summaries trxSummaries
+}
+
+// DumpTrxSummary dumps the transaction summaries to Datums for display in the `TRX_SUMMARY` table.
+func (recorder *TrxHistoryRecorder) DumpTrxSummary() [][]types.Datum {
+ recorder.mu.Lock()
+ defer recorder.mu.Unlock()
+ return recorder.summaries.dumpTrxSummary()
+}
+
+// OnTrxEnd should be called when a transaction ends, i.e. when it leaves the `TIDB_TRX` table.
+func (recorder *TrxHistoryRecorder) OnTrxEnd(info *TxnInfo) {
+ now := time.Now()
+ startTime := time.Unix(0, oracle.ExtractPhysical(info.StartTS)*1e6)
+ if now.Sub(startTime) < recorder.minDuration {
+ return
+ }
+ recorder.mu.Lock()
+ defer recorder.mu.Unlock()
+ recorder.summaries.onTrxEnd(info.AllSQLDigests)
+}
+
+func newTrxHistoryRecorder(summariesCap uint) TrxHistoryRecorder {
+ return TrxHistoryRecorder{
+ summaries: newTrxSummaries(summariesCap),
+ minDuration: 1 * time.Second,
+ }
+}
+
+// Clean clears the history recorder. For test only.
+func (recorder *TrxHistoryRecorder) Clean() {
+ recorder.summaries.cache = list.New()
+}
+
+// SetMinDuration sets the minimum duration for a transaction to be recorded.
+func (recorder *TrxHistoryRecorder) SetMinDuration(d time.Duration) {
+ recorder.mu.Lock()
+ defer recorder.mu.Unlock()
+ recorder.minDuration = d
+}
+
+// ResizeSummaries resizes the summaries capacity.
+func (recorder *TrxHistoryRecorder) ResizeSummaries(capacity uint) {
+ recorder.mu.Lock()
+ defer recorder.mu.Unlock()
+ recorder.summaries.resize(capacity)
+}
+
+// Recorder is the recorder instance.
+var Recorder TrxHistoryRecorder = newTrxHistoryRecorder(0)
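For orientation, the new recorder above hashes a transaction's ordered SQL digests with FNV-1a and keeps the resulting summaries in a small LRU built from `container/list` plus a lookup map. The following self-contained sketch (hypothetical names, not part of the `txninfo` package) shows the same pattern in isolation:

```go
package main

import (
	"container/list"
	"fmt"
	"hash/fnv"
)

// digestOf hashes a transaction's ordered SQL digests into a single 64-bit key,
// mirroring the FNV-1a approach used above (names here are illustrative only).
func digestOf(sqlDigests []string) uint64 {
	h := fnv.New64a()
	for _, d := range sqlDigests {
		h.Write([]byte(d))
	}
	return h.Sum64()
}

// lru keeps at most capacity entries; the front of the list is the most recently seen.
type lru struct {
	capacity int
	elems    map[uint64]*list.Element
	order    *list.List
}

func newLRU(capacity int) *lru {
	return &lru{capacity: capacity, elems: make(map[uint64]*list.Element), order: list.New()}
}

// touch records a digest, moving an existing entry to the front or evicting
// the least recently seen entry when the cache is full.
func (l *lru) touch(key uint64) {
	if e, ok := l.elems[key]; ok {
		l.order.MoveToFront(e)
		return
	}
	l.elems[key] = l.order.PushFront(key)
	if l.order.Len() > l.capacity {
		last := l.order.Back()
		delete(l.elems, last.Value.(uint64))
		l.order.Remove(last)
	}
}

func main() {
	cache := newLRU(2)
	for _, trx := range [][]string{{"a", "b"}, {"c"}, {"a", "b"}, {"d"}} {
		cache.touch(digestOf(trx))
	}
	fmt.Println("entries kept:", cache.order.Len()) // 2
}
```

The map gives O(1) lookup of an already-seen digest, while evicting from the back of the list drops the least recently seen summary, which is the behavior `onTrxEnd` and `resize` rely on above.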
diff --git a/sessionctx/sessionstates/BUILD.bazel b/sessionctx/sessionstates/BUILD.bazel
index cb421dfeac1ec..ce2f91c36176a 100644
--- a/sessionctx/sessionstates/BUILD.bazel
+++ b/sessionctx/sessionstates/BUILD.bazel
@@ -16,7 +16,9 @@ go_test(
srcs = ["session_states_test.go"],
deps = [
"//errno",
+ "//sessionctx/variable",
"//testkit",
+ "//util/sem",
"@com_github_stretchr_testify//require",
],
)
diff --git a/sessionctx/sessionstates/session_states.go b/sessionctx/sessionstates/session_states.go
index 312cf891ec80e..baf876ff87b4f 100644
--- a/sessionctx/sessionstates/session_states.go
+++ b/sessionctx/sessionstates/session_states.go
@@ -15,14 +15,41 @@
package sessionstates
import (
+ "time"
+
ptypes "github.com/pingcap/tidb/parser/types"
"github.com/pingcap/tidb/types"
)
+// QueryInfo represents the information of the last executed query. It's used to expose information for test purposes.
+type QueryInfo struct {
+ TxnScope string `json:"txn_scope"`
+ StartTS uint64 `json:"start_ts"`
+ ForUpdateTS uint64 `json:"for_update_ts"`
+ ErrMsg string `json:"error,omitempty"`
+}
+
+// LastDDLInfo represents the information of the last DDL statement. It's used to expose information for test purposes.
+type LastDDLInfo struct {
+ Query string `json:"query"`
+ SeqNum uint64 `json:"seq_num"`
+}
+
// SessionStates contains all the states in the session that should be migrated when the session
// is migrated to another server. It is shown by `show session_states` and recovered by `set session_states`.
type SessionStates struct {
- UserVars map[string]*types.Datum `json:"user-var-values,omitempty"`
- UserVarTypes map[string]*ptypes.FieldType `json:"user-var-types,omitempty"`
- SystemVars map[string]string `json:"sys-vars,omitempty"`
+ UserVars map[string]*types.Datum `json:"user-var-values,omitempty"`
+ UserVarTypes map[string]*ptypes.FieldType `json:"user-var-types,omitempty"`
+ SystemVars map[string]string `json:"sys-vars,omitempty"`
+ PreparedStmtID uint32 `json:"prepared-stmt-id,omitempty"`
+ Status uint16 `json:"status,omitempty"`
+ CurrentDB string `json:"current-db,omitempty"`
+ LastTxnInfo string `json:"txn-info,omitempty"`
+ LastQueryInfo *QueryInfo `json:"query-info,omitempty"`
+ LastDDLInfo *LastDDLInfo `json:"ddl-info,omitempty"`
+ LastFoundRows uint64 `json:"found-rows,omitempty"`
+ FoundInPlanCache bool `json:"in-plan-cache,omitempty"`
+ FoundInBinding bool `json:"in-binding,omitempty"`
+ SequenceLatestValues map[int64]int64 `json:"seq-values,omitempty"`
+ MPPStoreLastFailTime map[string]time.Time `json:"store-fail-time,omitempty"`
}
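Because `SessionStates` is emitted by `show session_states` and restored by `set session_states`, the JSON tags with `omitempty` determine what actually travels between servers. A small round-trip sketch (the `miniStates` type below is hypothetical, not the real struct):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// miniStates is a cut-down stand-in for SessionStates, with JSON tags chosen
// to mirror the omitempty style above.
type miniStates struct {
	CurrentDB     string            `json:"current-db,omitempty"`
	LastFoundRows uint64            `json:"found-rows,omitempty"`
	SystemVars    map[string]string `json:"sys-vars,omitempty"`
}

func main() {
	s := miniStates{CurrentDB: "test", SystemVars: map[string]string{"autocommit": "1"}}
	// Zero-valued fields (LastFoundRows here) are dropped thanks to omitempty.
	data, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"current-db":"test","sys-vars":{"autocommit":"1"}}

	var restored miniStates
	if err := json.Unmarshal(data, &restored); err != nil {
		panic(err)
	}
	fmt.Println(restored.CurrentDB) // test
}
```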
diff --git a/sessionctx/sessionstates/session_states_test.go b/sessionctx/sessionstates/session_states_test.go
index 81e4cb6d5285a..847f50f4e9a2b 100644
--- a/sessionctx/sessionstates/session_states_test.go
+++ b/sessionctx/sessionstates/session_states_test.go
@@ -19,8 +19,10 @@ import (
"strconv"
"strings"
"testing"
+ "time"
"github.com/pingcap/tidb/errno"
+ "github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/util/sem"
@@ -167,7 +169,10 @@ func TestSystemVars(t *testing.T) {
},
}
- sem.Enable()
+ if !sem.IsEnabled() {
+ sem.Enable()
+ defer sem.Disable()
+ }
for _, tt := range tests {
tk1 := testkit.NewTestKit(t, store)
for _, stmt := range tt.stmts {
@@ -206,6 +211,230 @@ func TestSystemVars(t *testing.T) {
}
}
+func TestSessionCtx(t *testing.T) {
+ store, clean := testkit.CreateMockStore(t)
+ defer clean()
+ tk := testkit.NewTestKit(t, store)
+ tk.MustExec("create table test.t1(id int)")
+
+ tests := []struct {
+ setFunc func(tk *testkit.TestKit) any
+ checkFunc func(tk *testkit.TestKit, param any)
+ }{
+ {
+ // check PreparedStmtID
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ require.Equal(t, uint32(1), tk.Session().GetSessionVars().GetNextPreparedStmtID())
+ },
+ },
+ {
+ // check PreparedStmtID
+ setFunc: func(tk *testkit.TestKit) any {
+ tk.MustExec("prepare stmt from 'select ?'")
+ return nil
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ require.Equal(t, uint32(2), tk.Session().GetSessionVars().GetNextPreparedStmtID())
+ },
+ },
+ {
+ // check Status
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ require.Equal(t, mysql.ServerStatusAutocommit, tk.Session().GetSessionVars().Status&mysql.ServerStatusAutocommit)
+ },
+ },
+ {
+ // check Status
+ setFunc: func(tk *testkit.TestKit) any {
+ tk.MustExec("set autocommit=0")
+ return nil
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ require.Equal(t, uint16(0), tk.Session().GetSessionVars().Status&mysql.ServerStatusAutocommit)
+ },
+ },
+ {
+ // check CurrentDB
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select database()").Check(testkit.Rows(""))
+ },
+ },
+ {
+ // check CurrentDB
+ setFunc: func(tk *testkit.TestKit) any {
+ tk.MustExec("use test")
+ return nil
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select database()").Check(testkit.Rows("test"))
+ },
+ },
+ {
+ // check LastTxnInfo
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select @@tidb_last_txn_info").Check(testkit.Rows(""))
+ },
+ },
+ {
+ // check LastTxnInfo
+ setFunc: func(tk *testkit.TestKit) any {
+ tk.MustExec("begin")
+ tk.MustExec("insert test.t1 value(1)")
+ tk.MustExec("commit")
+ rows := tk.MustQuery("select @@tidb_last_txn_info").Rows()
+ require.NotEqual(t, "", rows[0][0].(string))
+ return rows
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select @@tidb_last_txn_info").Check(param.([][]interface{}))
+ },
+ },
+ {
+ // check LastQueryInfo
+ setFunc: func(tk *testkit.TestKit) any {
+ rows := tk.MustQuery("select @@tidb_last_query_info").Rows()
+ require.NotEqual(t, "", rows[0][0].(string))
+ return rows
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select @@tidb_last_query_info").Check(param.([][]interface{}))
+ },
+ },
+ {
+ // check LastQueryInfo
+ setFunc: func(tk *testkit.TestKit) any {
+ tk.MustQuery("select * from test.t1")
+ startTS := tk.Session().GetSessionVars().LastQueryInfo.StartTS
+ require.NotEqual(t, uint64(0), startTS)
+ return startTS
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ startTS := tk.Session().GetSessionVars().LastQueryInfo.StartTS
+ require.Equal(t, param.(uint64), startTS)
+ },
+ },
+ {
+ // check LastDDLInfo
+ setFunc: func(tk *testkit.TestKit) any {
+ rows := tk.MustQuery("select @@tidb_last_ddl_info").Rows()
+ require.NotEqual(t, "", rows[0][0].(string))
+ return rows
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select @@tidb_last_ddl_info").Check(param.([][]interface{}))
+ },
+ },
+ {
+ // check LastDDLInfo
+ setFunc: func(tk *testkit.TestKit) any {
+ tk.MustExec("truncate table test.t1")
+ rows := tk.MustQuery("select @@tidb_last_ddl_info").Rows()
+ require.NotEqual(t, "", rows[0][0].(string))
+ return rows
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select @@tidb_last_ddl_info").Check(param.([][]interface{}))
+ },
+ },
+ {
+ // check LastFoundRows
+ setFunc: func(tk *testkit.TestKit) any {
+ tk.MustExec("insert test.t1 value(1), (2), (3), (4), (5)")
+ // SQL_CALC_FOUND_ROWS is not supported yet, so we just test a normal SELECT.
+ rows := tk.MustQuery("select * from test.t1 limit 3").Rows()
+ require.Equal(t, 3, len(rows))
+ return "3"
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select found_rows()").Check(testkit.Rows(param.(string)))
+ },
+ },
+ {
+ // check SequenceState
+ setFunc: func(tk *testkit.TestKit) any {
+ tk.MustExec("create sequence test.s")
+ tk.MustQuery("select nextval(test.s)").Check(testkit.Rows("1"))
+ tk.MustQuery("select lastval(test.s)").Check(testkit.Rows("1"))
+ return nil
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select lastval(test.s)").Check(testkit.Rows("1"))
+ tk.MustQuery("select nextval(test.s)").Check(testkit.Rows("2"))
+ },
+ },
+ {
+ // check MPPStoreLastFailTime
+ setFunc: func(tk *testkit.TestKit) any {
+ tk.Session().GetSessionVars().MPPStoreLastFailTime = map[string]time.Time{"store1": time.Now()}
+ return tk.Session().GetSessionVars().MPPStoreLastFailTime
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ failTime := tk.Session().GetSessionVars().MPPStoreLastFailTime
+ require.Equal(t, 1, len(failTime))
+ tm, ok := failTime["store1"]
+ require.True(t, ok)
+ require.True(t, param.(map[string]time.Time)["store1"].Equal(tm))
+ },
+ },
+ {
+ // check FoundInPlanCache
+ setFunc: func(tk *testkit.TestKit) any {
+ require.False(t, tk.Session().GetSessionVars().FoundInPlanCache)
+ return nil
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
+ },
+ },
+ {
+ // check FoundInPlanCache
+ setFunc: func(tk *testkit.TestKit) any {
+ tk.MustExec("prepare stmt from 'select * from test.t1'")
+ tk.MustQuery("execute stmt")
+ tk.MustQuery("execute stmt")
+ require.True(t, tk.Session().GetSessionVars().FoundInPlanCache)
+ return nil
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
+ },
+ },
+ {
+ // check FoundInBinding
+ setFunc: func(tk *testkit.TestKit) any {
+ require.False(t, tk.Session().GetSessionVars().FoundInBinding)
+ return nil
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("0"))
+ },
+ },
+ {
+ // check FoundInBinding
+ setFunc: func(tk *testkit.TestKit) any {
+ tk.MustExec("create session binding for select * from test.t1 using select * from test.t1")
+ tk.MustQuery("select * from test.t1")
+ require.True(t, tk.Session().GetSessionVars().FoundInBinding)
+ return nil
+ },
+ checkFunc: func(tk *testkit.TestKit, param any) {
+ tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("1"))
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ tk1 := testkit.NewTestKit(t, store)
+ var param any
+ if tt.setFunc != nil {
+ param = tt.setFunc(tk1)
+ }
+ tk2 := testkit.NewTestKit(t, store)
+ showSessionStatesAndSet(t, tk1, tk2)
+ tt.checkFunc(tk2, param)
+ }
+}
+
func showSessionStatesAndSet(t *testing.T, tk1, tk2 *testkit.TestKit) {
rows := tk1.MustQuery("show session_states").Rows()
require.Len(t, rows, 1)
diff --git a/sessionctx/variable/noop.go b/sessionctx/variable/noop.go
index 4f2cdac1aa690..6eb70beabfc99 100644
--- a/sessionctx/variable/noop.go
+++ b/sessionctx/variable/noop.go
@@ -24,7 +24,6 @@ import (
// but changing them has no effect on behavior.
var noopSysVars = []*SysVar{
- {Scope: ScopeGlobal, Name: MaxConnections, Value: "151", Type: TypeUnsigned, MinValue: 1, MaxValue: 100000},
// It is unsafe to pretend that any variation of "read only" is enabled when the server
// does not support it. It is possible that these features will be supported in future,
// but until then...
diff --git a/sessionctx/variable/sequence_state.go b/sessionctx/variable/sequence_state.go
index bb8b468da2de9..38199b084fa81 100644
--- a/sessionctx/variable/sequence_state.go
+++ b/sessionctx/variable/sequence_state.go
@@ -50,3 +50,23 @@ func (ss *SequenceState) GetLastValue(sequenceID int64) (int64, bool, error) {
}
return 0, true, nil
}
+
+// GetAllStates returns a copied latestValueMap.
+func (ss *SequenceState) GetAllStates() map[int64]int64 {
+ ss.mu.Lock()
+ defer ss.mu.Unlock()
+ latestValueMap := make(map[int64]int64, len(ss.latestValueMap))
+ for seqID, latestValue := range ss.latestValueMap {
+ latestValueMap[seqID] = latestValue
+ }
+ return latestValueMap
+}
+
+// SetAllStates sets latestValueMap as a whole.
+func (ss *SequenceState) SetAllStates(latestValueMap map[int64]int64) {
+ ss.mu.Lock()
+ defer ss.mu.Unlock()
+ for seqID, latestValue := range latestValueMap {
+ ss.latestValueMap[seqID] = latestValue
+ }
+}
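`GetAllStates` returns a copy of the map rather than the map itself, so callers can read the snapshot without holding the sequence mutex. A minimal sketch of that copy-under-lock pattern (hypothetical type, not the TiDB one):

```go
package main

import (
	"fmt"
	"sync"
)

// snapshotter returns a copy of its map so callers can use the snapshot
// freely while the original keeps changing under the mutex.
type snapshotter struct {
	mu   sync.Mutex
	data map[int64]int64
}

func (s *snapshotter) Snapshot() map[int64]int64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	out := make(map[int64]int64, len(s.data))
	for k, v := range s.data {
		out[k] = v
	}
	return out
}

func main() {
	s := &snapshotter{data: map[int64]int64{1: 100}}
	snap := s.Snapshot()
	snap[1] = 999 // mutating the snapshot does not touch the original
	fmt.Println(s.data[1], snap[1]) // 100 999
}
```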
diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go
index 735105c57d4db..fe4f469e76134 100644
--- a/sessionctx/variable/session.go
+++ b/sessionctx/variable/session.go
@@ -1041,10 +1041,10 @@ type SessionVars struct {
LastTxnInfo string
// LastQueryInfo keeps track the info of last query.
- LastQueryInfo QueryInfo
+ LastQueryInfo sessionstates.QueryInfo
// LastDDLInfo keeps track the info of last DDL.
- LastDDLInfo LastDDLInfo
+ LastDDLInfo sessionstates.LastDDLInfo
// PartitionPruneMode indicates how and when to prune partitions.
PartitionPruneMode atomic2.String
@@ -1850,6 +1850,23 @@ func (s *SessionVars) EncodeSessionStates(ctx context.Context, sessionStates *se
sessionStates.UserVarTypes[name] = userVarType.Clone()
}
s.UsersLock.RUnlock()
+
+ // Encode other session contexts.
+ sessionStates.PreparedStmtID = s.preparedStmtID
+ sessionStates.Status = s.Status
+ sessionStates.CurrentDB = s.CurrentDB
+ sessionStates.LastTxnInfo = s.LastTxnInfo
+ if s.LastQueryInfo.StartTS != 0 {
+ sessionStates.LastQueryInfo = &s.LastQueryInfo
+ }
+ if s.LastDDLInfo.SeqNum != 0 {
+ sessionStates.LastDDLInfo = &s.LastDDLInfo
+ }
+ sessionStates.LastFoundRows = s.LastFoundRows
+ sessionStates.SequenceLatestValues = s.SequenceState.GetAllStates()
+ sessionStates.MPPStoreLastFailTime = s.MPPStoreLastFailTime
+ sessionStates.FoundInPlanCache = s.PrevFoundInPlanCache
+ sessionStates.FoundInBinding = s.PrevFoundInBinding
return
}
@@ -1866,6 +1883,25 @@ func (s *SessionVars) DecodeSessionStates(ctx context.Context, sessionStates *se
s.UserVarTypes[name] = userVarType.Clone()
}
s.UsersLock.Unlock()
+
+ // Decode other session contexts.
+ s.preparedStmtID = sessionStates.PreparedStmtID
+ s.Status = sessionStates.Status
+ s.CurrentDB = sessionStates.CurrentDB
+ s.LastTxnInfo = sessionStates.LastTxnInfo
+ if sessionStates.LastQueryInfo != nil {
+ s.LastQueryInfo = *sessionStates.LastQueryInfo
+ }
+ if sessionStates.LastDDLInfo != nil {
+ s.LastDDLInfo = *sessionStates.LastDDLInfo
+ }
+ s.LastFoundRows = sessionStates.LastFoundRows
+ s.SequenceState.SetAllStates(sessionStates.SequenceLatestValues)
+ if sessionStates.MPPStoreLastFailTime != nil {
+ s.MPPStoreLastFailTime = sessionStates.MPPStoreLastFailTime
+ }
+ s.FoundInPlanCache = sessionStates.FoundInPlanCache
+ s.FoundInBinding = sessionStates.FoundInBinding
return
}
@@ -2458,20 +2494,6 @@ func writeSlowLogItem(buf *bytes.Buffer, key, value string) {
buf.WriteString(SlowLogRowPrefixStr + key + SlowLogSpaceMarkStr + value + "\n")
}
-// QueryInfo represents the information of last executed query. It's used to expose information for test purpose.
-type QueryInfo struct {
- TxnScope string `json:"txn_scope"`
- StartTS uint64 `json:"start_ts"`
- ForUpdateTS uint64 `json:"for_update_ts"`
- ErrMsg string `json:"error,omitempty"`
-}
-
-// LastDDLInfo represents the information of last DDL. It's used to expose information for test purpose.
-type LastDDLInfo struct {
- Query string `json:"query"`
- SeqNum uint64 `json:"seq_num"`
-}
-
// TxnReadTS indicates the value and used situation for tx_read_ts
type TxnReadTS struct {
readTS uint64
diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go
index ce3cdc66bfc7b..2e7865ac57040 100644
--- a/sessionctx/variable/sysvar.go
+++ b/sessionctx/variable/sysvar.go
@@ -431,6 +431,12 @@ var defaultSysVars = []*SysVar{
{Scope: ScopeInstance, Name: PluginDir, Value: "/data/deploy/plugin", ReadOnly: true, GetGlobal: func(s *SessionVars) (string, error) {
return config.GetGlobalConfig().Instance.PluginDir, nil
}},
+ {Scope: ScopeInstance, Name: MaxConnections, Value: strconv.FormatUint(uint64(config.GetGlobalConfig().Instance.MaxConnections), 10), Type: TypeUnsigned, MinValue: 0, MaxValue: 100000, SetGlobal: func(s *SessionVars, val string) error {
+ config.GetGlobalConfig().Instance.MaxConnections = uint32(TidbOptInt64(val, 0))
+ return nil
+ }, GetGlobal: func(s *SessionVars) (string, error) {
+ return strconv.FormatUint(uint64(config.GetGlobalConfig().Instance.MaxConnections), 10), nil
+ }},
/* The system variables below have GLOBAL scope */
{Scope: ScopeGlobal, Name: MaxPreparedStmtCount, Value: strconv.FormatInt(DefMaxPreparedStmtCount, 10), Type: TypeInt, MinValue: -1, MaxValue: 1048576},
@@ -1564,7 +1570,10 @@ var defaultSysVars = []*SysVar{
s.RegardNULLAsPoint = TiDBOptOn(val)
return nil
}},
- {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnablePaging, Value: Off, Type: TypeBool, Hidden: true, SetSession: func(s *SessionVars, val string) error {
+ {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnablePaging, Value: BoolToOnOff(DefTiDBEnablePaging), Type: TypeBool, Hidden: true, SetSession: func(s *SessionVars, val string) error {
+ s.EnablePaging = TiDBOptOn(val)
+ return nil
+ }, SetGlobal: func(s *SessionVars, val string) error {
s.EnablePaging = TiDBOptOn(val)
return nil
}},
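The new instance-scoped `max_connections` entry follows the closure style of the surrounding sysvar table: the variable holds no state of its own, and `GetGlobal`/`SetGlobal` read and write the backing config value. A toy sketch of that pattern (all names hypothetical):

```go
package main

import (
	"fmt"
	"strconv"
)

// sysVar mirrors the getter/setter-closure style used in the sysvar table.
type sysVar struct {
	name      string
	getGlobal func() (string, error)
	setGlobal func(val string) error
}

func main() {
	var maxConnections uint32 // backing "config" value; 0 means no limit here

	v := sysVar{
		name: "max_connections",
		getGlobal: func() (string, error) {
			return strconv.FormatUint(uint64(maxConnections), 10), nil
		},
		setGlobal: func(val string) error {
			n, err := strconv.ParseUint(val, 10, 32)
			if err != nil {
				return err
			}
			maxConnections = uint32(n)
			return nil
		},
	}

	_ = v.setGlobal("151")
	cur, _ := v.getGlobal()
	fmt.Println(v.name, "=", cur) // max_connections = 151
}
```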
diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go
index 03eca96b0e20c..529c31b6e560d 100644
--- a/sessionctx/variable/tidb_vars.go
+++ b/sessionctx/variable/tidb_vars.go
@@ -941,6 +941,7 @@ const (
DefTiDBPrepPlanCacheMemoryGuardRatio = 0.1
DefTiDBEnableConcurrentDDL = true
DefTiDBSimplifiedMetrics = false
+ DefTiDBEnablePaging = true
)
// Process global variables.
diff --git a/sessionctx/variable/varsutil.go b/sessionctx/variable/varsutil.go
index ab878d2bb3054..38108e5cba932 100644
--- a/sessionctx/variable/varsutil.go
+++ b/sessionctx/variable/varsutil.go
@@ -430,7 +430,7 @@ func parseTimeZone(s string) (*time.Location, error) {
// The value can be given as a string indicating an offset from UTC, such as '+10:00' or '-6:00'.
// The time zone's value should in [-12:59,+14:00].
if strings.HasPrefix(s, "+") || strings.HasPrefix(s, "-") {
- d, err := types.ParseDuration(nil, s[1:], 0)
+ d, _, err := types.ParseDuration(nil, s[1:], 0)
if err == nil {
if s[0] == '-' {
if d.Duration > 12*time.Hour+59*time.Minute {
diff --git a/statistics/row_sampler.go b/statistics/row_sampler.go
index d091f2be818fb..c80af0b980c79 100644
--- a/statistics/row_sampler.go
+++ b/statistics/row_sampler.go
@@ -78,7 +78,7 @@ func (i ReservoirRowSampleItem) MemUsage() (sum int64) {
sum += col.MemUsage()
}
if i.Handle != nil {
- sum += i.Handle.MemUsage()
+ sum += int64(i.Handle.MemUsage())
}
return sum
}
diff --git a/statistics/scalar_test.go b/statistics/scalar_test.go
index 32eca78c5c80b..34a911579137f 100644
--- a/statistics/scalar_test.go
+++ b/statistics/scalar_test.go
@@ -35,7 +35,7 @@ func getDecimal(value float64) *types.MyDecimal {
}
func getDuration(value string) types.Duration {
- dur, _ := types.ParseDuration(nil, value, 0)
+ dur, _, _ := types.ParseDuration(nil, value, 0)
return dur
}
diff --git a/store/driver/tikv_driver.go b/store/driver/tikv_driver.go
index a3c385f39df6e..e1ba5d121608f 100644
--- a/store/driver/tikv_driver.go
+++ b/store/driver/tikv_driver.go
@@ -326,6 +326,7 @@ func (s *tikvStore) ShowStatus(ctx context.Context, key string) (interface{}, er
// GetLockWaits get return lock waits info
func (s *tikvStore) GetLockWaits() ([]*deadlockpb.WaitForEntry, error) {
stores := s.GetRegionCache().GetStoresByType(tikvrpc.TiKV)
+ //nolint: prealloc
var result []*deadlockpb.WaitForEntry
for _, store := range stores {
resp, err := s.GetTiKVClient().SendRequest(context.TODO(), store.GetAddr(), tikvrpc.NewRequest(tikvrpc.CmdLockWaitInfo, &kvrpcpb.GetLockWaitInfoRequest{}), time.Second*30)
diff --git a/store/mockstore/unistore/cophandler/mpp_exec.go b/store/mockstore/unistore/cophandler/mpp_exec.go
index 941e9996f5acc..8e079991c7daa 100644
--- a/store/mockstore/unistore/cophandler/mpp_exec.go
+++ b/store/mockstore/unistore/cophandler/mpp_exec.go
@@ -151,8 +151,9 @@ func (e *tableScanExec) Process(key, value []byte) error {
e.rowCnt++
if e.chk.IsFull() {
+ lastProcessed := kv.Key(append([]byte{}, key...)) // make a copy to avoid data race
select {
- case e.result <- scanResult{chk: e.chk, lastProcessedKey: kv.Key(key), err: nil}:
+ case e.result <- scanResult{chk: e.chk, lastProcessedKey: lastProcessed, err: nil}:
e.chk = chunk.NewChunkWithCapacity(e.fieldTypes, DefaultBatchSize)
case <-e.done:
return dbreader.ErrScanBreak
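The one-line fix above copies the scan key before sending it on the result channel, because the scanner may reuse the key's backing array on its next iteration, which becomes a data race once another goroutine reads the chunk. A compact, single-goroutine illustration of why the copy matters (it shows the aliasing rather than the race itself):

```go
package main

import "fmt"

func main() {
	buf := []byte("key-1")
	results := make(chan []byte, 2)

	// Unsafe: this sends a slice header that still aliases buf's backing array.
	results <- buf
	// Safe: send a copy so later reuse of buf cannot be observed by the reader.
	results <- append([]byte{}, buf...)

	copy(buf, "key-2")             // the producer reuses its buffer for the next key
	fmt.Println(string(<-results)) // key-2 (aliased, sees the overwrite)
	fmt.Println(string(<-results)) // key-1 (independent copy)
}
```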
diff --git a/tablecodec/tablecodec_test.go b/tablecodec/tablecodec_test.go
index 008ee8c5d7e1f..3f813c8747a7a 100644
--- a/tablecodec/tablecodec_test.go
+++ b/tablecodec/tablecodec_test.go
@@ -262,7 +262,7 @@ func TestTimeCodec(t *testing.T) {
"2016-06-23 11:30:45")
require.NoError(t, err)
row[2] = types.NewDatum(ts)
- du, err := types.ParseDuration(nil, "12:59:59.999999", 6)
+ du, _, err := types.ParseDuration(nil, "12:59:59.999999", 6)
require.NoError(t, err)
row[3] = types.NewDatum(du)
diff --git a/tests/globalkilltest/global_kill_test.go b/tests/globalkilltest/global_kill_test.go
index 85738f873970d..0e4caf303074d 100644
--- a/tests/globalkilltest/global_kill_test.go
+++ b/tests/globalkilltest/global_kill_test.go
@@ -49,7 +49,10 @@ var (
pdClientPath = flag.String("pd", "127.0.0.1:2379", "pd client path")
- lostConnectionToPDTimeout = flag.Int("conn_lost", 5, "lost connection to PD timeout, should be the same as TiDB ldflag ")
+ // nolint: unused, deadcode
+ lostConnectionToPDTimeout = flag.Int("conn_lost", 5, "lost connection to PD timeout, should be the same as TiDB ldflag ")
+
+ // nolint: unused, deadcode
timeToCheckPDConnectionRestored = flag.Int("conn_restored", 1, "time to check PD connection restored, should be the same as TiDB ldflag ")
)
@@ -64,7 +67,7 @@ type GlobalKillSuite struct {
pdCli *clientv3.Client
pdErr error
- clusterId string
+ clusterID string
pdProc *exec.Cmd
tikvProc *exec.Cmd
}
@@ -74,7 +77,7 @@ func createGloabalKillSuite(t *testing.T) (s *GlobalKillSuite, clean func()) {
err := logutil.InitLogger(&logutil.LogConfig{Config: log.Config{Level: *logLevel}})
require.NoError(t, err)
- s.clusterId = time.Now().Format(time.RFC3339Nano)
+ s.clusterID = time.Now().Format(time.RFC3339Nano)
err = s.startCluster()
require.NoError(t, err)
s.pdCli, s.pdErr = s.connectPD()
@@ -157,12 +160,12 @@ func (s *GlobalKillSuite) startPD(dataDir string) (err error) {
}
func (s *GlobalKillSuite) startCluster() (err error) {
- err = s.startPD(s.clusterId)
+ err = s.startPD(s.clusterID)
if err != nil {
return
}
- err = s.startTiKV(s.clusterId)
+ err = s.startTiKV(s.clusterID)
if err != nil {
return
}
diff --git a/tests/globalkilltest/go.mod b/tests/globalkilltest/go.mod
deleted file mode 100644
index 85c7fec1f425f..0000000000000
--- a/tests/globalkilltest/go.mod
+++ /dev/null
@@ -1,46 +0,0 @@
-module github.com/pingcap/tests/globalkilltest
-
-go 1.18
-
-require (
- github.com/go-sql-driver/mysql v1.6.0
- github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c
- github.com/pingcap/log v1.1.0
- github.com/pingcap/tidb v2.0.11+incompatible
- github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df
- go.etcd.io/etcd/client/v3 v3.5.2
- go.uber.org/zap v1.21.0
- google.golang.org/grpc v1.44.0
-)
-
-require (
- github.com/benbjohnson/clock v1.3.0 // indirect
- github.com/coreos/go-semver v0.3.0 // indirect
- github.com/coreos/go-systemd/v22 v22.3.2 // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/protobuf v1.5.2 // indirect
- github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
- github.com/opentracing/opentracing-go v1.2.0 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- go.etcd.io/etcd/api/v3 v3.5.2 // indirect
- go.etcd.io/etcd/client/pkg/v3 v3.5.2 // indirect
- go.uber.org/atomic v1.9.0 // indirect
- go.uber.org/multierr v1.8.0 // indirect
- golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
- golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f // indirect
- golang.org/x/text v0.3.7 // indirect
- google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8 // indirect
- google.golang.org/protobuf v1.27.1 // indirect
- gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
-)
-
-// fix potential security issue(CVE-2020-26160) introduced by indirect dependency.
-replace github.com/dgrijalva/jwt-go => github.com/form3tech-oss/jwt-go v3.2.6-0.20210809144907-32ab6a8243d7+incompatible
-
-replace github.com/pingcap/tidb => ../../
-
-replace github.com/pingcap/tidb/parser => ../../parser
-
-replace google.golang.org/grpc => google.golang.org/grpc v1.29.1
diff --git a/tests/globalkilltest/go.sum b/tests/globalkilltest/go.sum
deleted file mode 100644
index 10a048e20d254..0000000000000
--- a/tests/globalkilltest/go.sum
+++ /dev/null
@@ -1,312 +0,0 @@
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
-github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/coocood/freecache v1.2.1 h1:/v1CqMq45NFH9mp/Pt142reundeBM0dVUD3osQBeu/U=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=
-github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37 h1:X6mKGhCFOxrKeeHAjv/3UvT6e5RRxW6wRdlqlV6/H4w=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
-github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
-github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
-github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4=
-github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg=
-github.com/pingcap/failpoint v0.0.0-20220423142525-ae43b7f4e5c3 h1:kJolJWbyadVeL8RKBlqmXQR7FRKPsIeU85TUYyhbhiQ=
-github.com/pingcap/kvproto v0.0.0-20220328072018-6e75c12dbd73 h1:jKixsi6Iw00hL0+o23hmr8BNzlsQP9pShHTOwyuf/Os=
-github.com/pingcap/log v1.1.0 h1:ELiPxACz7vdo1qAvvaWJg1NrYFoY6gqAh/+Uo6aXdD8=
-github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
-github.com/pingcap/tipb v0.0.0-20220215045658-d12dec7a7609 h1:BiCS1ZRnW0szOvTAa3gCqWIhyo+hv83SVaBgrUghXIU=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
-github.com/shirou/gopsutil/v3 v3.21.12 h1:VoGxEW2hpmz0Vt3wUvHIl9fquzYLNpVpgNNB7pGJimA=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df h1:rh3VYpfvzXRbJ90ymx1yfhGl/wq8ac2m/cUbao61kwY=
-github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/tikv/client-go/v2 v2.0.1-0.20220406091203-f73ec0e675f4 h1:bi/tuV42dQCu7TTTOwHQW6cHVrV1fhet+Hzo5CUODBQ=
-github.com/tikv/pd/client v0.0.0-20220307081149-841fa61e9710 h1:jxgmKOscXSjaFEKQGRyY5qOpK8hLqxs2irb/uDJMtwk=
-github.com/twmb/murmur3 v1.1.3 h1:D83U0XYKcHRYwYIpBKf3Pks91Z0Byda/9SJ8B6EMRcA=
-github.com/uber/jaeger-client-go v2.22.1+incompatible h1:NHcubEkVbahf9t3p75TOCR83gdUHXjRJvjoBh1yACsM=
-github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
-go.etcd.io/etcd/api/v3 v3.5.2 h1:tXok5yLlKyuQ/SXSjtqHc4uzNaMqZi2XsoSPr/LlJXI=
-go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
-go.etcd.io/etcd/client/pkg/v3 v3.5.2 h1:4hzqQ6hIb3blLyQ8usCU4h3NghkqcsohEQ3o3VetYxE=
-go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v3 v3.5.2 h1:WdnejrUtQC4nCxK0/dLTMqKOB+U5TP/2Ya0BJL+1otA=
-go.etcd.io/etcd/client/v3 v3.5.2/go.mod h1:kOOaWFFgHygyT0WlSmL8TJiXmMysO/nNUlEsSsN6W4o=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
-go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
-go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
-go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f h1:8w7RhxzTVgUzw/AH/9mUV5q0vMgy40SQRursCcfmkCw=
-golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8 h1:divpuJZKgX3Qt7MFDE5v62yu0yQcQbTCD9VJp9leX58=
-google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/tests/graceshutdown/go.mod b/tests/graceshutdown/go.mod
deleted file mode 100644
index df86a6bebb48a..0000000000000
--- a/tests/graceshutdown/go.mod
+++ /dev/null
@@ -1,27 +0,0 @@
-module graceshutdown
-
-go 1.18
-
-require (
- github.com/go-sql-driver/mysql v1.6.0
- github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c
- github.com/pingcap/log v1.1.0
- github.com/pingcap/tidb v2.0.11+incompatible
- github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df
- go.uber.org/goleak v1.1.12
- go.uber.org/zap v1.21.0
-)
-
-require (
- github.com/benbjohnson/clock v1.3.0 // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- go.uber.org/atomic v1.9.0 // indirect
- go.uber.org/multierr v1.8.0 // indirect
- gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
-)
-
-replace github.com/pingcap/tidb => ../../
-
-replace github.com/pingcap/tidb/parser => ../../parser
diff --git a/tests/graceshutdown/go.sum b/tests/graceshutdown/go.sum
deleted file mode 100644
index 8f811d384e31c..0000000000000
--- a/tests/graceshutdown/go.sum
+++ /dev/null
@@ -1,87 +0,0 @@
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
-github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4=
-github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg=
-github.com/pingcap/log v1.1.0 h1:ELiPxACz7vdo1qAvvaWJg1NrYFoY6gqAh/+Uo6aXdD8=
-github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df h1:rh3VYpfvzXRbJ90ymx1yfhGl/wq8ac2m/cUbao61kwY=
-github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
-go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
-go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
-go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
-go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/tests/readonlytest/go.mod b/tests/readonlytest/go.mod
deleted file mode 100644
index cc0b22e3b1343..0000000000000
--- a/tests/readonlytest/go.mod
+++ /dev/null
@@ -1,14 +0,0 @@
-module github.com/pingcap/tidb/tests/readonlytest
-
-go 1.16
-
-require (
- github.com/go-sql-driver/mysql v1.6.0
- github.com/pingcap/tidb v2.0.11+incompatible
- github.com/stretchr/testify v1.7.0
- go.uber.org/goleak v1.1.12
-)
-
-replace github.com/pingcap/tidb => ../../
-
-replace github.com/pingcap/tidb/parser => ../../parser
diff --git a/tests/readonlytest/go.sum b/tests/readonlytest/go.sum
deleted file mode 100644
index 387cfacb99a28..0000000000000
--- a/tests/readonlytest/go.sum
+++ /dev/null
@@ -1,1257 +0,0 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
-cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
-cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
-cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
-cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.16.1/go.mod h1:LaNorbty3ehnU3rEjXSNV/NRgQA0O8Y+uh6bPe5UOk4=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v0.20.0/go.mod h1:ZPW/Z0kLCTdDZaDbYTetxc9Cxl/2lNqxYHYNOF2bti0=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.12.0/go.mod h1:GJzjM4SR9T0KyX5gKCVyz1ytD8FeWeUPCwtFCt1AyfE=
-github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.1/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.2.0/go.mod h1:eHWhQKXc1Gv1DvWH//UzgWjWFEo0Pp4pH2vBzjBw8Fc=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw=
-github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w=
-github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
-github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
-github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
-github.com/Jeffail/gabs/v2 v2.5.1/go.mod h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ+NcziBI=
-github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
-github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
-github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/ReneKroon/ttlcache/v2 v2.3.0/go.mod h1:zbo6Pv/28e21Z8CzzqgYRArQYGYtjONRxaAKGxzQvG4=
-github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
-github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
-github.com/VividCortex/mysqlerr v0.0.0-20200629151747-c28746d985dd/go.mod h1:f3HiCrHjHBdcm6E83vGaXh1KomZMA2P6aeo3hKx/wg0=
-github.com/Xeoncross/go-aesctr-with-hmac v0.0.0-20200623134604-12b17a7ff502/go.mod h1:pmnBM9bxWSiHvC/gSWunUIyDvGn33EkP2CUjxFKtTTM=
-github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
-github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alvaroloes/enumer v1.1.2/go.mod h1:FxrjvuXoDAx9isTJrv4c+T410zFi0DtXIT0m65DJ+Wo=
-github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apache/thrift v0.0.0-20181112125854-24918abba929/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/appleboy/gin-jwt/v2 v2.6.3/go.mod h1:MfPYA4ogzvOcVkRwAxT7quHOtQmVKDpTwxyUrC2DNw0=
-github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/aws/aws-sdk-go v1.30.19/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/aws/aws-sdk-go v1.35.3/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
-github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU=
-github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets=
-github.com/carlmjohnson/flagext v0.21.0/go.mod h1:Eenv0epIUAr4NuedNmkzI8WmBmjIxZC239XcKxYS2ac=
-github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cheggaaa/pb/v3 v3.0.8/go.mod h1:UICbiLec/XO6Hw6k+BHEtHeQFzzBH4i2/qk/ow1EJTA=
-github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4=
-github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM=
-github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/cockroachdb/pebble v0.0.0-20210719141320-8c3bd06debb5/go.mod h1:JXfQr3d+XO4bL1pxGwKKo09xylQSdZ/mpZ9b2wfVcPs=
-github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
-github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
-github.com/colinmarc/hdfs/v2 v2.1.1/go.mod h1:M3x+k8UKKmxtFu++uAZ0OtDU8jR3jnaZIAc6yK4Ue0c=
-github.com/coocood/bbloom v0.0.0-20190830030839-58deb6228d64/go.mod h1:F86k/6c7aDUdwSUevnLpHS/3Q9hzYCE99jGk2xsHnt0=
-github.com/coocood/freecache v1.1.1/go.mod h1:OKrEjkGVoxZhyWAJoeFi5BMLUJm2Tit0kpGkIr7NGYY=
-github.com/coocood/rtutil v0.0.0-20190304133409-c84515f646f2/go.mod h1:7qG7YFnOALvsx6tKTNmQot8d7cGFXM9TidzvRFLWYwM=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/corona10/goimagehash v1.0.2/go.mod h1:/l9umBhvcHQXVtQO1V6Gp1yD20STawkhRnnX0D1bvVI=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
-github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
-github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM=
-github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
-github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
-github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs=
-github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37/go.mod h1:DC3JtzuG7kxMvJ6dZmf2ymjNyoXwgtklr7FN+Um2B0U=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
-github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
-github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
-github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
-github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
-github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
-github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
-github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsouza/fake-gcs-server v1.19.0/go.mod h1:JtXHY/QzHhtyIxsNfIuQ+XgHtRb5B/w8nqbL5O8zqo0=
-github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E=
-github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
-github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w=
-github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
-github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
-github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
-github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y=
-github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
-github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
-github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
-github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
-github.com/go-echarts/go-echarts v1.0.0/go.mod h1:qbmyAb/Rl1f2w7wKba1D4LoNq4U164yO4/wedFbcWyo=
-github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
-github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
-github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
-github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w=
-github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
-github.com/go-resty/resty/v2 v2.6.0/go.mod h1:PwvJS6hvaPkjtjNg9ph+VrSD92bi5Zq73w/BIH7cC3Q=
-github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
-github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
-github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
-github.com/goccy/go-graphviz v0.0.5/go.mod h1:wXVsXxmyMQU6TN3zGRttjNn3h+iCAS7xQFC6TlNvLhk=
-github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
-github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
-github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
-github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200407044318-7d83b28da2e9/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
-github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69/go.mod h1:YLEMZOtU+AZ7dhN9T/IpGhXVGly2bvkJQ+zxj3WeVQo=
-github.com/hashicorp/go-uuid v0.0.0-20180228145832-27454136f036/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
-github.com/hypnoglow/gormzap v0.3.0/go.mod h1:5Wom8B7Jl2oK0Im9hs6KQ+Kl92w4Y7gKCrj66rhyvw0=
-github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/influxdata/tdigest v0.0.1/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y=
-github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
-github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
-github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI=
-github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
-github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
-github.com/jedib0t/go-pretty/v6 v6.2.2/go.mod h1:+nE9fyyHGil+PuISTCrp7avEdo6bqoMwqZnuiK2r2a0=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
-github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
-github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
-github.com/joho/sqltocsv v0.0.0-20210428211105-a6d6801d59df/go.mod h1:mAVCUAYtW9NG31eB30umMSLKcDt6mCUWSjoSn5qBh0k=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/joomcode/errorx v1.0.1/go.mod h1:kgco15ekB6cs+4Xjzo7SPeXzx38PbJzBwbnu9qfVNHQ=
-github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
-github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
-github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
-github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
-github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk=
-github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U=
-github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw=
-github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
-github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
-github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
-github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
-github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
-github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
-github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
-github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
-github.com/mgechev/revive v1.0.2/go.mod h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MFySA2lo=
-github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
-github.com/minio/sio v0.3.0/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
-github.com/montanaflynn/stats v0.5.0/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
-github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM=
-github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
-github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/ncw/directio v1.0.4/go.mod h1:CKGdcN7StAaqjT7Qack3lAXeX4pjnyc46YeqZH1yWVY=
-github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
-github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI=
-github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8=
-github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1/go.mod h1:eD5JxqMiuNYyFNmyY9rkJ/slN8y59oEu4Ei7F8OoKWQ=
-github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
-github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
-github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
-github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8=
-github.com/pingcap/badger v1.5.1-0.20210831093107-2f6cb8008145/go.mod h1:LyrqUOHZrUDf9oGi1yoz1+qw9ckSIhQb5eMa1acOLNQ=
-github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
-github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc=
-github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc=
-github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc=
-github.com/pingcap/errcode v0.3.0/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM=
-github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pingcap/errors v0.11.5-0.20200917111840-a15ef68f753d/go.mod h1:g4vx//d6VakjJ0mk7iLBlKA8LFavV/sAVINT/1PFxeQ=
-github.com/pingcap/errors v0.11.5-0.20201029093017-5a7df2af2ac7/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
-github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
-github.com/pingcap/errors v0.11.5-0.20210425183316-da1aaba5fb63/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg=
-github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg=
-github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI=
-github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce/go.mod h1:w4PEZ5y16LeofeeGwdgZB4ddv9bLyDuIX+ljstgKZyk=
-github.com/pingcap/failpoint v0.0.0-20210316064728-7acb0f0a3dfd/go.mod h1:IVF+ijPSMZVtx2oIqxAg7ur6EyixtTYfOHwpfmlhqI4=
-github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059/go.mod h1:fMRU1BA1y+r89AxUoaAar4JjrhUkVDt0o0Np6V8XbDQ=
-github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
-github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w=
-github.com/pingcap/kvproto v0.0.0-20200411081810-b85805c9476c/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
-github.com/pingcap/kvproto v0.0.0-20210219064844-c1844a4775d6/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
-github.com/pingcap/kvproto v0.0.0-20210805052247-76981389e818/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
-github.com/pingcap/kvproto v0.0.0-20210806074406-317f69fb54b4/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
-github.com/pingcap/kvproto v0.0.0-20210819164333-bd5706b9d9f2/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
-github.com/pingcap/kvproto v0.0.0-20211109071446-a8b4d34474bc/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
-github.com/pingcap/kvproto v0.0.0-20211122024046-03abd340988f/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
-github.com/pingcap/kvproto v0.0.0-20211207042851-78a55fb8e69c/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
-github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
-github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
-github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
-github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
-github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM=
-github.com/pingcap/log v0.0.0-20210906054005-afc726e70354 h1:SvWCbCPh1YeHd9yQLksvJYAgft6wLTY1aNG81tpyscQ=
-github.com/pingcap/log v0.0.0-20210906054005-afc726e70354/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
-github.com/pingcap/parser v0.0.0-20210525032559-c37778aff307/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw=
-github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI=
-github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3/go.mod h1:tckvA041UWP+NqYzrJ3fMgC/Hw9wnmQ/tUkp/JaHly8=
-github.com/pingcap/sysutil v0.0.0-20210730114356-fcd8a63f68c5/go.mod h1:XsOaV712rUk63aOEKYP9PhXTIE3FMNHmC2r1wX5wElY=
-github.com/pingcap/sysutil v0.0.0-20211208032423-041a72e5860d/go.mod h1:7j18ezaWTao2LHOyMlsc2Dg1vW+mDY9dEbPzVyOlaeM=
-github.com/pingcap/tidb-dashboard v0.0.0-20210312062513-eef5d6404638/go.mod h1:OzFN8H0EDMMqeulPhPMw2i2JaiZWOKFQ7zdRPhENNgo=
-github.com/pingcap/tidb-dashboard v0.0.0-20210716172320-2226872e3296/go.mod h1:OCXbZTBTIMRcIt0jFsuCakZP+goYRv6IjawKbwLS2TQ=
-github.com/pingcap/tidb-dashboard v0.0.0-20211008050453-a25c25809529/go.mod h1:OCXbZTBTIMRcIt0jFsuCakZP+goYRv6IjawKbwLS2TQ=
-github.com/pingcap/tidb-dashboard v0.0.0-20211107164327-80363dfbe884/go.mod h1:OCXbZTBTIMRcIt0jFsuCakZP+goYRv6IjawKbwLS2TQ=
-github.com/pingcap/tidb-tools v5.0.3+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM=
-github.com/pingcap/tidb-tools v5.2.2-0.20211019062242-37a8bef2fa17+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM=
-github.com/pingcap/tipb v0.0.0-20210802080519-94b831c6db55/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs=
-github.com/pingcap/tipb v0.0.0-20220107024056-3b91949a18a7/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs=
-github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
-github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
-github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shirou/gopsutil v2.19.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
-github.com/shirou/gopsutil v3.21.2+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
-github.com/shirou/gopsutil v3.21.3+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
-github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
-github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
-github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:gxQT6pBGRuIGunNf/+tSOB5OHvguWi8Tbt82WOkf35E=
-github.com/swaggo/gin-swagger v1.2.0/go.mod h1:qlH2+W7zXGZkczuL+r2nEBR2JTT+/lX05Nn6vPhc7OI=
-github.com/swaggo/http-swagger v0.0.0-20200308142732-58ac5e232fba/go.mod h1:O1lAbCgAAX/KZ80LM/OXwtWFI/5TvZlwxSg8Cq08PV0=
-github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y=
-github.com/swaggo/swag v1.6.3/go.mod h1:wcc83tB4Mb2aNiL/HP4MFeQdpHUrca+Rp/DRNgWAUio=
-github.com/swaggo/swag v1.6.6-0.20200529100950-7c765ddd0476/go.mod h1:xDhTyuFIujYiN3DKWC/H/83xcfHp+UE/IzWWampG7Zc=
-github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
-github.com/thoas/go-funk v0.7.0/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q=
-github.com/thoas/go-funk v0.8.0/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q=
-github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU=
-github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
-github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210926100628-3cc2459779ca/go.mod h1:KwtZXt0JD+bP9bWW2ka0ir3Wp3oTEfZUTh22bs2sI4o=
-github.com/tikv/client-go/v2 v2.0.0-rc.0.20211229051614-62d6b4a2e8f7/go.mod h1:wRuh+W35daKTiYBld0oBlT6PSkzEVr+pB/vChzJZk+8=
-github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d/go.mod h1:Jw9KG11C/23Rr7DW4XWQ7H5xOgGZo6DFL1OKAF4+Igw=
-github.com/tikv/pd v1.1.0-beta.0.20210818082359-acba1da0018d/go.mod h1:rammPjeZgpvfrQRPkijcx8tlxF1XM5+m6kRXrkDzCAA=
-github.com/tikv/pd v1.1.0-beta.0.20211029083450-e65f0c55b6ae/go.mod h1:varH0IE0jJ9E9WN2Ei/N6pajMlPkcXdDEf7f5mmsUVQ=
-github.com/tikv/pd v1.1.0-beta.0.20211118054146-02848d2660ee/go.mod h1:lRbwxBAhnTQR5vqbTzeI/Bj62bD2OvYYuFezo2vrmeI=
-github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek=
-github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/twmb/murmur3 v1.1.3/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
-github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0=
-github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
-github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI=
-github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
-github.com/unrolled/render v1.0.1/go.mod h1:gN9T0NhL4Bfbwu8ann7Ry/TGHYfosul+J0obPf6NBdM=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
-github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
-github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
-github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
-github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
-github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
-github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
-github.com/vmihailenco/msgpack/v5 v5.0.0-beta.1/go.mod h1:xlngVLeyQ/Qi05oQxhQ+oTuqa03RjMwMfk/7/TCs+QI=
-github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
-github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f/go.mod h1:8sdOQnirw1PrcnTJYkmW1iOHtUmblMmGdUOHyWYycLI=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xitongsys/parquet-go v1.5.1/go.mod h1:xUxwM8ELydxh4edHGegYq1pA8NnMKDx0K/GyB0o2bww=
-github.com/xitongsys/parquet-go v1.5.5-0.20201110004701-b09c49d6d457/go.mod h1:pheqtXeHQFzxJk45lRQ0UIGIivKnLXvialZSFWs81A8=
-github.com/xitongsys/parquet-go-source v0.0.0-20190524061010-2b72cbee77d5/go.mod h1:xxCx7Wpym/3QCo6JhujJX51dzSXrwmb0oH6FQb39SEA=
-github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0/go.mod h1:HYhIKsdns7xz80OgkbgJYrtQY7FjHWHKH6cvN7+czGE=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
-github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE=
-github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
-github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
-github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20210512015243-d19fbe541bf9/go.mod h1:q+i20RPAmay+xq8LJ3VMOhXCNk4YCk3V7QP91meFavw=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
-go.uber.org/dig v1.8.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw=
-go.uber.org/fx v1.10.0/go.mod h1:vLRicqpG/qQEzno4SYU86iCwfT95EZza+Eba0ItuxqY=
-go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
-go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec=
-go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
-go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
-go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
-go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
-go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
-golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
-golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524210228-3d17549cdc6b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191030062658-86caa796c7ab/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191114200427-caa0b0f7d508/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200527183253-8e7acdbce89d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
-gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
-gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
-gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
-google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
-google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
-google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
-google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
-google.golang.org/genproto v0.0.0-20210825212027-de86158e7fda/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
-gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
-gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
-gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
-gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
-gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
-gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
-gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
-gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gorm.io/driver/mysql v1.0.6/go.mod h1:KdrTanmfLPPyAOeYGyG+UpDys7/7eeWT1zCq+oekYnU=
-gorm.io/driver/sqlite v1.1.4/go.mod h1:mJCeTFr7+crvS+TRnWc5Z3UvwxUN1BGBLMrf5LA9DYw=
-gorm.io/gorm v1.20.7/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
-gorm.io/gorm v1.21.9/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY=
-k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8=
-modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254=
-modernc.org/lex v1.0.0/go.mod h1:G6rxMTy3cH2iA0iXL/HRRv4Znu8MK4higxph/lE7ypk=
-modernc.org/lexer v1.0.0/go.mod h1:F/Dld0YKYdZCLQ7bD0USbWL4YKCyTDRDHiDTOs0q0vk=
-modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
-modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
-modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
-modernc.org/parser v1.0.0/go.mod h1:H20AntYJ2cHHL6MHthJ8LZzXCdDCHMWt1KZXtIMjejA=
-modernc.org/parser v1.0.2/go.mod h1:TXNq3HABP3HMaqLK7brD1fLA/LfN0KS6JxZn71QdDqs=
-modernc.org/scanner v1.0.1/go.mod h1:OIzD2ZtjYk6yTuyqZr57FmifbM9fIH74SumloSsajuE=
-modernc.org/sortutil v1.0.0/go.mod h1:1QO0q8IlIlmjBIwm6t/7sof874+xCfZouyqZMLIAtxM=
-modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
-modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
-modernc.org/y v1.0.1/go.mod h1:Ho86I+LVHEI+LYXoUKlmOMAM1JTXOCfj8qi1T8PsClE=
-moul.io/zapgorm2 v1.1.0/go.mod h1:emRfKjNqSzVj5lcgasBdovIXY1jSOwFz2GQZn1Rddks=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
-sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k=
diff --git a/tests/realtikvtest/sessiontest/session_test.go b/tests/realtikvtest/sessiontest/session_test.go
index f726c1118b39f..4bc983d94b40f 100644
--- a/tests/realtikvtest/sessiontest/session_test.go
+++ b/tests/realtikvtest/sessiontest/session_test.go
@@ -2951,6 +2951,17 @@ func TestCast(t *testing.T) {
tk.MustQuery("select cast(0.5 as unsigned)")
tk.MustQuery("select cast(-0.5 as signed)")
tk.MustQuery("select hex(cast(0x10 as binary(2)))").Check(testkit.Rows("1000"))
+
+ // test for issue: https://github.com/pingcap/tidb/issues/34539
+ tk.MustQuery("select cast('0000-00-00' as TIME);").Check(testkit.Rows("00:00:00"))
+ tk.MustQuery("select cast('1234x' as TIME);").Check(testkit.Rows("00:12:34"))
+ tk.MustQuery("show warnings;").Check(testkit.RowsWithSep("|", "Warning|1292|Truncated incorrect time value: '1234x'"))
+ tk.MustQuery("select cast('a' as TIME);").Check(testkit.Rows(""))
+ tk.MustQuery("select cast('' as TIME);").Check(testkit.Rows(""))
+ tk.MustQuery("select cast('1234xxxxxxx' as TIME);").Check(testkit.Rows("00:12:34"))
+ tk.MustQuery("select cast('1234xxxxxxxx' as TIME);").Check(testkit.Rows(""))
+ tk.MustQuery("select cast('-1234xxxxxxx' as TIME);").Check(testkit.Rows("-00:12:34"))
+ tk.MustQuery("select cast('-1234xxxxxxxx' as TIME);").Check(testkit.Rows(""))
}
func TestTableInfoMeta(t *testing.T) {
diff --git a/tidb-server/main.go b/tidb-server/main.go
index 40dd3cacfd5a1..bc8795875f360 100644
--- a/tidb-server/main.go
+++ b/tidb-server/main.go
@@ -45,6 +45,7 @@ import (
"github.com/pingcap/tidb/privilege/privileges"
"github.com/pingcap/tidb/server"
"github.com/pingcap/tidb/session"
+ "github.com/pingcap/tidb/session/txninfo"
"github.com/pingcap/tidb/sessionctx/binloginfo"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
@@ -558,6 +559,8 @@ func setGlobalVars() {
cfg.Instance.CheckMb4ValueInUTF8.Store(cfg.CheckMb4ValueInUTF8.Load())
case "enable-collect-execution-info":
cfg.Instance.EnableCollectExecutionInfo = cfg.EnableCollectExecutionInfo
+ case "max-server-connections":
+ cfg.Instance.MaxConnections = cfg.MaxServerConnections
}
case "log":
switch oldName {
@@ -701,6 +704,8 @@ func setGlobalVars() {
tikv.SetStoreLivenessTimeout(t)
parsertypes.TiDBStrictIntegerDisplayWidth = cfg.DeprecateIntegerDisplayWidth
deadlockhistory.GlobalDeadlockHistory.Resize(cfg.PessimisticTxn.DeadlockHistoryCapacity)
+ txninfo.Recorder.ResizeSummaries(cfg.TrxSummary.TransactionSummaryCapacity)
+ txninfo.Recorder.SetMinDuration(time.Duration(cfg.TrxSummary.TransactionIDDigestMinDuration) * time.Millisecond)
}
func setupLog() {
diff --git a/tools/check/ut.go b/tools/check/ut.go
index 920afbb9cb257..84b00d34d9e60 100644
--- a/tools/check/ut.go
+++ b/tools/check/ut.go
@@ -720,6 +720,7 @@ func (n *numa) runTestCase(pkg string, fn string) testResult {
start = time.Now()
err = cmd.Run()
if err != nil {
+ //lint:ignore S1020
if _, ok := err.(*exec.ExitError); ok {
// Retry 3 times to get rid of the weird error:
switch err.Error() {
@@ -875,6 +876,7 @@ func buildTestBinaryMulti(pkgs []string) error {
func testBinaryExist(pkg string) (bool, error) {
_, err := os.Stat(testFileFullPath(pkg))
if err != nil {
+ //lint:ignore S1020
if _, ok := err.(*os.PathError); ok {
return false, nil
}
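
A note on the //lint:ignore S1020 directives added above: staticcheck's S1020 check ("omit redundant nil check in type assertion") flags an outer nil check wrapped around a comma-ok type assertion, because asserting on a nil interface never panics and simply reports false. A minimal standalone sketch of that behavior (illustrative only, not part of the patch):

package main

import (
	"fmt"
	"os"
)

func main() {
	var err error // nil interface value

	// A comma-ok type assertion on a nil interface does not panic; it just
	// yields ok == false, which is why staticcheck considers a surrounding
	// `if err != nil` check redundant.
	if _, ok := err.(*os.PathError); ok {
		fmt.Println("unreachable for a nil error")
	} else {
		fmt.Println("assertion on a nil interface is simply false")
	}
}
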
diff --git a/types/convert.go b/types/convert.go
index 9433f26dbefc8..a6e33d5b2c51b 100644
--- a/types/convert.go
+++ b/types/convert.go
@@ -327,7 +327,7 @@ func StrToDuration(sc *stmtctx.StatementContext, str string, fsp int) (d Duratio
}
}
- d, err = ParseDuration(sc, str, fsp)
+ d, _, err = ParseDuration(sc, str, fsp)
if ErrTruncatedWrongVal.Equal(err) {
err = sc.HandleTruncate(err)
}
diff --git a/types/convert_test.go b/types/convert_test.go
index aed9980635b72..53ec0c93b1e55 100644
--- a/types/convert_test.go
+++ b/types/convert_test.go
@@ -352,7 +352,7 @@ func TestConvertToString(t *testing.T) {
require.NoError(t, err)
testToString(t, t1, "2011-11-10 11:11:11.999999")
- td, err := ParseDuration(nil, "11:11:11.999999", 6)
+ td, _, err := ParseDuration(nil, "11:11:11.999999", 6)
require.NoError(t, err)
testToString(t, td, "11:11:11.999999")
@@ -819,7 +819,9 @@ func TestConvert(t *testing.T) {
signedDeny(t, mysql.TypeDate, "2012-08-x", "0000-00-00")
signedDeny(t, mysql.TypeDatetime, "2012-08-x", "0000-00-00 00:00:00")
signedDeny(t, mysql.TypeTimestamp, "2012-08-x", "0000-00-00 00:00:00")
- signedDeny(t, mysql.TypeDuration, "2012-08-x", "00:00:00")
+ signedDeny(t, mysql.TypeDuration, "2012-08-x", "00:20:12")
+ signedDeny(t, mysql.TypeDuration, "0000-00-00", "00:00:00")
+ signedDeny(t, mysql.TypeDuration, "1234abc", "00:12:34")
// string from string
signedAccept(t, mysql.TypeString, "abc", "abc")
diff --git a/types/datum.go b/types/datum.go
index a450b51a8a25c..1ad86c770a01b 100644
--- a/types/datum.go
+++ b/types/datum.go
@@ -749,7 +749,7 @@ func (d *Datum) compareString(sc *stmtctx.StatementContext, s string, comparer c
dt, err := ParseDatetime(sc, s)
return d.GetMysqlTime().Compare(dt), errors.Trace(err)
case KindMysqlDuration:
- dur, err := ParseDuration(sc, s, MaxFsp)
+ dur, _, err := ParseDuration(sc, s, MaxFsp)
return d.GetMysqlDuration().Compare(dur), errors.Trace(err)
case KindMysqlSet:
return comparer.Compare(d.GetMysqlSet().String(), s), nil
@@ -796,7 +796,7 @@ func (d *Datum) compareMysqlDuration(sc *stmtctx.StatementContext, dur Duration)
case KindMysqlDuration:
return d.GetMysqlDuration().Compare(dur), nil
case KindString, KindBytes:
- dDur, err := ParseDuration(sc, d.GetString(), MaxFsp)
+ dDur, _, err := ParseDuration(sc, d.GetString(), MaxFsp)
return dDur.Compare(dur), errors.Trace(err)
default:
return d.compareFloat64(sc, dur.Seconds())
@@ -1375,13 +1375,13 @@ func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie
if timeNum < -MaxDuration {
return ret, ErrWrongValue.GenWithStackByArgs(TimeStr, timeStr)
}
- t, err := ParseDuration(sc, timeStr, fsp)
+ t, _, err := ParseDuration(sc, timeStr, fsp)
ret.SetMysqlDuration(t)
if err != nil {
return ret, errors.Trace(err)
}
case KindString, KindBytes:
- t, err := ParseDuration(sc, d.GetString(), fsp)
+ t, _, err := ParseDuration(sc, d.GetString(), fsp)
ret.SetMysqlDuration(t)
if err != nil {
return ret, errors.Trace(err)
@@ -1392,7 +1392,7 @@ func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie
if err != nil {
return ret, errors.Trace(err)
}
- t, err := ParseDuration(sc, s, fsp)
+ t, _, err := ParseDuration(sc, s, fsp)
ret.SetMysqlDuration(t)
if err != nil {
return ret, errors.Trace(err)
diff --git a/types/datum_test.go b/types/datum_test.go
index 75627791c2342..698a69fd6b731 100644
--- a/types/datum_test.go
+++ b/types/datum_test.go
@@ -98,7 +98,7 @@ func TestToBool(t *testing.T) {
require.NoError(t, err)
testDatumToBool(t, t1, 1)
- td, err := ParseDuration(nil, "11:11:11.999999", 6)
+ td, _, err := ParseDuration(nil, "11:11:11.999999", 6)
require.NoError(t, err)
testDatumToBool(t, td, 1)
@@ -141,7 +141,7 @@ func TestToInt64(t *testing.T) {
require.NoError(t, err)
testDatumToInt64(t, t1, int64(20111110111112))
- td, err := ParseDuration(nil, "11:11:11.999999", 6)
+ td, _, err := ParseDuration(nil, "11:11:11.999999", 6)
require.NoError(t, err)
testDatumToInt64(t, td, int64(111112))
diff --git a/types/time.go b/types/time.go
index d63669d7fe409..14c647f945788 100644
--- a/types/time.go
+++ b/types/time.go
@@ -1530,7 +1530,7 @@ func (d Duration) Compare(o Duration) int {
// but parses str to Duration then compares.
func (d Duration) CompareString(sc *stmtctx.StatementContext, str string) (int, error) {
// use MaxFsp to parse the string
- o, err := ParseDuration(sc, str, MaxFsp)
+ o, _, err := ParseDuration(sc, str, MaxFsp)
if err != nil {
return 0, err
}
@@ -1680,18 +1680,19 @@ func matchFrac(str string, fsp int) (bool, int, string, error) {
return overflow, frac, rest, nil
}
-func matchDuration(str string, fsp int) (Duration, error) {
+func matchDuration(str string, fsp int) (Duration, bool, error) {
fsp, err := CheckFsp(fsp)
if err != nil {
- return ZeroDuration, errors.Trace(err)
+ return ZeroDuration, true, errors.Trace(err)
}
if len(str) == 0 {
- return ZeroDuration, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
+ return ZeroDuration, true, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
}
negative, rest := isNegativeDuration(str)
rest = parser.Space0(rest)
+ charsLen := len(rest)
hhmmss := [3]int{}
@@ -1703,13 +1704,13 @@ func matchDuration(str string, fsp int) (Duration, error) {
} else if hms, remain, err := matchHHMMSSCompact(rest); err == nil {
rest, hhmmss = remain, hms
} else {
- return ZeroDuration, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
+ return ZeroDuration, true, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
}
rest = parser.Space0(rest)
overflow, frac, rest, err := matchFrac(rest, fsp)
- if err != nil || len(rest) > 0 {
- return ZeroDuration, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
+ if err != nil || (len(rest) > 0 && charsLen >= 12) {
+ return ZeroDuration, true, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
}
if overflow {
@@ -1718,7 +1719,7 @@ func matchDuration(str string, fsp int) (Duration, error) {
}
if !checkHHMMSS(hhmmss) {
- return ZeroDuration, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
+ return ZeroDuration, true, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
}
if hhmmss[0] > TimeMaxHour {
@@ -1728,7 +1729,7 @@ func matchDuration(str string, fsp int) (Duration, error) {
} else {
t = MaxTime
}
- return Duration{t, fsp}, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
+ return Duration{t, fsp}, false, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
}
d := gotime.Duration(hhmmss[0]*3600+hhmmss[1]*60+hhmmss[2])*gotime.Second + gotime.Duration(frac)*gotime.Microsecond //nolint:durationcheck
@@ -1736,7 +1737,10 @@ func matchDuration(str string, fsp int) (Duration, error) {
d = -d
}
d, err = TruncateOverflowMySQLTime(d)
- return Duration{d, fsp}, errors.Trace(err)
+ if err == nil && len(rest) > 0 {
+ return Duration{d, fsp}, false, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
+ }
+ return Duration{d, fsp}, false, errors.Trace(err)
}
// canFallbackToDateTime return true
@@ -1776,29 +1780,30 @@ func canFallbackToDateTime(str string) bool {
}
// ParseDuration parses the time form a formatted string with a fractional seconds part,
-// returns the duration type Time value.
+// returns the duration type Time value and a bool indicating whether the result is NULL.
// See http://dev.mysql.com/doc/refman/5.7/en/fractional-seconds.html
-func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration, error) {
+func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration, bool, error) {
rest := strings.TrimSpace(str)
- d, err := matchDuration(rest, fsp)
+ d, isNull, err := matchDuration(rest, fsp)
if err == nil {
- return d, nil
+ return d, isNull, nil
}
if !canFallbackToDateTime(rest) {
- return d, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
+ return d, isNull, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
}
datetime, err := ParseDatetime(sc, rest)
if err != nil {
- return ZeroDuration, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
+ return ZeroDuration, true, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
}
d, err = datetime.ConvertToDuration()
if err != nil {
- return ZeroDuration, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
+ return ZeroDuration, true, ErrTruncatedWrongVal.GenWithStackByArgs("time", str)
}
- return d.RoundFrac(fsp, sc.TimeZone)
+ d, err = d.RoundFrac(fsp, sc.TimeZone)
+ return d, false, err
}
// TruncateOverflowMySQLTime truncates d when it overflows, and returns ErrTruncatedWrongVal.
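
ParseDuration now returns (Duration, bool, error): the bool reports whether the result should be treated as NULL, while ErrTruncatedWrongVal continues to signal a warning-level truncation. A minimal caller-side sketch (illustrative only, assuming the standard pingcap/tidb import paths), using the same inputs exercised by the tests in this patch:

package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb/sessionctx/stmtctx"
	"github.com/pingcap/tidb/types"
)

func main() {
	sc := &stmtctx.StatementContext{TimeZone: time.UTC}

	// "1234x": the leading digits still parse, the caller gets a truncation
	// error to report as a warning, and isNull stays false.
	d, isNull, err := types.ParseDuration(sc, "1234x", types.MaxFsp)
	fmt.Println(d.String(), isNull, types.ErrTruncatedWrongVal.Equal(err)) // 00:12:34.000000 false true

	// An input of 12 or more characters with leftover garbage is rejected as NULL.
	_, isNull, _ = types.ParseDuration(sc, "1234xxxxxxxx", types.MaxFsp)
	fmt.Println(isNull) // true
}
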
diff --git a/types/time_test.go b/types/time_test.go
index a1dfa57ea5402..13e9191ec4175 100644
--- a/types/time_test.go
+++ b/types/time_test.go
@@ -334,8 +334,9 @@ func TestTime(t *testing.T) {
}
for _, test := range table {
- duration, err := types.ParseDuration(sc, test.Input, types.MinFsp)
+ duration, isNull, err := types.ParseDuration(sc, test.Input, types.MinFsp)
require.NoError(t, err)
+ require.False(t, isNull)
require.Equal(t, test.Expect, duration.String())
}
@@ -349,11 +350,27 @@ func TestTime(t *testing.T) {
}
for _, test := range table {
- duration, err := types.ParseDuration(sc, test.Input, types.MaxFsp)
+ duration, _, err := types.ParseDuration(sc, test.Input, types.MaxFsp)
require.NoError(t, err)
require.Equal(t, test.Expect, duration.String())
}
+ table = []struct {
+ Input string
+ Expect string
+ }{
+ {"0x", "00:00:00.000000"},
+ {"1x", "00:00:01.000000"},
+ {"0000-00-00", "00:00:00.000000"},
+ }
+
+ for _, test := range table {
+ duration, isNull, err := types.ParseDuration(sc, test.Input, types.MaxFsp)
+ require.False(t, isNull)
+ require.True(t, types.ErrTruncatedWrongVal.Equal(err))
+ require.Equal(t, test.Expect, duration.String())
+ }
+
errTable := []string{
"2011-11-11",
"232 10",
@@ -361,11 +378,11 @@ func TestTime(t *testing.T) {
}
for _, test := range errTable {
- _, err := types.ParseDuration(sc, test, types.DefaultFsp)
+ _, _, err := types.ParseDuration(sc, test, types.DefaultFsp)
require.Error(t, err)
}
- duration, err := types.ParseDuration(sc, "4294967295 0:59:59", types.DefaultFsp)
+ duration, _, err := types.ParseDuration(sc, "4294967295 0:59:59", types.DefaultFsp)
require.Error(t, err)
require.Equal(t, "838:59:59", duration.String())
@@ -408,15 +425,15 @@ func TestDurationAdd(t *testing.T) {
{"00:00:00.099", 3, "00:00:00.001", 3, "00:00:00.100"},
}
for _, test := range table {
- duration, err := types.ParseDuration(nil, test.Input, test.Fsp)
+ duration, _, err := types.ParseDuration(nil, test.Input, test.Fsp)
require.NoError(t, err)
- ta, err := types.ParseDuration(nil, test.InputAdd, test.FspAdd)
+ ta, _, err := types.ParseDuration(nil, test.InputAdd, test.FspAdd)
require.NoError(t, err)
result, err := duration.Add(ta)
require.NoError(t, err)
require.Equal(t, test.Expect, result.String())
}
- duration, err := types.ParseDuration(nil, "00:00:00", 0)
+ duration, _, err := types.ParseDuration(nil, "00:00:00", 0)
require.NoError(t, err)
ta := new(types.Duration)
result, err := duration.Add(*ta)
@@ -424,7 +441,7 @@ func TestDurationAdd(t *testing.T) {
require.Equal(t, "00:00:00", result.String())
duration = types.Duration{Duration: math.MaxInt64, Fsp: 0}
- tatmp, err := types.ParseDuration(nil, "00:01:00", 0)
+ tatmp, _, err := types.ParseDuration(nil, "00:01:00", 0)
require.NoError(t, err)
_, err = duration.Add(tatmp)
require.Error(t, err)
@@ -444,9 +461,9 @@ func TestDurationSub(t *testing.T) {
{"00:00:00", 0, "00:00:00.1", 1, "-00:00:00.1"},
}
for _, test := range table {
- duration, err := types.ParseDuration(sc, test.Input, test.Fsp)
+ duration, _, err := types.ParseDuration(sc, test.Input, test.Fsp)
require.NoError(t, err)
- ta, err := types.ParseDuration(sc, test.InputAdd, test.FspAdd)
+ ta, _, err := types.ParseDuration(sc, test.InputAdd, test.FspAdd)
require.NoError(t, err)
result, err := duration.Sub(ta)
require.NoError(t, err)
@@ -475,7 +492,7 @@ func TestTimeFsp(t *testing.T) {
}
for _, test := range table {
- duration, err := types.ParseDuration(sc, test.Input, test.Fsp)
+ duration, _, err := types.ParseDuration(sc, test.Input, test.Fsp)
require.NoError(t, err)
require.Equal(t, test.Expect, duration.String())
}
@@ -488,7 +505,7 @@ func TestTimeFsp(t *testing.T) {
}
for _, test := range errTable {
- _, err := types.ParseDuration(sc, test.Input, test.Fsp)
+ _, _, err := types.ParseDuration(sc, test.Input, test.Fsp)
require.Error(t, err)
}
}
@@ -753,7 +770,7 @@ func TestToNumber(t *testing.T) {
}
for _, test := range tblDuration {
- v, err := types.ParseDuration(sc, test.Input, test.Fsp)
+ v, _, err := types.ParseDuration(sc, test.Input, test.Fsp)
require.NoError(t, err)
// now we can only changetypes.Duration's Fsp to check ToNumber with different Fsp
require.Equal(t, test.Expect, v.ToNumber().String())
@@ -897,7 +914,7 @@ func TestRoundFrac(t *testing.T) {
}
for _, tt := range tbl {
- v, err := types.ParseDuration(sc, tt.Input, types.MaxFsp)
+ v, _, err := types.ParseDuration(sc, tt.Input, types.MaxFsp)
require.NoError(t, err)
nv, err := v.RoundFrac(tt.Fsp, sc.TimeZone)
require.NoError(t, err)
@@ -959,7 +976,7 @@ func TestConvert(t *testing.T) {
// test different time zone.
sc.TimeZone = time.UTC
for _, tt := range tblDuration {
- v, err := types.ParseDuration(sc, tt.Input, tt.Fsp)
+ v, _, err := types.ParseDuration(sc, tt.Input, tt.Fsp)
require.NoError(t, err)
year, month, day := time.Now().In(sc.TimeZone).Date()
n := time.Date(year, month, day, 0, 0, 0, 0, sc.TimeZone)
@@ -1010,7 +1027,7 @@ func TestCompare(t *testing.T) {
}
for _, tt := range tbl {
- v1, err := types.ParseDuration(nil, tt.Arg1, types.MaxFsp)
+ v1, _, err := types.ParseDuration(nil, tt.Arg1, types.MaxFsp)
require.NoError(t, err)
ret, err := v1.CompareString(nil, tt.Arg2)
@@ -1034,7 +1051,7 @@ func TestDurationClock(t *testing.T) {
}
for _, tt := range tbl {
- d, err := types.ParseDuration(&stmtctx.StatementContext{TimeZone: time.UTC}, tt.Input, types.MaxFsp)
+ d, _, err := types.ParseDuration(&stmtctx.StatementContext{TimeZone: time.UTC}, tt.Input, types.MaxFsp)
require.NoError(t, err)
require.Equal(t, tt.Hour, d.Hour())
require.Equal(t, tt.Minute, d.Minute())
@@ -1151,7 +1168,7 @@ func TestTimeAdd(t *testing.T) {
for _, tt := range tbl {
v1, err := types.ParseTime(sc, tt.Arg1, mysql.TypeDatetime, types.MaxFsp)
require.NoError(t, err)
- dur, err := types.ParseDuration(sc, tt.Arg2, types.MaxFsp)
+ dur, _, err := types.ParseDuration(sc, tt.Arg2, types.MaxFsp)
require.NoError(t, err)
result, err := types.ParseTime(sc, tt.Ret, mysql.TypeDatetime, types.MaxFsp)
require.NoError(t, err)
@@ -1976,7 +1993,7 @@ func TestTimeSub(t *testing.T) {
require.NoError(t, err)
v2, err := types.ParseTime(sc, tt.Arg2, mysql.TypeDatetime, types.MaxFsp)
require.NoError(t, err)
- dur, err := types.ParseDuration(sc, tt.Ret, types.MaxFsp)
+ dur, _, err := types.ParseDuration(sc, tt.Ret, types.MaxFsp)
require.NoError(t, err)
rec := v1.Sub(sc, &v2)
require.Equal(t, dur, rec)
@@ -2209,7 +2226,7 @@ func BenchmarkTimeAdd(b *testing.B) {
TimeZone: time.UTC,
}
arg1, _ := types.ParseTime(sc, "2017-01-18", mysql.TypeDatetime, types.MaxFsp)
- arg2, _ := types.ParseDuration(sc, "12:30:59", types.MaxFsp)
+ arg2, _, _ := types.ParseDuration(sc, "12:30:59", types.MaxFsp)
for i := 0; i < b.N; i++ {
_, err := arg1.Add(sc, arg2)
if err != nil {
diff --git a/util/chunk/mutrow_test.go b/util/chunk/mutrow_test.go
index 1293240013a3d..532206cf42782 100644
--- a/util/chunk/mutrow_test.go
+++ b/util/chunk/mutrow_test.go
@@ -80,7 +80,7 @@ func TestMutRow(t *testing.T) {
retTypes := []*types.FieldType{types.NewFieldType(mysql.TypeDuration)}
chk := New(retTypes, 1, 1)
- dur, err := types.ParseDuration(sc, "01:23:45", 0)
+ dur, _, err := types.ParseDuration(sc, "01:23:45", 0)
require.NoError(t, err)
chk.AppendDuration(0, dur)
mutRow = MutRowFromTypes(retTypes)
diff --git a/util/codec/codec_test.go b/util/codec/codec_test.go
index 33af7c59a9840..a1a5461dcd5fa 100644
--- a/util/codec/codec_test.go
+++ b/util/codec/codec_test.go
@@ -527,7 +527,7 @@ func parseTime(t *testing.T, s string) types.Time {
}
func parseDuration(t *testing.T, s string) types.Duration {
- m, err := types.ParseDuration(nil, s, types.DefaultFsp)
+ m, _, err := types.ParseDuration(nil, s, types.DefaultFsp)
require.NoError(t, err)
return m
}
diff --git a/util/dbutil/common.go b/util/dbutil/common.go
index ec0ffd785349e..674314af3492f 100644
--- a/util/dbutil/common.go
+++ b/util/dbutil/common.go
@@ -319,7 +319,7 @@ func GetTimeZoneOffset(ctx context.Context, db QueryExecutor) (time.Duration, er
}
hour, minute, second := t.Clock()
- // nolint:durationcheck
+ //nolint:durationcheck
return time.Duration(hour*3600+minute*60+second) * time.Second * factor, nil
}
diff --git a/util/hack/hack.go b/util/hack/hack.go
index f4a2f79f894ab..8e586485ee9bd 100644
--- a/util/hack/hack.go
+++ b/util/hack/hack.go
@@ -76,3 +76,8 @@ const (
// DefBucketMemoryUsageForSetInt64 = bucketSize*(1+unsafe.Sizeof(int64) + unsafe.Sizeof(struct{}))+2*ptrSize
DefBucketMemoryUsageForSetInt64 = (8*(1+8+0) + 16) / 2 * 3
)
+
+// EstimateBucketMemoryUsage returns the estimated memory usage of a bucket in a map.
+func EstimateBucketMemoryUsage[K comparable, V any]() uint64 {
+ return (8*(1+uint64(unsafe.Sizeof(*new(K))+unsafe.Sizeof(*new(V)))) + 16) / 2 * 3
+}
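
The new generic helper mirrors the arithmetic documented for DefBucketMemoryUsageForSetInt64 above; instantiated with int64 keys and empty-struct values it reproduces the same constant. A small self-contained check (the local copy of the formula is only for illustration):

package main

import (
	"fmt"
	"unsafe"
)

// Same formula as hack.EstimateBucketMemoryUsage, copied here so the example runs standalone.
func estimateBucketMemoryUsage[K comparable, V any]() uint64 {
	return (8*(1+uint64(unsafe.Sizeof(*new(K))+unsafe.Sizeof(*new(V)))) + 16) / 2 * 3
}

func main() {
	// For K=int64 (8 bytes) and V=struct{} (0 bytes):
	// (8*(1+8+0) + 16) / 2 * 3 = 132, matching DefBucketMemoryUsageForSetInt64.
	fmt.Println(estimateBucketMemoryUsage[int64, struct{}]()) // 132
}
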
diff --git a/util/importer/db.go b/util/importer/db.go
index e483fa0a86dc7..17956b67f3c07 100644
--- a/util/importer/db.go
+++ b/util/importer/db.go
@@ -78,7 +78,7 @@ func genRowDatas(table *table, count int) ([]string, error) {
}
func genRowData(table *table) (string, error) {
- var values []byte // nolint: prealloc
+ var values []byte //nolint: prealloc
for _, column := range table.columns {
data, err := genColumnData(table, column)
if err != nil {
diff --git a/util/processinfo.go b/util/processinfo.go
index 30b95aec100ae..98d8733b8c111 100644
--- a/util/processinfo.go
+++ b/util/processinfo.go
@@ -153,7 +153,8 @@ var mapServerStatus2Str = map[uint16]string{
// Param state is a bit-field. (e.g. 0x0003 = "in transaction; autocommit").
func serverStatus2Str(state uint16) string {
// l collect server status strings.
- var l []string // nolint: prealloc
+ //nolint: prealloc
+ var l []string
// check each defined server status, if match, append to collector.
for _, s := range ascServerStatus {
if state&s == 0 {
diff --git a/util/ranger/detacher.go b/util/ranger/detacher.go
index 037a5402f048b..cbde3ddef60c6 100644
--- a/util/ranger/detacher.go
+++ b/util/ranger/detacher.go
@@ -32,7 +32,7 @@ import (
// detachColumnCNFConditions detaches the condition for calculating range from the other conditions.
// Please make sure that the top level is CNF form.
func detachColumnCNFConditions(sctx sessionctx.Context, conditions []expression.Expression, checker *conditionChecker) ([]expression.Expression, []expression.Expression) {
- var accessConditions, filterConditions []expression.Expression // nolint: prealloc
+ var accessConditions, filterConditions []expression.Expression //nolint: prealloc
for _, cond := range conditions {
if sf, ok := cond.(*expression.ScalarFunction); ok && sf.FuncName.L == ast.LogicOr {
dnfItems := expression.FlattenDNFConditions(sf)
diff --git a/util/rowcodec/rowcodec_test.go b/util/rowcodec/rowcodec_test.go
index 51c965f095cd6..8772a50eb3d5d 100644
--- a/util/rowcodec/rowcodec_test.go
+++ b/util/rowcodec/rowcodec_test.go
@@ -890,7 +890,7 @@ var (
}
}
getDuration = func(value string) types.Duration {
- dur, _ := types.ParseDuration(nil, value, 0)
+ dur, _, _ := types.ParseDuration(nil, value, 0)
return dur
}
getOldDatumByte = func(d types.Datum) []byte {
diff --git a/util/set/BUILD.bazel b/util/set/BUILD.bazel
index 790799294974f..0f733ad14ee41 100644
--- a/util/set/BUILD.bazel
+++ b/util/set/BUILD.bazel
@@ -5,6 +5,7 @@ go_library(
srcs = [
"float64_set.go",
"int_set.go",
+ "mem_aware_map.go",
"set_with_memory_usage.go",
"string_set.go",
],
@@ -19,6 +20,7 @@ go_test(
"float64_set_test.go",
"int_set_test.go",
"main_test.go",
+ "mem_aware_map_test.go",
"set_with_memory_usage_test.go",
"string_set_test.go",
],
diff --git a/util/set/mem_aware_map.go b/util/set/mem_aware_map.go
new file mode 100644
index 0000000000000..da1cb227af306
--- /dev/null
+++ b/util/set/mem_aware_map.go
@@ -0,0 +1,72 @@
+// Copyright 2022 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package set
+
+import (
+ "math"
+
+ "github.com/pingcap/tidb/util/hack"
+)
+
+// MemAwareMap is a map which is aware of its memory usage. It's adapted from SetWithMemoryUsage.
+// It doesn't support delete.
+// The estimated memory usage is usually smaller than the real usage.
+// According to experiments with SetWithMemoryUsage, 2/3 * estimated usage <= real usage <= estimated usage.
+type MemAwareMap[K comparable, V any] struct {
+ M map[K]V // exported so that callers can access it directly, e.g. to iterate over it with a for-range loop
+ bInMap int64
+ bucketMemoryUsage uint64
+}
+
+// EstimateMapSize returns the estimated size of the map. It doesn't include the dynamic part, e.g. objects pointed to by pointers in the map.
+// len(map) <= load_factor * 2^bInMap. bInMap = ceil(log2(len(map)/load_factor)).
+// memory = bucketSize * 2^bInMap
+func EstimateMapSize(length int, bucketSize uint64) uint64 {
+ if length == 0 {
+ return 0
+ }
+ bInMap := uint64(math.Ceil(math.Log2(float64(length) * hack.LoadFactorDen / hack.LoadFactorNum)))
+ return bucketSize * uint64(1<<bInMap)
+}
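
To make the bucket-count arithmetic in EstimateMapSize concrete, here is a standalone sketch of the same computation. It assumes hack.LoadFactorNum/hack.LoadFactorDen encode the Go runtime's map load factor of 6.5 (i.e. 13/2); those constant values are not shown in this diff.

package main

import (
	"fmt"
	"math"
)

// Standalone copy of the EstimateMapSize arithmetic, with the assumed load factor 13/2 inlined.
func estimateMapSize(length int, bucketSize uint64) uint64 {
	if length == 0 {
		return 0
	}
	// buckets needed is about length / 6.5; bInMap = ceil(log2(buckets needed)).
	bInMap := uint64(math.Ceil(math.Log2(float64(length) * 2 / 13)))
	return bucketSize * uint64(1<<bInMap)
}

func main() {
	// 1000 entries need about 1000/6.5 = 154 buckets, so bInMap = 8 and the map
	// allocates 2^8 = 256 buckets: estimated memory = bucketSize * 256.
	fmt.Println(estimateMapSize(1000, 132)) // 132 * 256 = 33792
}
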