diff --git a/br/pkg/lightning/mydump/loader.go b/br/pkg/lightning/mydump/loader.go index 14f9071e916c7..09ef6229c45bc 100644 --- a/br/pkg/lightning/mydump/loader.go +++ b/br/pkg/lightning/mydump/loader.go @@ -357,73 +357,77 @@ func (s *mdLoaderSetup) route() error { type dbInfo struct { fileMeta SourceFileMeta - count int + count int // file count (db/table/view schema files and table data files) } - knownDBNames := make(map[string]dbInfo) + knownDBNames := make(map[string]*dbInfo) for _, info := range s.dbSchemas { - knownDBNames[info.TableName.Schema] = dbInfo{ + knownDBNames[info.TableName.Schema] = &dbInfo{ fileMeta: info.FileMeta, count: 1, } } for _, info := range s.tableSchemas { - dbInfo := knownDBNames[info.TableName.Schema] - dbInfo.count++ - knownDBNames[info.TableName.Schema] = dbInfo + knownDBNames[info.TableName.Schema].count++ } for _, info := range s.viewSchemas { - dbInfo := knownDBNames[info.TableName.Schema] - dbInfo.count++ + knownDBNames[info.TableName.Schema].count++ + } + for _, info := range s.tableDatas { + knownDBNames[info.TableName.Schema].count++ } - run := func(arr []FileInfo) error { + runRoute := func(arr []FileInfo) error { for i, info := range arr { - dbName, tableName, err := r.Route(info.TableName.Schema, info.TableName.Name) + rawDB, rawTable := info.TableName.Schema, info.TableName.Name + targetDB, targetTable, err := r.Route(rawDB, rawTable) if err != nil { return errors.Trace(err) } - if dbName != info.TableName.Schema { - oldInfo := knownDBNames[info.TableName.Schema] + if targetDB != rawDB { + oldInfo := knownDBNames[rawDB] oldInfo.count-- - knownDBNames[info.TableName.Schema] = oldInfo - - newInfo, ok := knownDBNames[dbName] - newInfo.count++ + newInfo, ok := knownDBNames[targetDB] if !ok { - newInfo.fileMeta = oldInfo.fileMeta + newInfo = &dbInfo{fileMeta: oldInfo.fileMeta, count: 1} s.dbSchemas = append(s.dbSchemas, FileInfo{ - TableName: filter.Table{Schema: dbName}, + TableName: filter.Table{Schema: targetDB}, FileMeta: oldInfo.fileMeta, }) } - knownDBNames[dbName] = newInfo + newInfo.count++ + knownDBNames[targetDB] = newInfo } - arr[i].TableName = filter.Table{Schema: dbName, Name: tableName} + arr[i].TableName = filter.Table{Schema: targetDB, Name: targetTable} } return nil } - if err := run(s.tableSchemas); err != nil { + // route db/table/view schema files and table data files + if err := runRoute(s.dbSchemas); err != nil { return errors.Trace(err) } - if err := run(s.viewSchemas); err != nil { + if err := runRoute(s.tableSchemas); err != nil { return errors.Trace(err) } - if err := run(s.tableDatas); err != nil { + if err := runRoute(s.viewSchemas); err != nil { return errors.Trace(err) } - - // remove all schemas which has been entirely routed away + if err := runRoute(s.tableDatas); err != nil { + return errors.Trace(err) + } + // remove all schemas that have been entirely routed away (only schemas with file count > 0 remain) // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating remainingSchemas := s.dbSchemas[:0] for _, info := range s.dbSchemas { - if knownDBNames[info.TableName.Schema].count > 0 { + if dbInfo := knownDBNames[info.TableName.Schema]; dbInfo.count > 0 { remainingSchemas = append(remainingSchemas, info) + } else if dbInfo.count < 0 { + // this should not happen if there are no bugs in the code + return common.ErrTableRoute.GenWithStack("something wrong happened when routing %s", info.TableName.String()) } } s.dbSchemas = remainingSchemas - return nil } diff --git a/br/pkg/lightning/mydump/loader_test.go b/br/pkg/lightning/mydump/loader_test.go index 
23561dd6edae2..0f7079a1f95df 100644 --- a/br/pkg/lightning/mydump/loader_test.go +++ b/br/pkg/lightning/mydump/loader_test.go @@ -345,220 +345,318 @@ func TestTablesWithDots(t *testing.T) { } func TestRouter(t *testing.T) { - s := newTestMydumpLoaderSuite(t) - s.cfg.Routes = []*router.TableRule{ - { - SchemaPattern: "a*", - TablePattern: "t*", - TargetSchema: "b", - TargetTable: "u", - }, - { - SchemaPattern: "c*", - TargetSchema: "c", - }, - { - SchemaPattern: "e*", - TablePattern: "f*", - TargetSchema: "v", - TargetTable: "vv", - }, - { - SchemaPattern: "~.*regexpr[1-9]+", - TablePattern: "~.*regexprtable", - TargetSchema: "downstream_db", - TargetTable: "downstream_table", - }, - { - SchemaPattern: "~.bdb.*", - TargetSchema: "db", - }, - } - - /* - Path/ - a0-schema-create.sql - a0.t0-schema.sql - a0.t0.1.sql - a0.t1-schema.sql - a0.t1.1.sql - a1-schema-create.sql - a1.s1-schema.sql - a1.s1.1.schema.sql - a1.t2-schema.sql - a1.t2.1.sql - a1.v1-schema.sql - a1.v1-schema-view.sql - c0-schema-create.sql - c0.t3-schema.sql - c0.t3.1.sql - d0-schema-create.sql - e0-schema-create.sql - e0.f0-schema.sql - e0.f0-schema-view.sql - test_regexpr1-schema-create.sql - test_regexpr1.test_regexprtable-schema.sql - test_regexpr1.test_regexprtable.1.sql - zbdb-schema-create.sql - zbdb.table-schema.sql - zbdb.table.1.sql - */ - - s.touch(t, "a0-schema-create.sql") - s.touch(t, "a0.t0-schema.sql") - s.touch(t, "a0.t0.1.sql") - s.touch(t, "a0.t1-schema.sql") - s.touch(t, "a0.t1.1.sql") - - s.touch(t, "a1-schema-create.sql") - s.touch(t, "a1.s1-schema.sql") - s.touch(t, "a1.s1.1.sql") - s.touch(t, "a1.t2-schema.sql") - s.touch(t, "a1.t2.1.sql") - s.touch(t, "a1.v1-schema.sql") - s.touch(t, "a1.v1-schema-view.sql") - - s.touch(t, "c0-schema-create.sql") - s.touch(t, "c0.t3-schema.sql") - s.touch(t, "c0.t3.1.sql") - - s.touch(t, "d0-schema-create.sql") - - s.touch(t, "e0-schema-create.sql") - s.touch(t, "e0.f0-schema.sql") - s.touch(t, "e0.f0-schema-view.sql") - - s.touch(t, "test_regexpr1-schema-create.sql") - s.touch(t, "test_regexpr1.test_regexprtable-schema.sql") - s.touch(t, "test_regexpr1.test_regexprtable.1.sql") - - s.touch(t, "zbdb-schema-create.sql") - s.touch(t, "zbdb.table-schema.sql") - s.touch(t, "zbdb.table.1.sql") + // route both db and table, but some tables do not hit the rules + { + s := newTestMydumpLoaderSuite(t) + s.cfg.Routes = []*router.TableRule{ + { + SchemaPattern: "a*", + TablePattern: "t*", + TargetSchema: "b", + TargetTable: "u", + }, + } - mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg) - require.NoError(t, err) - require.Equal(t, []*md.MDDatabaseMeta{ - { - Name: "a1", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "a1", Name: ""}, FileMeta: md.SourceFileMeta{Path: "a1-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, - Tables: []*md.MDTableMeta{ - { - DB: "a1", - Name: "s1", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "a1", Name: "s1"}, FileMeta: md.SourceFileMeta{Path: "a1.s1-schema.sql", Type: md.SourceTypeTableSchema}}, - DataFiles: []md.FileInfo{{TableName: filter.Table{Schema: "a1", Name: "s1"}, FileMeta: md.SourceFileMeta{Path: "a1.s1.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}}, - IndexRatio: 0.0, - IsRowOrdered: true, + s.touch(t, "a0-schema-create.sql") + s.touch(t, "a0.t0-schema.sql") + s.touch(t, "a0.t0.1.sql") + s.touch(t, "a0.t1-schema.sql") + s.touch(t, "a0.t1.1.sql") + + s.touch(t, "a1-schema-create.sql") + s.touch(t, "a1.s1-schema.sql") + s.touch(t, "a1.s1.1.sql") + s.touch(t, "a1.t2-schema.sql") + s.touch(t, "a1.t2.1.sql") + 
+ s.touch(t, "a1.v1-schema.sql") + s.touch(t, "a1.v1-schema-view.sql") + + mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg) + require.NoError(t, err) + dbs := mdl.GetDatabases() + // hit rules: a0.t0 -> b.u, a0.t1 -> b.u, a1.t2 -> b.u + // not hit: a1.s1, a1.v1 + expectedDBS := []*md.MDDatabaseMeta{ + { + Name: "a0", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "a0", Name: ""}, FileMeta: md.SourceFileMeta{Path: "a0-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + }, + { + Name: "a1", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "a1", Name: ""}, FileMeta: md.SourceFileMeta{Path: "a1-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + Tables: []*md.MDTableMeta{ + { + DB: "a1", + Name: "s1", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "a1", Name: "s1"}, FileMeta: md.SourceFileMeta{Path: "a1.s1-schema.sql", Type: md.SourceTypeTableSchema}}, + DataFiles: []md.FileInfo{{TableName: filter.Table{Schema: "a1", Name: "s1"}, FileMeta: md.SourceFileMeta{Path: "a1.s1.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}}, + IndexRatio: 0.0, + IsRowOrdered: true, + }, + { + DB: "a1", + Name: "v1", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "a1", Name: "v1"}, FileMeta: md.SourceFileMeta{Path: "a1.v1-schema.sql", Type: md.SourceTypeTableSchema}}, + DataFiles: []md.FileInfo{}, + IndexRatio: 0.0, + IsRowOrdered: true, + }, }, - { - DB: "a1", - Name: "v1", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "a1", Name: "v1"}, FileMeta: md.SourceFileMeta{Path: "a1.v1-schema.sql", Type: md.SourceTypeTableSchema}}, - DataFiles: []md.FileInfo{}, - IndexRatio: 0.0, - IsRowOrdered: true, + Views: []*md.MDTableMeta{ + { + DB: "a1", + Name: "v1", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "a1", Name: "v1"}, FileMeta: md.SourceFileMeta{Path: "a1.v1-schema-view.sql", Type: md.SourceTypeViewSchema}}, + IndexRatio: 0.0, + IsRowOrdered: true, + }, }, }, - Views: []*md.MDTableMeta{ - { - DB: "a1", - Name: "v1", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "a1", Name: "v1"}, FileMeta: md.SourceFileMeta{Path: "a1.v1-schema-view.sql", Type: md.SourceTypeViewSchema}}, - IndexRatio: 0.0, - IsRowOrdered: true, + { + Name: "b", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "b", Name: ""}, FileMeta: md.SourceFileMeta{Path: "a0-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + Tables: []*md.MDTableMeta{ + { + DB: "b", + Name: "u", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "b", Name: "u"}, FileMeta: md.SourceFileMeta{Path: "a0.t0-schema.sql", Type: md.SourceTypeTableSchema}}, + DataFiles: []md.FileInfo{ + {TableName: filter.Table{Schema: "b", Name: "u"}, FileMeta: md.SourceFileMeta{Path: "a0.t0.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}, + {TableName: filter.Table{Schema: "b", Name: "u"}, FileMeta: md.SourceFileMeta{Path: "a0.t1.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}, + {TableName: filter.Table{Schema: "b", Name: "u"}, FileMeta: md.SourceFileMeta{Path: "a1.t2.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}, + }, + IndexRatio: 0.0, + IsRowOrdered: true, + }, }, }, - }, - { - Name: "d0", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "d0", Name: ""}, FileMeta: md.SourceFileMeta{Path: "d0-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, - }, - { - Name: "b", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "b", Name: ""}, FileMeta: md.SourceFileMeta{Path: "a0-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, - Tables: []*md.MDTableMeta{ - { - DB: "b", - 
Name: "u", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "b", Name: "u"}, FileMeta: md.SourceFileMeta{Path: "a0.t0-schema.sql", Type: md.SourceTypeTableSchema}}, - DataFiles: []md.FileInfo{ - {TableName: filter.Table{Schema: "b", Name: "u"}, FileMeta: md.SourceFileMeta{Path: "a0.t0.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}, - {TableName: filter.Table{Schema: "b", Name: "u"}, FileMeta: md.SourceFileMeta{Path: "a0.t1.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}, - {TableName: filter.Table{Schema: "b", Name: "u"}, FileMeta: md.SourceFileMeta{Path: "a1.t2.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}, + } + require.Equal(t, expectedDBS, dbs) + } + + // only route the schema, but some dbs do not hit the rules + { + s := newTestMydumpLoaderSuite(t) + s.cfg.Routes = []*router.TableRule{ + { + SchemaPattern: "c*", + TargetSchema: "c", + }, + } + s.touch(t, "c0-schema-create.sql") + s.touch(t, "c0.t3-schema.sql") + s.touch(t, "c0.t3.1.sql") + + s.touch(t, "d0-schema-create.sql") + mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg) + require.NoError(t, err) + dbs := mdl.GetDatabases() + // hit rules: c0.t3 -> c.t3 + // not hit: d0 + expectedDBS := []*md.MDDatabaseMeta{ + { + Name: "d0", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "d0", Name: ""}, FileMeta: md.SourceFileMeta{Path: "d0-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + }, + { + Name: "c", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "c", Name: ""}, FileMeta: md.SourceFileMeta{Path: "c0-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + Tables: []*md.MDTableMeta{ + { + DB: "c", + Name: "t3", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "c", Name: "t3"}, FileMeta: md.SourceFileMeta{Path: "c0.t3-schema.sql", Type: md.SourceTypeTableSchema}}, + DataFiles: []md.FileInfo{{TableName: filter.Table{Schema: "c", Name: "t3"}, FileMeta: md.SourceFileMeta{Path: "c0.t3.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}}, + IndexRatio: 0.0, + IsRowOrdered: true, }, - IndexRatio: 0.0, - IsRowOrdered: true, }, }, - }, - { - Name: "c", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "c", Name: ""}, FileMeta: md.SourceFileMeta{Path: "c0-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, - Tables: []*md.MDTableMeta{ - { - DB: "c", - Name: "t3", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "c", Name: "t3"}, FileMeta: md.SourceFileMeta{Path: "c0.t3-schema.sql", Type: md.SourceTypeTableSchema}}, - DataFiles: []md.FileInfo{{TableName: filter.Table{Schema: "c", Name: "t3"}, FileMeta: md.SourceFileMeta{Path: "c0.t3.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}}, - IndexRatio: 0.0, - IsRowOrdered: true, + } + require.Equal(t, expectedDBS, dbs) + } + + // route schema and table, but there is no table data + { + s := newTestMydumpLoaderSuite(t) + s.cfg.Routes = []*router.TableRule{ + { + SchemaPattern: "e*", + TablePattern: "f*", + TargetSchema: "v", + TargetTable: "vv", + }, + } + s.touch(t, "e0-schema-create.sql") + s.touch(t, "e0.f0-schema.sql") + s.touch(t, "e0.f0-schema-view.sql") + + mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg) + require.NoError(t, err) + dbs := mdl.GetDatabases() + // hit rules: e0.f0 -> v.vv + expectedDBS := []*md.MDDatabaseMeta{ + { + Name: "e0", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "e0", Name: ""}, FileMeta: md.SourceFileMeta{Path: "e0-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + }, + { + Name: "v", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "v", Name: ""}, FileMeta: 
md.SourceFileMeta{Path: "e0-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + Tables: []*md.MDTableMeta{ + { + DB: "v", + Name: "vv", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "v", Name: "vv"}, FileMeta: md.SourceFileMeta{Path: "e0.f0-schema.sql", Type: md.SourceTypeTableSchema}}, + DataFiles: []md.FileInfo{}, + IndexRatio: 0.0, + IsRowOrdered: true, + }, + }, + Views: []*md.MDTableMeta{ + { + DB: "v", + Name: "vv", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "v", Name: "vv"}, FileMeta: md.SourceFileMeta{Path: "e0.f0-schema-view.sql", Type: md.SourceTypeViewSchema}}, + IndexRatio: 0.0, + IsRowOrdered: true, + }, }, }, - }, - { - Name: "v", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "v", Name: ""}, FileMeta: md.SourceFileMeta{Path: "e0-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, - Tables: []*md.MDTableMeta{ - { - DB: "v", - Name: "vv", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "v", Name: "vv"}, FileMeta: md.SourceFileMeta{Path: "e0.f0-schema.sql", Type: md.SourceTypeTableSchema}}, - DataFiles: []md.FileInfo{}, - IndexRatio: 0.0, - IsRowOrdered: true, + } + require.Equal(t, expectedDBS, dbs) + } + + // route by regex + { + s := newTestMydumpLoaderSuite(t) + s.cfg.Routes = []*router.TableRule{ + { + SchemaPattern: "~.*regexpr[1-9]+", + TablePattern: "~.*regexprtable", + TargetSchema: "downstream_db", + TargetTable: "downstream_table", + }, + { + SchemaPattern: "~.bdb.*", + TargetSchema: "db", + }, + } + + s.touch(t, "test_regexpr1-schema-create.sql") + s.touch(t, "test_regexpr1.test_regexprtable-schema.sql") + s.touch(t, "test_regexpr1.test_regexprtable.1.sql") + + s.touch(t, "zbdb-schema-create.sql") + s.touch(t, "zbdb.table-schema.sql") + s.touch(t, "zbdb.table.1.sql") + + mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg) + require.NoError(t, err) + dbs := mdl.GetDatabases() + // hit rules: test_regexpr1.test_regexprtable -> downstream_db.downstream_table, zbdb.table -> db.table + expectedDBS := []*md.MDDatabaseMeta{ + { + Name: "test_regexpr1", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "test_regexpr1", Name: ""}, FileMeta: md.SourceFileMeta{Path: "test_regexpr1-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + }, + { + Name: "db", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "db", Name: ""}, FileMeta: md.SourceFileMeta{Path: "zbdb-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + Tables: []*md.MDTableMeta{ + { + DB: "db", + Name: "table", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "db", Name: "table"}, FileMeta: md.SourceFileMeta{Path: "zbdb.table-schema.sql", Type: md.SourceTypeTableSchema}}, + DataFiles: []md.FileInfo{{TableName: filter.Table{Schema: "db", Name: "table"}, FileMeta: md.SourceFileMeta{Path: "zbdb.table.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}}, + IndexRatio: 0.0, + IsRowOrdered: true, + }, }, }, - Views: []*md.MDTableMeta{ - { - DB: "v", - Name: "vv", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "v", Name: "vv"}, FileMeta: md.SourceFileMeta{Path: "e0.f0-schema-view.sql", Type: md.SourceTypeViewSchema}}, - IndexRatio: 0.0, - IsRowOrdered: true, + { + Name: "downstream_db", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "downstream_db", Name: ""}, FileMeta: md.SourceFileMeta{Path: "test_regexpr1-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + Tables: []*md.MDTableMeta{ + { + DB: "downstream_db", + Name: "downstream_table", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: 
"downstream_db", Name: "downstream_table"}, FileMeta: md.SourceFileMeta{Path: "test_regexpr1.test_regexprtable-schema.sql", Type: md.SourceTypeTableSchema}}, + DataFiles: []md.FileInfo{{TableName: filter.Table{Schema: "downstream_db", Name: "downstream_table"}, FileMeta: md.SourceFileMeta{Path: "test_regexpr1.test_regexprtable.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}}, + IndexRatio: 0.0, + IsRowOrdered: true, + }, }, }, - }, + } + require.Equal(t, expectedDBS, dbs) + } - { - Name: "downstream_db", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "downstream_db", Name: ""}, FileMeta: md.SourceFileMeta{Path: "test_regexpr1-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, - Tables: []*md.MDTableMeta{ - { - DB: "downstream_db", - Name: "downstream_table", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "downstream_db", Name: "downstream_table"}, FileMeta: md.SourceFileMeta{Path: "test_regexpr1.test_regexprtable-schema.sql", Type: md.SourceTypeTableSchema}}, - DataFiles: []md.FileInfo{{TableName: filter.Table{Schema: "downstream_db", Name: "downstream_table"}, FileMeta: md.SourceFileMeta{Path: "test_regexpr1.test_regexprtable.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}}, - IndexRatio: 0.0, - IsRowOrdered: true, + // only route db and only route some tables + { + s := newTestMydumpLoaderSuite(t) + s.cfg.Routes = []*router.TableRule{ + // only route schema + { + SchemaPattern: "web", + TargetSchema: "web_test", + }, + // only route one table + { + SchemaPattern: "x", + TablePattern: "t1*", + TargetSchema: "x2", + TargetTable: "t", + }, + } + + s.touch(t, "web-schema-create.sql") + s.touch(t, "x-schema-create.sql") + s.touch(t, "x.t10-schema.sql") // hit rules, new name is x2.t + s.touch(t, "x.t20-schema.sql") // not hit rules, name is x.t20 + + mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg) + require.NoError(t, err) + dbs := mdl.GetDatabases() + // hit rules: web -> web_test, x.t10 -> x2.t + // not hit: x.t20 + expectedDBS := []*md.MDDatabaseMeta{ + { + Name: "x", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "x", Name: ""}, FileMeta: md.SourceFileMeta{Path: "x-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + Tables: []*md.MDTableMeta{ + { + DB: "x", + Name: "t20", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "x", Name: "t20"}, FileMeta: md.SourceFileMeta{Path: "x.t20-schema.sql", Type: md.SourceTypeTableSchema}}, + IndexRatio: 0.0, + IsRowOrdered: true, + DataFiles: []md.FileInfo{}, + }, }, }, - }, - { - Name: "db", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "db", Name: ""}, FileMeta: md.SourceFileMeta{Path: "zbdb-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, - Tables: []*md.MDTableMeta{ - { - DB: "db", - Name: "table", - SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "db", Name: "table"}, FileMeta: md.SourceFileMeta{Path: "zbdb.table-schema.sql", Type: md.SourceTypeTableSchema}}, - DataFiles: []md.FileInfo{{TableName: filter.Table{Schema: "db", Name: "table"}, FileMeta: md.SourceFileMeta{Path: "zbdb.table.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}}, - IndexRatio: 0.0, - IsRowOrdered: true, + { + Name: "web_test", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "web_test", Name: ""}, FileMeta: md.SourceFileMeta{Path: "web-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + }, + { + Name: "x2", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "x2", Name: ""}, FileMeta: md.SourceFileMeta{Path: "x-schema-create.sql", Type: md.SourceTypeSchemaSchema}}, + 
Tables: []*md.MDTableMeta{ + { + DB: "x2", + Name: "t", + SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "x2", Name: "t"}, FileMeta: md.SourceFileMeta{Path: "x.t10-schema.sql", Type: md.SourceTypeTableSchema}}, + IndexRatio: 0.0, + IsRowOrdered: true, + DataFiles: []md.FileInfo{}, + }, }, }, - }, - }, mdl.GetDatabases()) + } + require.Equal(t, expectedDBS, dbs) + } } func TestBadRouterRule(t *testing.T) { diff --git a/br/pkg/pdutil/pd.go b/br/pkg/pdutil/pd.go index 5956c6b34417a..20809385bb29c 100644 --- a/br/pkg/pdutil/pd.go +++ b/br/pkg/pdutil/pd.go @@ -378,7 +378,7 @@ func (p *PdController) getStoreInfoWith( func (p *PdController) doPauseSchedulers(ctx context.Context, schedulers []string, post pdHTTPRequest) ([]string, error) { // pause this scheduler with 300 seconds - body, err := json.Marshal(pauseSchedulerBody{Delay: int64(pauseTimeout)}) + body, err := json.Marshal(pauseSchedulerBody{Delay: int64(pauseTimeout.Seconds())}) if err != nil { return nil, errors.Trace(err) } diff --git a/br/tests/br_gcs/run.sh b/br/tests/br_gcs/run.sh index a29cbafa8668c..3829bed165ce9 100755 --- a/br/tests/br_gcs/run.sh +++ b/br/tests/br_gcs/run.sh @@ -98,7 +98,7 @@ done # new version restore full echo "restore start..." -run_br restore full -s "gcs://$BUCKET/$DB?" --pd $PD_ADDR --gcs.endpoint="http://$GCS_HOST:$GCS_PORT/storage/v1/" +run_br restore full -s "gcs://$BUCKET/$DB?" --pd $PD_ADDR --gcs.endpoint="http://$GCS_HOST:$GCS_PORT/storage/v1/" --check-requirements=false for i in $(seq $DB_COUNT); do row_count_new[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB${i}.$TABLE;" | awk '/COUNT/{print $2}') @@ -126,7 +126,7 @@ for i in $(seq $DB_COUNT); do done echo "v4.0.8 version restore start..." -run_br restore full -s "gcs://$BUCKET/${DB}_old" --pd $PD_ADDR --gcs.endpoint="http://$GCS_HOST:$GCS_PORT/storage/v1/" +bin/brv4.0.8 restore full -s "gcs://$BUCKET/${DB}_old" --pd $PD_ADDR --gcs.endpoint="http://$GCS_HOST:$GCS_PORT/storage/v1/" for i in $(seq $DB_COUNT); do row_count_new[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB${i}.$TABLE;" | awk '/COUNT/{print $2}') diff --git a/cmd/explaintest/r/tpch.result b/cmd/explaintest/r/tpch.result index 855021af61533..da6f043b22912 100644 --- a/cmd/explaintest/r/tpch.result +++ b/cmd/explaintest/r/tpch.result @@ -254,7 +254,7 @@ Projection 10.00 root tpch.lineitem.l_orderkey, Column#35, tpch.orders.o_orderd └─TopN 10.00 root Column#35:desc, tpch.orders.o_orderdate, offset:0, count:10 └─HashAgg 40252367.98 root group by:Column#48, Column#49, Column#50, funcs:sum(Column#44)->Column#35, funcs:firstrow(Column#45)->tpch.orders.o_orderdate, funcs:firstrow(Column#46)->tpch.orders.o_shippriority, funcs:firstrow(Column#47)->tpch.lineitem.l_orderkey └─Projection 91515927.49 root mul(tpch.lineitem.l_extendedprice, minus(1, tpch.lineitem.l_discount))->Column#44, tpch.orders.o_orderdate, tpch.orders.o_shippriority, tpch.lineitem.l_orderkey, tpch.lineitem.l_orderkey, tpch.orders.o_orderdate, tpch.orders.o_shippriority - └─HashJoin 91515927.49 root inner join, equal:[eq(tpch.orders.o_orderkey, tpch.lineitem.l_orderkey)] + └─IndexHashJoin 91515927.49 root inner join, inner:IndexLookUp, outer key:tpch.orders.o_orderkey, inner key:tpch.lineitem.l_orderkey, equal cond:eq(tpch.orders.o_orderkey, tpch.lineitem.l_orderkey) ├─HashJoin(Build) 22592975.51 root inner join, equal:[eq(tpch.customer.c_custkey, tpch.orders.o_custkey)] │ ├─TableReader(Build) 1498236.00 root data:Selection │ │ └─Selection 1498236.00 cop[tikv] eq(tpch.customer.c_mktsegment, "AUTOMOBILE") @@ -262,9 +262,10 @@ 
Projection 10.00 root tpch.lineitem.l_orderkey, Column#35, tpch.orders.o_orderd │ └─TableReader(Probe) 36870000.00 root data:Selection │ └─Selection 36870000.00 cop[tikv] lt(tpch.orders.o_orderdate, 1995-03-13 00:00:00.000000) │ └─TableFullScan 75000000.00 cop[tikv] table:orders keep order:false - └─TableReader(Probe) 163047704.27 root data:Selection - └─Selection 163047704.27 cop[tikv] gt(tpch.lineitem.l_shipdate, 1995-03-13 00:00:00.000000) - └─TableFullScan 300005811.00 cop[tikv] table:lineitem keep order:false + └─IndexLookUp(Probe) 4.05 root + ├─IndexRangeScan(Build) 7.45 cop[tikv] table:lineitem, index:PRIMARY(L_ORDERKEY, L_LINENUMBER) range: decided by [eq(tpch.lineitem.l_orderkey, tpch.orders.o_orderkey)], keep order:false + └─Selection(Probe) 4.05 cop[tikv] gt(tpch.lineitem.l_shipdate, 1995-03-13 00:00:00.000000) + └─TableRowIDScan 7.45 cop[tikv] table:lineitem keep order:false /* Q4 Order Priority Checking Query This query determines how well the order priority system is working and gives an assessment of customer satisfaction. diff --git a/ddl/column_modify_test.go b/ddl/column_modify_test.go index 12cd8296682ac..d0dedc8667818 100644 --- a/ddl/column_modify_test.go +++ b/ddl/column_modify_test.go @@ -456,7 +456,7 @@ func TestCancelDropColumn(t *testing.T) { JobSchemaState model.SchemaState cancelSucc bool }{ - {true, model.JobStateNone, model.StateNone, true}, + {true, model.JobStateQueueing, model.StateNone, true}, {false, model.JobStateRunning, model.StateWriteOnly, false}, {true, model.JobStateRunning, model.StateDeleteOnly, false}, {true, model.JobStateRunning, model.StateDeleteReorganization, false}, @@ -497,12 +497,10 @@ func TestCancelDropColumn(t *testing.T) { originalHook := dom.DDL().GetHook() dom.DDL().SetHook(hook) for i := range testCases { - var c3IdxID int64 testCase = &testCases[i] if testCase.needAddColumn { tk.MustExec("alter table test_drop_column add column c3 int") tk.MustExec("alter table test_drop_column add index idx_c3(c3)") - c3IdxID = external.GetIndexID(t, tk, "test", "test_drop_column", "idx_c3") } err := tk.ExecToErr("alter table test_drop_column drop column c3") @@ -533,10 +531,6 @@ func TestCancelDropColumn(t *testing.T) { require.Nil(t, col1) require.NoError(t, err) require.EqualError(t, checkErr, admin.ErrCannotCancelDDLJob.GenWithStackByArgs(jobID).Error()) - if c3IdxID != 0 { - // Check index is deleted - checkDelRangeAdded(tk, jobID, c3IdxID) - } } } dom.DDL().SetHook(originalHook) @@ -560,7 +554,7 @@ func TestCancelDropColumns(t *testing.T) { JobSchemaState model.SchemaState cancelSucc bool }{ - {true, model.JobStateNone, model.StateNone, true}, + {true, model.JobStateQueueing, model.StateNone, true}, {false, model.JobStateRunning, model.StateWriteOnly, false}, {true, model.JobStateRunning, model.StateDeleteOnly, false}, {true, model.JobStateRunning, model.StateDeleteReorganization, false}, @@ -601,12 +595,10 @@ func TestCancelDropColumns(t *testing.T) { originalHook := dom.DDL().GetHook() dom.DDL().SetHook(hook) for i := range testCases { - var c3IdxID int64 testCase = &testCases[i] if testCase.needAddColumn { tk.MustExec("alter table test_drop_column add column c3 int, add column c4 int") tk.MustExec("alter table test_drop_column add index idx_c3(c3)") - c3IdxID = external.GetIndexID(t, tk, "test", "test_drop_column", "idx_c3") } err := tk.ExecToErr("alter table test_drop_column drop column c3, drop column c4") tbl := external.GetTableByName(t, tk, "test", "test_drop_column") @@ -634,10 +626,6 @@ func TestCancelDropColumns(t *testing.T) { 
require.Nil(t, idx3) require.NoError(t, err) require.EqualError(t, checkErr, admin.ErrCannotCancelDDLJob.GenWithStackByArgs(jobID).Error()) - if c3IdxID != 0 { - // Check index is deleted - checkDelRangeAdded(tk, jobID, c3IdxID) - } } } dom.DDL().SetHook(originalHook) diff --git a/ddl/column_type_change_test.go b/ddl/column_type_change_test.go index 003bcc59b995c..4d29f0ad18b8b 100644 --- a/ddl/column_type_change_test.go +++ b/ddl/column_type_change_test.go @@ -199,7 +199,7 @@ func TestRollbackColumnTypeChangeBetweenInteger(t *testing.T) { // Alter sql will modify column c2 to bigint not null. SQL := "alter table t modify column c2 int not null" err := tk.ExecToErr(SQL) - require.EqualError(t, err, "[ddl:1]MockRollingBackInCallBack-queueing") + require.EqualError(t, err, "[ddl:1]MockRollingBackInCallBack-none") assertRollBackedColUnchanged(t, tk) // Mock roll back at model.StateDeleteOnly. diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index f49c9f17ef5b6..2db31cf0fe657 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -3757,6 +3757,13 @@ func TestIssue29282(t *testing.T) { // Unexpected, test fail. t.Fail() } + + // Wait the background query rollback + select { + case <-time.After(100 * time.Millisecond): + t.Fail() + case <-ch: + } } // See https://github.com/pingcap/tidb/issues/29327 diff --git a/ddl/db_partition_test.go b/ddl/db_partition_test.go index 74c2236c92f50..58a644a6ff3ce 100644 --- a/ddl/db_partition_test.go +++ b/ddl/db_partition_test.go @@ -2754,10 +2754,6 @@ func testPartitionDropIndex(t *testing.T, store kv.Storage, lease time.Duration, } tk.MustExec(addIdxSQL) - indexID := external.GetIndexID(t, tk, "test", "partition_drop_idx", idxName) - - jobIDExt, reset := setupJobIDExtCallback(tk.Session()) - defer reset() testutil.ExecMultiSQLInGoroutine(store, "test", []string{dropIdxSQL}, done) ticker := time.NewTicker(lease / 2) defer ticker.Stop() @@ -2780,7 +2776,6 @@ LOOP: num += step } } - checkDelRangeAdded(tk, jobIDExt.jobID, indexID) tk.MustExec("drop table partition_drop_idx;") } @@ -2833,13 +2828,12 @@ func testPartitionCancelAddIndex(t *testing.T, store kv.Storage, d ddl.DDL, leas } var checkErr error - var c3IdxInfo *model.IndexInfo hook := &ddl.TestDDLCallback{} originBatchSize := tk.MustQuery("select @@global.tidb_ddl_reorg_batch_size") // Set batch size to lower try to slow down add-index reorganization, This if for hook to cancel this ddl job. 
tk.MustExec("set @@global.tidb_ddl_reorg_batch_size = 32") defer tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_reorg_batch_size = %v", originBatchSize.Rows()[0][0])) - hook.OnJobUpdatedExported, c3IdxInfo, checkErr = backgroundExecOnJobUpdatedExportedT(t, tk, store, hook, idxName) + hook.OnJobUpdatedExported, _, checkErr = backgroundExecOnJobUpdatedExportedT(t, tk, store, hook, idxName) originHook := d.GetHook() defer d.SetHook(originHook) jobIDExt := wrapJobIDExtCallback(hook) @@ -2873,7 +2867,6 @@ LOOP: times++ } } - checkDelRangeAdded(tk, jobIDExt.jobID, c3IdxInfo.ID) tk.MustExec("drop table t1") } diff --git a/ddl/db_rename_test.go b/ddl/db_rename_test.go index 861efe3257225..b84d22e63a31c 100644 --- a/ddl/db_rename_test.go +++ b/ddl/db_rename_test.go @@ -75,7 +75,7 @@ func TestCancelRenameIndex(t *testing.T) { var checkErr error hook := &ddl.TestDDLCallback{Do: dom} hook.OnJobRunBeforeExported = func(job *model.Job) { - if job.Type == model.ActionRenameIndex && job.State == model.JobStateNone { + if job.Type == model.ActionRenameIndex && job.State == model.JobStateQueueing { jobIDs := []int64{job.ID} hookCtx := mock.NewContext() hookCtx.Store = store diff --git a/ddl/db_table_test.go b/ddl/db_table_test.go index 13a6113ad6830..146aa01e38608 100644 --- a/ddl/db_table_test.go +++ b/ddl/db_table_test.go @@ -59,12 +59,12 @@ func TestCancelDropTableAndSchema(t *testing.T) { }{ // Check drop table. // model.JobStateNone means the jobs is canceled before the first run. - {true, model.ActionDropTable, model.JobStateNone, model.StateNone, true}, + {true, model.ActionDropTable, model.JobStateQueueing, model.StateNone, true}, {false, model.ActionDropTable, model.JobStateRunning, model.StateWriteOnly, false}, {true, model.ActionDropTable, model.JobStateRunning, model.StateDeleteOnly, false}, // Check drop database. - {true, model.ActionDropSchema, model.JobStateNone, model.StateNone, true}, + {true, model.ActionDropSchema, model.JobStateQueueing, model.StateNone, true}, {false, model.ActionDropSchema, model.JobStateRunning, model.StateWriteOnly, false}, {true, model.ActionDropSchema, model.JobStateRunning, model.StateDeleteOnly, false}, } @@ -437,8 +437,8 @@ func TestCancelAddTableAndDropTablePartition(t *testing.T) { JobSchemaState model.SchemaState cancelSucc bool }{ - {model.ActionAddTablePartition, model.JobStateNone, model.StateNone, true}, - {model.ActionDropTablePartition, model.JobStateNone, model.StateNone, true}, + {model.ActionAddTablePartition, model.JobStateQueueing, model.StateNone, true}, + {model.ActionDropTablePartition, model.JobStateQueueing, model.StateNone, true}, // Add table partition now can be cancelled in ReplicaOnly state. 
{model.ActionAddTablePartition, model.JobStateRunning, model.StateReplicaOnly, true}, } diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 4ec29dcc50b3d..6bbf0beae14ee 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -301,6 +301,7 @@ func (d *ddl) addBatchDDLJobs(tasks []*limitJobTask) { job.Version = currentVersion job.StartTS = txn.StartTS() job.ID = ids[i] + job.State = model.JobStateQueueing if err = buildJobDependence(t, job); err != nil { return errors.Trace(err) } diff --git a/ddl/delete_range.go b/ddl/delete_range.go index 3d7d5895bdb79..d5fcfb1901341 100644 --- a/ddl/delete_range.go +++ b/ddl/delete_range.go @@ -17,6 +17,7 @@ package ddl import ( "context" "encoding/hex" + "fmt" "math" "strings" "sync" @@ -236,14 +237,24 @@ func (dr *delRange) doTask(ctx sessionctx.Context, r util.DelRangeTask) error { return nil } +type elementIDAlloc struct { + id int64 +} + +func (ea *elementIDAlloc) alloc() int64 { + ea.id++ + return ea.id +} + // insertJobIntoDeleteRangeTable parses the job into delete-range arguments, // and inserts a new record into gc_delete_range table. The primary key is -// job ID, so we ignore key conflict error. +// (job ID, element ID), so we ignore key conflict error. func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, job *model.Job) error { now, err := getNowTSO(sctx) if err != nil { return errors.Trace(err) } + var ea elementIDAlloc s := sctx.(sqlexec.SQLExecutor) switch job.Type { @@ -257,7 +268,7 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, if batchEnd > i+batchInsertDeleteRangeSize { batchEnd = i + batchInsertDeleteRangeSize } - if err := doBatchInsert(ctx, s, job.ID, tableIDs[i:batchEnd], now); err != nil { + if err := doBatchInsert(ctx, s, job.ID, tableIDs[i:batchEnd], now, &ea); err != nil { return errors.Trace(err) } } @@ -274,7 +285,7 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, for _, pid := range physicalTableIDs { startKey = tablecodec.EncodeTablePrefix(pid) endKey := tablecodec.EncodeTablePrefix(pid + 1) - if err := doInsert(ctx, s, job.ID, pid, startKey, endKey, now); err != nil { + if err := doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("partition ID is %d", pid)); err != nil { return errors.Trace(err) } } @@ -282,7 +293,7 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, } startKey = tablecodec.EncodeTablePrefix(tableID) endKey := tablecodec.EncodeTablePrefix(tableID + 1) - return doInsert(ctx, s, job.ID, tableID, startKey, endKey, now) + return doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("table ID is %d", tableID)) case model.ActionDropTablePartition, model.ActionTruncateTablePartition: var physicalTableIDs []int64 if err := job.DecodeArgs(&physicalTableIDs); err != nil { @@ -291,7 +302,7 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, for _, physicalTableID := range physicalTableIDs { startKey := tablecodec.EncodeTablePrefix(physicalTableID) endKey := tablecodec.EncodeTablePrefix(physicalTableID + 1) - if err := doInsert(ctx, s, job.ID, physicalTableID, startKey, endKey, now); err != nil { + if err := doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("partition table ID is %d", physicalTableID)); err != nil { return errors.Trace(err) } } @@ -307,14 +318,14 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, for _, pid := range partitionIDs { startKey 
:= tablecodec.EncodeTableIndexPrefix(pid, indexID) endKey := tablecodec.EncodeTableIndexPrefix(pid, indexID+1) - if err := doInsert(ctx, s, job.ID, indexID, startKey, endKey, now); err != nil { + if err := doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("partition table ID is %d", pid)); err != nil { return errors.Trace(err) } } } else { startKey := tablecodec.EncodeTableIndexPrefix(tableID, indexID) endKey := tablecodec.EncodeTableIndexPrefix(tableID, indexID+1) - return doInsert(ctx, s, job.ID, indexID, startKey, endKey, now) + return doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("table ID is %d", tableID)) } case model.ActionDropIndex, model.ActionDropPrimaryKey: tableID := job.TableID @@ -328,14 +339,14 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, for _, pid := range partitionIDs { startKey := tablecodec.EncodeTableIndexPrefix(pid, indexID) endKey := tablecodec.EncodeTableIndexPrefix(pid, indexID+1) - if err := doInsert(ctx, s, job.ID, indexID, startKey, endKey, now); err != nil { + if err := doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("partition table ID is %d", pid)); err != nil { return errors.Trace(err) } } } else { startKey := tablecodec.EncodeTableIndexPrefix(tableID, indexID) endKey := tablecodec.EncodeTableIndexPrefix(tableID, indexID+1) - return doInsert(ctx, s, job.ID, indexID, startKey, endKey, now) + return doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("index ID is %d", indexID)) } case model.ActionDropIndexes: var indexIDs []int64 @@ -348,10 +359,10 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, return nil } if len(partitionIDs) == 0 { - return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now) + return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now, &ea) } for _, pID := range partitionIDs { - if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pID, indexIDs, now); err != nil { + if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pID, indexIDs, now, &ea); err != nil { return errors.Trace(err) } } @@ -365,12 +376,12 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, if len(indexIDs) > 0 { if len(partitionIDs) > 0 { for _, pid := range partitionIDs { - if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pid, indexIDs, now); err != nil { + if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pid, indexIDs, now, &ea); err != nil { return errors.Trace(err) } } } else { - return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now) + return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now, &ea) } } case model.ActionDropColumns: @@ -384,12 +395,12 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, if len(indexIDs) > 0 { if len(partitionIDs) > 0 { for _, pid := range partitionIDs { - if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pid, indexIDs, now); err != nil { + if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pid, indexIDs, now, &ea); err != nil { return errors.Trace(err) } } } else { - return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now) + return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now, &ea) } } case model.ActionModifyColumn: @@ -402,10 +413,10 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, return nil } if len(partitionIDs) == 0 { - return doBatchDeleteIndiceRange(ctx, s, 
job.ID, job.TableID, indexIDs, now) + return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now, &ea) } for _, pid := range partitionIDs { - if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pid, indexIDs, now); err != nil { + if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pid, indexIDs, now, &ea); err != nil { return errors.Trace(err) } } @@ -413,8 +424,8 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, return nil } -func doBatchDeleteIndiceRange(ctx context.Context, s sqlexec.SQLExecutor, jobID, tableID int64, indexIDs []int64, ts uint64) error { - logutil.BgLogger().Info("[ddl] batch insert into delete-range indices", zap.Int64("jobID", jobID), zap.Int64s("elementIDs", indexIDs)) +func doBatchDeleteIndiceRange(ctx context.Context, s sqlexec.SQLExecutor, jobID, tableID int64, indexIDs []int64, ts uint64, ea *elementIDAlloc) error { + logutil.BgLogger().Info("[ddl] batch insert into delete-range indices", zap.Int64("jobID", jobID), zap.Int64("tableID", tableID), zap.Int64s("indexIDs", indexIDs)) paramsList := make([]interface{}, 0, len(indexIDs)*5) var buf strings.Builder buf.WriteString(insertDeleteRangeSQLPrefix) @@ -427,14 +438,14 @@ func doBatchDeleteIndiceRange(ctx context.Context, s sqlexec.SQLExecutor, jobID, if i != len(indexIDs)-1 { buf.WriteString(",") } - paramsList = append(paramsList, jobID, indexID, startKeyEncoded, endKeyEncoded, ts) + paramsList = append(paramsList, jobID, ea.alloc(), startKeyEncoded, endKeyEncoded, ts) } _, err := s.ExecuteInternal(ctx, buf.String(), paramsList...) return errors.Trace(err) } -func doInsert(ctx context.Context, s sqlexec.SQLExecutor, jobID int64, elementID int64, startKey, endKey kv.Key, ts uint64) error { - logutil.BgLogger().Info("[ddl] insert into delete-range table", zap.Int64("jobID", jobID), zap.Int64("elementID", elementID)) +func doInsert(ctx context.Context, s sqlexec.SQLExecutor, jobID, elementID int64, startKey, endKey kv.Key, ts uint64, comment string) error { + logutil.BgLogger().Info("[ddl] insert into delete-range table", zap.Int64("jobID", jobID), zap.Int64("elementID", elementID), zap.String("comment", comment)) startKeyEncoded := hex.EncodeToString(startKey) endKeyEncoded := hex.EncodeToString(endKey) // set session disk full opt @@ -446,8 +457,8 @@ func doInsert(ctx context.Context, s sqlexec.SQLExecutor, jobID int64, elementID return errors.Trace(err) } -func doBatchInsert(ctx context.Context, s sqlexec.SQLExecutor, jobID int64, tableIDs []int64, ts uint64) error { - logutil.BgLogger().Info("[ddl] batch insert into delete-range table", zap.Int64("jobID", jobID), zap.Int64s("elementIDs", tableIDs)) +func doBatchInsert(ctx context.Context, s sqlexec.SQLExecutor, jobID int64, tableIDs []int64, ts uint64, ea *elementIDAlloc) error { + logutil.BgLogger().Info("[ddl] batch insert into delete-range table", zap.Int64("jobID", jobID), zap.Int64s("tableIDs", tableIDs)) var buf strings.Builder buf.WriteString(insertDeleteRangeSQLPrefix) paramsList := make([]interface{}, 0, len(tableIDs)*5) @@ -460,7 +471,7 @@ func doBatchInsert(ctx context.Context, s sqlexec.SQLExecutor, jobID int64, tabl if i != len(tableIDs)-1 { buf.WriteString(",") } - paramsList = append(paramsList, jobID, tableID, startKeyEncoded, endKeyEncoded, ts) + paramsList = append(paramsList, jobID, ea.alloc(), startKeyEncoded, endKeyEncoded, ts) } // set session disk full opt s.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) diff --git a/ddl/index_modify_test.go b/ddl/index_modify_test.go index 
586e9c8ffa7c8..8a615fb4de80a 100644 --- a/ddl/index_modify_test.go +++ b/ddl/index_modify_test.go @@ -715,7 +715,6 @@ func testCancelAddIndex(t *testing.T, store kv.Storage, dom *domain.Domain, idxN batchInsert(tk, "t1", i, i+defaultBatchSize) } - var c3IdxInfo *model.IndexInfo hook := &ddl.TestDDLCallback{Do: dom} originBatchSize := tk.MustQuery("select @@global.tidb_ddl_reorg_batch_size") // Set batch size to lower try to slow down add-index reorganization, This if for hook to cancel this ddl job. @@ -725,7 +724,7 @@ func testCancelAddIndex(t *testing.T, store kv.Storage, dom *domain.Domain, idxN // the hook.OnJobUpdatedExported is called when the job is updated, runReorgJob will wait ddl.ReorgWaitTimeout, then return the ddl.runDDLJob. // After that ddl call d.hook.OnJobUpdated(job), so that we can canceled the job in this test case. var checkErr error - hook.OnJobUpdatedExported, c3IdxInfo, checkErr = backgroundExecOnJobUpdatedExported(t, tk, store, hook, idxName) + hook.OnJobUpdatedExported, _, checkErr = backgroundExecOnJobUpdatedExported(t, tk, store, hook, idxName) originalHook := d.GetHook() jobIDExt := wrapJobIDExtCallback(hook) d.SetHook(jobIDExt) @@ -757,7 +756,6 @@ LOOP: times++ } } - checkDelRangeAdded(tk, jobIDExt.jobID, c3IdxInfo.ID) d.SetHook(originalHook) } @@ -1059,8 +1057,6 @@ func testDropIndexes(t *testing.T, store kv.Storage, createSQL, dropIdxSQL strin for _, idxName := range idxNames { idxIDs = append(idxIDs, external.GetIndexID(t, tk, "test", "test_drop_indexes", idxName)) } - jobIDExt, reset := setupJobIDExtCallback(tk.Session()) - defer reset() testddlutil.SessionExecInGoroutine(store, "test", dropIdxSQL, done) ticker := time.NewTicker(indexModifyLease / 2) @@ -1084,9 +1080,6 @@ LOOP: num += step } } - for _, idxID := range idxIDs { - checkDelRangeAdded(tk, jobIDExt.jobID, idxID) - } } func testDropIndexesIfExists(t *testing.T, store kv.Storage) { @@ -1128,6 +1121,11 @@ func testDropIndexesFromPartitionedTable(t *testing.T, store kv.Storage) { tk.MustExec("insert into test_drop_indexes_from_partitioned_table values (?, ?, ?)", i, i, i) } tk.MustExec("alter table test_drop_indexes_from_partitioned_table drop index i1, drop index if exists i2;") + tk.MustExec("alter table test_drop_indexes_from_partitioned_table add index i1(c1)") + tk.MustExec("alter table test_drop_indexes_from_partitioned_table drop index i1, drop index if exists i1;") + tk.MustExec("alter table test_drop_indexes_from_partitioned_table drop column c1, drop column c2;") + tk.MustExec("alter table test_drop_indexes_from_partitioned_table add column c1 int") + tk.MustExec("alter table test_drop_indexes_from_partitioned_table drop column c1, drop column if exists c1;") } func testCancelDropIndexes(t *testing.T, store kv.Storage, d ddl.DDL) { @@ -1151,7 +1149,7 @@ func testCancelDropIndexes(t *testing.T, store kv.Storage, d ddl.DDL) { }{ // model.JobStateNone means the jobs is canceled before the first run. // if we cancel successfully, we need to set needAddIndex to false in the next test case. Otherwise, set needAddIndex to true. 
- {true, model.JobStateNone, model.StateNone, true}, + {true, model.JobStateQueueing, model.StateNone, true}, {false, model.JobStateRunning, model.StateWriteOnly, false}, {true, model.JobStateRunning, model.StateDeleteOnly, false}, {true, model.JobStateRunning, model.StateDeleteReorganization, false}, @@ -1255,9 +1253,6 @@ func testDropIndex(t *testing.T, store kv.Storage, createSQL, dropIdxSQL, idxNam for i := 0; i < num; i++ { tk.MustExec("insert into test_drop_index values (?, ?, ?)", i, i, i) } - indexID := external.GetIndexID(t, tk, "test", "test_drop_index", idxName) - jobIDExt, reset := setupJobIDExtCallback(tk.Session()) - defer reset() testddlutil.SessionExecInGoroutine(store, "test", dropIdxSQL, done) ticker := time.NewTicker(indexModifyLease / 2) @@ -1285,7 +1280,6 @@ LOOP: rows := tk.MustQuery("explain select c1 from test_drop_index where c3 >= 0") require.NotContains(t, fmt.Sprintf("%v", rows), idxName) - checkDelRangeAdded(tk, jobIDExt.jobID, indexID) tk.MustExec("drop table test_drop_index") } diff --git a/ddl/main_test.go b/ddl/main_test.go index 3d8de547bd5dc..c944d92799902 100644 --- a/ddl/main_test.go +++ b/ddl/main_test.go @@ -27,8 +27,6 @@ import ( "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/util/testbridge" "github.com/tikv/client-go/v2/tikv" "go.uber.org/goleak" ) @@ -76,23 +74,6 @@ func wrapJobIDExtCallback(oldCallback ddl.Callback) *testDDLJobIDCallback { } } -func setupJobIDExtCallback(ctx sessionctx.Context) (jobExt *testDDLJobIDCallback, tearDown func()) { - dom := domain.GetDomain(ctx) - originHook := dom.DDL().GetHook() - jobIDExt := wrapJobIDExtCallback(originHook) - dom.DDL().SetHook(jobIDExt) - return jobIDExt, func() { - dom.DDL().SetHook(originHook) - } -} - -func checkDelRangeAdded(tk *testkit.TestKit, jobID int64, elemID int64) { - query := `select sum(cnt) from - (select count(1) cnt from mysql.gc_delete_range where job_id = ? and element_id = ? union - select count(1) cnt from mysql.gc_delete_range_done where job_id = ? and element_id = ?) as gdr;` - tk.MustQuery(query, jobID, elemID, jobID, elemID).Check(testkit.Rows("1")) -} - type testDDLJobIDCallback struct { ddl.Callback jobID int64 diff --git a/ddl/sanity_check.go b/ddl/sanity_check.go index dca1982e0d535..528370eeee157 100644 --- a/ddl/sanity_check.go +++ b/ddl/sanity_check.go @@ -31,31 +31,23 @@ import ( ) func checkRangeCntByTableIDs(physicalTableIDs []int64, cnt int64) { if len(physicalTableIDs) > 0 { if len(physicalTableIDs) != int(cnt) { - panic("should not happened") + panic("should not happen" + fmt.Sprintf(": expect count: %d, real count: %d", len(physicalTableIDs), cnt)) } } else if cnt != 1 { - panic("should not happened") + panic("should not happen" + fmt.Sprintf(": expect count: %d, real count: %d", 1, cnt)) } } func checkRangeCntByTableIDsAndIndexIDs(partitionTableIDs []int64, indexIDs []int64, cnt int64) { - if len(indexIDs) > 0 && len(partitionTableIDs) > 0 { - // Add this check after fixing the bug. 
- return - } if len(indexIDs) == 0 { return } - uniqueIndexIDs := make(map[int64]struct{}) - for _, id := range indexIDs { - uniqueIndexIDs[id] = struct{}{} - } - expectedCnt := len(uniqueIndexIDs) + expectedCnt := len(indexIDs) if len(partitionTableIDs) > 0 { expectedCnt *= len(partitionTableIDs) } if expectedCnt != int(cnt) { - panic("should not happened") + panic("should not happen" + fmt.Sprintf(": expect count: %d, real count: %d", expectedCnt, cnt)) } } @@ -93,7 +85,7 @@ func (d *ddl) checkDeleteRangeCnt(job *model.Job) { panic("should not happened") } if len(tableIDs) != int(cnt) { - panic("should not happened") + panic("should not happen" + fmt.Sprintf(": expect count: %d, real count: %d", len(tableIDs), cnt)) } case model.ActionDropTable, model.ActionTruncateTable: var startKey kv.Key @@ -109,7 +101,7 @@ panic("should not happened") } if len(physicalTableIDs) != int(cnt) { - panic("should not happened") + panic("should not happen" + fmt.Sprintf(": expect count: %d, real count: %d", len(physicalTableIDs), cnt)) } case model.ActionAddIndex, model.ActionAddPrimaryKey: var indexID int64 diff --git a/docs/design/2022-03-10-backup-with-batch-create-table.md b/docs/design/2022-03-10-backup-with-batch-create-table.md new file mode 100644 index 0000000000000..7051d5f941554 --- /dev/null +++ b/docs/design/2022-03-10-backup-with-batch-create-table.md @@ -0,0 +1,113 @@ +# TiDB Design Documents + +- Author(s): [fengou1](http://github.com/fengou1) +- Discussion PR: https://github.com/pingcap/tidb/issues/28763 +- Tracking Issue: https://github.com/pingcap/tidb/pull/27036 + +## Table of Contents + +* [Introduction](#introduction) +* [Motivation or Background](#motivation-or-background) +* [Detailed Design](#detailed-design) +* [Test Design](#test-design) + * [Compatibility Tests](#compatibility-tests) + * [Benchmark Tests](#benchmark-tests) + +## Introduction + +The purpose of this design is to speed up BR restore. + +## Motivation or Background + +The cluster has 6 TiB of data, 30k tables, and 11 TiKVs. When using BR to back up and restore this cluster, the speed is particularly slow. After investigation, BR can only create 2 tables per second: the entire restore takes nearly 4 hours, and the execution time spent creating tables accounts for almost all of it. Clearly, DDL execution speed is the bottleneck in this scenario. + +## Detailed Design + +Backup and restore of massive numbers of tables is extremely slow; the bottleneck of creating tables is waiting for the schema version change. Each CREATE TABLE DDL causes a schema version change. 
60000 tables cause almost 60000 schema version changes, which is what makes restoring a massive number of tables so costly. + +Currently, BR uses an internal interface named CreateTableWithInfo to create tables, which creates each table and waits for the schema change one by one, omitting the sync of the DDL job between BR and the leader. The procedure of creating one table looks like this: +```go +for _, t := range tables { + RunInTxn(func(txn) { + m := meta.New(txn) + schemaVersion := m.CreateTable(t) + m.UpdateSchema(schemaVersion) + }) +} +``` + +The new design introduces a new batch create table API `BatchCreateTableWithInfo`: +```go +for _, info := range tableInfo { + job, err := d.createTableWithInfoJob(ctx, dbName, info, onExist, true) + if err != nil { + return errors.Trace(err) + } + + // if jobs.Type == model.ActionCreateTables, it is initialized + // if not, initialize jobs by job.XXXX + if jobs.Type != model.ActionCreateTables { + jobs.Type = model.ActionCreateTables + jobs.SchemaID = job.SchemaID + jobs.SchemaName = job.SchemaName + } + + // append table job args + info, ok := job.Args[0].(*model.TableInfo) + if !ok { + return errors.Trace(fmt.Errorf("expect table info")) + } + args = append(args, info) + } + + jobs.Args = append(jobs.Args, args) + + err = d.doDDLJob(ctx, jobs) + + for j := range args { + if err = d.createTableWithInfoPost(ctx, args[j], jobs.SchemaID); err != nil { + return errors.Trace(d.callHookOnChanged(err)) + } + } +``` + +For the DDL worker, introduce a new job type `ActionCreateTables`: +```go + case model.ActionCreateTables: + tableInfos := []*model.TableInfo{} + err = job.DecodeArgs(&tableInfos) + + diff.AffectedOpts = make([]*model.AffectedOption, len(tableInfos)) + for i := range tableInfos { + diff.AffectedOpts[i] = &model.AffectedOption{ + SchemaID: job.SchemaID, + OldSchemaID: job.SchemaID, + TableID: tableInfos[i].ID, + OldTableID: tableInfos[i].ID, + } + } +``` + +For each `ActionCreateTables` job there is only one schema change, i.e. one schema version for a whole batch of created tables, which greatly improves the performance of creating tables; a minimal sketch of the resulting batched loop is given after the compatibility notes below. + + +The feature is enabled by default with a batch size of 128 for batch-creating tables; at that size, the 60000-table example above needs only about 469 schema version changes instead of 60000. Users can disable the feature by specifying `--ddl-batch-size=0`. The maximum batch size is bounded by the transaction message size, which is configured as 8 MB by default. + +## Test Design +UT: see PRs https://github.com/pingcap/tidb/pull/28763, https://github.com/pingcap/tics/pull/4201, https://github.com/pingcap/tidb/pull/29380 + +Integration tests also cover CDC, BR, TiDB, binlog, TiFlash, etc. + + +### Compatibility Tests + +- Compatibility with binlog, please refer to https://github.com/pingcap/tidb-binlog/pull/1114. +- Compatibility with CDC: a regression test was made for CDC working with BR. Since CDC has a whitelist for unrecognized DDL jobs and pulls data from TiKV directly, we did not find regression issues. +- Compatibility with TiFlash https://github.com/pingcap/tics/pull/4201. +- Upgrade compatibility: with new BR + a TiDB without the BatchCreateTableWithInfo interface, the restore falls back to the legacy way that creates tables one by one. +- Downgrade compatibility: with old BR + a new TiDB with BatchCreateTableWithInfo, the restore uses the legacy way to create tables. 
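As a minimal sketch of the batched flow referenced above, in the same pseudocode style as the earlier snippets (illustrative only, not the actual `BatchCreateTableWithInfo` implementation; `GenSchemaVersion` is an assumed helper name for bumping the schema version):

```go
// Pseudocode: one schema version change covers a whole batch of tables,
// instead of one change per table as in the legacy CreateTableWithInfo loop.
const batchSize = 128 // default batch size; tunable via --ddl-batch-size

for start := 0; start < len(tables); start += batchSize {
	end := start + batchSize
	if end > len(tables) {
		end = len(tables)
	}
	RunInTxn(func(txn) {
		m := meta.New(txn)
		for _, t := range tables[start:end] {
			m.CreateTable(t) // no per-table schema version bump
		}
		m.UpdateSchema(m.GenSchemaVersion()) // assumed helper: one bump per batch
	})
}
```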
+ +### Benchmark Tests +- Restoring 61259 tables takes 4 minutes 50 seconds, i.e. 200+ tables per second, with the following configuration: +TiDB x 1: 16 CPU, 32 GB +PD x 1: 16 CPU, 32 GB +TiKV x 3: 16 CPU, 32 GB \ No newline at end of file diff --git a/docs/tidb_http_api.md b/docs/tidb_http_api.md index 9c927f38f64db..c9d63f5ea4e6b 100644 --- a/docs/tidb_http_api.md +++ b/docs/tidb_http_api.md @@ -456,6 +456,8 @@ timezone.* curl -X POST http://{TiDBIP}:10080/ddl/owner/resign ``` + + **Note**: If you request a TiDB that is not the DDL owner, the response will be `This node is not a ddl owner, can't be resigned.` + 1. Get all TiDB DDL job history information. ```shell @@ -468,8 +470,6 @@ timezone.* curl http://{TiDBIP}:10080/ddl/history?limit={number} ``` - **Note**: If you request a tidb that is not ddl owner, the response will be `This node is not a ddl owner, can't be resigned.` - 1. Download TiDB debug info ```shell diff --git a/domain/infosync/info.go b/domain/infosync/info.go index d7c6a15ec4cc5..c234557501b32 100644 --- a/domain/infosync/info.go +++ b/domain/infosync/info.go @@ -183,15 +183,9 @@ func GlobalInfoSyncerInit(ctx context.Context, id string, serverIDGetter func() if err != nil { return nil, err } - if etcdCli != nil { - is.labelRuleManager = initLabelRuleManager(etcdCli.Endpoints()) - is.placementManager = initPlacementManager(etcdCli.Endpoints()) - is.tiflashPlacementManager = initTiFlashPlacementManager(etcdCli.Endpoints()) - } else { - is.labelRuleManager = initLabelRuleManager([]string{}) - is.placementManager = initPlacementManager([]string{}) - is.tiflashPlacementManager = initTiFlashPlacementManager([]string{}) - } + is.labelRuleManager = initLabelRuleManager(etcdCli) + is.placementManager = initPlacementManager(etcdCli) + is.tiflashPlacementManager = initTiFlashPlacementManager(etcdCli) setGlobalInfoSyncer(is) return is, nil } @@ -218,27 +212,27 @@ func (is *InfoSyncer) GetSessionManager() util2.SessionManager { return is.manager } -func initLabelRuleManager(addrs []string) LabelRuleManager { - if len(addrs) == 0 { +func initLabelRuleManager(etcdCli *clientv3.Client) LabelRuleManager { + if etcdCli == nil { return &mockLabelManager{labelRules: map[string][]byte{}} } - return &PDLabelManager{addrs: addrs} + return &PDLabelManager{etcdCli: etcdCli} } -func initPlacementManager(addrs []string) PlacementManager { - if len(addrs) == 0 { +func initPlacementManager(etcdCli *clientv3.Client) PlacementManager { + if etcdCli == nil { return &mockPlacementManager{} } - return &PDPlacementManager{addrs: addrs} + return &PDPlacementManager{etcdCli: etcdCli} } -func initTiFlashPlacementManager(addrs []string) TiFlashPlacementManager { - if len(addrs) == 0 { +func initTiFlashPlacementManager(etcdCli *clientv3.Client) TiFlashPlacementManager { + if etcdCli == nil { m := mockTiFlashPlacementManager{} return &m } - logutil.BgLogger().Warn("init TiFlashPlacementManager", zap.Strings("pd addrs", addrs)) - return &TiFlashPDPlacementManager{addrs: addrs} + logutil.BgLogger().Warn("init TiFlashPlacementManager", zap.Strings("pd addrs", etcdCli.Endpoints())) + return &TiFlashPDPlacementManager{etcdCli: etcdCli} } // GetMockTiFlash can only be used in tests to get MockTiFlash diff --git a/domain/infosync/label_manager.go b/domain/infosync/label_manager.go index 663d3f01976fc..c6a3ef98da268 100644 --- a/domain/infosync/label_manager.go +++ b/domain/infosync/label_manager.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/tidb/ddl/label" "github.com/pingcap/tidb/util/pdapi" + clientv3 
"go.etcd.io/etcd/client/v3" ) // LabelRuleManager manages label rules @@ -35,7 +36,7 @@ type LabelRuleManager interface { // PDLabelManager manages rules with pd type PDLabelManager struct { - addrs []string + etcdCli *clientv3.Client } // PutLabelRule implements PutLabelRule @@ -44,7 +45,7 @@ func (lm *PDLabelManager) PutLabelRule(ctx context.Context, rule *label.Rule) er if err != nil { return err } - _, err = doRequest(ctx, lm.addrs, path.Join(pdapi.Config, "region-label", "rule"), "POST", bytes.NewReader(r)) + _, err = doRequest(ctx, lm.etcdCli.Endpoints(), path.Join(pdapi.Config, "region-label", "rule"), "POST", bytes.NewReader(r)) return err } @@ -55,14 +56,14 @@ func (lm *PDLabelManager) UpdateLabelRules(ctx context.Context, patch *label.Rul return err } - _, err = doRequest(ctx, lm.addrs, path.Join(pdapi.Config, "region-label", "rules"), "PATCH", bytes.NewReader(r)) + _, err = doRequest(ctx, lm.etcdCli.Endpoints(), path.Join(pdapi.Config, "region-label", "rules"), "PATCH", bytes.NewReader(r)) return err } // GetAllLabelRules implements GetAllLabelRules func (lm *PDLabelManager) GetAllLabelRules(ctx context.Context) ([]*label.Rule, error) { var rules []*label.Rule - res, err := doRequest(ctx, lm.addrs, path.Join(pdapi.Config, "region-label", "rules"), "GET", nil) + res, err := doRequest(ctx, lm.etcdCli.Endpoints(), path.Join(pdapi.Config, "region-label", "rules"), "GET", nil) if err == nil && res != nil { err = json.Unmarshal(res, &rules) @@ -78,7 +79,7 @@ func (lm *PDLabelManager) GetLabelRules(ctx context.Context, ruleIDs []string) ( } rules := []*label.Rule{} - res, err := doRequest(ctx, lm.addrs, path.Join(pdapi.Config, "region-label", "rules", "ids"), "GET", bytes.NewReader(ids)) + res, err := doRequest(ctx, lm.etcdCli.Endpoints(), path.Join(pdapi.Config, "region-label", "rules", "ids"), "GET", bytes.NewReader(ids)) if err == nil && res != nil { err = json.Unmarshal(res, &rules) diff --git a/domain/infosync/placement_manager.go b/domain/infosync/placement_manager.go index 7c4db7dcd61e3..0a36de70715a8 100644 --- a/domain/infosync/placement_manager.go +++ b/domain/infosync/placement_manager.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/util/pdapi" + clientv3 "go.etcd.io/etcd/client/v3" ) // PlacementManager manages placement settings @@ -37,13 +38,13 @@ type PlacementManager interface { // PDPlacementManager manages placement with pd type PDPlacementManager struct { - addrs []string + etcdCli *clientv3.Client } // GetRuleBundle is used to get one specific rule bundle from PD. func (m *PDPlacementManager) GetRuleBundle(ctx context.Context, name string) (*placement.Bundle, error) { bundle := &placement.Bundle{ID: name} - res, err := doRequest(ctx, m.addrs, path.Join(pdapi.Config, "placement-rule", name), "GET", nil) + res, err := doRequest(ctx, m.etcdCli.Endpoints(), path.Join(pdapi.Config, "placement-rule", name), "GET", nil) if err == nil && res != nil { err = json.Unmarshal(res, bundle) } @@ -53,7 +54,7 @@ func (m *PDPlacementManager) GetRuleBundle(ctx context.Context, name string) (*p // GetAllRuleBundles is used to get all rule bundles from PD. It is used to load full rules from PD while fullload infoschema. 
func (m *PDPlacementManager) GetAllRuleBundles(ctx context.Context) ([]*placement.Bundle, error) { var bundles []*placement.Bundle - res, err := doRequest(ctx, m.addrs, path.Join(pdapi.Config, "placement-rule"), "GET", nil) + res, err := doRequest(ctx, m.etcdCli.Endpoints(), path.Join(pdapi.Config, "placement-rule"), "GET", nil) if err == nil && res != nil { err = json.Unmarshal(res, &bundles) } @@ -71,7 +72,7 @@ func (m *PDPlacementManager) PutRuleBundles(ctx context.Context, bundles []*plac return err } - _, err = doRequest(ctx, m.addrs, path.Join(pdapi.Config, "placement-rule")+"?partial=true", "POST", bytes.NewReader(b)) + _, err = doRequest(ctx, m.etcdCli.Endpoints(), path.Join(pdapi.Config, "placement-rule")+"?partial=true", "POST", bytes.NewReader(b)) return err } diff --git a/domain/infosync/tiflash_manager.go b/domain/infosync/tiflash_manager.go index 4f655c3206df6..53c664091b6cc 100644 --- a/domain/infosync/tiflash_manager.go +++ b/domain/infosync/tiflash_manager.go @@ -37,6 +37,7 @@ import ( "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/pdapi" + clientv3 "go.etcd.io/etcd/client/v3" "go.uber.org/zap" ) @@ -60,7 +61,7 @@ type TiFlashPlacementManager interface { // TiFlashPDPlacementManager manages placement with pd for TiFlash. type TiFlashPDPlacementManager struct { - addrs []string + etcdCli *clientv3.Client } // Close is called to close TiFlashPDPlacementManager. @@ -75,7 +76,7 @@ func (m *TiFlashPDPlacementManager) SetPlacementRule(ctx context.Context, rule p } j, _ := json.Marshal(rule) buf := bytes.NewBuffer(j) - res, err := doRequest(ctx, m.addrs, path.Join(pdapi.Config, "rule"), "POST", buf) + res, err := doRequest(ctx, m.etcdCli.Endpoints(), path.Join(pdapi.Config, "rule"), "POST", buf) if err != nil { return errors.Trace(err) } @@ -87,7 +88,7 @@ func (m *TiFlashPDPlacementManager) SetPlacementRule(ctx context.Context, rule p // DeletePlacementRule is to delete placement rule for certain group. func (m *TiFlashPDPlacementManager) DeletePlacementRule(ctx context.Context, group string, ruleID string) error { - res, err := doRequest(ctx, m.addrs, path.Join(pdapi.Config, "rule", group, ruleID), "DELETE", nil) + res, err := doRequest(ctx, m.etcdCli.Endpoints(), path.Join(pdapi.Config, "rule", group, ruleID), "DELETE", nil) if err != nil { return errors.Trace(err) } @@ -99,7 +100,7 @@ func (m *TiFlashPDPlacementManager) DeletePlacementRule(ctx context.Context, gro // GetGroupRules to get all placement rule in a certain group. 
func (m *TiFlashPDPlacementManager) GetGroupRules(ctx context.Context, group string) ([]placement.TiFlashRule, error) { - res, err := doRequest(ctx, m.addrs, path.Join(pdapi.Config, "rules", "group", group), "GET", nil) + res, err := doRequest(ctx, m.etcdCli.Endpoints(), path.Join(pdapi.Config, "rules", "group", group), "GET", nil) if err != nil { return nil, errors.Trace(err) } @@ -132,7 +133,7 @@ func (m *TiFlashPDPlacementManager) PostAccelerateSchedule(ctx context.Context, return errors.Trace(err) } buf := bytes.NewBuffer(j) - res, err := doRequest(ctx, m.addrs, "/pd/api/v1/regions/accelerate-schedule", "POST", buf) + res, err := doRequest(ctx, m.etcdCli.Endpoints(), "/pd/api/v1/regions/accelerate-schedule", "POST", buf) if err != nil { return errors.Trace(err) } @@ -152,7 +153,7 @@ func (m *TiFlashPDPlacementManager) GetPDRegionRecordStats(ctx context.Context, p := fmt.Sprintf("/pd/api/v1/stats/region?start_key=%s&end_key=%s", url.QueryEscape(string(startKey)), url.QueryEscape(string(endKey))) - res, err := doRequest(ctx, m.addrs, p, "GET", nil) + res, err := doRequest(ctx, m.etcdCli.Endpoints(), p, "GET", nil) if err != nil { return errors.Trace(err) } @@ -170,7 +171,7 @@ func (m *TiFlashPDPlacementManager) GetPDRegionRecordStats(ctx context.Context, // GetStoresStat gets the TiKV store information by accessing PD's api. func (m *TiFlashPDPlacementManager) GetStoresStat(ctx context.Context) (*helper.StoresStat, error) { var storesStat helper.StoresStat - res, err := doRequest(ctx, m.addrs, pdapi.Stores, "GET", nil) + res, err := doRequest(ctx, m.etcdCli.Endpoints(), pdapi.Stores, "GET", nil) if err != nil { return nil, errors.Trace(err) } diff --git a/dumpling/tools/.gitignore b/dumpling/tools/.gitignore deleted file mode 100644 index e660fd93d3196..0000000000000 --- a/dumpling/tools/.gitignore +++ /dev/null @@ -1 +0,0 @@ -bin/ diff --git a/dumpling/tools/Makefile b/dumpling/tools/Makefile deleted file mode 100644 index 468f9840172d3..0000000000000 --- a/dumpling/tools/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -all: bin/govet bin/revive bin/golangci-lint bin/failpoint-ctl - -bin/govet: - go build -o $@ github.com/dnephin/govet - -bin/revive: - go build -o $@ github.com/mgechev/revive - -bin/golangci-lint: - go build -o $@ github.com/golangci/golangci-lint/cmd/golangci-lint - -bin/failpoint-ctl: - go build -o $@ github.com/pingcap/failpoint/failpoint-ctl - diff --git a/dumpling/tools/go.mod b/dumpling/tools/go.mod deleted file mode 100644 index 38b8eb1890674..0000000000000 --- a/dumpling/tools/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/pingcap/tidb/dumpling/_tools - -go 1.16 - -require ( - github.com/dnephin/govet v0.0.0-20171012192244-4a96d43e39d3 - github.com/golangci/golangci-lint v1.33.0 - github.com/mgechev/revive v1.0.2 - github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 -) diff --git a/dumpling/tools/go.sum b/dumpling/tools/go.sum deleted file mode 100644 index 14be46a282212..0000000000000 --- a/dumpling/tools/go.sum +++ /dev/null @@ -1,639 +0,0 @@ -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a h1:wFEQiK85fRsEVF0CRrPAos5LoAryUsIX1kPW/WrIqFw= -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go 
v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Djarvur/go-err113 v0.0.0-20200511133814-5174e21577d5 h1:XTrzB+F8+SpRmbhAH8HLxhiiG6nYNwaBZjrFps1oWEk= -github.com/Djarvur/go-err113 v0.0.0-20200511133814-5174e21577d5/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQee8Us= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bombsimon/wsl/v3 v3.1.0 h1:E5SRssoBgtVFPcYWUOFJEcgaySgdtTNYzsSKDOY7ss8= -github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd 
v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/daixiang0/gci v0.2.4 h1:BUCKk5nlK2m+kRIsoj+wb/5hazHvHeZieBKWd9Afa8Q= -github.com/daixiang0/gci v0.2.4/go.mod h1:+AV8KmHTGxxwp/pY84TLQfFKp2vuKXXJVzF3kD/hfR4= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denis-tingajkin/go-header v0.3.1 h1:ymEpSiFjeItCy1FOP+x0M2KdCELdEAHUsNa8F+hHc6w= -github.com/denis-tingajkin/go-header v0.3.1/go.mod h1:sq/2IxMhaZX+RRcgHfCRx/m0M5na0fBt4/CRe7Lrji0= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dnephin/govet v0.0.0-20171012192244-4a96d43e39d3 h1:LrNBULdC8dhFb7VeeIfuLAVq2IOFtVD9zIYh838jWfM= -github.com/dnephin/govet v0.0.0-20171012192244-4a96d43e39d3/go.mod h1:pPTX0MEEoAnfbrAGFj4nSVNhl6YbugRj6eardUZdtGo= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-critic/go-critic v0.5.2 h1:3RJdgf6u4NZUumoP8nzbqiiNT8e1tC2Oc7jlgqre/IA= -github.com/go-critic/go-critic v0.5.2/go.mod h1:cc0+HvdE3lFpqLecgqMaJcvWWH77sLdBp+wLGPM1Yyo= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= 
-github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v1.0.0 h1:4DFWWMXVfbcN5So1sBNW9+yeiMqLFGl1wFLTL5R0Tgg= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk= -github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY= -github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf 
v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6 h1:YYWNAGTKWhKpcLLt7aSj/odlKrSrelQwlovBpDuf19w= -github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d h1:pXTK/gkVNs7Zyy7WKgLXmpQ5bHTrq5GDsp8R9Qs67g0= -github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.33.0 h1:/o4OtOR3Idim4FHKBJXcy+6ZjNDm82gwK/v6+gWyH9U= -github.com/golangci/golangci-lint v1.33.0/go.mod h1:zMnMLSCaDlrXExYsuq2LOweE9CHVqYk5jexk23UsjYM= -github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc h1:gLLhTLMk2/SutryVJ6D4VZCU3CUqr8YloG7FPIBWFpI= -github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770 h1:EL/O5HGrF7Jaq0yNhBLucz9hTuRzj2LdwGBOaENgxIk= -github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us= -github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= -github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0 h1:HVfrLniijszjS1aiNg8JbBMO2+E1WIQ+j/gL4SQqGPg= -github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gookit/color v1.3.1/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.1.0 h1:E4c8Y1EQURbBEAHoXc/jBTK7Np14ArT8NPUiSFOl9yc= -github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/comment v1.3.0 h1:wTVgynbFu8/nz6SGgywA0TcyIoAVsYc7ai/Zp5xNGlw= -github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= 
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jgautheron/goconst v0.0.0-20201117150253-ccae5bf973f3 h1:7nkB9fLPMwtn/R6qfPcHileL/x9ydlhw8XyDrLI1ZXg= -github.com/jgautheron/goconst v0.0.0-20201117150253-ccae5bf973f3/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a h1:GmsqmapfzSJkm28dhRoHz2tLRbJmqhU86IPgBtN3mmk= -github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s= -github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3 h1:jNYPNLe3d8smommaoQlK7LOA5ESyUJJ+Wf79ZtA7Vp4= -github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kunwardeep/paralleltest v1.0.2 h1:/jJRv0TiqPoEy/Y8dQxCFJhD56uS/pnvtatgTZBHokU= -github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= -github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= -github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= -github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb h1:RHba4YImhrUVQDHUCe2BNSOz4tVy2yGyXhvYDvxGgeE= -github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= -github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mbilski/exhaustivestruct v1.1.0 h1:4ykwscnAFeHJruT+EY3M3vdeP8uXMh0VV2E61iR7XD8= -github.com/mbilski/exhaustivestruct v1.1.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.2 h1:v0NxxQ7fSFz/u1NQydPo6EGdq7va0J1BtsZmae6kzUg= -github.com/mgechev/revive v1.0.2/go.mod 
h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MFySA2lo= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= -github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nakabonne/nestif v0.3.0 h1:+yOViDGhg8ygGrmII72nV9B/zGxY188TYpfolntsaPw= -github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.1.0 h1:kVlMw8h2LHPMGUVqUj6230oQjjTMFjwcZrnkhXzFfl8= -github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= -github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod 
h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= -github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= -github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20201006195004-351e25ade6e3 h1:Amgs0nbayPhBNGh1qPqqr2e7B2qNAcBgRjnBH/lmn8k= -github.com/polyfloyd/go-errorlint v0.0.0-20201006195004-351e25ade6e3/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quasilyte/go-ruleguard v0.2.0 h1:UOVMyH2EKkxIfzrULvA9n/tO+HtEhqD9mrLSWMr5FwU= -github.com/quasilyte/go-ruleguard v0.2.0/go.mod h1:2RT/tf0Ce0UDj5y243iWKosQogJd8+1G3Rs2fxmlYnw= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= -github.com/quasilyte/regex/syntax 
v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.1.0 h1:DWbye9KyMgytn8uYpuHkwf0RHqAYO6Ay/D0TbCpPtVU= -github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM= -github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/securego/gosec/v2 v2.5.0 h1:kjfXLeKdk98gBe2+eYRFMpC4+mxmQQtbidpiiOQ69Qc= -github.com/securego/gosec/v2 v2.5.0/go.mod h1:L/CDXVntIff5ypVHIkqPXbtRpJiNCh6c6Amn68jXDjo= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ= -github.com/sourcegraph/go-diff v0.6.1/go.mod 
h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/ssgreg/nlreturn/v2 v2.1.0 h1:6/s4Rc49L6Uo6RLjhWZGBpWWjfzk2yrf1nIW8m4wgVA= -github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2 h1:Xr9gkxfOP0KQWXKNqmwe8vEeSUiUj4Rlee9CMVX2ZUQ= -github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tetafro/godot v1.3.0 h1:rKXb6aAz2AnwS98jYlU3snCFFXnIInQdaGiftNwpj+k= -github.com/tetafro/godot v1.3.0/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= -github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e h1:RumXZ56IrCj4CL+g1b9OL/oH0QnsF976bC8xQFYUD5Q= -github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck v0.0.0-20200807122107-df9e8bcb914d h1:3EZyvNUMsGD1QA8cu0STNn1L7I77rvhf2IhOcHYQhSw= -github.com/tomarrell/wrapcheck v0.0.0-20200807122107-df9e8bcb914d/go.mod 
h1:yiFB6fFoV7saXirUGfuK+cPtUh4NX/Hf5y2WC2lehu0= -github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa h1:RC4maTWLKKwb7p1cnoygsbKIgNlJqSYBeAFON3Ar8As= -github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= -github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFODYDsg= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/uudashr/gocognit v1.0.1 h1:MoG2fZ0b/Eo7NXoIwCVFLG5JED3qgQz5/NEE+rOsjPs= -github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod 
h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634 h1:bNEHhJCnrwMKNMmOx3yAynp5vs5/gRy+XWFtZFu7NBM= -golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools 
v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200410194907-79a7a3126eef/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200731060945-b5fad4ed8dd6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201007032633-0806396f153e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752 h1:2ntEwh02rqo2jSsrYmp4yKHHjh0CbXP3ZtSUetSB+q8= -golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 
v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.6 h1:W18jzjh8mfPez+AwGLxmOImucz/IFjpNlrKVnaj2YVc= -honnef.co/go/tools v0.0.1-2020.1.6/go.mod h1:pyyisuGw24ruLjrr1ddx39WE0y9OooInRzEYLhQB2YY= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -mvdan.cc/gofumpt v0.0.0-20200802201014-ab5a8192947d h1:t8TAw9WgTLghti7RYkpPmqk4JtQ3+wcP5GgZqgWeWLQ= -mvdan.cc/gofumpt v0.0.0-20200802201014-ab5a8192947d/go.mod h1:bzrjFmaD6+xqohD3KYP0H2FEuxknnBmyyOxdhLdaIws= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7 h1:kAREL6MPwpsk1/PQPFD3Eg7WAQR5mPTWZJaBiG5LDbY= -mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7/go.mod h1:HGC5lll35J70Y5v7vCGb9oLhHoScFwkHDJm/05RdSTc= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/dumpling/tools/go_mod_guard.go b/dumpling/tools/go_mod_guard.go deleted file mode 100644 index 15cf078d9bc13..0000000000000 --- a/dumpling/tools/go_mod_guard.go +++ /dev/null @@ -1,18 +0,0 @@ -package tools - -// This file ensures `go mod tidy` will 
not delete entries to all tools. - -import ( - // golangci-lint is a package-based linter - _ "github.com/golangci/golangci-lint/pkg/commands" - - // revive is a file-based linter - _ "github.com/mgechev/revive" - - // govet checks for code correctness - _ "github.com/dnephin/govet" - - // failpoint enables manual 'failure' of some execution points. - _ "github.com/pingcap/failpoint" - _ "github.com/pingcap/failpoint/code" -) diff --git a/executor/adapter.go b/executor/adapter.go index f9db843454d14..b4c4888565c0e 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -333,12 +333,15 @@ func (a *ExecStmt) RebuildPlan(ctx context.Context) (int64, error) { } func (a *ExecStmt) setPlanLabelForTopSQL(ctx context.Context) context.Context { - if a.Plan == nil || !topsqlstate.TopSQLEnabled() { + if !topsqlstate.TopSQLEnabled() { return ctx } vars := a.Ctx.GetSessionVars() normalizedSQL, sqlDigest := vars.StmtCtx.SQLDigest() normalizedPlan, planDigest := getPlanDigest(a.Ctx, a.Plan) + if len(normalizedPlan) == 0 { + return ctx + } return topsql.AttachSQLInfo(ctx, normalizedSQL, sqlDigest, normalizedPlan, planDigest, vars.InRestrictedSQL) } diff --git a/executor/builder.go b/executor/builder.go index e6bbdaa6f29a8..a2394c0537df8 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -1173,11 +1173,13 @@ func (us *UnionScanExec) handleCachedTable(b *executorBuilder, x bypassDataSourc cachedTable := tbl.(table.CachedTable) // Determine whether the cache can be used. leaseDuration := time.Duration(variable.TableCacheLease.Load()) * time.Second - cacheData := cachedTable.TryReadFromCache(startTS, leaseDuration) + cacheData, loading := cachedTable.TryReadFromCache(startTS, leaseDuration) if cacheData != nil { vars.StmtCtx.ReadFromTableCache = true x.setDummy() us.cacheTable = cacheData + } else if loading { + // The cache is still being loaded by another goroutine; fall through and read from TiKV without triggering another load. + } else { if !b.inUpdateStmt && !b.inDeleteStmt && !b.inInsertStmt && !vars.StmtCtx.InExplainStmt { store := b.ctx.GetStore() @@ -4969,13 +4971,16 @@ func (b *executorBuilder) getCacheTable(tblInfo *model.TableInfo, startTS uint64 } sessVars := b.ctx.GetSessionVars() leaseDuration := time.Duration(variable.TableCacheLease.Load()) * time.Second - cacheData := tbl.(table.CachedTable).TryReadFromCache(startTS, leaseDuration) + cacheData, loading := tbl.(table.CachedTable).TryReadFromCache(startTS, leaseDuration) if cacheData != nil { sessVars.StmtCtx.ReadFromTableCache = true return cacheData - } - if !b.ctx.GetSessionVars().StmtCtx.InExplainStmt && !b.inDeleteStmt && !b.inUpdateStmt { - tbl.(table.CachedTable).UpdateLockForRead(context.Background(), b.ctx.GetStore(), startTS, leaseDuration) + } else if loading { + // The cache is already being loaded; do not request the read lock again, just read from TiKV this time. + } else { + if !b.ctx.GetSessionVars().StmtCtx.InExplainStmt && !b.inDeleteStmt && !b.inUpdateStmt { + tbl.(table.CachedTable).UpdateLockForRead(context.Background(), b.ctx.GetStore(), startTS, leaseDuration) + } } return nil } diff --git a/executor/charset_test.go b/executor/charset_test.go new file mode 100644 index 0000000000000..283d633e99aa9 --- /dev/null +++ b/executor/charset_test.go @@ -0,0 +1,112 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + "testing" + + "github.com/pingcap/tidb/errno" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/testkit" +) + +func TestCharsetFeature(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + tk.MustExec("set names gbk") + tk.MustQuery("select @@character_set_connection").Check(testkit.Rows("gbk")) + tk.MustQuery("select @@collation_connection").Check(testkit.Rows("gbk_chinese_ci")) + tk.MustExec("set @@character_set_client=gbk") + tk.MustQuery("select @@character_set_client").Check(testkit.Rows("gbk")) + tk.MustExec("set names utf8mb4") + tk.MustExec("set @@character_set_connection=gbk") + tk.MustQuery("select @@character_set_connection").Check(testkit.Rows("gbk")) + tk.MustQuery("select @@collation_connection").Check(testkit.Rows("gbk_chinese_ci")) + + tk.MustGetErrCode("select _gbk 'a'", errno.ErrUnknownCharacterSet) + + tk.MustExec("use test") + tk.MustExec("create table t1(a char(10) charset gbk)") + tk.MustExec("create table t2(a char(10) charset gbk collate gbk_bin)") + tk.MustExec("create table t3(a char(10)) charset gbk") + tk.MustExec("alter table t3 add column b char(10) charset gbk") + tk.MustQuery("show create table t3").Check(testkit.Rows("t3 CREATE TABLE `t3` (\n" + + " `a` char(10) DEFAULT NULL,\n" + + " `b` char(10) DEFAULT NULL\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=gbk COLLATE=gbk_chinese_ci", + )) + tk.MustExec("create table t4(a char(10))") + tk.MustExec("alter table t4 add column b char(10) charset gbk") + tk.MustQuery("show create table t4").Check(testkit.Rows("t4 CREATE TABLE `t4` (\n" + + " `a` char(10) DEFAULT NULL,\n" + + " `b` char(10) CHARACTER SET gbk COLLATE gbk_chinese_ci DEFAULT NULL\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + )) + tk.MustExec("create table t5(a char(20), b char(20) charset utf8, c binary) charset gbk collate gbk_bin") + + tk.MustExec("create database test_gbk charset gbk") + tk.MustExec("use test_gbk") + tk.MustExec("create table t1(a char(10))") + tk.MustQuery("show create table t1").Check(testkit.Rows("t1 CREATE TABLE `t1` (\n" + + " `a` char(10) DEFAULT NULL\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=gbk COLLATE=gbk_chinese_ci", + )) +} + +func TestCharsetFeatureCollation(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t" + + "(ascii_char char(10) character set ascii," + + "gbk_char char(10) character set gbk collate gbk_bin," + + "latin_char char(10) character set latin1," + + "utf8mb4_char char(10) character set utf8mb4)", + ) + tk.MustExec("insert into t values ('a', 'a', 'a', 'a'), ('a', '啊', '€', 'ㅂ')") + tk.MustQuery("select collation(concat(ascii_char, gbk_char)) from t").Check(testkit.Rows("gbk_bin", "gbk_bin")) + tk.MustQuery("select collation(concat(gbk_char, ascii_char)) from t").Check(testkit.Rows("gbk_bin", "gbk_bin")) + tk.MustQuery("select collation(concat(utf8mb4_char, gbk_char)) from 
t").Check(testkit.Rows("utf8mb4_bin", "utf8mb4_bin")) + tk.MustQuery("select collation(concat(gbk_char, utf8mb4_char)) from t").Check(testkit.Rows("utf8mb4_bin", "utf8mb4_bin")) + tk.MustQuery("select collation(concat('啊', convert('啊' using gbk) collate gbk_bin))").Check(testkit.Rows("gbk_bin")) + tk.MustQuery("select collation(concat(_latin1 'a', convert('啊' using gbk) collate gbk_bin))").Check(testkit.Rows("gbk_bin")) + + tk.MustGetErrCode("select collation(concat(latin_char, gbk_char)) from t", mysql.ErrCantAggregate2collations) + tk.MustGetErrCode("select collation(concat(convert('€' using latin1), convert('啊' using gbk) collate gbk_bin))", mysql.ErrCantAggregate2collations) + tk.MustGetErrCode("select collation(concat(utf8mb4_char, gbk_char collate gbk_bin)) from t", mysql.ErrCantAggregate2collations) + tk.MustGetErrCode("select collation(concat('ㅂ', convert('啊' using gbk) collate gbk_bin))", mysql.ErrCantAggregate2collations) + tk.MustGetErrCode("select collation(concat(ascii_char collate ascii_bin, gbk_char)) from t", mysql.ErrCantAggregate2collations) +} + +func TestCharsetWithPrefixIndex(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t(a char(20) charset gbk, b char(20) charset gbk, primary key (a(2)))") + tk.MustExec("insert into t values ('a', '中文'), ('中文', '中文'), ('一二三', '一二三'), ('b', '一二三')") + tk.MustQuery("select * from t").Check(testkit.Rows("a 中文", "中文 中文", "一二三 一二三", "b 一二三")) + tk.MustExec("drop table t") + tk.MustExec("create table t(a char(20) charset gbk, b char(20) charset gbk, unique index idx_a(a(2)))") + tk.MustExec("insert into t values ('a', '中文'), ('中文', '中文'), ('一二三', '一二三'), ('b', '一二三')") + tk.MustQuery("select * from t").Check(testkit.Rows("a 中文", "中文 中文", "一二三 一二三", "b 一二三")) +} diff --git a/executor/cluster_table_test.go b/executor/cluster_table_test.go new file mode 100644 index 0000000000000..1cbb2bf3763e8 --- /dev/null +++ b/executor/cluster_table_test.go @@ -0,0 +1,406 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package executor_test + +import ( + "context" + "fmt" + "net" + "os" + "strconv" + "testing" + "time" + + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/auth" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/server" + "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/util" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +func createRPCServer(t *testing.T, dom *domain.Domain) *grpc.Server { + sm := &mockSessionManager1{} + sm.PS = append(sm.PS, &util.ProcessInfo{ + ID: 1, + User: "root", + Host: "127.0.0.1", + Command: mysql.ComQuery, + }) + + lis, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + srv := server.NewRPCServer(config.GetGlobalConfig(), dom, sm) + port := lis.Addr().(*net.TCPAddr).Port + go func() { + err = srv.Serve(lis) + require.NoError(t, err) + }() + + config.UpdateGlobal(func(conf *config.Config) { + conf.Status.StatusPort = uint(port) + }) + + return srv +} + +func TestClusterTableSlowQuery(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + srv := createRPCServer(t, dom) + defer srv.Stop() + + logData0 := "" + logData1 := ` +# Time: 2020-02-15T18:00:01.000000+08:00 +select 1; +# Time: 2020-02-15T19:00:05.000000+08:00 +select 2;` + logData2 := ` +# Time: 2020-02-16T18:00:01.000000+08:00 +select 3; +# Time: 2020-02-16T18:00:05.000000+08:00 +select 4;` + logData3 := ` +# Time: 2020-02-16T19:00:00.000000+08:00 +select 5; +# Time: 2020-02-17T18:00:05.000000+08:00 +select 6;` + logData4 := ` +# Time: 2020-05-14T19:03:54.314615176+08:00 +select 7;` + logData := []string{logData0, logData1, logData2, logData3, logData4} + + fileName0 := "tidb-slow-2020-02-14T19-04-05.01.log" + fileName1 := "tidb-slow-2020-02-15T19-04-05.01.log" + fileName2 := "tidb-slow-2020-02-16T19-04-05.01.log" + fileName3 := "tidb-slow-2020-02-17T18-00-05.01.log" + fileName4 := "tidb-slow.log" + fileNames := []string{fileName0, fileName1, fileName2, fileName3, fileName4} + + prepareLogs(t, logData, fileNames) + defer func() { + removeFiles(t, fileNames) + }() + tk := testkit.NewTestKit(t, store) + loc, err := time.LoadLocation("Asia/Shanghai") + require.NoError(t, err) + tk.Session().GetSessionVars().TimeZone = loc + tk.MustExec("use information_schema") + cases := []struct { + prepareSQL string + sql string + result []string + }{ + { + sql: "select count(*),min(time),max(time) from %s where time > '2019-01-26 21:51:00' and time < now()", + result: []string{"7|2020-02-15 18:00:01.000000|2020-05-14 19:03:54.314615"}, + }, + { + sql: "select count(*),min(time),max(time) from %s where time > '2020-02-15 19:00:00' and time < '2020-02-16 18:00:02'", + result: []string{"2|2020-02-15 19:00:05.000000|2020-02-16 18:00:01.000000"}, + }, + { + sql: "select count(*),min(time),max(time) from %s where time > '2020-02-16 18:00:02' and time < '2020-02-17 17:00:00'", + result: []string{"2|2020-02-16 18:00:05.000000|2020-02-16 19:00:00.000000"}, + }, + { + sql: "select count(*),min(time),max(time) from %s where time > '2020-02-16 18:00:02' and time < '2020-02-17 20:00:00'", + result: []string{"3|2020-02-16 18:00:05.000000|2020-02-17 18:00:05.000000"}, + }, + { + sql: "select count(*),min(time),max(time) from %s", + result: []string{"1|2020-05-14 19:03:54.314615|2020-05-14 19:03:54.314615"}, + }, + { + sql: "select count(*),min(time) from %s where time > '2020-02-16 20:00:00'", + 
result: []string{"1|2020-02-17 18:00:05.000000"}, + }, + { + sql: "select count(*) from %s where time > '2020-02-17 20:00:00'", + result: []string{"0"}, + }, + { + sql: "select query from %s where time > '2019-01-26 21:51:00' and time < now()", + result: []string{"select 1;", "select 2;", "select 3;", "select 4;", "select 5;", "select 6;", "select 7;"}, + }, + // Test for different timezone. + { + prepareSQL: "set @@time_zone = '+00:00'", + sql: "select time from %s where time = '2020-02-17 10:00:05.000000'", + result: []string{"2020-02-17 10:00:05.000000"}, + }, + { + prepareSQL: "set @@time_zone = '+02:00'", + sql: "select time from %s where time = '2020-02-17 12:00:05.000000'", + result: []string{"2020-02-17 12:00:05.000000"}, + }, + // Test for issue 17224 + { + prepareSQL: "set @@time_zone = '+08:00'", + sql: "select time from %s where time = '2020-05-14 19:03:54.314615'", + result: []string{"2020-05-14 19:03:54.314615"}, + }, + } + for _, cas := range cases { + if len(cas.prepareSQL) > 0 { + tk.MustExec(cas.prepareSQL) + } + sql := fmt.Sprintf(cas.sql, "slow_query") + tk.MustQuery(sql).Check(testkit.RowsWithSep("|", cas.result...)) + sql = fmt.Sprintf(cas.sql, "cluster_slow_query") + tk.MustQuery(sql).Check(testkit.RowsWithSep("|", cas.result...)) + } +} + +func TestIssue20236(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + srv := createRPCServer(t, dom) + defer srv.Stop() + + logData0 := "" + logData1 := ` +# Time: 2020-02-15T18:00:01.000000+08:00 +select 1; +# Time: 2020-02-15T19:00:05.000000+08:00 +select 2; +# Time: 2020-02-15T20:00:05.000000+08:00` + logData2 := `select 3; +# Time: 2020-02-16T18:00:01.000000+08:00 +select 4; +# Time: 2020-02-16T18:00:05.000000+08:00 +select 5;` + logData3 := ` +# Time: 2020-02-16T19:00:00.000000+08:00 +select 6; +# Time: 2020-02-17T18:00:05.000000+08:00 +select 7; +# Time: 2020-02-17T19:00:00.000000+08:00` + logData4 := `select 8; +# Time: 2020-02-17T20:00:00.000000+08:00 +select 9 +# Time: 2020-05-14T19:03:54.314615176+08:00 +select 10;` + logData := []string{logData0, logData1, logData2, logData3, logData4} + + fileName0 := "tidb-slow-2020-02-14T19-04-05.01.log" + fileName1 := "tidb-slow-2020-02-15T19-04-05.01.log" + fileName2 := "tidb-slow-2020-02-16T19-04-05.01.log" + fileName3 := "tidb-slow-2020-02-17T18-00-05.01.log" + fileName4 := "tidb-slow.log" + fileNames := []string{fileName0, fileName1, fileName2, fileName3, fileName4} + prepareLogs(t, logData, fileNames) + defer func() { + removeFiles(t, fileNames) + }() + tk := testkit.NewTestKit(t, store) + loc, err := time.LoadLocation("Asia/Shanghai") + require.NoError(t, err) + tk.Session().GetSessionVars().TimeZone = loc + tk.MustExec("use information_schema") + cases := []struct { + prepareSQL string + sql string + result []string + }{ + { + prepareSQL: "set @@time_zone = '+08:00'", + sql: "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000'", + result: []string{"2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"}, + }, + { + prepareSQL: "set @@time_zone = '+08:00'", + sql: "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000' order by time desc", + result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"}, + }, + { + prepareSQL: "set @@time_zone = '+08:00'", + sql: "select time from cluster_slow_query where (time > '2020-02-15 
18:00:00' and time < '2020-02-15 20:01:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time", + result: []string{"2020-02-15 18:00:01.000000", "2020-02-15 19:00:05.000000", "2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"}, + }, + { + prepareSQL: "set @@time_zone = '+08:00'", + sql: "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:01:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time desc", + result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000", "2020-02-15 19:00:05.000000", "2020-02-15 18:00:01.000000"}, + }, + { + prepareSQL: "set @@time_zone = '+08:00'", + sql: "select count(*) from cluster_slow_query where time > '2020-02-15 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc", + result: []string{"9"}, + }, + { + prepareSQL: "set @@time_zone = '+08:00'", + sql: "select count(*) from cluster_slow_query where (time > '2020-02-16 18:00:00' and time < '2020-05-14 20:00:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-17 20:00:00')", + result: []string{"6"}, + }, + { + prepareSQL: "set @@time_zone = '+08:00'", + sql: "select count(*) from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-02-17 20:00:00.000000' order by time desc", + result: []string{"5"}, + }, + { + prepareSQL: "set @@time_zone = '+08:00'", + sql: "select time from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc limit 3", + result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"}, + }, + } + for _, cas := range cases { + if len(cas.prepareSQL) > 0 { + tk.MustExec(cas.prepareSQL) + } + tk.MustQuery(cas.sql).Check(testkit.RowsWithSep("|", cas.result...)) + } +} + +func TestSQLDigestTextRetriever(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + srv := createRPCServer(t, dom) + defer srv.Stop() + + tkInit := testkit.NewTestKit(t, store) + tkInit.MustExec("use test") + tkInit.MustExec("set global tidb_enable_stmt_summary = 1") + tkInit.MustQuery("select @@global.tidb_enable_stmt_summary").Check(testkit.Rows("1")) + tkInit.MustExec("drop table if exists test_sql_digest_text_retriever") + tkInit.MustExec("create table test_sql_digest_text_retriever (id int primary key, v int)") + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + require.True(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil)) + tk.MustExec("insert into test_sql_digest_text_retriever values (1, 1)") + + insertNormalized, insertDigest := parser.NormalizeDigest("insert into test_sql_digest_text_retriever values (1, 1)") + _, updateDigest := parser.NormalizeDigest("update test_sql_digest_text_retriever set v = v + 1 where id = 1") + r := &expression.SQLDigestTextRetriever{ + SQLDigestsMap: map[string]string{ + insertDigest.String(): "", + updateDigest.String(): "", + }, + } + err := r.RetrieveLocal(context.Background(), tk.Session()) + require.NoError(t, err) + require.Equal(t, insertNormalized, r.SQLDigestsMap[insertDigest.String()]) + require.Equal(t, "", r.SQLDigestsMap[updateDigest.String()]) +} + +func TestFunctionDecodeSQLDigests(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + srv := createRPCServer(t, dom) + defer srv.Stop() + + tk := 
testkit.NewTestKit(t, store) + tk.MustExec("use test") + require.True(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil)) + tk.MustExec("set global tidb_enable_stmt_summary = 1") + tk.MustQuery("select @@global.tidb_enable_stmt_summary").Check(testkit.Rows("1")) + tk.MustExec("drop table if exists test_func_decode_sql_digests") + tk.MustExec("create table test_func_decode_sql_digests(id int primary key, v int)") + + q1 := "begin" + norm1, digest1 := parser.NormalizeDigest(q1) + q2 := "select @@tidb_current_ts" + norm2, digest2 := parser.NormalizeDigest(q2) + q3 := "select id, v from test_func_decode_sql_digests where id = 1 for update" + norm3, digest3 := parser.NormalizeDigest(q3) + + // The TIDB_DECODE_SQL_DIGESTS function doesn't actually do any "decoding"; instead it queries `statements_summary` and its + // variations for the corresponding statements. + // Execute the statements so that the queries will be saved into the statements_summary table. + tk.MustExec(q1) + // Save the ts to query the transaction from tidb_trx. + ts, err := strconv.ParseUint(tk.MustQuery(q2).Rows()[0][0].(string), 10, 64) + require.NoError(t, err) + require.Greater(t, ts, uint64(0)) + tk.MustExec(q3) + tk.MustExec("rollback") + + // Test statement truncation. + decoded := fmt.Sprintf(`["%s","%s","%s"]`, norm1, norm2, norm3) + digests := fmt.Sprintf(`["%s","%s","%s"]`, digest1, digest2, digest3) + tk.MustQuery("select tidb_decode_sql_digests(?, 0)", digests).Check(testkit.Rows(decoded)) + // The three queries are respectively shorter than, equal to, and longer than the truncation length. + tk.MustQuery("select tidb_decode_sql_digests(?, ?)", digests, len(norm2)).Check(testkit.Rows( + "[\"begin\",\"select @@tidb_current_ts\",\"select `id` , `v` from `...\"]")) + + // Empty array. + tk.MustQuery("select tidb_decode_sql_digests('[]')").Check(testkit.Rows("[]")) + + // NULL + tk.MustQuery("select tidb_decode_sql_digests(null)").Check(testkit.Rows("")) + + // Array containing wrong types and nonexistent digests (each maps to null). + tk.MustQuery("select tidb_decode_sql_digests(?)", fmt.Sprintf(`["%s",1,null,"%s",{"a":1},[2],"%s","","abcde"]`, digest1, digest2, digest3)). + Check(testkit.Rows(fmt.Sprintf(`["%s",null,null,"%s",null,null,"%s",null,null]`, norm1, norm2, norm3))) + + // Not a JSON array (produces warnings). + tk.MustQuery(`select tidb_decode_sql_digests('{"a":1}')`).Check(testkit.Rows("")) + tk.MustQuery(`show warnings`).Check(testkit.Rows(`Warning 1210 The argument can't be unmarshalled as JSON array: '{"a":1}'`)) + tk.MustQuery(`select tidb_decode_sql_digests('aabbccdd')`).Check(testkit.Rows("")) + tk.MustQuery(`show warnings`).Check(testkit.Rows(`Warning 1210 The argument can't be unmarshalled as JSON array: 'aabbccdd'`)) + + // Invalid argument count.
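+ // Error 1582 is ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT: incorrect parameter count in the call to a native function.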
+ tk.MustGetErrCode("select tidb_decode_sql_digests('a', 1, 2)", 1582) + tk.MustGetErrCode("select tidb_decode_sql_digests()", 1582) +} + +func TestFunctionDecodeSQLDigestsPrivilege(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + srv := createRPCServer(t, dom) + defer srv.Stop() + + dropUserTk := testkit.NewTestKit(t, store) + require.True(t, dropUserTk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil)) + + tk := testkit.NewTestKit(t, store) + require.True(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil)) + tk.MustExec("create user 'testuser'@'localhost'") + defer dropUserTk.MustExec("drop user 'testuser'@'localhost'") + require.True(t, tk.Session().Auth(&auth.UserIdentity{Username: "testuser", Hostname: "localhost"}, nil, nil)) + tk.MustGetErrMsg("select tidb_decode_sql_digests('[\"aa\"]')", "[expression:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") + + tk = testkit.NewTestKit(t, store) + require.True(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil)) + tk.MustExec("create user 'testuser2'@'localhost'") + defer dropUserTk.MustExec("drop user 'testuser2'@'localhost'") + tk.MustExec("grant process on *.* to 'testuser2'@'localhost'") + require.True(t, tk.Session().Auth(&auth.UserIdentity{Username: "testuser2", Hostname: "localhost"}, nil, nil)) + tk.MustExec("select tidb_decode_sql_digests('[\"aa\"]')") +} + +func prepareLogs(t *testing.T, logData []string, fileNames []string) { + for i, log := range logData { + f, err := os.OpenFile(fileNames[i], os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + require.NoError(t, err) + _, err = f.Write([]byte(log)) + require.NoError(t, err) + require.NoError(t, f.Close()) + } +} + +func removeFiles(t *testing.T, fileNames []string) { + for _, fileName := range fileNames { + require.NoError(t, os.Remove(fileName)) + } +} diff --git a/executor/executor_legacy_test.go b/executor/executor_legacy_test.go index 9c634158c3bea..45897826cc29c 100644 --- a/executor/executor_legacy_test.go +++ b/executor/executor_legacy_test.go @@ -18,26 +18,20 @@ import ( "context" "fmt" "math" - "net" - "os" "reflect" "runtime" "strconv" "strings" - "sync" - "sync/atomic" "testing" "time" . 
"github.com/pingcap/check" "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/executor" - "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" @@ -48,7 +42,6 @@ import ( "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/planner" plannercore "github.com/pingcap/tidb/planner/core" - "github.com/pingcap/tidb/server" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" @@ -62,9 +55,6 @@ import ( "github.com/pingcap/tidb/util/testleak" "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/testutils" - "github.com/tikv/client-go/v2/tikv" - "github.com/tikv/client-go/v2/tikvrpc" - "google.golang.org/grpc" ) func TestT(t *testing.T) { @@ -76,11 +66,8 @@ func TestT(t *testing.T) { var _ = Suite(&testSuite{&baseTestSuite{}}) var _ = Suite(&testSuiteP2{&baseTestSuite{}}) -var _ = SerialSuites(&testSuiteWithCliBaseCharset{}) var _ = Suite(&testSuite2{&baseTestSuite{}}) var _ = Suite(&testSuite3{&baseTestSuite{}}) -var _ = SerialSuites(&testClusterTableSuite{}) -var _ = SerialSuites(&testSerialSuite1{&baseTestSuite{}}) var _ = SerialSuites(&testSerialSuite{&baseTestSuite{}}) type testSuite struct{ *baseTestSuite } @@ -1197,87 +1184,6 @@ func (s *testSuite2) TestStaleReadFutureTime(c *C) { c.Assert(tk.Se.GetSessionVars().TxnReadTS.PeakTxnReadTS(), Equals, uint64(0)) } -const ( - checkDDLAddIndexPriority = 1 -) - -type checkRequestClient struct { - tikv.Client - priority kvrpcpb.CommandPri - lowPriorityCnt uint32 - mu struct { - sync.RWMutex - checkFlags uint32 - } -} - -func (c *checkRequestClient) getCheckPriority() kvrpcpb.CommandPri { - return (kvrpcpb.CommandPri)(atomic.LoadInt32((*int32)(&c.priority))) -} - -func (c *checkRequestClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { - resp, err := c.Client.SendRequest(ctx, addr, req, timeout) - c.mu.RLock() - checkFlags := c.mu.checkFlags - c.mu.RUnlock() - if checkFlags == checkDDLAddIndexPriority { - if req.Type == tikvrpc.CmdScan { - if c.getCheckPriority() != req.Priority { - return nil, errors.New("fail to set priority") - } - } else if req.Type == tikvrpc.CmdPrewrite { - if c.getCheckPriority() == kvrpcpb.CommandPri_Low { - atomic.AddUint32(&c.lowPriorityCnt, 1) - } - } - } - return resp, err -} - -type testSuiteWithCliBaseCharset struct { - testSuiteWithCliBase -} - -type testSuiteWithCliBase struct { - store kv.Storage - dom *domain.Domain - cli *checkRequestClient -} - -func (s *testSuiteWithCliBase) SetUpSuite(c *C) { - cli := &checkRequestClient{} - hijackClient := func(c tikv.Client) tikv.Client { - cli.Client = c - return cli - } - s.cli = cli - - var err error - s.store, err = mockstore.NewMockStore( - mockstore.WithClientHijacker(hijackClient), - ) - c.Assert(err, IsNil) - session.SetStatsLease(0) - s.dom, err = session.BootstrapSession(s.store) - c.Assert(err, IsNil) - s.dom.SetStatsUpdating(true) -} - -func (s *testSuiteWithCliBase) TearDownSuite(c *C) { - s.dom.Close() - s.store.Close() -} - -func (s *testSuiteWithCliBase) TearDownTest(c *C) { - tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test") - r := tk.MustQuery("show tables") - for _, tb := range r.Rows() { - tableName := tb[0] - 
tk.MustExec(fmt.Sprintf("drop table %v", tableName)) - } -} - func (s *testSuite3) TestYearTypeDeleteIndex(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") @@ -1610,26 +1516,6 @@ func (s *testSuite3) TearDownTest(c *C) { } } -type testSerialSuite1 struct { - *baseTestSuite -} - -func (s *testSerialSuite1) TearDownTest(c *C) { - tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test") - r := tk.MustQuery("show full tables") - for _, tb := range r.Rows() { - tableName := tb[0] - if tb[1] == "VIEW" { - tk.MustExec(fmt.Sprintf("drop view %v", tableName)) - } else if tb[1] == "SEQUENCE" { - tk.MustExec(fmt.Sprintf("drop sequence %v", tableName)) - } else { - tk.MustExec(fmt.Sprintf("drop table %v", tableName)) - } - } -} - func (s *testSuiteP2) TestStrToDateBuiltin(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustQuery(`select str_to_date('20190101','%Y%m%d%!') from dual`).Check(testkit.Rows("2019-01-01")) @@ -1724,88 +1610,6 @@ func (s *testSuiteP2) TestIssue10435(c *C) { ) } -func (s *testSuiteWithCliBaseCharset) TestCharsetFeature(c *C) { - tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test") - - tk.MustExec("set names gbk;") - tk.MustQuery("select @@character_set_connection;").Check(testkit.Rows("gbk")) - tk.MustQuery("select @@collation_connection;").Check(testkit.Rows("gbk_chinese_ci")) - tk.MustExec("set @@character_set_client=gbk;") - tk.MustQuery("select @@character_set_client;").Check(testkit.Rows("gbk")) - tk.MustExec("set names utf8mb4;") - tk.MustExec("set @@character_set_connection=gbk;") - tk.MustQuery("select @@character_set_connection;").Check(testkit.Rows("gbk")) - tk.MustQuery("select @@collation_connection;").Check(testkit.Rows("gbk_chinese_ci")) - - tk.MustGetErrCode("select _gbk 'a';", errno.ErrUnknownCharacterSet) - - tk.MustExec("use test") - tk.MustExec("create table t1(a char(10) charset gbk);") - tk.MustExec("create table t2(a char(10) charset gbk collate gbk_bin);") - tk.MustExec("create table t3(a char(10)) charset gbk;") - tk.MustExec("alter table t3 add column b char(10) charset gbk;") - tk.MustQuery("show create table t3").Check(testkit.Rows("t3 CREATE TABLE `t3` (\n" + - " `a` char(10) DEFAULT NULL,\n" + - " `b` char(10) DEFAULT NULL\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=gbk COLLATE=gbk_chinese_ci", - )) - tk.MustExec("create table t4(a char(10));") - tk.MustExec("alter table t4 add column b char(10) charset gbk;") - tk.MustQuery("show create table t4").Check(testkit.Rows("t4 CREATE TABLE `t4` (\n" + - " `a` char(10) DEFAULT NULL,\n" + - " `b` char(10) CHARACTER SET gbk COLLATE gbk_chinese_ci DEFAULT NULL\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - tk.MustExec("create table t5(a char(20), b char(20) charset utf8, c binary) charset gbk collate gbk_bin;") - - tk.MustExec("create database test_gbk charset gbk;") - tk.MustExec("use test_gbk") - tk.MustExec("create table t1(a char(10));") - tk.MustQuery("show create table t1").Check(testkit.Rows("t1 CREATE TABLE `t1` (\n" + - " `a` char(10) DEFAULT NULL\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=gbk COLLATE=gbk_chinese_ci", - )) -} - -func (s *testSuiteWithCliBaseCharset) TestCharsetFeatureCollation(c *C) { - tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t" + - "(ascii_char char(10) character set ascii," + - "gbk_char char(10) character set gbk collate gbk_bin," + - "latin_char char(10) character set latin1," + - "utf8mb4_char char(10) character set utf8mb4)", - 
) - tk.MustExec("insert into t values ('a', 'a', 'a', 'a'), ('a', '啊', '€', 'ㅂ');") - tk.MustQuery("select collation(concat(ascii_char, gbk_char)) from t;").Check(testkit.Rows("gbk_bin", "gbk_bin")) - tk.MustQuery("select collation(concat(gbk_char, ascii_char)) from t;").Check(testkit.Rows("gbk_bin", "gbk_bin")) - tk.MustQuery("select collation(concat(utf8mb4_char, gbk_char)) from t;").Check(testkit.Rows("utf8mb4_bin", "utf8mb4_bin")) - tk.MustQuery("select collation(concat(gbk_char, utf8mb4_char)) from t;").Check(testkit.Rows("utf8mb4_bin", "utf8mb4_bin")) - tk.MustQuery("select collation(concat('啊', convert('啊' using gbk) collate gbk_bin));").Check(testkit.Rows("gbk_bin")) - tk.MustQuery("select collation(concat(_latin1 'a', convert('啊' using gbk) collate gbk_bin));").Check(testkit.Rows("gbk_bin")) - - tk.MustGetErrCode("select collation(concat(latin_char, gbk_char)) from t;", mysql.ErrCantAggregate2collations) - tk.MustGetErrCode("select collation(concat(convert('€' using latin1), convert('啊' using gbk) collate gbk_bin));", mysql.ErrCantAggregate2collations) - tk.MustGetErrCode("select collation(concat(utf8mb4_char, gbk_char collate gbk_bin)) from t;", mysql.ErrCantAggregate2collations) - tk.MustGetErrCode("select collation(concat('ㅂ', convert('啊' using gbk) collate gbk_bin));", mysql.ErrCantAggregate2collations) - tk.MustGetErrCode("select collation(concat(ascii_char collate ascii_bin, gbk_char)) from t;", mysql.ErrCantAggregate2collations) -} - -func (s *testSuiteWithCliBaseCharset) TestCharsetWithPrefixIndex(c *C) { - tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a char(20) charset gbk, b char(20) charset gbk, primary key (a(2)));") - tk.MustExec("insert into t values ('a', '中文'), ('中文', '中文'), ('一二三', '一二三'), ('b', '一二三');") - tk.MustQuery("select * from t").Check(testkit.Rows("a 中文", "中文 中文", "一二三 一二三", "b 一二三")) - tk.MustExec("drop table t") - tk.MustExec("create table t(a char(20) charset gbk, b char(20) charset gbk, unique index idx_a(a(2)));") - tk.MustExec("insert into t values ('a', '中文'), ('中文', '中文'), ('一二三', '一二三'), ('b', '一二三');") - tk.MustQuery("select * from t").Check(testkit.Rows("a 中文", "中文 中文", "一二三 一二三", "b 一二三")) -} - func (s *testSuite) TestSummaryFailedUpdate(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") @@ -2298,374 +2102,6 @@ func (s *testSuiteP2) TestPointUpdatePreparedPlanWithCommitMode(c *C) { tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 11")) } -type testClusterTableSuite struct { - testSuiteWithCliBase - rpcserver *grpc.Server - listenAddr string -} - -func (s *testClusterTableSuite) SetUpSuite(c *C) { - s.testSuiteWithCliBase.SetUpSuite(c) - s.rpcserver, s.listenAddr = s.setUpRPCService(c, "127.0.0.1:0") -} - -func (s *testClusterTableSuite) setUpRPCService(c *C, addr string) (*grpc.Server, string) { - sm := &mockSessionManager1{} - sm.PS = append(sm.PS, &util.ProcessInfo{ - ID: 1, - User: "root", - Host: "127.0.0.1", - Command: mysql.ComQuery, - }) - lis, err := net.Listen("tcp", addr) - c.Assert(err, IsNil) - srv := server.NewRPCServer(config.GetGlobalConfig(), s.dom, sm) - port := lis.Addr().(*net.TCPAddr).Port - addr = fmt.Sprintf("127.0.0.1:%d", port) - go func() { - err = srv.Serve(lis) - c.Assert(err, IsNil) - }() - config.UpdateGlobal(func(conf *config.Config) { - conf.Status.StatusPort = uint(port) - }) - return srv, addr -} -func (s *testClusterTableSuite) TearDownSuite(c *C) { - if s.rpcserver != nil { - 
s.rpcserver.Stop() - s.rpcserver = nil - } - s.testSuiteWithCliBase.TearDownSuite(c) -} - -func (s *testClusterTableSuite) TestSlowQuery(c *C) { - logData0 := "" - logData1 := ` -# Time: 2020-02-15T18:00:01.000000+08:00 -select 1; -# Time: 2020-02-15T19:00:05.000000+08:00 -select 2;` - logData2 := ` -# Time: 2020-02-16T18:00:01.000000+08:00 -select 3; -# Time: 2020-02-16T18:00:05.000000+08:00 -select 4;` - logData3 := ` -# Time: 2020-02-16T19:00:00.000000+08:00 -select 5; -# Time: 2020-02-17T18:00:05.000000+08:00 -select 6;` - logData4 := ` -# Time: 2020-05-14T19:03:54.314615176+08:00 -select 7;` - logData := []string{logData0, logData1, logData2, logData3, logData4} - - fileName0 := "tidb-slow-2020-02-14T19-04-05.01.log" - fileName1 := "tidb-slow-2020-02-15T19-04-05.01.log" - fileName2 := "tidb-slow-2020-02-16T19-04-05.01.log" - fileName3 := "tidb-slow-2020-02-17T18-00-05.01.log" - fileName4 := "tidb-slow.log" - fileNames := []string{fileName0, fileName1, fileName2, fileName3, fileName4} - - prepareLogs(c, logData, fileNames) - defer func() { - removeFiles(fileNames) - }() - tk := testkit.NewTestKitWithInit(c, s.store) - loc, err := time.LoadLocation("Asia/Shanghai") - c.Assert(err, IsNil) - tk.Se.GetSessionVars().TimeZone = loc - tk.MustExec("use information_schema") - cases := []struct { - prepareSQL string - sql string - result []string - }{ - { - sql: "select count(*),min(time),max(time) from %s where time > '2019-01-26 21:51:00' and time < now()", - result: []string{"7|2020-02-15 18:00:01.000000|2020-05-14 19:03:54.314615"}, - }, - { - sql: "select count(*),min(time),max(time) from %s where time > '2020-02-15 19:00:00' and time < '2020-02-16 18:00:02'", - result: []string{"2|2020-02-15 19:00:05.000000|2020-02-16 18:00:01.000000"}, - }, - { - sql: "select count(*),min(time),max(time) from %s where time > '2020-02-16 18:00:02' and time < '2020-02-17 17:00:00'", - result: []string{"2|2020-02-16 18:00:05.000000|2020-02-16 19:00:00.000000"}, - }, - { - sql: "select count(*),min(time),max(time) from %s where time > '2020-02-16 18:00:02' and time < '2020-02-17 20:00:00'", - result: []string{"3|2020-02-16 18:00:05.000000|2020-02-17 18:00:05.000000"}, - }, - { - sql: "select count(*),min(time),max(time) from %s", - result: []string{"1|2020-05-14 19:03:54.314615|2020-05-14 19:03:54.314615"}, - }, - { - sql: "select count(*),min(time) from %s where time > '2020-02-16 20:00:00'", - result: []string{"1|2020-02-17 18:00:05.000000"}, - }, - { - sql: "select count(*) from %s where time > '2020-02-17 20:00:00'", - result: []string{"0"}, - }, - { - sql: "select query from %s where time > '2019-01-26 21:51:00' and time < now()", - result: []string{"select 1;", "select 2;", "select 3;", "select 4;", "select 5;", "select 6;", "select 7;"}, - }, - // Test for different timezone. 
- { - prepareSQL: "set @@time_zone = '+00:00'", - sql: "select time from %s where time = '2020-02-17 10:00:05.000000'", - result: []string{"2020-02-17 10:00:05.000000"}, - }, - { - prepareSQL: "set @@time_zone = '+02:00'", - sql: "select time from %s where time = '2020-02-17 12:00:05.000000'", - result: []string{"2020-02-17 12:00:05.000000"}, - }, - // Test for issue 17224 - { - prepareSQL: "set @@time_zone = '+08:00'", - sql: "select time from %s where time = '2020-05-14 19:03:54.314615'", - result: []string{"2020-05-14 19:03:54.314615"}, - }, - } - for _, cas := range cases { - if len(cas.prepareSQL) > 0 { - tk.MustExec(cas.prepareSQL) - } - sql := fmt.Sprintf(cas.sql, "slow_query") - tk.MustQuery(sql).Check(testkit.RowsWithSep("|", cas.result...)) - sql = fmt.Sprintf(cas.sql, "cluster_slow_query") - tk.MustQuery(sql).Check(testkit.RowsWithSep("|", cas.result...)) - } -} - -func (s *testClusterTableSuite) TestIssue20236(c *C) { - logData0 := "" - logData1 := ` -# Time: 2020-02-15T18:00:01.000000+08:00 -select 1; -# Time: 2020-02-15T19:00:05.000000+08:00 -select 2; -# Time: 2020-02-15T20:00:05.000000+08:00` - logData2 := `select 3; -# Time: 2020-02-16T18:00:01.000000+08:00 -select 4; -# Time: 2020-02-16T18:00:05.000000+08:00 -select 5;` - logData3 := ` -# Time: 2020-02-16T19:00:00.000000+08:00 -select 6; -# Time: 2020-02-17T18:00:05.000000+08:00 -select 7; -# Time: 2020-02-17T19:00:00.000000+08:00` - logData4 := `select 8; -# Time: 2020-02-17T20:00:00.000000+08:00 -select 9 -# Time: 2020-05-14T19:03:54.314615176+08:00 -select 10;` - logData := []string{logData0, logData1, logData2, logData3, logData4} - - fileName0 := "tidb-slow-2020-02-14T19-04-05.01.log" - fileName1 := "tidb-slow-2020-02-15T19-04-05.01.log" - fileName2 := "tidb-slow-2020-02-16T19-04-05.01.log" - fileName3 := "tidb-slow-2020-02-17T18-00-05.01.log" - fileName4 := "tidb-slow.log" - fileNames := []string{fileName0, fileName1, fileName2, fileName3, fileName4} - prepareLogs(c, logData, fileNames) - defer func() { - removeFiles(fileNames) - }() - tk := testkit.NewTestKitWithInit(c, s.store) - loc, err := time.LoadLocation("Asia/Shanghai") - c.Assert(err, IsNil) - tk.Se.GetSessionVars().TimeZone = loc - tk.MustExec("use information_schema") - cases := []struct { - prepareSQL string - sql string - result []string - }{ - { - prepareSQL: "set @@time_zone = '+08:00'", - sql: "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000'", - result: []string{"2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"}, - }, - { - prepareSQL: "set @@time_zone = '+08:00'", - sql: "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000' order by time desc", - result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"}, - }, - { - prepareSQL: "set @@time_zone = '+08:00'", - sql: "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:01:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time", - result: []string{"2020-02-15 18:00:01.000000", "2020-02-15 19:00:05.000000", "2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"}, - }, - { - prepareSQL: "set @@time_zone = '+08:00'", - sql: "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:01:00') or (time > '2020-02-17 18:00:00' and time < 
'2020-05-14 20:00:00') order by time desc", - result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000", "2020-02-15 19:00:05.000000", "2020-02-15 18:00:01.000000"}, - }, - { - prepareSQL: "set @@time_zone = '+08:00'", - sql: "select count(*) from cluster_slow_query where time > '2020-02-15 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc", - result: []string{"9"}, - }, - { - prepareSQL: "set @@time_zone = '+08:00'", - sql: "select count(*) from cluster_slow_query where (time > '2020-02-16 18:00:00' and time < '2020-05-14 20:00:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-17 20:00:00')", - result: []string{"6"}, - }, - { - prepareSQL: "set @@time_zone = '+08:00'", - sql: "select count(*) from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-02-17 20:00:00.000000' order by time desc", - result: []string{"5"}, - }, - { - prepareSQL: "set @@time_zone = '+08:00'", - sql: "select time from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc limit 3", - result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"}, - }, - } - for _, cas := range cases { - if len(cas.prepareSQL) > 0 { - tk.MustExec(cas.prepareSQL) - } - tk.MustQuery(cas.sql).Check(testkit.RowsWithSep("|", cas.result...)) - } -} - -func (s *testClusterTableSuite) TestSQLDigestTextRetriever(c *C) { - tkInit := testkit.NewTestKitWithInit(c, s.store) - tkInit.MustExec("set global tidb_enable_stmt_summary = 1") - tkInit.MustQuery("select @@global.tidb_enable_stmt_summary").Check(testkit.Rows("1")) - tkInit.MustExec("drop table if exists test_sql_digest_text_retriever") - tkInit.MustExec("create table test_sql_digest_text_retriever (id int primary key, v int)") - - tk := testkit.NewTestKitWithInit(c, s.store) - c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue) - tk.MustExec("insert into test_sql_digest_text_retriever values (1, 1)") - - insertNormalized, insertDigest := parser.NormalizeDigest("insert into test_sql_digest_text_retriever values (1, 1)") - _, updateDigest := parser.NormalizeDigest("update test_sql_digest_text_retriever set v = v + 1 where id = 1") - r := &expression.SQLDigestTextRetriever{ - SQLDigestsMap: map[string]string{ - insertDigest.String(): "", - updateDigest.String(): "", - }, - } - err := r.RetrieveLocal(context.Background(), tk.Se) - c.Assert(err, IsNil) - c.Assert(r.SQLDigestsMap[insertDigest.String()], Equals, insertNormalized) - c.Assert(r.SQLDigestsMap[updateDigest.String()], Equals, "") -} - -func (s *testClusterTableSuite) TestFunctionDecodeSQLDigests(c *C) { - tk := testkit.NewTestKitWithInit(c, s.store) - c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue) - tk.MustExec("set global tidb_enable_stmt_summary = 1") - tk.MustQuery("select @@global.tidb_enable_stmt_summary").Check(testkit.Rows("1")) - tk.MustExec("drop table if exists test_func_decode_sql_digests") - tk.MustExec("create table test_func_decode_sql_digests(id int primary key, v int)") - - q1 := "begin" - norm1, digest1 := parser.NormalizeDigest(q1) - q2 := "select @@tidb_current_ts" - norm2, digest2 := parser.NormalizeDigest(q2) - q3 := "select id, v from test_func_decode_sql_digests where id = 1 for update" - norm3, digest3 := parser.NormalizeDigest(q3) - - // TIDB_DECODE_SQL_DIGESTS function doesn't actually do "decoding", instead it 
queries `statements_summary` and it's - // variations for the corresponding statements. - // Execute the statements so that the queries will be saved into statements_summary table. - tk.MustExec(q1) - // Save the ts to query the transaction from tidb_trx. - ts, err := strconv.ParseUint(tk.MustQuery(q2).Rows()[0][0].(string), 10, 64) - c.Assert(err, IsNil) - c.Assert(ts, Greater, uint64(0)) - tk.MustExec(q3) - tk.MustExec("rollback") - - // Test statements truncating. - decoded := fmt.Sprintf(`["%s","%s","%s"]`, norm1, norm2, norm3) - digests := fmt.Sprintf(`["%s","%s","%s"]`, digest1, digest2, digest3) - tk.MustQuery("select tidb_decode_sql_digests(?, 0)", digests).Check(testkit.Rows(decoded)) - // The three queries are shorter than truncate length, equal to truncate length and longer than truncate length respectively. - tk.MustQuery("select tidb_decode_sql_digests(?, ?)", digests, len(norm2)).Check(testkit.Rows( - "[\"begin\",\"select @@tidb_current_ts\",\"select `id` , `v` from `...\"]")) - - // Empty array. - tk.MustQuery("select tidb_decode_sql_digests('[]')").Check(testkit.Rows("[]")) - - // NULL - tk.MustQuery("select tidb_decode_sql_digests(null)").Check(testkit.Rows("")) - - // Array containing wrong types and not-existing digests (maps to null). - tk.MustQuery("select tidb_decode_sql_digests(?)", fmt.Sprintf(`["%s",1,null,"%s",{"a":1},[2],"%s","","abcde"]`, digest1, digest2, digest3)). - Check(testkit.Rows(fmt.Sprintf(`["%s",null,null,"%s",null,null,"%s",null,null]`, norm1, norm2, norm3))) - - // Not JSON array (throws warnings) - tk.MustQuery(`select tidb_decode_sql_digests('{"a":1}')`).Check(testkit.Rows("")) - tk.MustQuery(`show warnings`).Check(testkit.Rows(`Warning 1210 The argument can't be unmarshalled as JSON array: '{"a":1}'`)) - tk.MustQuery(`select tidb_decode_sql_digests('aabbccdd')`).Check(testkit.Rows("")) - tk.MustQuery(`show warnings`).Check(testkit.Rows(`Warning 1210 The argument can't be unmarshalled as JSON array: 'aabbccdd'`)) - - // Invalid argument count. 
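For context on the contract these removed assertions pin down: TIDB_DECODE_SQL_DIGESTS takes a JSON array of digests and returns a JSON array of the matching normalized statements recorded in statements_summary, mapping unknown digests and non-string entries to null. A minimal sketch of that mapping rule, against a hypothetical in-memory summary (illustration only, not TiDB code):

func decodeDigestsSketch(entries []interface{}, summary map[string]string) []interface{} {
	// A string digest with a recorded normalized statement maps to that
	// statement; anything else (unknown digest, number, object, array,
	// empty string) maps to null, as the expected rows above show.
	out := make([]interface{}, 0, len(entries))
	for _, e := range entries {
		if digest, ok := e.(string); ok {
			if norm, found := summary[digest]; found {
				out = append(out, norm)
				continue
			}
		}
		out = append(out, nil)
	}
	return out
}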
- tk.MustGetErrCode("select tidb_decode_sql_digests('a', 1, 2)", 1582) - tk.MustGetErrCode("select tidb_decode_sql_digests()", 1582) -} - -func (s *testClusterTableSuite) TestFunctionDecodeSQLDigestsPrivilege(c *C) { - dropUserTk := testkit.NewTestKitWithInit(c, s.store) - c.Assert(dropUserTk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue) - - tk := testkit.NewTestKitWithInit(c, s.store) - c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue) - tk.MustExec("create user 'testuser'@'localhost'") - defer dropUserTk.MustExec("drop user 'testuser'@'localhost'") - c.Assert(tk.Se.Auth(&auth.UserIdentity{ - Username: "testuser", - Hostname: "localhost", - }, nil, nil), IsTrue) - err := tk.ExecToErr("select tidb_decode_sql_digests('[\"aa\"]')") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "[expression:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") - - tk = testkit.NewTestKitWithInit(c, s.store) - c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue) - tk.MustExec("create user 'testuser2'@'localhost'") - defer dropUserTk.MustExec("drop user 'testuser2'@'localhost'") - tk.MustExec("grant process on *.* to 'testuser2'@'localhost'") - c.Assert(tk.Se.Auth(&auth.UserIdentity{ - Username: "testuser2", - Hostname: "localhost", - }, nil, nil), IsTrue) - _ = tk.MustQuery("select tidb_decode_sql_digests('[\"aa\"]')") -} - -func prepareLogs(c *C, logData []string, fileNames []string) { - writeFile := func(file string, data string) { - f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - c.Assert(err, IsNil) - _, err = f.Write([]byte(data)) - c.Assert(f.Close(), IsNil) - c.Assert(err, IsNil) - } - - for i, log := range logData { - writeFile(fileNames[i], log) - } -} - -func removeFiles(fileNames []string) { - for _, fileName := range fileNames { - os.Remove(fileName) - } -} - func (s *testSuiteP2) TestApplyCache(c *C) { tk := testkit.NewTestKit(c, s.store) @@ -2745,61 +2181,6 @@ func (s *testSuite) TestIssue19372(c *C) { tk.MustQuery("select (select t2.c_str from t2 where t2.c_str <= t1.c_str and t2.c_int in (1, 2) order by t2.c_str limit 1) x from t1 order by c_int;").Check(testkit.Rows("a", "a", "a")) } -func (s *testSerialSuite1) TestIndexLookupRuntimeStats(c *C) { - tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test;") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1 (a int, b int, index(a))") - tk.MustExec("insert into t1 values (1,2),(2,3),(3,4)") - sql := "explain analyze select * from t1 use index(a) where a > 1;" - rows := tk.MustQuery(sql).Rows() - c.Assert(len(rows), Equals, 3) - explain := fmt.Sprintf("%v", rows[0]) - c.Assert(explain, Matches, ".*time:.*loops:.*index_task:.*table_task: {total_time.*num.*concurrency.*}.*") - indexExplain := fmt.Sprintf("%v", rows[1]) - tableExplain := fmt.Sprintf("%v", rows[2]) - c.Assert(indexExplain, Matches, ".*time:.*loops:.*cop_task:.*") - c.Assert(tableExplain, Matches, ".*time:.*loops:.*cop_task:.*") -} - -func (s *testSerialSuite1) TestHashAggRuntimeStats(c *C) { - tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test;") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1 (a int, b int)") - tk.MustExec("insert into t1 values (1,2),(2,3),(3,4)") - sql := "explain analyze SELECT /*+ HASH_AGG() */ count(*) FROM t1 WHERE a < 10;" - rows := tk.MustQuery(sql).Rows() - c.Assert(len(rows), Equals, 5) - explain := 
fmt.Sprintf("%v", rows[0]) - c.Assert(explain, Matches, ".*time:.*loops:.*partial_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*final_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*") -} - -func (s *testSerialSuite1) TestIndexMergeRuntimeStats(c *C) { - tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test;") - tk.MustExec("drop table if exists t1") - tk.MustExec("set @@tidb_enable_index_merge = 1") - tk.MustExec("create table t1(id int primary key, a int, b int, c int, d int)") - tk.MustExec("create index t1a on t1(a)") - tk.MustExec("create index t1b on t1(b)") - tk.MustExec("insert into t1 values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5)") - sql := "explain analyze select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4;" - rows := tk.MustQuery(sql).Rows() - c.Assert(len(rows), Equals, 4) - explain := fmt.Sprintf("%v", rows[0]) - c.Assert(explain, Matches, ".*time:.*loops:.*index_task:{fetch_handle:.*, merge:.*}.*table_task:{num.*concurrency.*fetch_row.*wait_time.*}.*") - tableRangeExplain := fmt.Sprintf("%v", rows[1]) - indexExplain := fmt.Sprintf("%v", rows[2]) - tableExplain := fmt.Sprintf("%v", rows[3]) - c.Assert(tableRangeExplain, Matches, ".*time:.*loops:.*cop_task:.*") - c.Assert(indexExplain, Matches, ".*time:.*loops:.*cop_task:.*") - c.Assert(tableExplain, Matches, ".*time:.*loops:.*cop_task:.*") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - sql = "select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4 order by a" - tk.MustQuery(sql).Check(testkit.Rows("1 1 1 1 1", "5 5 5 5 5")) -} - func (s *testSuite) TestCollectDMLRuntimeStats(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") diff --git a/executor/executor_test.go b/executor/executor_test.go index cc37da62feca9..1474467d70162 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -2999,3 +2999,62 @@ func TestUpdateGivenPartitionSet(t *testing.T) { tk.MustExec("insert into t4(a, b) values(1, 1),(2, 2),(3, 3);") tk.MustGetErrMsg("update t4 partition(p0) set a = 5 where a = 2", "[table:1748]Found a row not matching the given partition set") } + +func TestIndexLookupRuntimeStats(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1 (a int, b int, index(a))") + tk.MustExec("insert into t1 values (1,2),(2,3),(3,4)") + rows := tk.MustQuery("explain analyze select * from t1 use index(a) where a > 1").Rows() + require.Len(t, rows, 3) + explain := fmt.Sprintf("%v", rows[0]) + require.Regexp(t, ".*time:.*loops:.*index_task:.*table_task: {total_time.*num.*concurrency.*}.*", explain) + indexExplain := fmt.Sprintf("%v", rows[1]) + tableExplain := fmt.Sprintf("%v", rows[2]) + require.Regexp(t, ".*time:.*loops:.*cop_task:.*", indexExplain) + require.Regexp(t, ".*time:.*loops:.*cop_task:.*", tableExplain) +} + +func TestHashAggRuntimeStats(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1 (a int, b int)") + tk.MustExec("insert into t1 values (1,2),(2,3),(3,4)") + rows := tk.MustQuery("explain analyze SELECT /*+ HASH_AGG() */ count(*) FROM t1 WHERE a < 10;").Rows() + require.Len(t, rows, 5) + explain := fmt.Sprintf("%v", rows[0]) + pattern := 
".*time:.*loops:.*partial_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*final_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*" + require.Regexp(t, pattern, explain) +} + +func TestIndexMergeRuntimeStats(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_index_merge = 1") + tk.MustExec("create table t1(id int primary key, a int, b int, c int, d int)") + tk.MustExec("create index t1a on t1(a)") + tk.MustExec("create index t1b on t1(b)") + tk.MustExec("insert into t1 values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5)") + rows := tk.MustQuery("explain analyze select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4;").Rows() + require.Len(t, rows, 4) + explain := fmt.Sprintf("%v", rows[0]) + pattern := ".*time:.*loops:.*index_task:{fetch_handle:.*, merge:.*}.*table_task:{num.*concurrency.*fetch_row.*wait_time.*}.*" + require.Regexp(t, pattern, explain) + tableRangeExplain := fmt.Sprintf("%v", rows[1]) + indexExplain := fmt.Sprintf("%v", rows[2]) + tableExplain := fmt.Sprintf("%v", rows[3]) + require.Regexp(t, ".*time:.*loops:.*cop_task:.*", tableRangeExplain) + require.Regexp(t, ".*time:.*loops:.*cop_task:.*", indexExplain) + require.Regexp(t, ".*time:.*loops:.*cop_task:.*", tableExplain) + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustQuery("select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4 order by a").Check(testkit.Rows("1 1 1 1 1", "5 5 5 5 5")) +} diff --git a/executor/hash_table_test.go b/executor/hash_table_test.go index 50ef8adb72f5e..637673f2b2cc2 100644 --- a/executor/hash_table_test.go +++ b/executor/hash_table_test.go @@ -119,14 +119,14 @@ func testHashRowContainer(t *testing.T, hashFunc func() hash.Hash64, spill bool) chk1, _ := initBuildChunk(numRows) hCtx := &hashContext{ - allTypes: colTypes, + allTypes: colTypes[1:3], keyColIdx: []int{1, 2}, } hCtx.hasNull = make([]bool, numRows) for i := 0; i < numRows; i++ { hCtx.hashVals = append(hCtx.hashVals, hashFunc()) } - rowContainer := newHashRowContainer(sctx, 0, hCtx, hCtx.allTypes) + rowContainer := newHashRowContainer(sctx, 0, hCtx, colTypes) copiedRC = rowContainer.ShallowCopy() tracker := rowContainer.GetMemTracker() tracker.SetLabel(memory.LabelForBuildSideResult) @@ -150,7 +150,7 @@ func testHashRowContainer(t *testing.T, hashFunc func() hash.Hash64, spill bool) probeChk, probeColType := initProbeChunk(2) probeRow := probeChk.GetRow(1) probeCtx := &hashContext{ - allTypes: probeColType, + allTypes: probeColType[1:3], keyColIdx: []int{1, 2}, } probeCtx.hasNull = make([]bool, 1) diff --git a/executor/point_get_test.go b/executor/point_get_test.go index ff771a0fdb485..780cf2a4b1efe 100644 --- a/executor/point_get_test.go +++ b/executor/point_get_test.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "strings" + "sync" "testing" "time" @@ -683,8 +684,6 @@ func TestPointGetWriteLock(t *testing.T) { } func TestPointGetLockExistKey(t *testing.T) { - var wg util.WaitGroupWrapper - testLock := func(rc bool, key string, tableName string) { store, clean := testkit.CreateMockStore(t) defer clean() @@ -783,6 +782,7 @@ func TestPointGetLockExistKey(t *testing.T) { )) } + var wg sync.WaitGroup for i, one := range []struct { rc bool key string @@ -792,10 +792,12 @@ func TestPointGetLockExistKey(t *testing.T) { {rc: true, key: "primary key"}, 
{rc: true, key: "unique key"}, } { + wg.Add(1) tableName := fmt.Sprintf("t_%d", i) - wg.Run(func() { - testLock(one.rc, one.key, tableName) - }) + go func(rc bool, key string, tableName string) { + defer wg.Done() + testLock(rc, key, tableName) + }(one.rc, one.key, tableName) } wg.Wait() } diff --git a/executor/slow_query.go b/executor/slow_query.go index 699e0ee104d47..789ced21c5b4d 100755 --- a/executor/slow_query.go +++ b/executor/slow_query.go @@ -746,7 +746,7 @@ func getColumnValueFactoryByName(sctx sessionctx.Context, colName string, column return true, nil }, nil case variable.SlowLogPrepared, variable.SlowLogSucc, variable.SlowLogPlanFromCache, variable.SlowLogPlanFromBinding, - variable.SlowLogIsInternalStr, variable.SlowLogIsExplicitTxn, variable.SlowLogIsWriteCacheTable: + variable.SlowLogIsInternalStr, variable.SlowLogIsExplicitTxn, variable.SlowLogIsWriteCacheTable, variable.SlowLogHasMoreResults: return func(row []types.Datum, value string, tz *time.Location, checker *slowLogChecker) (valid bool, err error) { v, err := strconv.ParseBool(value) if err != nil { diff --git a/executor/slow_query_test.go b/executor/slow_query_test.go index 83ab12ebc620e..4396edc03892d 100644 --- a/executor/slow_query_test.go +++ b/executor/slow_query_test.go @@ -162,7 +162,7 @@ select * from t;` `0,0,0,0,0,0,0,0,0,0,0,0,,0,0,0,0,0,0,0.38,0.021,0,0,0,1,637,0,10,10,10,10,100,,,1,42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772,t1:1,t2:2,` + `0.1,0.2,0.03,127.0.0.1:20160,0.05,0.6,0.8,0.0.0.0:20160,70724,65536,0,0,0,0,0,` + `Cop_backoff_regionMiss_total_times: 200 Cop_backoff_regionMiss_total_time: 0.2 Cop_backoff_regionMiss_max_time: 0.2 Cop_backoff_regionMiss_max_addr: 127.0.0.1 Cop_backoff_regionMiss_avg_time: 0.2 Cop_backoff_regionMiss_p90_time: 0.2 Cop_backoff_rpcPD_total_times: 200 Cop_backoff_rpcPD_total_time: 0.2 Cop_backoff_rpcPD_max_time: 0.2 Cop_backoff_rpcPD_max_addr: 127.0.0.1 Cop_backoff_rpcPD_avg_time: 0.2 Cop_backoff_rpcPD_p90_time: 0.2 Cop_backoff_rpcTiKV_total_times: 200 Cop_backoff_rpcTiKV_total_time: 0.2 Cop_backoff_rpcTiKV_max_time: 0.2 Cop_backoff_rpcTiKV_max_addr: 127.0.0.1 Cop_backoff_rpcTiKV_avg_time: 0.2 Cop_backoff_rpcTiKV_p90_time: 0.2,` + - `0,0,1,0,1,1,,60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4,` + + `0,0,1,0,1,1,0,,60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4,` + `update t set i = 1;,select * from t;` require.Equal(t, expectRecordString, recordString) @@ -185,7 +185,7 @@ select * from t;` `0,0,0,0,0,0,0,0,0,0,0,0,,0,0,0,0,0,0,0.38,0.021,0,0,0,1,637,0,10,10,10,10,100,,,1,42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772,t1:1,t2:2,` + `0.1,0.2,0.03,127.0.0.1:20160,0.05,0.6,0.8,0.0.0.0:20160,70724,65536,0,0,0,0,0,` + `Cop_backoff_regionMiss_total_times: 200 Cop_backoff_regionMiss_total_time: 0.2 Cop_backoff_regionMiss_max_time: 0.2 Cop_backoff_regionMiss_max_addr: 127.0.0.1 Cop_backoff_regionMiss_avg_time: 0.2 Cop_backoff_regionMiss_p90_time: 0.2 Cop_backoff_rpcPD_total_times: 200 Cop_backoff_rpcPD_total_time: 0.2 Cop_backoff_rpcPD_max_time: 0.2 Cop_backoff_rpcPD_max_addr: 127.0.0.1 Cop_backoff_rpcPD_avg_time: 0.2 Cop_backoff_rpcPD_p90_time: 0.2 Cop_backoff_rpcTiKV_total_times: 200 Cop_backoff_rpcTiKV_total_time: 0.2 Cop_backoff_rpcTiKV_max_time: 0.2 Cop_backoff_rpcTiKV_max_addr: 127.0.0.1 Cop_backoff_rpcTiKV_avg_time: 0.2 Cop_backoff_rpcTiKV_p90_time: 0.2,` + - `0,0,1,0,1,1,,60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4,` + + 
`0,0,1,0,1,1,0,,60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4,` + `update t set i = 1;,select * from t;` require.Equal(t, expectRecordString, recordString) diff --git a/executor/stale_txn_test.go b/executor/stale_txn_test.go index 771c3fa8c7299..1789e4dd4e612 100644 --- a/executor/stale_txn_test.go +++ b/executor/stale_txn_test.go @@ -1371,6 +1371,19 @@ func TestPlanCacheWithStaleReadByBinaryProto(t *testing.T) { require.NoError(t, err) tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 10")) } + + stmtID2, _, _, err := se.PrepareStmt("select * from t1 where id=1") + require.NoError(t, err) + for i := 0; i < 2; i++ { + rs, err := se.ExecutePreparedStmt(context.TODO(), stmtID2, nil) + require.NoError(t, err) + tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 100")) + } + tk.MustExec("set @@tx_read_ts=@a") + rs, err := se.ExecutePreparedStmt(context.TODO(), stmtID2, nil) + require.NoError(t, err) + // will fail + tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 10")) } func TestIssue30872(t *testing.T) { diff --git a/go.mod b/go.mod index e67a9405e2617..171473bb1b606 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8 github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37 - github.com/dgraph-io/ristretto v0.1.1-0.20211108053508-297c39e6640f + github.com/dgraph-io/ristretto v0.1.1-0.20220403145359-8e850b710d6d github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 github.com/docker/go-units v0.4.0 github.com/fsouza/fake-gcs-server v1.19.0 @@ -63,7 +63,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.0 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 - github.com/tikv/client-go/v2 v2.0.1-0.20220329092050-6bf6951325ad + github.com/tikv/client-go/v2 v2.0.1-0.20220406091203-f73ec0e675f4 github.com/tikv/pd/client v0.0.0-20220307081149-841fa61e9710 github.com/twmb/murmur3 v1.1.3 github.com/uber/jaeger-client-go v2.22.1+incompatible @@ -83,7 +83,7 @@ require ( golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f + golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f golang.org/x/text v0.3.7 golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 golang.org/x/tools v0.1.8 @@ -206,5 +206,5 @@ replace github.com/pingcap/tidb/parser => ./parser // fix potential security issue(CVE-2020-26160) introduced by indirect dependency. replace github.com/dgrijalva/jwt-go => github.com/form3tech-oss/jwt-go v3.2.6-0.20210809144907-32ab6a8243d7+incompatible -// it can be removed after merging https://github.com/dgraph-io/ristretto/pull/294 -replace github.com/dgraph-io/ristretto => github.com/hawkingrei/ristretto v0.1.1-0.20220402052934-7556ec01f9db +// fix data race in testify.
it can be removed after merging https://github.com/stretchr/testify/pull/1165 +replace github.com/stretchr/testify => github.com/hawkingrei/testify v1.7.1-0.20220318075534-088488aa27f2 diff --git a/go.sum b/go.sum index 4c967837d4479..508ffc71c96fe 100644 --- a/go.sum +++ b/go.sum @@ -195,6 +195,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/ristretto v0.1.1-0.20220403145359-8e850b710d6d h1:Wrc3UKTS+cffkOx0xRGFC+ZesNuTfn0ThvEC72N0krk= +github.com/dgraph-io/ristretto v0.1.1-0.20220403145359-8e850b710d6d/go.mod h1:RAy2GVV4sTWVlNMavv3xhLsk18rxhfhDnombTe6EF5c= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= @@ -434,8 +436,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hawkingrei/ristretto v0.1.1-0.20220402052934-7556ec01f9db h1:TgwwlrryS+4yDB95DS7DNHt62YuLcMfUQ0+k42lHiao= -github.com/hawkingrei/ristretto v0.1.1-0.20220402052934-7556ec01f9db/go.mod h1:RAy2GVV4sTWVlNMavv3xhLsk18rxhfhDnombTe6EF5c= +github.com/hawkingrei/testify v1.7.1-0.20220318075534-088488aa27f2 h1:ISVSMZv3HuDPbTAd76vrHWlomIe8lJOp4KovWngSVpY= +github.com/hawkingrei/testify v1.7.1-0.20220318075534-088488aa27f2/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= @@ -739,18 +741,11 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2
h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= -github.com/tikv/client-go/v2 v2.0.1-0.20220329092050-6bf6951325ad h1:Imm87gW7/Pra/NdUc7D3wkdtlZgo/iw0lSLIWZPiMS0= -github.com/tikv/client-go/v2 v2.0.1-0.20220329092050-6bf6951325ad/go.mod h1:0scaG+seu7L56apm+Gjz9vckyO7ABIzM6T7n00mrIXs= +github.com/tikv/client-go/v2 v2.0.1-0.20220406091203-f73ec0e675f4 h1:bi/tuV42dQCu7TTTOwHQW6cHVrV1fhet+Hzo5CUODBQ= +github.com/tikv/client-go/v2 v2.0.1-0.20220406091203-f73ec0e675f4/go.mod h1:0scaG+seu7L56apm+Gjz9vckyO7ABIzM6T7n00mrIXs= github.com/tikv/pd/client v0.0.0-20220307081149-841fa61e9710 h1:jxgmKOscXSjaFEKQGRyY5qOpK8hLqxs2irb/uDJMtwk= github.com/tikv/pd/client v0.0.0-20220307081149-841fa61e9710/go.mod h1:AtvppPwkiyUgQlR1W9qSqfTB+OsOIu19jDCOxOsPkmU= github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= @@ -1106,8 +1101,8 @@ golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f h1:rlezHXNlxYWvBCzNses9Dlc7nGFaNMJeqLolcmQSSZY= -golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f h1:8w7RhxzTVgUzw/AH/9mUV5q0vMgy40SQRursCcfmkCw= +golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/infoschema/tables.go b/infoschema/tables.go index 088b104ddfd81..f1a20b9606a95 100644 --- a/infoschema/tables.go +++ b/infoschema/tables.go @@ -869,6 +869,7 @@ var slowQueryCols = []columnInfo{ {name: variable.SlowLogIsWriteCacheTable, tp: mysql.TypeTiny, size: 1}, {name: variable.SlowLogPlanFromCache, tp: mysql.TypeTiny, size: 1}, {name: variable.SlowLogPlanFromBinding, tp: mysql.TypeTiny, size: 1}, + {name: variable.SlowLogHasMoreResults, tp: mysql.TypeTiny, size: 1}, {name: variable.SlowLogPlan, tp: mysql.TypeLongBlob, size: types.UnspecifiedLength}, {name: variable.SlowLogPlanDigest, tp: mysql.TypeVarchar, size: 128}, {name: variable.SlowLogPrevStmt, tp: mysql.TypeLongBlob, size: types.UnspecifiedLength}, diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index 12e9c85abd227..56c3389a97580 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -38,6 +38,7 @@ import ( plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/session/txninfo" + "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/kvcache" @@ -567,13 +568,13 @@ func TestSlowQuery(t *testing.T) { tk.MustExec(fmt.Sprintf("set @@tidb_slow_query_file='%v'", slowLogFileName)) tk.MustExec("set time_zone = '+08:00';") re := tk.MustQuery("select * from 
information_schema.slow_query") - re.Check(testkit.RowsWithSep("|", "2019-02-12 19:33:56.571953|406315658548871171|root|localhost|6|57|0.12|4.895492|0.4|0.2|0.000000003|2|0.000000002|0.00000001|0.000000003|0.19|0.21|0.01|0|0.18|[txnLock]|0.03|0|15|480|1|8|0.3824278|0.161|0.101|0.092|1.71|1|100001|100000|100|10|10|10|100|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|65536|0|0|0|0|10||0|1|0|0|1|0|abcd|60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4|update t set i = 2;|select * from t_slim;", - "2021-09-08|14:39:54.506967|427578666238083075|root|172.16.0.0|40507|0|0|25.571605962|0.002923536|0.006800973|0.002100764|0|0|0|0.000015801|25.542014572|0|0.002294647|0.000605473|12.483|[tikvRPC regionMiss tikvRPC regionMiss regionMiss]|0|0|624|172064|60|0|0|0|0|0|0|0|0|0|0|0|0|0|0|rtdb||0|124acb3a0bec903176baca5f9da00b4e7512a41c93b417923f26502edeb324cc||0|0|0||0|0|0||856544|0|86.635049185|0.015486658|100.054|0|0||0|1|0|0|0|0||||INSERT INTO ...;", + re.Check(testkit.RowsWithSep("|", "2019-02-12 19:33:56.571953|406315658548871171|root|localhost|6|57|0.12|4.895492|0.4|0.2|0.000000003|2|0.000000002|0.00000001|0.000000003|0.19|0.21|0.01|0|0.18|[txnLock]|0.03|0|15|480|1|8|0.3824278|0.161|0.101|0.092|1.71|1|100001|100000|100|10|10|10|100|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|65536|0|0|0|0|10||0|1|0|0|1|0|0|abcd|60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4|update t set i = 2;|select * from t_slim;", + "2021-09-08|14:39:54.506967|427578666238083075|root|172.16.0.0|40507|0|0|25.571605962|0.002923536|0.006800973|0.002100764|0|0|0|0.000015801|25.542014572|0|0.002294647|0.000605473|12.483|[tikvRPC regionMiss tikvRPC regionMiss regionMiss]|0|0|624|172064|60|0|0|0|0|0|0|0|0|0|0|0|0|0|0|rtdb||0|124acb3a0bec903176baca5f9da00b4e7512a41c93b417923f26502edeb324cc||0|0|0||0|0|0||856544|0|86.635049185|0.015486658|100.054|0|0||0|1|0|0|0|0|0||||INSERT INTO ...;", )) tk.MustExec("set time_zone = '+00:00';") re = tk.MustQuery("select * from information_schema.slow_query") - re.Check(testkit.RowsWithSep("|", "2019-02-12 11:33:56.571953|406315658548871171|root|localhost|6|57|0.12|4.895492|0.4|0.2|0.000000003|2|0.000000002|0.00000001|0.000000003|0.19|0.21|0.01|0|0.18|[txnLock]|0.03|0|15|480|1|8|0.3824278|0.161|0.101|0.092|1.71|1|100001|100000|100|10|10|10|100|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|65536|0|0|0|0|10||0|1|0|0|1|0|abcd|60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4|update t set i = 2;|select * from t_slim;", - "2021-09-08|06:39:54.506967|427578666238083075|root|172.16.0.0|40507|0|0|25.571605962|0.002923536|0.006800973|0.002100764|0|0|0|0.000015801|25.542014572|0|0.002294647|0.000605473|12.483|[tikvRPC regionMiss tikvRPC regionMiss regionMiss]|0|0|624|172064|60|0|0|0|0|0|0|0|0|0|0|0|0|0|0|rtdb||0|124acb3a0bec903176baca5f9da00b4e7512a41c93b417923f26502edeb324cc||0|0|0||0|0|0||856544|0|86.635049185|0.015486658|100.054|0|0||0|1|0|0|0|0||||INSERT INTO ...;", + re.Check(testkit.RowsWithSep("|", "2019-02-12 
11:33:56.571953|406315658548871171|root|localhost|6|57|0.12|4.895492|0.4|0.2|0.000000003|2|0.000000002|0.00000001|0.000000003|0.19|0.21|0.01|0|0.18|[txnLock]|0.03|0|15|480|1|8|0.3824278|0.161|0.101|0.092|1.71|1|100001|100000|100|10|10|10|100|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|65536|0|0|0|0|10||0|1|0|0|1|0|0|abcd|60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4|update t set i = 2;|select * from t_slim;", + "2021-09-08|06:39:54.506967|427578666238083075|root|172.16.0.0|40507|0|0|25.571605962|0.002923536|0.006800973|0.002100764|0|0|0|0.000015801|25.542014572|0|0.002294647|0.000605473|12.483|[tikvRPC regionMiss tikvRPC regionMiss regionMiss]|0|0|624|172064|60|0|0|0|0|0|0|0|0|0|0|0|0|0|0|rtdb||0|124acb3a0bec903176baca5f9da00b4e7512a41c93b417923f26502edeb324cc||0|0|0||0|0|0||856544|0|86.635049185|0.015486658|100.054|0|0||0|1|0|0|0|0|0||||INSERT INTO ...;", )) // Test for long query. @@ -604,6 +605,35 @@ func TestColumnStatistics(t *testing.T) { tk.MustQuery("select * from information_schema.column_statistics").Check(testkit.Rows()) } +func TestTableIfHasColumn(t *testing.T) { + columnName := variable.SlowLogHasMoreResults + store, clean := testkit.CreateMockStore(t) + defer clean() + slowLogFileName := "tidb-slow.log" + f, err := os.OpenFile(slowLogFileName, os.O_CREATE|os.O_WRONLY, 0644) + require.NoError(t, err) + _, err = f.Write([]byte(`# Time: 2019-02-12T19:33:56.571953+08:00 +# Txn_start_ts: 406315658548871171 +# User@Host: root[root] @ localhost [127.0.0.1] +# Has_more_results: true +INSERT INTO ...; +`)) + require.NoError(t, f.Close()) + require.NoError(t, err) + defer func() { require.NoError(t, os.Remove(slowLogFileName)) }() + tk := testkit.NewTestKit(t, store) + + //check schema + tk.MustQuery(`select COUNT(*) from information_schema.columns +WHERE table_name = 'slow_query' and column_name = '` + columnName + `'`). + Check(testkit.Rows("1")) + + //check select + tk.MustQuery(`select ` + columnName + + ` from information_schema.slow_query`).Check(testkit.Rows("1")) + +} + func TestReloadDropDatabase(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() diff --git a/metrics/metrics.go b/metrics/metrics.go index 9ee4656d57cd1..5bbb2ab4a3c01 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -163,6 +163,7 @@ func RegisterMetrics() { prometheus.MustRegister(PDApiExecutionHistogram) prometheus.MustRegister(CPUProfileCounter) prometheus.MustRegister(ReadFromTableCacheCounter) + prometheus.MustRegister(LoadTableCacheDurationHistogram) tikvmetrics.InitMetrics(TiDB, TiKVClient) tikvmetrics.RegisterMetrics() diff --git a/metrics/server.go b/metrics/server.go index e373144e97297..72dc9e4e4b8ba 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -254,6 +254,15 @@ var ( Name: "cpu_profile_total", Help: "Counter of cpu profiling", }) + + LoadTableCacheDurationHistogram = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "tidb", + Subsystem: "server", + Name: "load_table_cache_seconds", + Help: "Duration (us) for loading table cache.", + Buckets: prometheus.ExponentialBuckets(1, 2, 30), // 1us ~ 528s + }) ) // ExecuteErrorToLabel converts an execute error to label. diff --git a/parser/model/ddl.go b/parser/model/ddl.go index 5ea3790143baf..6b6ecda4a3453 100644 --- a/parser/model/ddl.go +++ b/parser/model/ddl.go @@ -298,7 +298,6 @@ type Job struct { Version int64 `json:"version"` // ReorgMeta is meta info of ddl reorganization. 
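A note on the bucket arithmetic in the metrics/server.go hunk above: prometheus.ExponentialBuckets(1, 2, 30) yields 30 upper bounds, 1, 2, 4, ..., 2^29, which this histogram interprets as microseconds, so the top bucket sits at roughly 537 s and the `1us ~ 528s` comment is a loose rounding (note also that the metric name ends in _seconds while the Help text and buckets are in microseconds). A standalone check (sketch, not part of the patch):

package main

import "fmt"

func main() {
	// prometheus.ExponentialBuckets(1, 2, 30): first bound is 1, each later
	// bound doubles, 30 bounds in total (no +Inf bucket is appended).
	top := 1 << 29 // largest bound, read as microseconds by this histogram
	fmt.Printf("largest bucket: %d us = %.1f s\n", top, float64(top)/1e6)
	// prints: largest bucket: 536870912 us = 536.9 s
}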
- // This field is depreciated. ReorgMeta *DDLReorgMeta `json:"reorg_meta"` // MultiSchemaInfo keeps some warning now for multi schema change. @@ -504,6 +503,16 @@ func (job *Job) IsRunning() bool { return job.State == JobStateRunning } +// IsQueueing returns whether the job is queueing or not. +func (job *Job) IsQueueing() bool { + return job.State == JobStateQueueing +} + +// NotStarted returns true if the job has not been started yet. +func (job *Job) NotStarted() bool { + return job.State == JobStateNone || job.State == JobStateQueueing +} + // JobState is for job state. type JobState byte @@ -523,6 +530,8 @@ const ( JobStateSynced JobState = 6 // JobStateCancelling is used to mark the DDL job is cancelled by the client, but the DDL work hasn't handle it. JobStateCancelling JobState = 7 + // JobStateQueueing means the job has not yet been started. + JobStateQueueing JobState = 8 ) // String implements fmt.Stringer interface. @@ -542,6 +551,8 @@ func (s JobState) String() string { return "cancelling" case JobStateSynced: return "synced" + case JobStateQueueing: + return "queueing" default: return "none" } diff --git a/parser/model/model.go b/parser/model/model.go index 64d9cc98bc9ec..fb50c87e8f02f 100644 --- a/parser/model/model.go +++ b/parser/model/model.go @@ -71,7 +71,7 @@ func (s SchemaState) String() string { case StateGlobalTxnOnly: return "global txn only" default: - return "queueing" + return "none" } } diff --git a/parser/model/model_test.go b/parser/model/model_test.go index 7e594e245a5dc..0ae06feeebb78 100644 --- a/parser/model/model_test.go +++ b/parser/model/model_test.go @@ -222,7 +222,7 @@ func TestJobStartTime(t *testing.T) { BinlogInfo: &HistoryInfo{}, } require.Equal(t, TSConvert2Time(job.StartTS), time.Unix(0, 0)) - require.Equal(t, fmt.Sprintf("ID:123, Type:none, State:none, SchemaState:queueing, SchemaID:0, TableID:0, RowCount:0, ArgLen:0, start time: %s, Err:, ErrCount:0, SnapshotVersion:0", time.Unix(0, 0)), job.String()) + require.Equal(t, fmt.Sprintf("ID:123, Type:none, State:none, SchemaState:none, SchemaID:0, TableID:0, RowCount:0, ArgLen:0, start time: %s, Err:, ErrCount:0, SnapshotVersion:0", time.Unix(0, 0)), job.String()) } func TestJobCodec(t *testing.T) { diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go index 4ce4a590e08bd..69c2a337b18d5 100644 --- a/planner/core/exhaust_physical_plans.go +++ b/planner/core/exhaust_physical_plans.go @@ -957,6 +957,10 @@ func (p *LogicalJoin) constructInnerTableScanTask( Desc: desc, physicalTableID: ds.physicalTableID, isPartition: ds.isPartition, + + underInnerIndexJoin: true, + tblCols: ds.TblCols, + tblColHists: ds.TblColHists, }.Init(ds.ctx, ds.blockOffset) ts.SetSchema(ds.schema.Clone()) if rowCount <= 0 { @@ -981,7 +985,7 @@ func (p *LogicalJoin) constructInnerTableScanTask( StatsVersion: ds.stats.StatsVersion, // NDV would not be used in cost computation of IndexJoin, set leave it as default nil.
} - rowSize := ds.TblColHists.GetTableAvgRowSize(p.ctx, ds.TblCols, ts.StoreType, true) + rowSize := ts.getScanRowSize() sessVars := ds.ctx.GetSessionVars() copTask := &copTask{ tablePlan: ts, @@ -1053,6 +1057,10 @@ func (p *LogicalJoin) constructInnerIndexScanTask( Desc: desc, isPartition: ds.isPartition, physicalTableID: ds.physicalTableID, + tblColHists: ds.TblColHists, + pkIsHandleCol: ds.getPKIsHandleCol(), + + underInnerIndexJoin: true, }.Init(ds.ctx, ds.blockOffset) cop := &copTask{ indexPlan: is, @@ -1074,6 +1082,8 @@ func (p *LogicalJoin) constructInnerIndexScanTask( TableAsName: ds.TableAsName, isPartition: ds.isPartition, physicalTableID: ds.physicalTableID, + tblCols: ds.TblCols, + tblColHists: ds.TblColHists, }.Init(ds.ctx, ds.blockOffset) ts.schema = is.dataSourceSchema.Clone() if ds.tableInfo.IsCommonHandle { @@ -1147,7 +1157,7 @@ func (p *LogicalJoin) constructInnerIndexScanTask( tmpPath.CountAfterAccess = cnt } is.stats = ds.tableStats.ScaleByExpectCnt(tmpPath.CountAfterAccess) - rowSize := is.indexScanRowSize(path.Index, ds, true) + rowSize := is.getScanRowSize() sessVars := ds.ctx.GetSessionVars() cop.cst = tmpPath.CountAfterAccess * rowSize * sessVars.GetScanFactor(ds.tableInfo) finalStats := ds.tableStats.ScaleByExpectCnt(rowCount) diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index 813765e86d723..4eb777b61b522 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -1034,9 +1034,8 @@ func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, c func (ds *DataSource) convertToPartialIndexScan(prop *property.PhysicalProperty, path *util.AccessPath) ( indexPlan PhysicalPlan, partialCost float64) { - idx := path.Index is, partialCost, rowCount := ds.getOriginalPhysicalIndexScan(prop, path, false, false) - rowSize := is.indexScanRowSize(idx, ds, false) + rowSize := is.stats.HistColl.GetAvgRowSize(is.ctx, is.schema.Columns, true, false) // TODO: Consider using isCoveringIndex() to avoid another TableRead indexConds := path.IndexFilters sessVars := ds.ctx.GetSessionVars() @@ -1151,6 +1150,8 @@ func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty, isPartition: ds.isPartition, physicalTableID: ds.physicalTableID, HandleCols: ds.handleCols, + tblCols: ds.TblCols, + tblColHists: ds.TblColHists, }.Init(ds.ctx, ds.blockOffset) ts.SetSchema(ds.schema.Clone()) err := setIndexMergeTableScanHandleCols(ds, ts) @@ -1164,7 +1165,7 @@ func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty, } } } - rowSize := ds.TblColHists.GetTableAvgRowSize(ds.ctx, ds.TblCols, ts.StoreType, true) + rowSize := ts.getScanRowSize() partialCost += totalRowCount * rowSize * sessVars.GetScanFactor(ds.tableInfo) ts.stats = ds.tableStats.ScaleByExpectCnt(totalRowCount) if ds.statisticTable.Pseudo { @@ -1307,6 +1308,8 @@ func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, TableAsName: ds.TableAsName, isPartition: ds.isPartition, physicalTableID: ds.physicalTableID, + tblCols: ds.TblCols, + tblColHists: ds.TblColHists, }.Init(ds.ctx, is.blockOffset) ts.SetSchema(ds.schema.Clone()) ts.SetCost(cost) @@ -1358,22 +1361,20 @@ func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, return task, nil } -func (is *PhysicalIndexScan) indexScanRowSize(idx *model.IndexInfo, ds *DataSource, isForScan bool) float64 { +func (is *PhysicalIndexScan) getScanRowSize() float64 { + idx := is.Index scanCols := make([]*expression.Column, 0, len(idx.Columns)+1) // If 
`initSchema` has already appended the handle column in schema, just use schema columns, otherwise, add extra handle column. if len(idx.Columns) == len(is.schema.Columns) { scanCols = append(scanCols, is.schema.Columns...) - handleCol := ds.getPKIsHandleCol() + handleCol := is.pkIsHandleCol if handleCol != nil { scanCols = append(scanCols, handleCol) } } else { scanCols = is.schema.Columns } - if isForScan { - return ds.TblColHists.GetIndexAvgRowSize(is.ctx, scanCols, is.Index.Unique) - } - return ds.TblColHists.GetAvgRowSize(is.ctx, scanCols, true, false) + return is.tblColHists.GetIndexAvgRowSize(is.ctx, scanCols, is.Index.Unique) } // initSchema is used to set the schema of PhysicalIndexScan. Before calling this, @@ -2085,6 +2086,15 @@ func (ts *PhysicalTableScan) addPushedDownSelection(copTask *copTask, stats *pro } } +func (ts *PhysicalTableScan) getScanRowSize() float64 { + if ts.StoreType == kv.TiKV { + return ts.tblColHists.GetTableAvgRowSize(ts.ctx, ts.tblCols, ts.StoreType, true) + } + // If `ts.handleCol` is nil, then the schema of tableScan doesn't have handle column. + // This logic can be ensured in column pruning. + return ts.tblColHists.GetTableAvgRowSize(ts.ctx, ts.Schema().Columns, ts.StoreType, ts.HandleCols != nil) +} + func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProperty, path *util.AccessPath, isMatchProp bool) (*PhysicalTableScan, float64, float64) { ts := PhysicalTableScan{ Table: ds.tableInfo, @@ -2096,6 +2106,9 @@ func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProper Ranges: path.Ranges, AccessCondition: path.AccessConds, StoreType: path.StoreType, + HandleCols: ds.handleCols, + tblCols: ds.TblCols, + tblColHists: ds.TblColHists, }.Init(ds.ctx, ds.blockOffset) ts.filterCondition = make([]expression.Expression, len(path.TableFilters)) copy(ts.filterCondition, path.TableFilters) @@ -2135,14 +2148,7 @@ func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProper // we still need to assume values are uniformly distributed. For simplicity, we use uniform-assumption // for all columns now, as we do in `deriveStatsByFilter`. ts.stats = ds.tableStats.ScaleByExpectCnt(rowCount) - var rowSize float64 - if ts.StoreType == kv.TiKV { - rowSize = ds.TblColHists.GetTableAvgRowSize(ds.ctx, ds.TblCols, ts.StoreType, true) - } else { - // If `ds.handleCol` is nil, then the schema of tableScan doesn't have handle column. - // This logic can be ensured in column pruning. 
- rowSize = ds.TblColHists.GetTableAvgRowSize(ds.ctx, ts.Schema().Columns, ts.StoreType, ds.handleCols != nil) - } + rowSize := ts.getScanRowSize() sessVars := ds.ctx.GetSessionVars() cost := rowCount * rowSize * sessVars.GetScanFactor(ds.tableInfo) if isMatchProp { @@ -2152,12 +2158,6 @@ func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProper } ts.KeepOrder = true } - switch ts.StoreType { - case kv.TiKV: - cost += float64(len(ts.Ranges)) * sessVars.GetSeekFactor(ds.tableInfo) - case kv.TiFlash: - cost += float64(len(ts.Ranges)) * float64(len(ts.Columns)) * sessVars.GetSeekFactor(ds.tableInfo) - } return ts, cost, rowCount } @@ -2176,6 +2176,8 @@ func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProper dataSourceSchema: ds.schema, isPartition: ds.isPartition, physicalTableID: ds.physicalTableID, + tblColHists: ds.TblColHists, + pkIsHandleCol: ds.getPKIsHandleCol(), }.Init(ds.ctx, ds.blockOffset) statsTbl := ds.statisticTable if statsTbl.Indices[idx.ID] != nil { @@ -2194,7 +2196,7 @@ func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProper } } is.stats = ds.tableStats.ScaleByExpectCnt(rowCount) - rowSize := is.indexScanRowSize(idx, ds, true) + rowSize := is.getScanRowSize() sessVars := ds.ctx.GetSessionVars() cost := rowCount * rowSize * sessVars.GetScanFactor(ds.tableInfo) if isMatchProp { @@ -2204,7 +2206,6 @@ func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProper } is.KeepOrder = true } - cost += float64(len(is.Ranges)) * sessVars.GetSeekFactor(ds.tableInfo) is.cost = cost return is, cost, rowCount } diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index 7e1d42ed78ea0..19e6ac3d7eb00 100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -5270,6 +5270,42 @@ func TestIssue29503(t *testing.T) { require.Len(t, res.Rows(), 2) } +func TestIndexJoinCost(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`drop table if exists t_outer, t_inner_pk, t_inner_idx`) + tk.MustExec(`create table t_outer (a int)`) + tk.MustExec(`create table t_inner_pk (a int primary key)`) + tk.MustExec(`create table t_inner_idx (a int, b int, key(a))`) + + tk.MustQuery(`explain format=verbose select /*+ TIDB_INLJ(t_outer, t_inner_pk) */ * from t_outer, t_inner_pk where t_outer.a=t_inner_pk.a`).Check(testkit.Rows( // IndexJoin with inner TableScan + `IndexJoin_11 12487.50 193048.09 root inner join, inner:TableReader_8, outer key:test.t_outer.a, inner key:test.t_inner_pk.a, equal cond:eq(test.t_outer.a, test.t_inner_pk.a)`, + `├─TableReader_18(Build) 9990.00 36412.58 root data:Selection_17`, + `│ └─Selection_17 9990.00 465000.00 cop[tikv] not(isnull(test.t_outer.a))`, + `│ └─TableFullScan_16 10000.00 435000.00 cop[tikv] table:t_outer keep order:false, stats:pseudo`, + `└─TableReader_8(Probe) 1.00 2.54 root data:TableRangeScan_7`, + ` └─TableRangeScan_7 1.00 0.00 cop[tikv] table:t_inner_pk range: decided by [test.t_outer.a], keep order:false, stats:pseudo`)) + tk.MustQuery(`explain format=verbose select /*+ TIDB_INLJ(t_outer, t_inner_idx) */ t_inner_idx.a from t_outer, t_inner_idx where t_outer.a=t_inner_idx.a`).Check(testkit.Rows( // IndexJoin with inner IndexScan + `IndexJoin_10 12487.50 221872.19 root inner join, inner:IndexReader_9, outer key:test.t_outer.a, inner key:test.t_inner_idx.a, equal cond:eq(test.t_outer.a, test.t_inner_idx.a)`, + 
`├─TableReader_20(Build) 9990.00 36412.58 root data:Selection_19`, + `│ └─Selection_19 9990.00 465000.00 cop[tikv] not(isnull(test.t_outer.a))`, + `│ └─TableFullScan_18 10000.00 435000.00 cop[tikv] table:t_outer keep order:false, stats:pseudo`, + `└─IndexReader_9(Probe) 1.25 4.56 root index:Selection_8`, + ` └─Selection_8 1.25 0.00 cop[tikv] not(isnull(test.t_inner_idx.a))`, + ` └─IndexRangeScan_7 1.25 0.00 cop[tikv] table:t_inner_idx, index:a(a) range: decided by [eq(test.t_inner_idx.a, test.t_outer.a)], keep order:false, stats:pseudo`)) + tk.MustQuery(`explain format=verbose select /*+ TIDB_INLJ(t_outer, t_inner_idx) */ * from t_outer, t_inner_idx where t_outer.a=t_inner_idx.a`).Check(testkit.Rows( // IndexJoin with inner IndexLookup + `IndexJoin_11 12487.50 518149.38 root inner join, inner:IndexLookUp_10, outer key:test.t_outer.a, inner key:test.t_inner_idx.a, equal cond:eq(test.t_outer.a, test.t_inner_idx.a)`, + `├─TableReader_23(Build) 9990.00 36412.58 root data:Selection_22`, + `│ └─Selection_22 9990.00 465000.00 cop[tikv] not(isnull(test.t_outer.a))`, + `│ └─TableFullScan_21 10000.00 435000.00 cop[tikv] table:t_outer keep order:false, stats:pseudo`, + `└─IndexLookUp_10(Probe) 1.25 34.21 root `, + ` ├─Selection_9(Build) 1.25 0.00 cop[tikv] not(isnull(test.t_inner_idx.a))`, + ` │ └─IndexRangeScan_7 1.25 0.00 cop[tikv] table:t_inner_idx, index:a(a) range: decided by [eq(test.t_inner_idx.a, test.t_outer.a)], keep order:false, stats:pseudo`, + ` └─TableRowIDScan_8(Probe) 1.25 0.00 cop[tikv] table:t_inner_idx keep order:false, stats:pseudo`)) +} + func TestHeuristicIndexSelection(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go index 1ec5c1fe2cafd..b0aff71a5c647 100644 --- a/planner/core/physical_plans.go +++ b/planner/core/physical_plans.go @@ -437,6 +437,13 @@ type PhysicalIndexScan struct { DoubleRead bool NeedCommonHandle bool + + // required by cost model + // IndexScan operators under inner side of IndexJoin no need to consider net seek cost + underInnerIndexJoin bool + // tblColHists contains all columns before pruning, which are used to calculate row-size + tblColHists *statistics.HistColl + pkIsHandleCol *expression.Column } // Clone implements PhysicalPlan interface. @@ -533,6 +540,13 @@ type PhysicalTableScan struct { PartitionInfo PartitionInfo SampleInfo *TableSampleInfo + + // required by cost model + // TableScan operators under inner side of IndexJoin no need to consider net seek cost + underInnerIndexJoin bool + // tblCols and tblColHists contains all columns before pruning, which are used to calculate row-size + tblCols []*expression.Column + tblColHists *statistics.HistColl } // Clone implements PhysicalPlan interface. diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 9d965aedb9310..d65007513a891 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -1457,6 +1457,7 @@ func (b *PlanBuilder) buildPhysicalIndexLookUpReader(ctx context.Context, dbName Ranges: ranger.FullRange(), physicalTableID: physicalID, isPartition: isPartition, + tblColHists: &(statistics.PseudoTable(tblInfo)).HistColl, }.Init(b.ctx, b.getSelectOffset()) // There is no alternative plan choices, so just use pseudo stats to avoid panic. 
is.stats = &property.StatsInfo{HistColl: &(statistics.PseudoTable(tblInfo)).HistColl} @@ -1474,6 +1475,7 @@ func (b *PlanBuilder) buildPhysicalIndexLookUpReader(ctx context.Context, dbName TableAsName: &tblInfo.Name, physicalTableID: physicalID, isPartition: isPartition, + tblColHists: &(statistics.PseudoTable(tblInfo)).HistColl, }.Init(b.ctx, b.getSelectOffset()) ts.SetSchema(idxColSchema) ts.Columns = ExpandVirtualColumn(ts.Columns, ts.schema, ts.Table.Columns) diff --git a/planner/core/task.go b/planner/core/task.go index f21526d6cd940..6bf9f6ac729e8 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -172,16 +172,25 @@ func (t *copTask) finishIndexPlan() { } // Network cost of transferring rows of index scan to TiDB. t.cst += cnt * sessVars.GetNetworkFactor(tableInfo) * t.tblColHists.GetAvgRowSize(t.indexPlan.SCtx(), t.indexPlan.Schema().Columns, true, false) + + // net seek cost + var p PhysicalPlan + for p = t.indexPlan; len(p.Children()) > 0; p = p.Children()[0] { + } + is := p.(*PhysicalIndexScan) + if !is.underInnerIndexJoin { // no need to accumulate seek cost for IndexJoin + t.cst += float64(len(is.Ranges)) * sessVars.GetSeekFactor(is.Table) // net seek cost + } + if t.tablePlan == nil { return } // Calculate the IO cost of table scan here because we cannot know its stats until we finish index plan. - var p PhysicalPlan - for p = t.indexPlan; len(p.Children()) > 0; p = p.Children()[0] { + for p = t.tablePlan; len(p.Children()) > 0; p = p.Children()[0] { } - rowSize := t.tblColHists.GetIndexAvgRowSize(t.indexPlan.SCtx(), t.tblCols, p.(*PhysicalIndexScan).Index.Unique) - t.cst += cnt * rowSize * sessVars.GetScanFactor(tableInfo) + ts := p.(*PhysicalTableScan) + t.cst += cnt * ts.getScanRowSize() * sessVars.GetScanFactor(tableInfo) } func (t *copTask) getStoreType() kv.StoreType { @@ -1053,6 +1062,7 @@ func (t *copTask) convertToRootTaskImpl(ctx sessionctx.Context) *rootTask { t.finishIndexPlan() // Network cost of transferring rows of table scan to TiDB. 
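Pulling the seek-cost rewiring of these planner hunks together: constructInnerTableScanTask and constructInnerIndexScanTask mark the scans they build with underInnerIndexJoin, and both finishIndexPlan above and the table-scan branch of convertToRootTaskImpl below check that flag before charging the per-range seek term, so scans on the inner side of an IndexJoin skip it. Distilled into one function (a sketch over the patch's fields; TiFlash's per-column multiplier omitted):

// addNetSeekCost applies the rule these hunks introduce: charge one seek
// per range, except for scans built for the inner side of an IndexJoin.
func addNetSeekCost(cst float64, underInnerIndexJoin bool, numRanges int, seekFactor float64) float64 {
	if underInnerIndexJoin {
		return cst // no net seek cost on the inner side of an IndexJoin
	}
	return cst + float64(numRanges)*seekFactor
}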
if t.tablePlan != nil { + // net I/O cost t.cst += t.count() * sessVars.GetNetworkFactor(nil) * t.tblColHists.GetAvgRowSize(ctx, t.tablePlan.Schema().Columns, false, false) tp := t.tablePlan @@ -1065,6 +1075,17 @@ func (t *copTask) convertToRootTaskImpl(ctx sessionctx.Context) *rootTask { } } ts := tp.(*PhysicalTableScan) + + // net seek cost + if !ts.underInnerIndexJoin { // no need to accumulate net seek cost for IndexJoin + switch ts.StoreType { + case kv.TiKV: + t.cst += float64(len(ts.Ranges)) * sessVars.GetSeekFactor(ts.Table) + case kv.TiFlash: + t.cst += float64(len(ts.Ranges)) * float64(len(ts.Columns)) * sessVars.GetSeekFactor(ts.Table) + } + } + prevColumnLen := len(ts.Columns) prevSchema := ts.schema.Clone() ts.Columns = ExpandVirtualColumn(ts.Columns, ts.schema, ts.Table.Columns) @@ -2386,6 +2407,16 @@ func collectRowSizeFromMPPPlan(mppPlan PhysicalPlan) (rowSize float64) { return 1 // use 1 as lower-bound for safety } +func accumulateNetSeekCost4MPP(p PhysicalPlan) (cost float64) { + if ts, ok := p.(*PhysicalTableScan); ok { + return float64(len(ts.Ranges)) * float64(len(ts.Columns)) * ts.SCtx().GetSessionVars().GetSeekFactor(ts.Table) + } + for _, c := range p.Children() { + cost += accumulateNetSeekCost4MPP(c) + } + return +} + func (t *mppTask) convertToRootTaskImpl(ctx sessionctx.Context) *rootTask { sender := PhysicalExchangeSender{ ExchangeType: tipb.ExchangeType_PassThrough, @@ -2401,7 +2432,9 @@ func (t *mppTask) convertToRootTaskImpl(ctx sessionctx.Context) *rootTask { collectPartitionInfosFromMPPPlan(p, t.p) rowSize := collectRowSizeFromMPPPlan(sender) - cst := t.cst + t.count()*rowSize*ctx.GetSessionVars().GetNetworkFactor(nil) + cst := t.cst + t.count()*rowSize*ctx.GetSessionVars().GetNetworkFactor(nil) // net I/O cost + // net seek cost, unlike copTask, a mppTask may have multiple underlying TableScan, so use a recursive function to accumulate this + cst += accumulateNetSeekCost4MPP(sender) cst /= p.ctx.GetSessionVars().CopTiFlashConcurrencyFactor p.cost = cst if p.ctx.GetSessionVars().IsMPPEnforced() { diff --git a/planner/core/testdata/enforce_mpp_suite_out.json b/planner/core/testdata/enforce_mpp_suite_out.json index a7d8197164e35..4fc4915ad80d0 100644 --- a/planner/core/testdata/enforce_mpp_suite_out.json +++ b/planner/core/testdata/enforce_mpp_suite_out.json @@ -33,8 +33,8 @@ "Plan": [ "StreamAgg_24 1.00 35.88 root funcs:count(Column#6)->Column#4", "└─IndexReader_25 1.00 32.88 root index:StreamAgg_9", - " └─StreamAgg_9 1.00 485.00 cop[tikv] funcs:count(1)->Column#6", - " └─IndexRangeScan_23 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + " └─StreamAgg_9 1.00 465.00 cop[tikv] funcs:count(1)->Column#6", + " └─IndexRangeScan_23 10.00 435.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" ], "Warn": null }, @@ -43,8 +43,8 @@ "Plan": [ "StreamAgg_17 1.00 35.88 root funcs:count(Column#6)->Column#4", "└─IndexReader_18 1.00 32.88 root index:StreamAgg_9", - " └─StreamAgg_9 1.00 485.00 cop[tikv] funcs:count(1)->Column#6", - " └─IndexRangeScan_16 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + " └─StreamAgg_9 1.00 465.00 cop[tikv] funcs:count(1)->Column#6", + " └─IndexRangeScan_16 10.00 435.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" ], "Warn": null }, @@ -53,9 +53,9 @@ "Plan": [ "StreamAgg_20 1.00 19006.88 root funcs:count(Column#6)->Column#4", "└─TableReader_21 1.00 19003.88 root data:StreamAgg_9", - " └─StreamAgg_9 
1.00 285050.00 batchCop[tiflash] funcs:count(1)->Column#6", - " └─Selection_19 10.00 285020.00 batchCop[tiflash] eq(test.t.a, 1)", - " └─TableFullScan_18 10000.00 255020.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + " └─StreamAgg_9 1.00 285030.00 batchCop[tiflash] funcs:count(1)->Column#6", + " └─Selection_19 10.00 285000.00 batchCop[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_18 10000.00 255000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ], "Warn": null }, @@ -74,8 +74,8 @@ "Plan": [ "StreamAgg_30 1.00 35.88 root funcs:count(Column#7)->Column#4", "└─IndexReader_31 1.00 32.88 root index:StreamAgg_10", - " └─StreamAgg_10 1.00 485.00 cop[tikv] funcs:count(1)->Column#7", - " └─IndexRangeScan_29 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + " └─StreamAgg_10 1.00 465.00 cop[tikv] funcs:count(1)->Column#7", + " └─IndexRangeScan_29 10.00 435.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" ], "Warn": null }, @@ -84,8 +84,8 @@ "Plan": [ "StreamAgg_18 1.00 35.88 root funcs:count(Column#6)->Column#4", "└─IndexReader_19 1.00 32.88 root index:StreamAgg_10", - " └─StreamAgg_10 1.00 485.00 cop[tikv] funcs:count(1)->Column#6", - " └─IndexRangeScan_17 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + " └─StreamAgg_10 1.00 465.00 cop[tikv] funcs:count(1)->Column#6", + " └─IndexRangeScan_17 10.00 435.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" ], "Warn": null }, @@ -94,10 +94,10 @@ "Plan": [ "HashAgg_21 1.00 11910.73 root funcs:count(Column#6)->Column#4", "└─TableReader_23 1.00 11877.13 root data:ExchangeSender_22", - " └─ExchangeSender_22 1.00 285050.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashAgg_9 1.00 285050.00 mpp[tiflash] funcs:count(1)->Column#6", - " └─Selection_20 10.00 285020.00 mpp[tiflash] eq(test.t.a, 1)", - " └─TableFullScan_19 10000.00 255020.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + " └─ExchangeSender_22 1.00 285030.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg_9 1.00 285030.00 mpp[tiflash] funcs:count(1)->Column#6", + " └─Selection_20 10.00 285000.00 mpp[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_19 10000.00 255000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" ], "Warn": null }, @@ -111,10 +111,10 @@ "Plan": [ "HashAgg_24 1.00 33.89 root funcs:count(Column#6)->Column#4", "└─TableReader_26 1.00 0.29 root data:ExchangeSender_25", - " └─ExchangeSender_25 1.00 285050.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashAgg_9 1.00 285050.00 mpp[tiflash] funcs:count(1)->Column#6", - " └─Selection_23 10.00 285020.00 mpp[tiflash] eq(test.t.a, 1)", - " └─TableFullScan_22 10000.00 255020.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + " └─ExchangeSender_25 1.00 285030.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg_9 1.00 285030.00 mpp[tiflash] funcs:count(1)->Column#6", + " └─Selection_23 10.00 285000.00 mpp[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_22 10000.00 255000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" ], "Warn": null }, @@ -123,8 +123,8 @@ "Plan": [ "StreamAgg_18 1.00 35.88 root funcs:count(Column#6)->Column#4", "└─IndexReader_19 1.00 32.88 root index:StreamAgg_10", - " └─StreamAgg_10 1.00 485.00 cop[tikv] funcs:count(1)->Column#6", - " └─IndexRangeScan_17 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + " └─StreamAgg_10 1.00 465.00 cop[tikv] funcs:count(1)->Column#6", + 
" └─IndexRangeScan_17 10.00 435.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" ], "Warn": null }, @@ -133,10 +133,10 @@ "Plan": [ "HashAgg_21 1.00 33.89 root funcs:count(Column#6)->Column#4", "└─TableReader_23 1.00 0.29 root data:ExchangeSender_22", - " └─ExchangeSender_22 1.00 285050.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashAgg_9 1.00 285050.00 mpp[tiflash] funcs:count(1)->Column#6", - " └─Selection_20 10.00 285020.00 mpp[tiflash] eq(test.t.a, 1)", - " └─TableFullScan_19 10000.00 255020.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + " └─ExchangeSender_22 1.00 285030.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg_9 1.00 285030.00 mpp[tiflash] funcs:count(1)->Column#6", + " └─Selection_20 10.00 285000.00 mpp[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_19 10000.00 255000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" ], "Warn": null }, @@ -150,10 +150,10 @@ "Plan": [ "HashAgg_25 1.00 33.60 root funcs:count(Column#6)->Column#4", "└─TableReader_27 1.00 0.29 root data:ExchangeSender_26", - " └─ExchangeSender_26 1.00 285050.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashAgg_10 1.00 285050.00 mpp[tiflash] funcs:count(1)->Column#6", - " └─Selection_24 10.00 285020.00 mpp[tiflash] eq(test.t.a, 1)", - " └─TableFullScan_23 10000.00 255020.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + " └─ExchangeSender_26 1.00 285030.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg_10 1.00 285030.00 mpp[tiflash] funcs:count(1)->Column#6", + " └─Selection_24 10.00 285000.00 mpp[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_23 10000.00 255000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" ], "Warn": null }, @@ -162,8 +162,8 @@ "Plan": [ "StreamAgg_19 1.00 35.88 root funcs:count(Column#6)->Column#4", "└─IndexReader_20 1.00 32.88 root index:StreamAgg_11", - " └─StreamAgg_11 1.00 485.00 cop[tikv] funcs:count(1)->Column#6", - " └─IndexRangeScan_18 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + " └─StreamAgg_11 1.00 465.00 cop[tikv] funcs:count(1)->Column#6", + " └─IndexRangeScan_18 10.00 435.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" ], "Warn": [ "MPP mode may be blocked because you have set a hint to read table `t` from TiKV." 
@@ -174,10 +174,10 @@ "Plan": [ "HashAgg_22 1.00 33.60 root funcs:count(Column#6)->Column#4", "└─TableReader_24 1.00 0.29 root data:ExchangeSender_23", - " └─ExchangeSender_23 1.00 285050.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashAgg_10 1.00 285050.00 mpp[tiflash] funcs:count(1)->Column#6", - " └─Selection_21 10.00 285020.00 mpp[tiflash] eq(test.t.a, 1)", - " └─TableFullScan_20 10000.00 255020.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + " └─ExchangeSender_23 1.00 285030.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg_10 1.00 285030.00 mpp[tiflash] funcs:count(1)->Column#6", + " └─Selection_21 10.00 285000.00 mpp[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_20 10000.00 255000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" ], "Warn": null } diff --git a/planner/core/testdata/integration_suite_out.json b/planner/core/testdata/integration_suite_out.json index a4ea379454661..87da8449649af 100644 --- a/planner/core/testdata/integration_suite_out.json +++ b/planner/core/testdata/integration_suite_out.json @@ -1714,7 +1714,7 @@ "SQL": "select f, g from t1 where f = 2 and g > 3", "Plan": [ "IndexReader_6 33.33 160.78 root index:IndexRangeScan_5", - "└─IndexRangeScan_5 33.33 1870.00 cop[tikv] table:t1, index:f_g(f, g) range:(2 3,2 +inf], keep order:false, stats:pseudo" + "└─IndexRangeScan_5 33.33 1850.00 cop[tikv] table:t1, index:f_g(f, g) range:(2 3,2 +inf], keep order:false, stats:pseudo" ], "Warnings": [ "Note 1105 unique index f_g of t1 is selected since the path only fetches limited number of rows with single scan" @@ -1816,10 +1816,10 @@ { "SQL": "select * from t where a > 1 order by f", "Plan": [ - "IndexLookUp_14 3333.33 139413.67 root ", + "IndexLookUp_14 3333.33 136747.00 root ", "├─Selection_13(Build) 3333.33 0.00 cop[tikv] gt(test.t.a, 1)", - "│ └─IndexFullScan_11 10000.00 555020.00 cop[tikv] table:t, index:f(f) keep order:true, stats:pseudo", - "└─TableRowIDScan_12(Probe) 3333.33 555020.00 cop[tikv] table:t keep order:false, stats:pseudo" + "│ └─IndexFullScan_11 10000.00 555000.00 cop[tikv] table:t, index:f(f) keep order:true, stats:pseudo", + "└─TableRowIDScan_12(Probe) 3333.33 555000.00 cop[tikv] table:t keep order:false, stats:pseudo" ], "Warnings": [ "Note 1105 [t,f,f_g] remain after pruning paths for t given Prop{SortItems: [{test.t.f asc}], TaskTp: rootTask}" @@ -1828,9 +1828,9 @@ { "SQL": "select * from t where f > 1", "Plan": [ - "TableReader_7 3333.33 88640.22 root data:Selection_6", - "└─Selection_6 3333.33 1140020.00 cop[tikv] gt(test.t.f, 1)", - " └─TableFullScan_5 10000.00 1110020.00 cop[tikv] table:t keep order:false, stats:pseudo" + "IndexLookUp_10 3333.33 86674.83 root ", + "├─IndexRangeScan_8(Build) 3333.33 185000.00 cop[tikv] table:t, index:f(f) range:(1,+inf], keep order:false, stats:pseudo", + "└─TableRowIDScan_9(Probe) 3333.33 185000.00 cop[tikv] table:t keep order:false, stats:pseudo" ], "Warnings": [ "Note 1105 [t,f,f_g] remain after pruning paths for t given Prop{SortItems: [], TaskTp: rootTask}" @@ -1840,7 +1840,7 @@ "SQL": "select f from t where f > 1", "Plan": [ "IndexReader_6 3333.33 11140.22 root index:IndexRangeScan_5", - "└─IndexRangeScan_5 3333.33 140020.00 cop[tikv] table:t, index:f(f) range:(1,+inf], keep order:false, stats:pseudo" + "└─IndexRangeScan_5 3333.33 140000.00 cop[tikv] table:t, index:f(f) range:(1,+inf], keep order:false, stats:pseudo" ], "Warnings": [ "Note 1105 [f,f_g] remain after pruning paths for t given Prop{SortItems: [], TaskTp: rootTask}" @@ -1849,10 +1849,10 @@ { "SQL": "select * from 
t where f > 3 and g = 5", "Plan": [ - "IndexLookUp_15 3.33 215.74 root ", - "├─IndexRangeScan_12(Build) 10.00 590.00 cop[tikv] table:t, index:g(g) range:[5,5], keep order:false, stats:pseudo", + "IndexLookUp_15 3.33 206.74 root ", + "├─IndexRangeScan_12(Build) 10.00 570.00 cop[tikv] table:t, index:g(g) range:[5,5], keep order:false, stats:pseudo", "└─Selection_14(Probe) 3.33 0.00 cop[tikv] gt(test.t.f, 3)", - " └─TableRowIDScan_13 10.00 590.00 cop[tikv] table:t keep order:false, stats:pseudo" + " └─TableRowIDScan_13 10.00 570.00 cop[tikv] table:t keep order:false, stats:pseudo" ], "Warnings": [ "Note 1105 [t,f_g,g] remain after pruning paths for t given Prop{SortItems: [], TaskTp: rootTask}" @@ -1861,10 +1861,10 @@ { "SQL": "select * from t where g = 5 order by f", "Plan": [ - "Sort_5 10.00 362.68 root test.t.f", - "└─IndexLookUp_13 10.00 239.01 root ", - " ├─IndexRangeScan_11(Build) 10.00 590.00 cop[tikv] table:t, index:g(g) range:[5,5], keep order:false, stats:pseudo", - " └─TableRowIDScan_12(Probe) 10.00 590.00 cop[tikv] table:t keep order:false, stats:pseudo" + "Sort_5 10.00 353.68 root test.t.f", + "└─IndexLookUp_13 10.00 230.01 root ", + " ├─IndexRangeScan_11(Build) 10.00 570.00 cop[tikv] table:t, index:g(g) range:[5,5], keep order:false, stats:pseudo", + " └─TableRowIDScan_12(Probe) 10.00 570.00 cop[tikv] table:t keep order:false, stats:pseudo" ], "Warnings": [ "Note 1105 [t,g] remain after pruning paths for t given Prop{SortItems: [], TaskTp: rootTask}" @@ -1873,10 +1873,10 @@ { "SQL": "select * from t where d = 3 order by c, e", "Plan": [ - "IndexLookUp_15 10.00 57230.78 root ", + "IndexLookUp_15 10.00 57222.78 root ", "├─Selection_14(Build) 10.00 0.00 cop[tikv] eq(test.t.d, 3)", - "│ └─IndexFullScan_12 10000.00 825020.00 cop[tikv] table:t, index:c_d_e(c, d, e) keep order:true, stats:pseudo", - "└─TableRowIDScan_13(Probe) 10.00 825020.00 cop[tikv] table:t keep order:false, stats:pseudo" + "│ └─IndexFullScan_12 10000.00 825000.00 cop[tikv] table:t, index:c_d_e(c, d, e) keep order:true, stats:pseudo", + "└─TableRowIDScan_13(Probe) 10.00 825000.00 cop[tikv] table:t keep order:false, stats:pseudo" ], "Warnings": [ "Note 1105 [t,c_d_e] remain after pruning paths for t given Prop{SortItems: [{test.t.c asc} {test.t.e asc}], TaskTp: rootTask}" @@ -1896,8 +1896,8 @@ "SQL": "explain format = 'verbose' select * from t where b > 5", "Plan": [ "TableReader_7 3.00 19.21 root data:Selection_6", - "└─Selection_6 3.00 215.00 cop[tikv] gt(test.t.b, 5)", - " └─TableFullScan_5 5.00 200.00 cop[tikv] table:t keep order:false" + "└─Selection_6 3.00 195.00 cop[tikv] gt(test.t.b, 5)", + " └─TableFullScan_5 5.00 180.00 cop[tikv] table:t keep order:false" ], "Warnings": null }, @@ -1906,9 +1906,9 @@ "Plan": [ "Limit_11 0.00 14.33 root offset:0, count:1", "└─TableReader_24 0.00 14.33 root data:Limit_23", - " └─Limit_23 0.00 215.00 cop[tikv] offset:0, count:1", - " └─Selection_22 0.00 215.00 cop[tikv] eq(test.t.b, 6)", - " └─TableFullScan_21 5.00 200.00 cop[tikv] table:t keep order:true" + " └─Limit_23 0.00 195.00 cop[tikv] offset:0, count:1", + " └─Selection_22 0.00 195.00 cop[tikv] eq(test.t.b, 6)", + " └─TableFullScan_21 5.00 180.00 cop[tikv] table:t keep order:true" ], "Warnings": null }, @@ -1917,9 +1917,9 @@ "Plan": [ "Limit_8 0.00 14.33 root offset:0, count:1", "└─TableReader_13 0.00 14.33 root data:Limit_12", - " └─Limit_12 0.00 215.00 cop[tikv] offset:0, count:1", - " └─Selection_11 0.00 215.00 cop[tikv] eq(test.t.b, 6)", - " └─TableFullScan_10 5.00 200.00 cop[tikv] table:t keep order:false" + " 
└─Limit_12 0.00 195.00 cop[tikv] offset:0, count:1", + " └─Selection_11 0.00 195.00 cop[tikv] eq(test.t.b, 6)", + " └─TableFullScan_10 5.00 180.00 cop[tikv] table:t keep order:false" ], "Warnings": null }, @@ -1931,9 +1931,9 @@ { "SQL": "explain format = 'verbose' select * from t where b > 5", "Plan": [ - "IndexLookUp_7 3.00 64.81 root ", - "├─IndexRangeScan_5(Build) 3.00 191.00 cop[tikv] table:t, index:idx_b(b) range:(5,+inf], keep order:false", - "└─TableRowIDScan_6(Probe) 3.00 191.00 cop[tikv] table:t keep order:false" + "IndexLookUp_7 3.00 57.91 root ", + "├─IndexRangeScan_5(Build) 3.00 171.00 cop[tikv] table:t, index:idx_b(b) range:(5,+inf], keep order:false", + "└─TableRowIDScan_6(Probe) 3.00 171.00 cop[tikv] table:t keep order:false" ], "Warnings": [ "Note 1105 [idx_b] remain after pruning paths for t given Prop{SortItems: [], TaskTp: rootTask}" @@ -1945,8 +1945,8 @@ "TopN_9 0.00 19.34 root test.t.a, offset:0, count:1", "└─IndexLookUp_16 0.00 19.33 root ", " ├─TopN_15(Build) 0.00 0.00 cop[tikv] test.t.a, offset:0, count:1", - " │ └─IndexRangeScan_13 0.00 20.00 cop[tikv] table:t, index:idx_b(b) range:[6,6], keep order:false", - " └─TableRowIDScan_14(Probe) 0.00 20.00 cop[tikv] table:t keep order:false" + " │ └─IndexRangeScan_13 0.00 0.00 cop[tikv] table:t, index:idx_b(b) range:[6,6], keep order:false", + " └─TableRowIDScan_14(Probe) 0.00 0.00 cop[tikv] table:t keep order:false" ], "Warnings": [ "Note 1105 [idx_b] remain after pruning paths for t given Prop{SortItems: [], TaskTp: copDoubleReadTask}" @@ -1956,9 +1956,9 @@ "SQL": "explain format = 'verbose' select * from t where b = 6 limit 1", "Plan": [ "IndexLookUp_13 0.00 19.33 root limit embedded(offset:0, count:1)", - "├─Limit_12(Build) 0.00 20.00 cop[tikv] offset:0, count:1", - "│ └─IndexRangeScan_10 0.00 20.00 cop[tikv] table:t, index:idx_b(b) range:[6,6], keep order:false", - "└─TableRowIDScan_11(Probe) 0.00 20.00 cop[tikv] table:t keep order:false" + "├─Limit_12(Build) 0.00 0.00 cop[tikv] offset:0, count:1", + "│ └─IndexRangeScan_10 0.00 0.00 cop[tikv] table:t, index:idx_b(b) range:[6,6], keep order:false", + "└─TableRowIDScan_11(Probe) 0.00 0.00 cop[tikv] table:t keep order:false" ], "Warnings": [ "Note 1105 [idx_b] remain after pruning paths for t given Prop{SortItems: [], TaskTp: copDoubleReadTask}" @@ -2340,8 +2340,8 @@ "Plan": [ "StreamAgg_20 1.00 12.68 root funcs:count(Column#9)->Column#4", "└─TableReader_21 1.00 9.68 root data:StreamAgg_8", - " └─StreamAgg_8 1.00 137.00 cop[tikv] funcs:count(1)->Column#9", - " └─TableFullScan_18 3.00 128.00 cop[tikv] table:t3 keep order:false" + " └─StreamAgg_8 1.00 117.00 cop[tikv] funcs:count(1)->Column#9", + " └─TableFullScan_18 3.00 108.00 cop[tikv] table:t3 keep order:false" ] }, { @@ -2349,8 +2349,8 @@ "Plan": [ "StreamAgg_25 1.00 8.18 root funcs:count(Column#7)->Column#4", "└─TableReader_26 1.00 5.17 root data:StreamAgg_9", - " └─StreamAgg_9 1.00 69.50 batchCop[tiflash] funcs:count(1)->Column#7", - " └─TableFullScan_24 3.00 60.50 batchCop[tiflash] table:t2 keep order:false" + " └─StreamAgg_9 1.00 49.50 batchCop[tiflash] funcs:count(1)->Column#7", + " └─TableFullScan_24 3.00 40.50 batchCop[tiflash] table:t2 keep order:false" ] }, { @@ -2358,7 +2358,7 @@ "Plan": [ "Sort_4 3.00 45.85 root test.t3.a", "└─TableReader_8 3.00 11.78 root data:TableFullScan_7", - " └─TableFullScan_7 3.00 128.00 cop[tikv] table:t3 keep order:false" + " └─TableFullScan_7 3.00 108.00 cop[tikv] table:t3 keep order:false" ] }, { @@ -2366,7 +2366,7 @@ "Plan": [ "Sort_4 3.00 45.85 root test.t3.b", 
"└─TableReader_8 3.00 11.78 root data:TableFullScan_7", - " └─TableFullScan_7 3.00 128.00 cop[tikv] table:t3 keep order:false" + " └─TableFullScan_7 3.00 108.00 cop[tikv] table:t3 keep order:false" ] }, { @@ -2375,7 +2375,7 @@ "TopN_7 1.00 13.22 root test.t3.a, offset:0, count:1", "└─TableReader_16 1.00 10.22 root data:TopN_15", " └─TopN_15 1.00 0.00 cop[tikv] test.t3.a, offset:0, count:1", - " └─TableFullScan_14 3.00 128.00 cop[tikv] table:t3 keep order:false" + " └─TableFullScan_14 3.00 108.00 cop[tikv] table:t3 keep order:false" ] }, { @@ -2384,19 +2384,19 @@ "TopN_7 1.00 13.22 root test.t3.b, offset:0, count:1", "└─TableReader_16 1.00 10.22 root data:TopN_15", " └─TopN_15 1.00 0.00 cop[tikv] test.t3.b, offset:0, count:1", - " └─TableFullScan_14 3.00 128.00 cop[tikv] table:t3 keep order:false" + " └─TableFullScan_14 3.00 108.00 cop[tikv] table:t3 keep order:false" ] }, { "SQL": "explain format = 'verbose' select count(*) from t2 group by a", "Plan": [ "TableReader_24 3.00 3.33 root data:ExchangeSender_23", - "└─ExchangeSender_23 3.00 77.00 mpp[tiflash] ExchangeType: PassThrough", + "└─ExchangeSender_23 3.00 57.00 mpp[tiflash] ExchangeType: PassThrough", " └─Projection_22 3.00 0.00 mpp[tiflash] Column#4", - " └─HashAgg_8 3.00 77.00 mpp[tiflash] group by:test.t2.a, funcs:count(1)->Column#4", - " └─ExchangeReceiver_21 3.00 68.00 mpp[tiflash] ", - " └─ExchangeSender_20 3.00 68.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t2.a, collate: binary]", - " └─TableFullScan_19 3.00 65.00 mpp[tiflash] table:t2 keep order:false" + " └─HashAgg_8 3.00 57.00 mpp[tiflash] group by:test.t2.a, funcs:count(1)->Column#4", + " └─ExchangeReceiver_21 3.00 48.00 mpp[tiflash] ", + " └─ExchangeSender_20 3.00 48.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t2.a, collate: binary]", + " └─TableFullScan_19 3.00 45.00 mpp[tiflash] table:t2 keep order:false" ] }, { @@ -2404,7 +2404,7 @@ "Plan": [ "StreamAgg_10 1.00 1.33 root funcs:count(1)->Column#4", "└─IndexReader_15 0.00 1.33 root index:IndexRangeScan_14", - " └─IndexRangeScan_14 0.00 20.00 cop[tikv] table:t3, index:c(b) range:[0,0], keep order:false" + " └─IndexRangeScan_14 0.00 0.00 cop[tikv] table:t3, index:c(b) range:[0,0], keep order:false" ] }, { @@ -2412,8 +2412,8 @@ "Plan": [ "StreamAgg_10 1.00 19.33 root funcs:count(test.t3.a)->Column#4", "└─IndexLookUp_17 0.00 19.33 root ", - " ├─IndexRangeScan_15(Build) 0.00 20.00 cop[tikv] table:t3, index:c(b) range:[0,0], keep order:false", - " └─TableRowIDScan_16(Probe) 0.00 20.00 cop[tikv] table:t3 keep order:false" + " ├─IndexRangeScan_15(Build) 0.00 0.00 cop[tikv] table:t3, index:c(b) range:[0,0], keep order:false", + " └─TableRowIDScan_16(Probe) 0.00 0.00 cop[tikv] table:t3 keep order:false" ] }, { @@ -2421,8 +2421,8 @@ "Plan": [ "StreamAgg_11 1.00 4.93 root funcs:count(1)->Column#4", "└─TableReader_23 0.00 4.93 root data:Selection_22", - " └─Selection_22 0.00 74.00 cop[tiflash] eq(test.t2.a, 0)", - " └─TableFullScan_21 3.00 65.00 cop[tiflash] table:t2 keep order:false" + " └─Selection_22 0.00 54.00 cop[tiflash] eq(test.t2.a, 0)", + " └─TableFullScan_21 3.00 45.00 cop[tiflash] table:t2 keep order:false" ] }, { @@ -2431,10 +2431,10 @@ "StreamAgg_10 1.00 60.22 root funcs:count(1)->Column#7", "└─HashJoin_40 3.00 51.22 root inner join, equal:[eq(test.t3.a, test.t3.b)]", " ├─IndexReader_28(Build) 3.00 11.66 root index:IndexFullScan_27", - " │ └─IndexFullScan_27 3.00 150.50 cop[tikv] table:t3, index:c(b) keep order:false", + " │ └─IndexFullScan_27 3.00 130.50 cop[tikv] 
table:t3, index:c(b) keep order:false", " └─TableReader_26(Probe) 3.00 10.76 root data:Selection_25", - " └─Selection_25 3.00 137.00 cop[tikv] not(isnull(test.t3.a))", - " └─TableFullScan_24 3.00 128.00 cop[tikv] table:t keep order:false" + " └─Selection_25 3.00 117.00 cop[tikv] not(isnull(test.t3.a))", + " └─TableFullScan_24 3.00 108.00 cop[tikv] table:t keep order:false" ] }, { @@ -2442,14 +2442,14 @@ "Plan": [ "StreamAgg_14 1.00 18.93 root funcs:count(1)->Column#7", "└─TableReader_46 3.00 9.93 root data:ExchangeSender_45", - " └─ExchangeSender_45 3.00 235.38 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_42 3.00 235.38 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.a)]", - " ├─ExchangeReceiver_21(Build) 3.00 77.00 mpp[tiflash] ", - " │ └─ExchangeSender_20 3.00 77.00 mpp[tiflash] ExchangeType: Broadcast", - " │ └─Selection_19 3.00 74.00 mpp[tiflash] not(isnull(test.t1.a))", - " │ └─TableFullScan_18 3.00 65.00 mpp[tiflash] table:t1 keep order:false", - " └─Selection_23(Probe) 3.00 74.00 mpp[tiflash] not(isnull(test.t2.a))", - " └─TableFullScan_22 3.00 65.00 mpp[tiflash] table:t2 keep order:false" + " └─ExchangeSender_45 3.00 195.38 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_42 3.00 195.38 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─ExchangeReceiver_21(Build) 3.00 57.00 mpp[tiflash] ", + " │ └─ExchangeSender_20 3.00 57.00 mpp[tiflash] ExchangeType: Broadcast", + " │ └─Selection_19 3.00 54.00 mpp[tiflash] not(isnull(test.t1.a))", + " │ └─TableFullScan_18 3.00 45.00 mpp[tiflash] table:t1 keep order:false", + " └─Selection_23(Probe) 3.00 54.00 mpp[tiflash] not(isnull(test.t2.a))", + " └─TableFullScan_22 3.00 45.00 mpp[tiflash] table:t2 keep order:false" ] }, { @@ -2458,16 +2458,16 @@ "StreamAgg_15 1.00 60.60 root funcs:count(1)->Column#10", "└─HashJoin_65 3.00 51.60 root inner join, equal:[eq(test.t1.b, test.t3.b)]", " ├─IndexReader_53(Build) 3.00 11.66 root index:IndexFullScan_52", - " │ └─IndexFullScan_52 3.00 150.50 cop[tikv] table:t3, index:c(b) keep order:false", + " │ └─IndexFullScan_52 3.00 130.50 cop[tikv] table:t3, index:c(b) keep order:false", " └─TableReader_39(Probe) 3.00 11.14 root data:ExchangeSender_38", - " └─ExchangeSender_38 3.00 264.38 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_29 3.00 264.38 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.a)]", - " ├─ExchangeReceiver_35(Build) 3.00 106.00 mpp[tiflash] ", - " │ └─ExchangeSender_34 3.00 106.00 mpp[tiflash] ExchangeType: Broadcast", - " │ └─Selection_33 3.00 103.00 mpp[tiflash] not(isnull(test.t1.a)), not(isnull(test.t1.b))", - " │ └─TableFullScan_32 3.00 94.00 mpp[tiflash] table:t1 keep order:false", - " └─Selection_37(Probe) 3.00 74.00 mpp[tiflash] not(isnull(test.t2.a))", - " └─TableFullScan_36 3.00 65.00 mpp[tiflash] table:t2 keep order:false" + " └─ExchangeSender_38 3.00 204.38 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_29 3.00 204.38 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─ExchangeReceiver_35(Build) 3.00 66.00 mpp[tiflash] ", + " │ └─ExchangeSender_34 3.00 66.00 mpp[tiflash] ExchangeType: Broadcast", + " │ └─Selection_33 3.00 63.00 mpp[tiflash] not(isnull(test.t1.a)), not(isnull(test.t1.b))", + " │ └─TableFullScan_32 3.00 54.00 mpp[tiflash] table:t1 keep order:false", + " └─Selection_37(Probe) 3.00 54.00 mpp[tiflash] not(isnull(test.t2.a))", + " └─TableFullScan_36 3.00 45.00 mpp[tiflash] table:t2 keep order:false" ] }, { @@ -2477,18 +2477,18 @@ "├─Selection_39(Build) 0.80 11.18 root eq(2, Column#18)", "│ 
└─StreamAgg_60 1.00 8.18 root funcs:count(Column#32)->Column#18", "│ └─TableReader_61 1.00 5.17 root data:StreamAgg_44", - "│ └─StreamAgg_44 1.00 69.50 batchCop[tiflash] funcs:count(1)->Column#32", - "│ └─TableFullScan_59 3.00 60.50 batchCop[tiflash] table:t1 keep order:false", + "│ └─StreamAgg_44 1.00 49.50 batchCop[tiflash] funcs:count(1)->Column#32", + "│ └─TableFullScan_59 3.00 40.50 batchCop[tiflash] table:t1 keep order:false", "└─Projection_20(Probe) 3.00 95.82 root 1->Column#28", " └─Apply_22 3.00 76.02 root CARTESIAN left outer join", " ├─TableReader_24(Build) 3.00 10.16 root data:TableFullScan_23", - " │ └─TableFullScan_23 3.00 128.00 cop[tikv] table:t keep order:false", + " │ └─TableFullScan_23 3.00 108.00 cop[tikv] table:t keep order:false", " └─Projection_27(Probe) 1.00 21.95 root 1->Column#26", " └─Limit_30 1.00 3.35 root offset:0, count:1", " └─TableReader_38 1.00 3.35 root data:ExchangeSender_37", - " └─ExchangeSender_37 1.00 79.50 mpp[tiflash] ExchangeType: PassThrough", - " └─Limit_36 1.00 79.50 mpp[tiflash] offset:0, count:1", - " └─TableFullScan_35 1.00 79.50 mpp[tiflash] table:t2 keep order:false" + " └─ExchangeSender_37 1.00 19.50 mpp[tiflash] ExchangeType: PassThrough", + " └─Limit_36 1.00 19.50 mpp[tiflash] offset:0, count:1", + " └─TableFullScan_35 1.00 19.50 mpp[tiflash] table:t2 keep order:false" ] }, { @@ -2498,12 +2498,12 @@ "└─MergeJoin_31 3.00 50.65 root inner join, left key:test.t1.a, right key:test.t2.a", " ├─Sort_29(Build) 3.00 20.83 root test.t2.a", " │ └─TableReader_28 3.00 6.56 root data:Selection_27", - " │ └─Selection_27 3.00 74.00 cop[tiflash] not(isnull(test.t2.a))", - " │ └─TableFullScan_26 3.00 65.00 cop[tiflash] table:t2 keep order:false", + " │ └─Selection_27 3.00 54.00 cop[tiflash] not(isnull(test.t2.a))", + " │ └─TableFullScan_26 3.00 45.00 cop[tiflash] table:t2 keep order:false", " └─Sort_22(Probe) 3.00 20.83 root test.t1.a", " └─TableReader_21 3.00 6.56 root data:Selection_20", - " └─Selection_20 3.00 74.00 cop[tiflash] not(isnull(test.t1.a))", - " └─TableFullScan_19 3.00 65.00 cop[tiflash] table:t1 keep order:false" + " └─Selection_20 3.00 54.00 cop[tiflash] not(isnull(test.t1.a))", + " └─TableFullScan_19 3.00 45.00 cop[tiflash] table:t1 keep order:false" ] } ] diff --git a/server/conn.go b/server/conn.go index ee6fd0225125d..bd6147e2514ae 100644 --- a/server/conn.go +++ b/server/conn.go @@ -2341,6 +2341,7 @@ func (cc *clientConn) handleChangeUser(ctx context.Context, data []byte) error { if err := cc.ctx.Close(); err != nil { logutil.Logger(ctx).Debug("close old context failed", zap.Error(err)) } + cc.ctx = nil if err := cc.openSessionAndDoAuth(pass, ""); err != nil { return err } diff --git a/server/conn_stmt.go b/server/conn_stmt.go index 5d76adefbc6b4..f1e519eecbe55 100644 --- a/server/conn_stmt.go +++ b/server/conn_stmt.go @@ -130,13 +130,6 @@ func (cc *clientConn) handleStmtExecute(ctx context.Context, data []byte) (err e stmtID := binary.LittleEndian.Uint32(data[0:4]) pos += 4 - if topsqlstate.TopSQLEnabled() { - preparedStmt, _ := cc.preparedStmtID2CachePreparedStmt(stmtID) - if preparedStmt != nil && preparedStmt.SQLDigest != nil { - ctx = topsql.AttachSQLInfo(ctx, preparedStmt.NormalizedSQL, preparedStmt.SQLDigest, "", nil, false) - } - } - stmt := cc.ctx.GetStatement(int(stmtID)) if stmt == nil { return mysql.NewErr(mysql.ErrUnknownStmtHandler, diff --git a/server/conn_test.go b/server/conn_test.go index 2560ebcdc0df9..f6fbcb5e53c8e 100644 --- a/server/conn_test.go +++ b/server/conn_test.go @@ -41,6 +41,125 @@ import ( 
"github.com/tikv/client-go/v2/testutils" ) +type Issue33699CheckType struct { + name string + defVal string + setVal string + isSessionVariable bool +} + +func (c *Issue33699CheckType) toSetSessionVar() string { + if c.isSessionVariable { + return fmt.Sprintf("set session %s=%s", c.name, c.setVal) + } + return fmt.Sprintf("set @%s=%s", c.name, c.setVal) +} + +func (c *Issue33699CheckType) toGetSessionVar() string { + if c.isSessionVariable { + return fmt.Sprintf("select @@session.%s", c.name) + } + return fmt.Sprintf("select @%s", c.name) +} + +func TestIssue33699(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + var outBuffer bytes.Buffer + tidbdrv := NewTiDBDriver(store) + cfg := newTestConfig() + cfg.Port, cfg.Status.StatusPort = 0, 0 + cfg.Status.ReportStatus = false + server, err := NewServer(cfg, tidbdrv) + require.NoError(t, err) + defer server.Close() + + cc := &clientConn{ + connectionID: 1, + salt: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14}, + server: server, + pkt: &packetIO{ + bufWriter: bufio.NewWriter(&outBuffer), + }, + collation: mysql.DefaultCollationID, + peerHost: "localhost", + alloc: arena.NewAllocator(512), + chunkAlloc: chunk.NewAllocator(), + capability: mysql.ClientProtocol41, + } + + tk := testkit.NewTestKit(t, store) + ctx := &TiDBContext{Session: tk.Session()} + cc.ctx = ctx + + // change user. + doChangeUser := func() { + userData := append([]byte("root"), 0x0, 0x0) + userData = append(userData, []byte("test")...) + userData = append(userData, 0x0) + changeUserReq := dispatchInput{ + com: mysql.ComChangeUser, + in: userData, + err: nil, + out: []byte{0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0}, + } + inBytes := append([]byte{changeUserReq.com}, changeUserReq.in...) + err = cc.dispatch(context.Background(), inBytes) + require.Equal(t, changeUserReq.err, err) + if err == nil { + err = cc.flush(context.TODO()) + require.NoError(t, err) + require.Equal(t, changeUserReq.out, outBuffer.Bytes()) + } else { + _ = cc.flush(context.TODO()) + } + outBuffer.Reset() + } + // check variable. + checks := []Issue33699CheckType{ + { // self define. + "a", + "", + "1", + false, + }, + { // session variable + "net_read_timeout", + "30", + "1234", + true, + }, + { + "net_write_timeout", + "60", + "1234", + true, + }, + } + + // default; + for _, ck := range checks { + tk.MustQuery(ck.toGetSessionVar()).Check(testkit.Rows(ck.defVal)) + } + // set; + for _, ck := range checks { + tk.MustExec(ck.toSetSessionVar()) + } + // check after set. + for _, ck := range checks { + tk.MustQuery(ck.toGetSessionVar()).Check(testkit.Rows(ck.setVal)) + } + doChangeUser() + require.NotEqual(t, ctx, cc.ctx) + require.NotEqual(t, ctx.Session, cc.ctx.Session) + // new session,so values is defaults; + tk.SetSession(cc.ctx.Session) // set new session. 
+ for _, ck := range checks { + tk.MustQuery(ck.toGetSessionVar()).Check(testkit.Rows(ck.defVal)) + } +} + func TestMalformHandshakeHeader(t *testing.T) { data := []byte{0x00} var p handshakeResponse41 diff --git a/session/session.go b/session/session.go index f0e95db11e490..32af323a6d44c 100644 --- a/session/session.go +++ b/session/session.go @@ -51,7 +51,7 @@ import ( "github.com/pingcap/tidb/sessiontxn/staleread" "github.com/pingcap/tidb/store/driver/txn" "github.com/pingcap/tidb/store/helper" - "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/temptable" "github.com/pingcap/tidb/util/logutil/consistency" "github.com/pingcap/tidb/util/topsql" @@ -632,10 +632,11 @@ type cachedTableRenewLease struct { func (c *cachedTableRenewLease) start(ctx context.Context) error { c.exit = make(chan struct{}) c.lease = make([]uint64, len(c.tables)) - wg := make(chan error) + wg := make(chan error, len(c.tables)) ith := 0 - for tid, raw := range c.tables { - go c.keepAlive(ctx, wg, raw.(tables.StateRemote), tid, &c.lease[ith]) + for _, raw := range c.tables { + tbl := raw.(table.CachedTable) + go tbl.WriteLockAndKeepAlive(ctx, c.exit, &c.lease[ith], wg) ith++ } @@ -650,47 +651,6 @@ func (c *cachedTableRenewLease) start(ctx context.Context) error { return err } -const cacheTableWriteLease = 5 * time.Second - -func (c *cachedTableRenewLease) keepAlive(ctx context.Context, wg chan error, handle tables.StateRemote, tid int64, leasePtr *uint64) { - writeLockLease, err := handle.LockForWrite(ctx, tid, cacheTableWriteLease) - atomic.StoreUint64(leasePtr, writeLockLease) - wg <- err - if err != nil { - logutil.Logger(ctx).Warn("[cached table] lock for write lock fail", zap.Error(err)) - return - } - - t := time.NewTicker(cacheTableWriteLease) - defer t.Stop() - for { - select { - case <-t.C: - if err := c.renew(ctx, handle, tid, leasePtr); err != nil { - logutil.Logger(ctx).Warn("[cached table] renew write lock lease fail", zap.Error(err)) - return - } - case <-c.exit: - return - } - } -} - -func (c *cachedTableRenewLease) renew(ctx context.Context, handle tables.StateRemote, tid int64, leasePtr *uint64) error { - oldLease := atomic.LoadUint64(leasePtr) - physicalTime := oracle.GetTimeFromTS(oldLease) - newLease := oracle.GoTimeToTS(physicalTime.Add(cacheTableWriteLease)) - - succ, err := handle.RenewWriteLease(ctx, tid, newLease) - if err != nil { - return errors.Trace(err) - } - if succ { - atomic.StoreUint64(leasePtr, newLease) - } - return nil -} - func (c *cachedTableRenewLease) stop(ctx context.Context) { close(c.exit) } @@ -2107,10 +2067,11 @@ func resetCTEStorageMap(se *session) error { return errors.New("type assertion for CTEStorageMap failed") } for _, v := range storageMap { - // No need to lock IterInTbl. v.ResTbl.Lock() - defer v.ResTbl.Unlock() err1 := v.ResTbl.DerefAndClose() + // Make sure we do not hold the lock for longer than necessary. + v.ResTbl.Unlock() + // No need to lock IterInTbl. 
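+ // Note: the removed "defer v.ResTbl.Unlock()" sat inside this loop, and a defer in
+ // a loop only runs when the function returns, so every ResTbl in storageMap would
+ // have stayed locked until resetCTEStorageMap finished.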
err2 := v.IterInTbl.DerefAndClose() if err1 != nil { return err1 @@ -2274,9 +2235,9 @@ func (s *session) cachedPlanExec(ctx context.Context, // IsCachedExecOk checks if we can execute using the plan cached in the prepared structure. // Be careful with the short path; the current precondition is the cached plan satisfying // IsPointGetWithPKOrUniqueKeyByAutoCommit -func (s *session) IsCachedExecOk(ctx context.Context, preparedStmt *plannercore.CachedPrepareStmt) (bool, error) { +func (s *session) IsCachedExecOk(ctx context.Context, preparedStmt *plannercore.CachedPrepareStmt, isStaleness bool) (bool, error) { prepared := preparedStmt.PreparedAst - if prepared.CachedPlan == nil || preparedStmt.SnapshotTSEvaluator != nil { + if prepared.CachedPlan == nil || isStaleness { return false, nil } // check auto commit @@ -2357,7 +2318,7 @@ func (s *session) ExecutePreparedStmt(ctx context.Context, stmtID uint32, args [ } executor.CountStmtNode(preparedStmt.PreparedAst.Stmt, s.sessionVars.InRestrictedSQL) - ok, err = s.IsCachedExecOk(ctx, preparedStmt) + ok, err = s.IsCachedExecOk(ctx, preparedStmt, snapshotTS != 0) if err != nil { return nil, err } diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 1dae2a52a93bf..9cd113476763b 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -565,11 +565,7 @@ var defaultSysVars = []*SysVar{ return nil }}, {Scope: ScopeGlobal, Name: TiDBEnableTelemetry, Value: BoolToOnOff(DefTiDBEnableTelemetry), Type: TypeBool}, - {Scope: ScopeGlobal, Name: TiDBEnableHistoricalStats, Value: Off, Type: TypeBool, GetGlobal: func(s *SessionVars) (string, error) { - return getTiDBTableValue(s, "tidb_enable_historical_stats", Off) - }, SetGlobal: func(s *SessionVars, val string) error { - return setTiDBTableValue(s, "tidb_enable_historical_stats", val, "Current historical statistics enable status") - }}, + {Scope: ScopeGlobal, Name: TiDBEnableHistoricalStats, Value: Off, Type: TypeBool}, /* tikv gc metrics */ {Scope: ScopeGlobal, Name: TiDBGCEnable, Value: On, Type: TypeBool, GetGlobal: func(s *SessionVars) (string, error) { return getTiDBTableValue(s, "tikv_gc_enable", On) diff --git a/sessionctx/variable/varsutil.go b/sessionctx/variable/varsutil.go index 977ee42a69da6..495fc1f4d5b58 100644 --- a/sessionctx/variable/varsutil.go +++ b/sessionctx/variable/varsutil.go @@ -229,6 +229,10 @@ func SetStmtVar(vars *SessionVars, name string, value string) error { return vars.SetStmtVar(name, sVal) } +// Deprecated: Read the value from the mysql.tidb table. +// This supports the use case that a TiDB server *older* than 5.0 is a member of the cluster, +// where system variables such as tidb_gc_concurrency, tidb_gc_enable, and tidb_gc_life_time +// do not exist. func getTiDBTableValue(vars *SessionVars, name, defaultVal string) (string, error) { val, err := vars.GlobalVarsAccessor.GetTiDBTableValue(name) if err != nil { // handle empty result or other errors @@ -237,6 +241,10 @@ func getTiDBTableValue(vars *SessionVars, name, defaultVal string) (string, erro return trueFalseToOnOff(val), nil } +// Deprecated: Set the value from the mysql.tidb table. +// This supports the use case that a TiDB server *older* than 5.0 is a member of the cluster, +// where system variables such as tidb_gc_concurrency, tidb_gc_enable, and tidb_gc_life_time +// do not exist.
func setTiDBTableValue(vars *SessionVars, name, value, comment string) error { value = OnOffToTrueFalse(value) return vars.GlobalVarsAccessor.SetTiDBTableValue(name, value, comment) } diff --git a/table/table.go b/table/table.go index 8c90b9e7323da..775cb03bb6cf9 100644 --- a/table/table.go +++ b/table/table.go @@ -260,9 +260,14 @@ type CachedTable interface { Init(exec sqlexec.SQLExecutor) error // TryReadFromCache checks if the cache table is readable. - TryReadFromCache(ts uint64, leaseDuration time.Duration) kv.MemBuffer + TryReadFromCache(ts uint64, leaseDuration time.Duration) (kv.MemBuffer, bool) // UpdateLockForRead If you cannot meet the conditions of the read buffer, // you need to update the lock information and read the data from the original table UpdateLockForRead(ctx context.Context, store kv.Storage, ts uint64, leaseDuration time.Duration) + + // WriteLockAndKeepAlive first obtains the write lock, then renews the lease to keep the lock alive. + // 'exit' is a channel used to tell the keep-alive goroutine to exit. + // The result is sent to the 'wg' channel. + WriteLockAndKeepAlive(ctx context.Context, exit chan struct{}, leasePtr *uint64, wg chan error) } diff --git a/table/tables/cache.go b/table/tables/cache.go index 4de65204cb944..fc9f3f52ce16c 100644 --- a/table/tables/cache.go +++ b/table/tables/cache.go @@ -23,10 +23,12 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/log" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/sqlexec" "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/tikv" @@ -40,13 +42,30 @@ var ( type cachedTable struct { TableCommon cacheData atomic.Value - handle StateRemote totalSize int64 + // StateRemote is not thread-safe, so this tokenLimit is used to allow only one visitor at a time. + tokenLimit +} + +type tokenLimit chan StateRemote + +func (t tokenLimit) TakeStateRemoteHandle() StateRemote { + handle := <-t + return handle +} - renewReadLease tokenLimit +func (t tokenLimit) TakeStateRemoteHandleNoWait() StateRemote { + select { + case handle := <-t: + return handle + default: + return nil + } } -type tokenLimit = chan struct{} +func (t tokenLimit) PutStateRemoteHandle(handle StateRemote) { + t <- handle +} // cacheData packs the cache data and lease. type cacheData struct { @@ -71,10 +90,10 @@ func newMemBuffer(store kv.Storage) (kv.MemBuffer, error) { return buffTxn.GetMemBuffer(), nil } -func (c *cachedTable) TryReadFromCache(ts uint64, leaseDuration time.Duration) kv.MemBuffer { +func (c *cachedTable) TryReadFromCache(ts uint64, leaseDuration time.Duration) (kv.MemBuffer, bool /*loading*/) { tmp := c.cacheData.Load() if tmp == nil { - return nil + return nil, false } data := tmp.(*cacheData) if ts >= data.Start && ts < data.Lease { @@ -88,22 +107,22 @@ func (c *cachedTable) TryReadFromCache(ts uint64, leaseDuration time.Duration) k }) if distance >= 0 && distance <= leaseDuration/2 || triggerFailpoint { - select { - case c.renewReadLease <- struct{}{}: - go c.renewLease(ts, data, leaseDuration) - default: + if h := c.TakeStateRemoteHandleNoWait(); h != nil { + go c.renewLease(h, ts, data, leaseDuration) } } - return data.MemBuffer + // If data is not nil, but data.MemBuffer is nil, it means the data is being + // loaded by a background goroutine.
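+ // The new bool return value lets the caller tell "no usable cache" (nil, false)
+ // apart from "a cache entry exists but its buffer is still being loaded" (nil, true).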
+ return data.MemBuffer, data.MemBuffer == nil } - return nil + return nil, false } // newCachedTable creates a new CachedTable instance func newCachedTable(tbl *TableCommon) (table.Table, error) { ret := &cachedTable{ - TableCommon: *tbl, - renewReadLease: make(chan struct{}, 1), + TableCommon: *tbl, + tokenLimit: make(chan StateRemote, 1), } return ret, nil } @@ -115,11 +134,12 @@ func (c *cachedTable) Init(exec sqlexec.SQLExecutor) error { if !ok { return errors.New("Need sqlExec rather than sqlexec.SQLExecutor") } - c.handle = NewStateRemote(raw) + handle := NewStateRemote(raw) + c.PutStateRemoteHandle(handle) return nil } -func (c *cachedTable) loadDataFromOriginalTable(store kv.Storage, lease uint64) (kv.MemBuffer, uint64, int64, error) { +func (c *cachedTable) loadDataFromOriginalTable(store kv.Storage) (kv.MemBuffer, uint64, int64, error) { buffer, err := newMemBuffer(store) if err != nil { return nil, 0, 0, err } @@ -132,9 +152,6 @@ return errors.Trace(err) } startTS = txn.StartTS() - if startTS >= lease { - return errors.New("the loaded data is outdated for caching") - } it, err := txn.Iter(prefix, prefix.PrefixNext()) if err != nil { return errors.Trace(err) } @@ -165,45 +182,57 @@ } func (c *cachedTable) UpdateLockForRead(ctx context.Context, store kv.Storage, ts uint64, leaseDuration time.Duration) { - select { - case c.renewReadLease <- struct{}{}: - go c.updateLockForRead(ctx, store, ts, leaseDuration) - default: - // There is a inflight calling already. + if h := c.TakeStateRemoteHandle(); h != nil { + go c.updateLockForRead(ctx, h, store, ts, leaseDuration) } } -func (c *cachedTable) updateLockForRead(ctx context.Context, store kv.Storage, ts uint64, leaseDuration time.Duration) { +func (c *cachedTable) updateLockForRead(ctx context.Context, handle StateRemote, store kv.Storage, ts uint64, leaseDuration time.Duration) { defer func() { if r := recover(); r != nil { log.Error("panic in the recoverable goroutine", zap.Reflect("r", r), zap.Stack("stack trace")) } - <-c.renewReadLease + c.PutStateRemoteHandle(handle) }() // Load data from the original table and update the lock information. tid := c.Meta().ID lease := leaseFromTS(ts, leaseDuration) - succ, err := c.handle.LockForRead(ctx, tid, lease) + succ, err := handle.LockForRead(ctx, tid, lease) if err != nil { log.Warn("lock cached table for read", zap.Error(err)) return } if succ { - mb, startTS, totalSize, err := c.loadDataFromOriginalTable(store, lease) - if err != nil { - log.Info("load data from table", zap.Error(err)) - return - } - c.cacheData.Store(&cacheData{ - Start: startTS, + Start: ts, Lease: lease, - MemBuffer: mb, + MemBuffer: nil, // Async loading, this will be set later. }) - atomic.StoreInt64(&c.totalSize, totalSize) + + // Make the data loading process async, in case loading the data takes longer than the + // lease duration; otherwise the loaded data would already be stale and the process would repeat forever.
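+ // The loading is two-phase: the placeholder stored above carries Start == ts and a
+ // nil MemBuffer, and the goroutine below installs the loaded buffer only while
+ // cacheData.Start still equals that ts, i.e. no newer placeholder has replaced it.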
+ go func() { + start := time.Now() + mb, startTS, totalSize, err := c.loadDataFromOriginalTable(store) + metrics.LoadTableCacheDurationHistogram.Observe(time.Since(start).Seconds()) + if err != nil { + log.Info("load data from table fail", zap.Error(err)) + return + } + + tmp := c.cacheData.Load().(*cacheData) + if tmp != nil && tmp.Start == ts { + c.cacheData.Store(&cacheData{ + Start: startTS, + Lease: tmp.Lease, + MemBuffer: mb, + }) + atomic.StoreInt64(&c.totalSize, totalSize) + } + }() } // Current status is not suitable to cache. } @@ -215,11 +244,11 @@ func (c *cachedTable) AddRecord(sctx sessionctx.Context, r []types.Datum, opts . if atomic.LoadInt64(&c.totalSize) > cachedTableSizeLimit { return nil, table.ErrOptOnCacheTable.GenWithStackByArgs("table too large") } - txnCtxAddCachedTable(sctx, c.Meta().ID, c.handle) + txnCtxAddCachedTable(sctx, c.Meta().ID, c) return c.TableCommon.AddRecord(sctx, r, opts...) } -func txnCtxAddCachedTable(sctx sessionctx.Context, tid int64, handle StateRemote) { +func txnCtxAddCachedTable(sctx sessionctx.Context, tid int64, handle *cachedTable) { txnCtx := sctx.GetSessionVars().TxnCtx if txnCtx.CachedTables == nil { txnCtx.CachedTables = make(map[int64]interface{}) @@ -235,29 +264,31 @@ func (c *cachedTable) UpdateRecord(ctx context.Context, sctx sessionctx.Context, if atomic.LoadInt64(&c.totalSize) > cachedTableSizeLimit { return table.ErrOptOnCacheTable.GenWithStackByArgs("table too large") } - txnCtxAddCachedTable(sctx, c.Meta().ID, c.handle) + txnCtxAddCachedTable(sctx, c.Meta().ID, c) return c.TableCommon.UpdateRecord(ctx, sctx, h, oldData, newData, touched) } // RemoveRecord implements table.Table RemoveRecord interface. func (c *cachedTable) RemoveRecord(sctx sessionctx.Context, h kv.Handle, r []types.Datum) error { - txnCtxAddCachedTable(sctx, c.Meta().ID, c.handle) + txnCtxAddCachedTable(sctx, c.Meta().ID, c) return c.TableCommon.RemoveRecord(sctx, h, r) } // TestMockRenewLeaseABA2 is used by test function TestRenewLeaseABAFailPoint. var TestMockRenewLeaseABA2 chan struct{} -func (c *cachedTable) renewLease(ts uint64, data *cacheData, leaseDuration time.Duration) { - defer func() { <-c.renewReadLease }() - +func (c *cachedTable) renewLease(handle StateRemote, ts uint64, data *cacheData, leaseDuration time.Duration) { failpoint.Inject("mockRenewLeaseABA2", func(_ failpoint.Value) { + c.PutStateRemoteHandle(handle) <-TestMockRenewLeaseABA2 + c.TakeStateRemoteHandle() }) + defer c.PutStateRemoteHandle(handle) + tid := c.Meta().ID lease := leaseFromTS(ts, leaseDuration) - newLease, err := c.handle.RenewReadLease(context.Background(), tid, data.Lease, lease) + newLease, err := handle.RenewReadLease(context.Background(), tid, data.Lease, lease) if err != nil && !kv.IsTxnRetryableError(err) { log.Warn("Renew read lease error", zap.Error(err)) } @@ -273,3 +304,54 @@ func (c *cachedTable) renewLease(ts uint64, data *cacheData, leaseDuration time. 
TestMockRenewLeaseABA2 <- struct{}{} }) } + +const cacheTableWriteLease = 5 * time.Second + +func (c *cachedTable) WriteLockAndKeepAlive(ctx context.Context, exit chan struct{}, leasePtr *uint64, wg chan error) { + writeLockLease, err := c.lockForWrite(ctx) + atomic.StoreUint64(leasePtr, writeLockLease) + wg <- err + if err != nil { + logutil.Logger(ctx).Warn("[cached table] lock for write lock fail", zap.Error(err)) + return + } + + t := time.NewTicker(cacheTableWriteLease) + defer t.Stop() + for { + select { + case <-t.C: + if err := c.renew(ctx, leasePtr); err != nil { + logutil.Logger(ctx).Warn("[cached table] renew write lock lease fail", zap.Error(err)) + return + } + case <-exit: + return + } + } +} + +func (c *cachedTable) renew(ctx context.Context, leasePtr *uint64) error { + oldLease := atomic.LoadUint64(leasePtr) + physicalTime := oracle.GetTimeFromTS(oldLease) + newLease := oracle.GoTimeToTS(physicalTime.Add(cacheTableWriteLease)) + + h := c.TakeStateRemoteHandle() + defer c.PutStateRemoteHandle(h) + + succ, err := h.RenewWriteLease(ctx, c.Meta().ID, newLease) + if err != nil { + return errors.Trace(err) + } + if succ { + atomic.StoreUint64(leasePtr, newLease) + } + return nil +} + +func (c *cachedTable) lockForWrite(ctx context.Context) (uint64, error) { + handle := c.TakeStateRemoteHandle() + defer c.PutStateRemoteHandle(handle) + + return handle.LockForWrite(ctx, c.Meta().ID, cacheTableWriteLease) +} diff --git a/table/tables/state_remote.go b/table/tables/state_remote.go index 6abbebf3e30a5..4a8d0b39b632c 100644 --- a/table/tables/state_remote.go +++ b/table/tables/state_remote.go @@ -17,7 +17,6 @@ package tables import ( "context" "strconv" - "sync" "time" "github.com/pingcap/errors" @@ -56,6 +55,7 @@ func (l CachedTableLockType) String() string { } // StateRemote is the interface to control the remote state of the cached table's lock meta information. +// IMPORTANT: It's not thread-safe; the caller should be aware of that! type StateRemote interface { // Load obtains the corresponding lock type and lease value according to the tableID Load(ctx context.Context, tid int64) (CachedTableLockType, uint64, error) @@ -82,7 +82,13 @@ type sqlExec interface { type stateRemoteHandle struct { exec sqlExec - sync.Mutex + + // Local state; this could be stale. + // Since stateRemoteHandle is used in a single thread, it's safe for all operations + // to check the local state first to avoid unnecessary remote TiKV access. + lockType CachedTableLockType + lease uint64 + oldReadLease uint64 } // NewStateRemote creates a StateRemote object. @@ -95,16 +101,22 @@ var _ StateRemote = &stateRemoteHandle{} func (h *stateRemoteHandle) Load(ctx context.Context, tid int64) (CachedTableLockType, uint64, error) { - lockType, lease, _, err := h.loadRow(ctx, tid) + lockType, lease, _, err := h.loadRow(ctx, tid, false) return lockType, lease, err } func (h *stateRemoteHandle) LockForRead(ctx context.Context, tid int64, newLease uint64) ( /*succ*/ bool, error) { - h.Lock() - defer h.Unlock() succ := false - err := h.runInTxn(ctx, func(ctx context.Context, now uint64) error { - lockType, lease, _, err := h.loadRow(ctx, tid) + if h.lease >= newLease { + // There is a write lock or a write intention, so don't lock for read.
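+ // This early return is what the local lockType/lease copy buys: when the local
+ // state already shows the read lock cannot be granted, the remote row is never read.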
+ switch h.lockType { + case CachedTableLockIntend, CachedTableLockWrite: + return false, nil + } + } + + err := h.runInTxn(ctx, false, func(ctx context.Context, now uint64) error { + lockType, lease, _, err := h.loadRow(ctx, tid, false) if err != nil { return errors.Trace(err) } @@ -137,9 +149,17 @@ // LockForWrite tries to add a write lock to the table with the specified tableID, and returns the write lock lease. func (h *stateRemoteHandle) LockForWrite(ctx context.Context, tid int64, leaseDuration time.Duration) (uint64, error) { - h.Lock() - defer h.Unlock() var ret uint64 + + if h.lockType == CachedTableLockWrite { + safe := oracle.GoTimeToTS(time.Now().Add(leaseDuration / 2)) + if h.lease > safe { + // It means the remote has already been write locked and the lock will be valid for a while. + // So we can return directly. + return h.lease, nil + } + } + for { waitAndRetry, lease, err := h.lockForWriteOnce(ctx, tid, leaseDuration) if err != nil { return 0, err } @@ -155,8 +175,15 @@ } func (h *stateRemoteHandle) lockForWriteOnce(ctx context.Context, tid int64, leaseDuration time.Duration) (waitAndRetry time.Duration, ts uint64, err error) { - err = h.runInTxn(ctx, func(ctx context.Context, now uint64) error { - lockType, lease, oldReadLease, err := h.loadRow(ctx, tid) + var ( + _updateLocal bool + _lockType CachedTableLockType + _lease uint64 + _oldReadLease uint64 + ) + + err = h.runInTxn(ctx, true, func(ctx context.Context, now uint64) error { + lockType, lease, oldReadLease, err := h.loadRow(ctx, tid, true) if err != nil { return errors.Trace(err) } @@ -175,6 +202,11 @@ if err = h.updateRow(ctx, tid, "WRITE", ts); err != nil { return errors.Trace(err) } + { + _updateLocal = true + _lockType = CachedTableLockWrite + _lease = ts + } case CachedTableLockRead: // Change from READ to INTEND if _, err = h.execSQL(ctx, @@ -187,23 +219,47 @@ // Wait for lease to expire, and then retry. waitAndRetry = waitForLeaseExpire(lease, now) + { + _updateLocal = true + _lockType = CachedTableLockIntend + _oldReadLease = lease + _lease = ts + } case CachedTableLockIntend: // `now` exceeding `oldReadLease` means the wait for the READ lock lease is done; it's safe to proceed here. if now > oldReadLease { - if lockType == CachedTableLockIntend { - if err = h.updateRow(ctx, tid, "WRITE", ts); err != nil { - return errors.Trace(err) - } + if err = h.updateRow(ctx, tid, "WRITE", ts); err != nil { + return errors.Trace(err) + } + { + _updateLocal = true + _lockType = CachedTableLockWrite + _lease = ts + } return nil } // Otherwise, the WRITE should wait for the READ lease to expire, // and then retry changing the lock to WRITE. waitAndRetry = waitForLeaseExpire(oldReadLease, now) + case CachedTableLockWrite: + if err = h.updateRow(ctx, tid, "WRITE", ts); err != nil { + return errors.Trace(err) + } + { + _updateLocal = true + _lockType = CachedTableLockWrite + _lease = ts + } } return nil }) + if err == nil && _updateLocal { + h.lockType = _lockType + h.lease = _lease + h.oldReadLease = _oldReadLease + } + return } @@ -223,11 +279,9 @@ func waitForLeaseExpire(oldReadLease, now uint64) time.Duration { // RenewReadLease renews the read lock lease. // Return the current lease value on success, and return 0 on fail.
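// Note: RenewReadLease deliberately runs optimistically (runInTxn(ctx, false, ...) and
// loadRow(ctx, tid, false)), while the write-lock paths pass pessimistic=true and read
// the row with SELECT ... FOR UPDATE.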
func (h *stateRemoteHandle) RenewReadLease(ctx context.Context, tid int64, oldLocalLease, newValue uint64) (uint64, error) { - h.Lock() - defer h.Unlock() var newLease uint64 - err := h.runInTxn(ctx, func(ctx context.Context, now uint64) error { - lockType, remoteLease, _, err := h.loadRow(ctx, tid) + err := h.runInTxn(ctx, false, func(ctx context.Context, now uint64) error { + lockType, remoteLease, _, err := h.loadRow(ctx, tid, false) if err != nil { return errors.Trace(err) } @@ -267,15 +321,18 @@ func (h *stateRemoteHandle) RenewReadLease(ctx context.Context, tid int64, oldLo } return nil }) + return newLease, err } func (h *stateRemoteHandle) RenewWriteLease(ctx context.Context, tid int64, newLease uint64) (bool, error) { - h.Lock() - defer h.Unlock() var succ bool - err := h.runInTxn(ctx, func(ctx context.Context, now uint64) error { - lockType, oldLease, _, err := h.loadRow(ctx, tid) + var ( + _lockType CachedTableLockType + _lease uint64 + ) + err := h.runInTxn(ctx, true, func(ctx context.Context, now uint64) error { + lockType, oldLease, _, err := h.loadRow(ctx, tid, true) if err != nil { return errors.Trace(err) } @@ -295,13 +352,25 @@ func (h *stateRemoteHandle) RenewWriteLease(ctx context.Context, tid int64, newL } } succ = true + _lockType = CachedTableLockWrite + _lease = newLease return nil }) + + if succ { + h.lockType = _lockType + h.lease = _lease + } return succ, err } -func (h *stateRemoteHandle) beginTxn(ctx context.Context) error { - _, err := h.execSQL(ctx, "begin optimistic") +func (h *stateRemoteHandle) beginTxn(ctx context.Context, pessimistic bool) error { + var err error + if pessimistic { + _, err = h.execSQL(ctx, "begin pessimistic") + } else { + _, err = h.execSQL(ctx, "begin optimistic") + } return err } @@ -315,8 +384,8 @@ func (h *stateRemoteHandle) rollbackTxn(ctx context.Context) error { return err } -func (h *stateRemoteHandle) runInTxn(ctx context.Context, fn func(ctx context.Context, txnTS uint64) error) error { - err := h.beginTxn(ctx) +func (h *stateRemoteHandle) runInTxn(ctx context.Context, pessimistic bool, fn func(ctx context.Context, txnTS uint64) error) error { + err := h.beginTxn(ctx, pessimistic) if err != nil { return errors.Trace(err) } @@ -345,8 +414,14 @@ func (h *stateRemoteHandle) runInTxn(ctx context.Context, fn func(ctx context.Co return h.commitTxn(ctx) } -func (h *stateRemoteHandle) loadRow(ctx context.Context, tid int64) (CachedTableLockType, uint64, uint64, error) { - chunkRows, err := h.execSQL(ctx, "select lock_type, lease, oldReadLease from mysql.table_cache_meta where tid = %?", tid) +func (h *stateRemoteHandle) loadRow(ctx context.Context, tid int64, forUpdate bool) (CachedTableLockType, uint64, uint64, error) { + var chunkRows []chunk.Row + var err error + if forUpdate { + chunkRows, err = h.execSQL(ctx, "select lock_type, lease, oldReadLease from mysql.table_cache_meta where tid = %? 
for update", tid) + } else { + chunkRows, err = h.execSQL(ctx, "select lock_type, lease, oldReadLease from mysql.table_cache_meta where tid = %?", tid) + } if err != nil { return 0, 0, 0, errors.Trace(err) } @@ -358,6 +433,12 @@ func (h *stateRemoteHandle) loadRow(ctx context.Context, tid int64) (CachedTable lockType := CachedTableLockType(col1.Value - 1) lease := chunkRows[0].GetUint64(1) oldReadLease := chunkRows[0].GetUint64(2) + + // Also store a local copy after loadRow() + h.lockType = lockType + h.lease = lease + h.oldReadLease = oldReadLease + return lockType, lease, oldReadLease, nil } diff --git a/unstable.txt b/unstable.txt index cffb45911e42e..033587bf46cc4 100644 --- a/unstable.txt +++ b/unstable.txt @@ -1,33 +1,19 @@ ddl TestModifyColumn -ddl TestModifyColumnTypeWhenInterception -util/cpuprofile TestGetCPUProfile +ddl TestPartition +ddl TestDuplicateErrorMessage +ddl TestAddIndexWithSplitTable +ddl TestAddColumn executor TestDefaultValForAnalyze executor TestPBMemoryLeak -executor TestInvalidReadCacheTable -executor TestCancelMppTasks executor testSuite.TestTimestampDefaultValueTimeZone -executor TestIndexJoin31494 -executor TestInvalidReadCacheTable executor TestPartitionTable -executor TestRangePartitionBoundariesLtM +executor TestIn server TestTopSQLCPUProfile -server TestErrorNoRollback -server TestUint64 -planner/core TestIssue27544 -planner/core TestIssue20139 -planner/core TestAggPushToCopForCachedTable -planner/core TestIssue23887 -planner/core TestOrderedResultModeOnJoin -planner/core TestCompareSubquery -planner/core TestEstimation -planner/core TestPlanStringer -planner/core TestMultiColInExpression -planner/core TestIssue16407 -planner/core TestPrepareCacheNow -planner/core TestDAGPlanBuilderSplitAvg -planner/core TestIndexMerge -planner/core TestUnmatchedTableInHint -planner/core TestCopPaging -planner/core TestPlanCachePointGetAndTableDual -planner/core TestSimplifyOuterJoinWithCast -executor/seqtest TestShow +planner/core TestIssue20710 +planner/core TestPartitionWithVariedDataSources +planner/core TestPlanCacheUnionScan +planner/core TestIssue32632 +executor TestInvalidReadTemporaryTable +session testPessimisticSuite.TestAmendForIndexChange +store/mockstore/unistore/tikv TestDeadlock +expression TestIssue27831 \ No newline at end of file diff --git a/util/admin/admin_test.go b/util/admin/admin_test.go index 591e741e6689b..de8d63188fa45 100644 --- a/util/admin/admin_test.go +++ b/util/admin/admin_test.go @@ -103,7 +103,7 @@ func TestGetDDLJobs(t *testing.T) { currJobs2 = currJobs2[:0] err = IterAllDDLJobs(txn, func(jobs []*model.Job) (b bool, e error) { for _, job := range jobs { - if job.State == model.JobStateNone { + if job.NotStarted() { currJobs2 = append(currJobs2, job) } else { return true, nil diff --git a/util/fastrand/random.go b/util/fastrand/random.go index 8c19f8e017133..90fa1f3f41748 100644 --- a/util/fastrand/random.go +++ b/util/fastrand/random.go @@ -15,15 +15,32 @@ package fastrand import ( + "math/bits" _ "unsafe" // required by go:linkname ) +// wyrand is a fast PRNG. See https://github.com/wangyi-fudan/wyhash +type wyrand uint64 + +func _wymix(a, b uint64) uint64 { + hi, lo := bits.Mul64(a, b) + return hi ^ lo +} + +func (r *wyrand) Next() uint64 { + *r += wyrand(0xa0761d6478bd642f) + return _wymix(uint64(*r), uint64(*r^wyrand(0xe7037ed1a0b428db))) +} + // Buf generates a random string using ASCII characters but avoid separator character. 
// See https://github.com/mysql/mysql-server/blob/5.7/mysys_ssl/crypt_genhash_impl.cc#L435 func Buf(size int) []byte { buf := make([]byte, size) + r := wyrand(Uint32()) for i := 0; i < size; i++ { - buf[i] = byte(Uint32N(127)) + // This is similar to Uint32() % n, but faster. + // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ + buf[i] = byte(uint32(uint64(uint32(r.Next())) * uint64(127) >> 32)) if buf[i] == 0 || buf[i] == byte('$') { buf[i]++ }
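The new Buf replaces the per-byte Uint32N(127) call with a local wyrand stream plus Lemire's multiply-shift reduction into [0, 127): for a uniform 32-bit x, the high 32 bits of the 64-bit product x*n equal floor(x*n/2^32), which always falls in [0, n) without a division. A self-contained sketch of the same two pieces (identifiers renamed; the real code seeds from fastrand's Uint32 and bumps 0x00 and '$' bytes afterwards):

package main

import (
	"fmt"
	"math/bits"
)

// wyrand/wymix, as added in util/fastrand/random.go.
type wyrand uint64

func wymix(a, b uint64) uint64 {
	hi, lo := bits.Mul64(a, b)
	return hi ^ lo
}

func (r *wyrand) Next() uint64 {
	*r += wyrand(0xa0761d6478bd642f)
	return wymix(uint64(*r), uint64(*r^wyrand(0xe7037ed1a0b428db)))
}

// lemireReduce maps a uniform 32-bit x into [0, n) without a modulo:
// the high 32 bits of x*n are floor(x*n / 2^32), which is cheaper than x % n.
func lemireReduce(x, n uint32) uint32 {
	return uint32(uint64(x) * uint64(n) >> 32)
}

func main() {
	r := wyrand(42) // fixed seed for the sketch; Buf seeds from Uint32()
	for i := 0; i < 8; i++ {
		fmt.Println(lemireReduce(uint32(r.Next()), 127)) // always in [0, 127)
	}
}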