diff --git a/br/cmd/br/BUILD.bazel b/br/cmd/br/BUILD.bazel index b82ecbd2bc8dc..a20f29fce1e45 100644 --- a/br/cmd/br/BUILD.bazel +++ b/br/cmd/br/BUILD.bazel @@ -32,7 +32,7 @@ go_library( "//br/pkg/utils", "//br/pkg/version/build", "//pkg/config", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/session", "//pkg/util", "//pkg/util/gctuner", diff --git a/br/cmd/br/debug.go b/br/cmd/br/debug.go index 27e55e276b08c..7dd600d025783 100644 --- a/br/cmd/br/debug.go +++ b/br/cmd/br/debug.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/tidb/br/pkg/task" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/br/pkg/version/build" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" tidblogutil "github.com/pingcap/tidb/pkg/util/logutil" "github.com/spf13/cobra" "go.uber.org/zap" diff --git a/br/pkg/backup/BUILD.bazel b/br/pkg/backup/BUILD.bazel index 7b49957ca7c6b..a752836cdbe80 100644 --- a/br/pkg/backup/BUILD.bazel +++ b/br/pkg/backup/BUILD.bazel @@ -30,7 +30,7 @@ go_library( "//pkg/distsql", "//pkg/kv", "//pkg/meta", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/statistics/handle", "//pkg/statistics/handle/util", "//pkg/util", @@ -78,7 +78,7 @@ go_test( "//br/pkg/rtree", "//br/pkg/storage", "//br/pkg/utils", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/sessionctx/variable", "//pkg/testkit", "//pkg/testkit/testsetup", diff --git a/br/pkg/backup/client.go b/br/pkg/backup/client.go index 636afdaf24700..67e2d9edc896c 100644 --- a/br/pkg/backup/client.go +++ b/br/pkg/backup/client.go @@ -35,7 +35,7 @@ import ( "github.com/pingcap/tidb/pkg/distsql" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/util" filter "github.com/pingcap/tidb/pkg/util/table-filter" "github.com/tikv/client-go/v2/oracle" diff --git a/br/pkg/backup/client_test.go b/br/pkg/backup/client_test.go index 
873e53c7f8e8a..03a6d94dd0c04 100644 --- a/br/pkg/backup/client_test.go +++ b/br/pkg/backup/client_test.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb/br/pkg/rtree" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/utils" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" diff --git a/br/pkg/backup/schema.go b/br/pkg/backup/schema.go index bd33b29d70240..ac7bc98258a19 100644 --- a/br/pkg/backup/schema.go +++ b/br/pkg/backup/schema.go @@ -19,7 +19,7 @@ import ( "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/statistics/handle" "github.com/pingcap/tidb/pkg/statistics/handle/util" tidbutil "github.com/pingcap/tidb/pkg/util" diff --git a/br/pkg/checkpoint/BUILD.bazel b/br/pkg/checkpoint/BUILD.bazel index 4580576dc19a1..a9edaf1334ef0 100644 --- a/br/pkg/checkpoint/BUILD.bazel +++ b/br/pkg/checkpoint/BUILD.bazel @@ -19,6 +19,7 @@ go_library( "//br/pkg/storage", "//br/pkg/summary", "//br/pkg/utils", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/util", "@com_github_pingcap_errors//:errors", @@ -42,6 +43,7 @@ go_test( ":checkpoint", "//br/pkg/pdutil", "//br/pkg/storage", + "//pkg/meta/model", "//pkg/parser/model", "@com_github_pingcap_kvproto//pkg/brpb", "@com_github_pingcap_kvproto//pkg/encryptionpb", diff --git a/br/pkg/checkpoint/checkpoint_test.go b/br/pkg/checkpoint/checkpoint_test.go index ef6ee22f51380..17af80dacc48f 100644 --- a/br/pkg/checkpoint/checkpoint_test.go +++ b/br/pkg/checkpoint/checkpoint_test.go @@ -26,7 +26,8 @@ import ( "github.com/pingcap/tidb/br/pkg/checkpoint" "github.com/pingcap/tidb/br/pkg/pdutil" "github.com/pingcap/tidb/br/pkg/storage" - "github.com/pingcap/tidb/pkg/parser/model" + 
"github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" ) @@ -95,8 +96,8 @@ func TestCheckpointMeta(t *testing.T) { SQLs: []checkpoint.CheckpointIngestIndexRepairSQL{ { IndexID: 1, - SchemaName: model.NewCIStr("2"), - TableName: model.NewCIStr("3"), + SchemaName: pmodel.NewCIStr("2"), + TableName: pmodel.NewCIStr("3"), IndexName: "4", AddSQL: "5", AddArgs: []any{"6", "7", "8"}, @@ -107,8 +108,8 @@ func TestCheckpointMeta(t *testing.T) { repairSQLs, err := checkpoint.LoadCheckpointIngestIndexRepairSQLs(ctx, s, "123") require.NoError(t, err) require.Equal(t, repairSQLs.SQLs[0].IndexID, int64(1)) - require.Equal(t, repairSQLs.SQLs[0].SchemaName, model.NewCIStr("2")) - require.Equal(t, repairSQLs.SQLs[0].TableName, model.NewCIStr("3")) + require.Equal(t, repairSQLs.SQLs[0].SchemaName, pmodel.NewCIStr("2")) + require.Equal(t, repairSQLs.SQLs[0].TableName, pmodel.NewCIStr("3")) require.Equal(t, repairSQLs.SQLs[0].IndexName, "4") require.Equal(t, repairSQLs.SQLs[0].AddSQL, "5") require.Equal(t, repairSQLs.SQLs[0].AddArgs, []any{"6", "7", "8"}) diff --git a/br/pkg/checkpoint/log_restore.go b/br/pkg/checkpoint/log_restore.go index 3107a67e8cd80..ebe82083aa503 100644 --- a/br/pkg/checkpoint/log_restore.go +++ b/br/pkg/checkpoint/log_restore.go @@ -24,7 +24,8 @@ import ( backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" "github.com/pingcap/tidb/br/pkg/storage" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "go.uber.org/zap" ) @@ -247,12 +248,12 @@ func removeCheckpointTaskInfoForLogRestore(ctx context.Context, s storage.Extern } type CheckpointIngestIndexRepairSQL struct { - IndexID int64 `json:"index-id"` - SchemaName model.CIStr `json:"schema-name"` - TableName model.CIStr `json:"table-name"` - IndexName string `json:"index-name"` - AddSQL string 
`json:"add-sql"` - AddArgs []any `json:"add-args"` + IndexID int64 `json:"index-id"` + SchemaName pmodel.CIStr `json:"schema-name"` + TableName pmodel.CIStr `json:"table-name"` + IndexName string `json:"index-name"` + AddSQL string `json:"add-sql"` + AddArgs []any `json:"add-args"` } type CheckpointIngestIndexRepairSQLs struct { diff --git a/br/pkg/checksum/BUILD.bazel b/br/pkg/checksum/BUILD.bazel index e2481ad2612a4..3835e6ad0c8c7 100644 --- a/br/pkg/checksum/BUILD.bazel +++ b/br/pkg/checksum/BUILD.bazel @@ -16,7 +16,7 @@ go_library( "//br/pkg/utils", "//pkg/distsql", "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/sessionctx/variable", "//pkg/tablecodec", "//pkg/util/ranger", @@ -44,6 +44,7 @@ go_test( "//br/pkg/mock", "//pkg/distsql", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/sessionctx/variable", "//pkg/testkit", diff --git a/br/pkg/checksum/executor.go b/br/pkg/checksum/executor.go index 22f5a13d23a65..c61488aaeeeac 100644 --- a/br/pkg/checksum/executor.go +++ b/br/pkg/checksum/executor.go @@ -13,7 +13,7 @@ import ( "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/distsql" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util/ranger" diff --git a/br/pkg/checksum/executor_test.go b/br/pkg/checksum/executor_test.go index c27e1c77581da..06d4d1f3ac551 100644 --- a/br/pkg/checksum/executor_test.go +++ b/br/pkg/checksum/executor_test.go @@ -13,7 +13,8 @@ import ( "github.com/pingcap/tidb/br/pkg/mock" "github.com/pingcap/tidb/pkg/distsql" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" 
"github.com/stretchr/testify/require" @@ -22,8 +23,8 @@ import ( func getTableInfo(t *testing.T, mock *mock.Cluster, db, table string) *model.TableInfo { info, err := mock.Domain.GetSnapshotInfoSchema(math.MaxUint64) require.NoError(t, err) - cDBName := model.NewCIStr(db) - cTableName := model.NewCIStr(table) + cDBName := pmodel.NewCIStr(db) + cTableName := pmodel.NewCIStr(table) tableInfo, err := info.TableByName(context.Background(), cDBName, cTableName) require.NoError(t, err) return tableInfo.Meta() diff --git a/br/pkg/glue/BUILD.bazel b/br/pkg/glue/BUILD.bazel index cff22faa27ee9..1ab377fc602dd 100644 --- a/br/pkg/glue/BUILD.bazel +++ b/br/pkg/glue/BUILD.bazel @@ -14,6 +14,7 @@ go_library( "//pkg/ddl", "//pkg/domain", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/sessionctx", "@com_github_fatih_color//:color", diff --git a/br/pkg/glue/glue.go b/br/pkg/glue/glue.go index 1895ee092cc78..d691d638169ab 100644 --- a/br/pkg/glue/glue.go +++ b/br/pkg/glue/glue.go @@ -8,7 +8,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" pd "github.com/tikv/pd/client" ) @@ -53,7 +54,7 @@ type Session interface { Execute(ctx context.Context, sql string) error ExecuteInternal(ctx context.Context, sql string, args ...any) error CreateDatabase(ctx context.Context, schema *model.DBInfo) error - CreateTable(ctx context.Context, dbName model.CIStr, table *model.TableInfo, + CreateTable(ctx context.Context, dbName pmodel.CIStr, table *model.TableInfo, cs ...ddl.CreateTableOption) error CreatePlacementPolicy(ctx context.Context, policy *model.PolicyInfo) error Close() diff --git a/br/pkg/gluetidb/BUILD.bazel b/br/pkg/gluetidb/BUILD.bazel index baebdba642728..66498201712b7 100644 --- a/br/pkg/gluetidb/BUILD.bazel +++ 
b/br/pkg/gluetidb/BUILD.bazel @@ -14,6 +14,7 @@ go_library( "//pkg/domain", "//pkg/executor", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/session", "//pkg/session/types", @@ -33,6 +34,7 @@ go_test( flaky = True, deps = [ "//br/pkg/glue", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/session", "//pkg/testkit", diff --git a/br/pkg/gluetidb/glue.go b/br/pkg/gluetidb/glue.go index 7963c14b0f5bb..cf1a876755c99 100644 --- a/br/pkg/gluetidb/glue.go +++ b/br/pkg/gluetidb/glue.go @@ -17,7 +17,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/session" sessiontypes "github.com/pingcap/tidb/pkg/session/types" "github.com/pingcap/tidb/pkg/sessionctx" @@ -246,7 +247,7 @@ func (gs *tidbSession) CreateTables(_ context.Context, } // CreateTable implements glue.Session. 
-func (gs *tidbSession) CreateTable(_ context.Context, dbName model.CIStr, +func (gs *tidbSession) CreateTable(_ context.Context, dbName pmodel.CIStr, table *model.TableInfo, cs ...ddl.CreateTableOption) error { return errors.Trace(executor.BRIECreateTable(gs.se, dbName, table, brComment, cs...)) } diff --git a/br/pkg/gluetidb/glue_test.go b/br/pkg/gluetidb/glue_test.go index 5c7eb6a0cb598..ceb43566f503b 100644 --- a/br/pkg/gluetidb/glue_test.go +++ b/br/pkg/gluetidb/glue_test.go @@ -19,7 +19,8 @@ import ( "testing" "github.com/pingcap/tidb/br/pkg/glue" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/types" @@ -44,7 +45,7 @@ func TestTheSessionIsoation(t *testing.T) { }) require.NoError(t, glueSe.CreateDatabase(ctx, &model.DBInfo{ - Name: model.NewCIStr("test_db"), + Name: pmodel.NewCIStr("test_db"), })) tk := testkit.NewTestKit(t, store) tk.MustExec("use test_db") @@ -53,27 +54,27 @@ func TestTheSessionIsoation(t *testing.T) { req.NoError(glueSe.ExecuteInternal(ctx, "use test;")) infos := []*model.TableInfo{} infos = append(infos, &model.TableInfo{ - Name: model.NewCIStr("tables_1"), + Name: pmodel.NewCIStr("tables_1"), Columns: []*model.ColumnInfo{ - {Name: model.NewCIStr("foo"), FieldType: *types.NewFieldType(types.KindBinaryLiteral), State: model.StatePublic}, + {Name: pmodel.NewCIStr("foo"), FieldType: *types.NewFieldType(types.KindBinaryLiteral), State: model.StatePublic}, }, }) infos = append(infos, &model.TableInfo{ - Name: model.NewCIStr("tables_2"), + Name: pmodel.NewCIStr("tables_2"), PlacementPolicyRef: &model.PolicyRefInfo{ - Name: model.NewCIStr("threereplication"), + Name: pmodel.NewCIStr("threereplication"), }, Columns: []*model.ColumnInfo{ - {Name: model.NewCIStr("foo"), FieldType: *types.NewFieldType(types.KindBinaryLiteral), State: model.StatePublic}, + 
{Name: pmodel.NewCIStr("foo"), FieldType: *types.NewFieldType(types.KindBinaryLiteral), State: model.StatePublic}, }, }) infos = append(infos, &model.TableInfo{ - Name: model.NewCIStr("tables_3"), + Name: pmodel.NewCIStr("tables_3"), PlacementPolicyRef: &model.PolicyRefInfo{ - Name: model.NewCIStr("fivereplication"), + Name: pmodel.NewCIStr("fivereplication"), }, Columns: []*model.ColumnInfo{ - {Name: model.NewCIStr("foo"), FieldType: *types.NewFieldType(types.KindBinaryLiteral), State: model.StatePublic}, + {Name: pmodel.NewCIStr("foo"), FieldType: *types.NewFieldType(types.KindBinaryLiteral), State: model.StatePublic}, }, }) polices := []*model.PolicyInfo{ @@ -81,13 +82,13 @@ func TestTheSessionIsoation(t *testing.T) { PlacementSettings: &model.PlacementSettings{ Followers: 4, }, - Name: model.NewCIStr("fivereplication"), + Name: pmodel.NewCIStr("fivereplication"), }, { PlacementSettings: &model.PlacementSettings{ Followers: 2, }, - Name: model.NewCIStr("threereplication"), + Name: pmodel.NewCIStr("threereplication"), }, } for _, pinfo := range polices { diff --git a/br/pkg/gluetidb/mock/BUILD.bazel b/br/pkg/gluetidb/mock/BUILD.bazel index 81dc56ba2b45f..e8a640a5839b1 100644 --- a/br/pkg/gluetidb/mock/BUILD.bazel +++ b/br/pkg/gluetidb/mock/BUILD.bazel @@ -10,6 +10,7 @@ go_library( "//pkg/ddl", "//pkg/domain", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/session/types", "//pkg/sessionctx", diff --git a/br/pkg/gluetidb/mock/mock.go b/br/pkg/gluetidb/mock/mock.go index dc54f48ebda3a..6b4c792d69710 100644 --- a/br/pkg/gluetidb/mock/mock.go +++ b/br/pkg/gluetidb/mock/mock.go @@ -22,7 +22,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" sessiontypes "github.com/pingcap/tidb/pkg/session/types" "github.com/pingcap/tidb/pkg/sessionctx" pd 
"github.com/tikv/pd/client" @@ -85,7 +86,7 @@ func (*mockSession) CreateTables(_ context.Context, _ map[string][]*model.TableI } // CreateTable implements glue.Session. -func (*mockSession) CreateTable(_ context.Context, _ model.CIStr, +func (*mockSession) CreateTable(_ context.Context, _ pmodel.CIStr, _ *model.TableInfo, _ ...ddl.CreateTableOption) error { log.Fatal("unimplemented CreateDatabase for mock session") return nil diff --git a/br/pkg/metautil/BUILD.bazel b/br/pkg/metautil/BUILD.bazel index 7c8d8dc0214b0..44a35bc2b209c 100644 --- a/br/pkg/metautil/BUILD.bazel +++ b/br/pkg/metautil/BUILD.bazel @@ -14,7 +14,7 @@ go_library( "//br/pkg/logutil", "//br/pkg/storage", "//br/pkg/summary", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/statistics/handle", "//pkg/statistics/handle/types", "//pkg/statistics/handle/util", @@ -47,6 +47,7 @@ go_test( shard_count = 9, deps = [ "//br/pkg/storage", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/statistics/handle/types", "//pkg/statistics/handle/util", diff --git a/br/pkg/metautil/load.go b/br/pkg/metautil/load.go index e0c2ad717f3b3..ff9bc4336e131 100644 --- a/br/pkg/metautil/load.go +++ b/br/pkg/metautil/load.go @@ -18,7 +18,7 @@ import ( "context" "github.com/pingcap/errors" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) // Database wraps the schema and tables of a database. 
diff --git a/br/pkg/metautil/load_test.go b/br/pkg/metautil/load_test.go index 9a1f19ebdd92a..b0d0eb81e09d2 100644 --- a/br/pkg/metautil/load_test.go +++ b/br/pkg/metautil/load_test.go @@ -23,7 +23,8 @@ import ( backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/kvproto/pkg/encryptionpb" "github.com/pingcap/tidb/br/pkg/storage" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/statistics/handle/util" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/stretchr/testify/require" @@ -41,8 +42,8 @@ func TestLoadBackupMeta(t *testing.T) { store, err := storage.NewLocalStorage(testDir) require.NoError(t, err) - tblName := model.NewCIStr("t1") - dbName := model.NewCIStr("test") + tblName := pmodel.NewCIStr("t1") + dbName := pmodel.NewCIStr("test") tblID := int64(123) mockTbl := &model.TableInfo{ ID: tblID, @@ -118,8 +119,8 @@ func TestLoadBackupMetaPartionTable(t *testing.T) { store, err := storage.NewLocalStorage(testDir) require.NoError(t, err) - tblName := model.NewCIStr("t1") - dbName := model.NewCIStr("test") + tblName := pmodel.NewCIStr("t1") + dbName := pmodel.NewCIStr("test") tblID := int64(123) partID1 := int64(124) partID2 := int64(125) @@ -221,7 +222,7 @@ func TestLoadBackupMetaPartionTable(t *testing.T) { } func buildTableAndFiles(name string, tableID, fileCount int) (*model.TableInfo, []*backuppb.File) { - tblName := model.NewCIStr(name) + tblName := pmodel.NewCIStr(name) tblID := int64(tableID) mockTbl := &model.TableInfo{ ID: tblID, @@ -248,7 +249,7 @@ func buildBenchmarkBackupmeta(b *testing.B, dbName string, tableCount, fileCount mockDB := model.DBInfo{ ID: 1, - Name: model.NewCIStr(dbName), + Name: pmodel.NewCIStr(dbName), } mockDB.Deprecated.Tables = []*model.TableInfo{ mockTbl, diff --git a/br/pkg/metautil/metafile.go b/br/pkg/metautil/metafile.go index 03cc95ca1b5de..83542d2880e14 100644 --- 
a/br/pkg/metautil/metafile.go +++ b/br/pkg/metautil/metafile.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/summary" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/statistics/handle/util" "github.com/pingcap/tidb/pkg/tablecodec" tidbutil "github.com/pingcap/tidb/pkg/util" diff --git a/br/pkg/metautil/statsfile.go b/br/pkg/metautil/statsfile.go index c156932580a5c..6621a87cb6e14 100644 --- a/br/pkg/metautil/statsfile.go +++ b/br/pkg/metautil/statsfile.go @@ -26,7 +26,7 @@ import ( backuppb "github.com/pingcap/kvproto/pkg/brpb" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/storage" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/statistics/handle" statstypes "github.com/pingcap/tidb/pkg/statistics/handle/types" statsutil "github.com/pingcap/tidb/pkg/statistics/handle/util" diff --git a/br/pkg/mock/BUILD.bazel b/br/pkg/mock/BUILD.bazel index ce48625863b51..2839c1e50cd49 100644 --- a/br/pkg/mock/BUILD.bazel +++ b/br/pkg/mock/BUILD.bazel @@ -19,7 +19,7 @@ go_library( "//pkg/lightning/backend", "//pkg/lightning/backend/encode", "//pkg/lightning/verification", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/server", "//pkg/session", "//pkg/store/mockstore", diff --git a/br/pkg/mock/backend.go b/br/pkg/mock/backend.go index 7f38d950f6dfd..0da3733bb8558 100644 --- a/br/pkg/mock/backend.go +++ b/br/pkg/mock/backend.go @@ -16,7 +16,7 @@ import ( uuid "github.com/google/uuid" backend "github.com/pingcap/tidb/pkg/lightning/backend" encode "github.com/pingcap/tidb/pkg/lightning/backend/encode" - model "github.com/pingcap/tidb/pkg/parser/model" + model "github.com/pingcap/tidb/pkg/meta/model" gomock "go.uber.org/mock/gomock" ) diff --git a/br/pkg/restore/BUILD.bazel b/br/pkg/restore/BUILD.bazel index 
3d52de515d143..0b571e4889f13 100644 --- a/br/pkg/restore/BUILD.bazel +++ b/br/pkg/restore/BUILD.bazel @@ -18,6 +18,7 @@ go_library( "//pkg/domain", "//pkg/kv", "//pkg/meta", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/util", "@com_github_go_sql_driver_mysql//:mysql", diff --git a/br/pkg/restore/ingestrec/BUILD.bazel b/br/pkg/restore/ingestrec/BUILD.bazel index a69cde11f8290..e09029c3a8154 100644 --- a/br/pkg/restore/ingestrec/BUILD.bazel +++ b/br/pkg/restore/ingestrec/BUILD.bazel @@ -7,6 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/types", "@com_github_pingcap_errors//:errors", @@ -25,6 +26,7 @@ go_test( ":ingestrec", "//pkg/kv", "//pkg/meta", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/session", "//pkg/store/mockstore", diff --git a/br/pkg/restore/ingestrec/ingest_recorder.go b/br/pkg/restore/ingestrec/ingest_recorder.go index 54a0dccd31d26..1ffa40063bf19 100644 --- a/br/pkg/restore/ingestrec/ingest_recorder.go +++ b/br/pkg/restore/ingestrec/ingest_recorder.go @@ -22,15 +22,16 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/types" "go.uber.org/zap" ) // IngestIndexInfo records the information used to generate index drop/re-add SQL. 
type IngestIndexInfo struct { - SchemaName model.CIStr - TableName model.CIStr + SchemaName pmodel.CIStr + TableName pmodel.CIStr ColumnList string ColumnArgs []any IsPrimary bool diff --git a/br/pkg/restore/ingestrec/ingest_recorder_test.go b/br/pkg/restore/ingestrec/ingest_recorder_test.go index b6473f724cdbe..8cefa23108140 100644 --- a/br/pkg/restore/ingestrec/ingest_recorder_test.go +++ b/br/pkg/restore/ingestrec/ingest_recorder_test.go @@ -23,7 +23,8 @@ import ( "github.com/pingcap/tidb/br/pkg/restore/ingestrec" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/stretchr/testify/require" @@ -60,7 +61,7 @@ func getIndex(id int64, columnsName []string) *model.IndexInfo { columns := make([]*model.IndexColumn, 0, len(columnsName)) for _, columnName := range columnsName { columns = append(columns, &model.IndexColumn{ - Name: model.CIStr{ + Name: pmodel.CIStr{ O: columnName, L: columnName, }, @@ -68,7 +69,7 @@ func getIndex(id int64, columnsName []string) *model.IndexInfo { } return &model.IndexInfo{ ID: id, - Name: model.CIStr{ + Name: pmodel.CIStr{ O: columnsName[0], L: columnsName[0], // noused }, @@ -118,22 +119,22 @@ func TestAddIngestRecorder(t *testing.T) { createMeta(t, store, func(m *meta.Meta) { dbInfo := &model.DBInfo{ ID: 1, - Name: model.NewCIStr(SchemaName), + Name: pmodel.NewCIStr(SchemaName), State: model.StatePublic, } err := m.CreateDatabase(dbInfo) require.NoError(t, err) tblInfo := &model.TableInfo{ ID: TableID, - Name: model.NewCIStr(TableName), + Name: pmodel.NewCIStr(TableName), Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("x"), + Name: pmodel.NewCIStr("x"), Hidden: false, State: model.StatePublic, }, { - Name: model.NewCIStr("y"), + Name: pmodel.NewCIStr("y"), Hidden: false, State: 
model.StatePublic, }, @@ -141,22 +142,22 @@ func TestAddIngestRecorder(t *testing.T) { Indices: []*model.IndexInfo{ { ID: 1, - Name: model.NewCIStr("x"), - Table: model.NewCIStr(TableName), + Name: pmodel.NewCIStr("x"), + Table: pmodel.NewCIStr(TableName), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("x"), + Name: pmodel.NewCIStr("x"), Offset: 0, Length: -1, }, { - Name: model.NewCIStr("y"), + Name: pmodel.NewCIStr("y"), Offset: 1, Length: -1, }, }, Comment: "123", - Tp: model.IndexTypeBtree, + Tp: pmodel.IndexTypeBtree, State: model.StatePublic, }, }, @@ -301,28 +302,28 @@ func TestIndexesKind(t *testing.T) { createMeta(t, store, func(m *meta.Meta) { dbInfo := &model.DBInfo{ ID: 1, - Name: model.NewCIStr(SchemaName), + Name: pmodel.NewCIStr(SchemaName), State: model.StatePublic, } err := m.CreateDatabase(dbInfo) require.NoError(t, err) tblInfo := &model.TableInfo{ ID: TableID, - Name: model.NewCIStr(TableName), + Name: pmodel.NewCIStr(TableName), Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("x"), + Name: pmodel.NewCIStr("x"), Hidden: false, State: model.StatePublic, }, { - Name: model.NewCIStr("_V$_x_0"), + Name: pmodel.NewCIStr("_V$_x_0"), Hidden: true, GeneratedExprString: "`x` * 2", State: model.StatePublic, }, { - Name: model.NewCIStr("z"), + Name: pmodel.NewCIStr("z"), Hidden: false, State: model.StatePublic, }, @@ -330,27 +331,27 @@ func TestIndexesKind(t *testing.T) { Indices: []*model.IndexInfo{ { ID: 1, - Name: model.NewCIStr("x"), - Table: model.NewCIStr(TableName), + Name: pmodel.NewCIStr("x"), + Table: pmodel.NewCIStr(TableName), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("x"), + Name: pmodel.NewCIStr("x"), Offset: 0, Length: -1, }, { - Name: model.NewCIStr("_V$_x_0"), + Name: pmodel.NewCIStr("_V$_x_0"), Offset: 1, Length: -1, }, { - Name: model.NewCIStr("z"), + Name: pmodel.NewCIStr("z"), Offset: 2, Length: 4, }, }, Comment: "123", - Tp: model.IndexTypeHash, + Tp: pmodel.IndexTypeHash, Invisible: true, State: 
model.StatePublic, }, @@ -394,7 +395,7 @@ func TestIndexesKind(t *testing.T) { require.Equal(t, 1, count) require.Equal(t, TableID, tableID) require.Equal(t, int64(1), indexID) - require.Equal(t, model.NewCIStr(SchemaName), info.SchemaName) + require.Equal(t, pmodel.NewCIStr(SchemaName), info.SchemaName) require.Equal(t, "%n,(`x` * 2),%n(4)", info.ColumnList) require.Equal(t, []any{"x", "z"}, info.ColumnArgs) require.Equal(t, TableName, info.IndexInfo.Table.O) @@ -410,22 +411,22 @@ func TestRewriteTableID(t *testing.T) { createMeta(t, store, func(m *meta.Meta) { dbInfo := &model.DBInfo{ ID: 1, - Name: model.NewCIStr(SchemaName), + Name: pmodel.NewCIStr(SchemaName), State: model.StatePublic, } err := m.CreateDatabase(dbInfo) require.NoError(t, err) tblInfo := &model.TableInfo{ ID: TableID, - Name: model.NewCIStr(TableName), + Name: pmodel.NewCIStr(TableName), Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("x"), + Name: pmodel.NewCIStr("x"), Hidden: false, State: model.StatePublic, }, { - Name: model.NewCIStr("y"), + Name: pmodel.NewCIStr("y"), Hidden: false, State: model.StatePublic, }, @@ -433,22 +434,22 @@ func TestRewriteTableID(t *testing.T) { Indices: []*model.IndexInfo{ { ID: 1, - Name: model.NewCIStr("x"), - Table: model.NewCIStr(TableName), + Name: pmodel.NewCIStr("x"), + Table: pmodel.NewCIStr(TableName), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("x"), + Name: pmodel.NewCIStr("x"), Offset: 0, Length: -1, }, { - Name: model.NewCIStr("y"), + Name: pmodel.NewCIStr("y"), Offset: 1, Length: -1, }, }, Comment: "123", - Tp: model.IndexTypeBtree, + Tp: pmodel.IndexTypeBtree, State: model.StatePublic, }, }, diff --git a/br/pkg/restore/internal/prealloc_db/BUILD.bazel b/br/pkg/restore/internal/prealloc_db/BUILD.bazel index f6721bde62373..c4fb560ec9010 100644 --- a/br/pkg/restore/internal/prealloc_db/BUILD.bazel +++ b/br/pkg/restore/internal/prealloc_db/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "//br/pkg/utils", "//pkg/ddl", "//pkg/kv", + 
"//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/sessionctx/variable", @@ -39,6 +40,7 @@ go_test( "//pkg/kv", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/types", diff --git a/br/pkg/restore/internal/prealloc_db/db.go b/br/pkg/restore/internal/prealloc_db/db.go index 8664771b486f9..35d4758abf868 100644 --- a/br/pkg/restore/internal/prealloc_db/db.go +++ b/br/pkg/restore/internal/prealloc_db/db.go @@ -16,7 +16,8 @@ import ( "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/variable" "go.uber.org/zap" @@ -85,7 +86,7 @@ func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error { } return errors.Trace(err) case model.ActionCreateTable: - err = db.se.CreateTable(ctx, model.NewCIStr(ddlJob.SchemaName), tableInfo) + err = db.se.CreateTable(ctx, pmodel.NewCIStr(ddlJob.SchemaName), tableInfo) if err != nil { log.Error("create table failed", zap.Stringer("db", dbInfo.Name), @@ -378,7 +379,7 @@ func (db *DB) Close() { db.se.Close() } -func (db *DB) ensurePlacementPolicy(ctx context.Context, policyName model.CIStr, policies *sync.Map) error { +func (db *DB) ensurePlacementPolicy(ctx context.Context, policyName pmodel.CIStr, policies *sync.Map) error { if policies == nil { return nil } diff --git a/br/pkg/restore/internal/prealloc_db/db_test.go b/br/pkg/restore/internal/prealloc_db/db_test.go index f767d05ca1cdc..091cde985cbe9 100644 --- a/br/pkg/restore/internal/prealloc_db/db_test.go +++ b/br/pkg/restore/internal/prealloc_db/db_test.go @@ -20,7 +20,8 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" - 
"github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/types" "github.com/pingcap/tidb/pkg/testkit" @@ -46,9 +47,9 @@ func TestRestoreAutoIncID(t *testing.T) { // Get schemas of db and table info, err := s.Mock.Domain.GetSnapshotInfoSchema(math.MaxUint64) require.NoErrorf(t, err, "Error get snapshot info schema: %s", err) - dbInfo, exists := info.SchemaByName(model.NewCIStr("test")) + dbInfo, exists := info.SchemaByName(pmodel.NewCIStr("test")) require.Truef(t, exists, "Error get db info") - tableInfo, err := info.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("\"t\"")) + tableInfo, err := info.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("\"t\"")) require.NoErrorf(t, err, "Error get table info: %s", err) table := metautil.Table{ Info: tableInfo.Meta(), @@ -181,12 +182,12 @@ func prepareAllocTables( info, err := dom.GetSnapshotInfoSchema(math.MaxUint64) require.NoError(t, err) - dbInfo, exists := info.SchemaByName(model.NewCIStr("test")) + dbInfo, exists := info.SchemaByName(pmodel.NewCIStr("test")) require.True(t, exists) tableInfos = make([]*metautil.Table, 0, 4) for i := 1; i <= len(createTableSQLs); i += 1 { tableName := fmt.Sprintf("t%d", i) - tableInfo, err := info.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr(tableName)) + tableInfo, err := info.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr(tableName)) require.NoError(t, err) tableInfos = append(tableInfos, &metautil.Table{ DB: dbInfo.Clone(), @@ -220,7 +221,7 @@ func cloneTableInfos( for i := int64(0); i < int64(len(createTableSQLs)); i += 1 { newTableInfo := originTableInfos[i].Info.Clone() newTableInfo.ID = id + i + 1 - newTableInfo.Name = model.NewCIStr(fmt.Sprintf("%s%d", prefix, i+1)) + newTableInfo.Name = 
pmodel.NewCIStr(fmt.Sprintf("%s%d", prefix, i+1)) tableInfos = append(tableInfos, &metautil.Table{ DB: originTableInfos[i].DB.Clone(), Info: newTableInfo, @@ -239,7 +240,7 @@ func fakePolicyInfo(ident byte) *model.PolicyInfo { id := int64(ident) uid := uint64(ident) str := string(ident) - cistr := model.NewCIStr(str) + cistr := pmodel.NewCIStr(str) return &model.PolicyInfo{ PlacementSettings: &model.PlacementSettings{ Followers: uid, @@ -317,7 +318,7 @@ func TestPolicyMode(t *testing.T) { policyMap.Store(fakepolicy1.Name.L, fakepolicy1) err = db.CreateDatabase(ctx, &model.DBInfo{ ID: 20000, - Name: model.NewCIStr("test_db"), + Name: pmodel.NewCIStr("test_db"), Charset: "utf8mb4", Collate: "utf8mb4_bin", State: model.StatePublic, @@ -348,7 +349,7 @@ func TestUpdateMetaVersion(t *testing.T) { db.Session().Execute(ctx, "insert into test.t values (1),(2),(3);") info, err := s.Mock.Domain.GetSnapshotInfoSchema(math.MaxUint64) require.NoError(t, err) - tableInfo, err := info.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tableInfo, err := info.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) restoreTS := uint64(0) ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) @@ -374,7 +375,7 @@ func TestCreateTablesInDb(t *testing.T) { info, err := s.Mock.Domain.GetSnapshotInfoSchema(math.MaxUint64) require.NoErrorf(t, err, "Error get snapshot info schema: %s", err) - dbSchema, isExist := info.SchemaByName(model.NewCIStr("test")) + dbSchema, isExist := info.SchemaByName(pmodel.NewCIStr("test")) require.True(t, isExist) tables := make([]*metautil.Table, 4) @@ -386,10 +387,10 @@ func TestCreateTablesInDb(t *testing.T) { DB: dbSchema, Info: &model.TableInfo{ ID: int64(i), - Name: model.NewCIStr("test" + strconv.Itoa(i)), + Name: pmodel.NewCIStr("test" + strconv.Itoa(i)), Columns: []*model.ColumnInfo{{ ID: 1, - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), FieldType: *intField, 
State: model.StatePublic, }}, @@ -426,17 +427,17 @@ func TestDDLJobMap(t *testing.T) { info, err := s.Mock.Domain.GetSnapshotInfoSchema(math.MaxUint64) require.NoError(t, err) - dbInfo, exists := info.SchemaByName(model.NewCIStr("test")) + dbInfo, exists := info.SchemaByName(pmodel.NewCIStr("test")) require.True(t, exists) - tableInfo1, err := info.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tableInfo1, err := info.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) - tableInfo2, err := info.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tableInfo2, err := info.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) - tableInfo3, err := info.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t3")) + tableInfo3, err := info.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t3")) require.NoError(t, err) - tableInfo4, err := info.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t4")) + tableInfo4, err := info.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t4")) require.NoError(t, err) - tableInfo5, err := info.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t5")) + tableInfo5, err := info.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t5")) require.NoError(t, err) toBeCorrectedTables := map[restore.UniqueTableName]bool{ @@ -500,7 +501,7 @@ func TestDB_ExecDDL2(t *testing.T) { BinlogInfo: &model.HistoryInfo{ DBInfo: &model.DBInfo{ ID: 20000, - Name: model.NewCIStr("test_db"), + Name: pmodel.NewCIStr("test_db"), Charset: "utf8mb4", Collate: "utf8mb4_bin", State: model.StatePublic, @@ -514,13 +515,13 @@ func TestDB_ExecDDL2(t *testing.T) { BinlogInfo: &model.HistoryInfo{ TableInfo: &model.TableInfo{ ID: 20000, - Name: model.NewCIStr("t1"), + 
Name: pmodel.NewCIStr("t1"), Charset: "utf8mb4", Collate: "utf8mb4_bin", Columns: []*model.ColumnInfo{ { ID: 1, - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), FieldType: *fieldType, State: model.StatePublic, Version: 2, @@ -563,9 +564,9 @@ func TestCreateTableConsistent(t *testing.T) { getTableInfo := func(name string) (*model.DBInfo, *model.TableInfo) { info, err := s.Mock.Domain.GetSnapshotInfoSchema(math.MaxUint64) require.NoError(t, err) - dbInfo, exists := info.SchemaByName(model.NewCIStr("test")) + dbInfo, exists := info.SchemaByName(pmodel.NewCIStr("test")) require.True(t, exists) - tableInfo, err := info.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr(name)) + tableInfo, err := info.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr(name)) require.NoError(t, err) return dbInfo, tableInfo.Meta() } diff --git a/br/pkg/restore/internal/prealloc_table_id/BUILD.bazel b/br/pkg/restore/internal/prealloc_table_id/BUILD.bazel index c19b294e2bf16..719d93152fc9b 100644 --- a/br/pkg/restore/internal/prealloc_table_id/BUILD.bazel +++ b/br/pkg/restore/internal/prealloc_table_id/BUILD.bazel @@ -7,7 +7,7 @@ go_library( visibility = ["//br/pkg/restore:__subpackages__"], deps = [ "//br/pkg/metautil", - "//pkg/parser/model", + "//pkg/meta/model", ], ) @@ -19,7 +19,7 @@ go_test( deps = [ ":prealloc_table_id", "//br/pkg/metautil", - "//pkg/parser/model", + "//pkg/meta/model", "@com_github_stretchr_testify//require", ], ) diff --git a/br/pkg/restore/internal/prealloc_table_id/alloc.go b/br/pkg/restore/internal/prealloc_table_id/alloc.go index bafccac362f6f..e224f721afccb 100644 --- a/br/pkg/restore/internal/prealloc_table_id/alloc.go +++ b/br/pkg/restore/internal/prealloc_table_id/alloc.go @@ -7,7 +7,7 @@ import ( "math" "github.com/pingcap/tidb/br/pkg/metautil" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) const ( diff --git 
a/br/pkg/restore/internal/prealloc_table_id/alloc_test.go b/br/pkg/restore/internal/prealloc_table_id/alloc_test.go index b1e7de8978cf6..0c08e40fdf231 100644 --- a/br/pkg/restore/internal/prealloc_table_id/alloc_test.go +++ b/br/pkg/restore/internal/prealloc_table_id/alloc_test.go @@ -8,7 +8,7 @@ import ( "github.com/pingcap/tidb/br/pkg/metautil" prealloctableid "github.com/pingcap/tidb/br/pkg/restore/internal/prealloc_table_id" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/stretchr/testify/require" ) diff --git a/br/pkg/restore/log_client/BUILD.bazel b/br/pkg/restore/log_client/BUILD.bazel index 535bf803b46c5..2351e2908ea64 100644 --- a/br/pkg/restore/log_client/BUILD.bazel +++ b/br/pkg/restore/log_client/BUILD.bazel @@ -38,7 +38,7 @@ go_library( "//pkg/domain", "//pkg/kv", "//pkg/meta", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/util", "//pkg/util/codec", "//pkg/util/redact", diff --git a/br/pkg/restore/log_client/client.go b/br/pkg/restore/log_client/client.go index 52f15354d8e83..6c4666725ef4f 100644 --- a/br/pkg/restore/log_client/client.go +++ b/br/pkg/restore/log_client/client.go @@ -58,7 +58,7 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" tidbutil "github.com/pingcap/tidb/pkg/util" filter "github.com/pingcap/tidb/pkg/util/table-filter" "github.com/tikv/client-go/v2/config" diff --git a/br/pkg/restore/misc.go b/br/pkg/restore/misc.go index 62d7fbc32fdb4..ca66eae720347 100644 --- a/br/pkg/restore/misc.go +++ b/br/pkg/restore/misc.go @@ -28,7 +28,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" tidbutil 
"github.com/pingcap/tidb/pkg/util" "github.com/tikv/client-go/v2/oracle" pd "github.com/tikv/pd/client" @@ -58,8 +59,8 @@ func TransferBoolToValue(enable bool) string { // GetTableSchema returns the schema of a table from TiDB. func GetTableSchema( dom *domain.Domain, - dbName model.CIStr, - tableName model.CIStr, + dbName pmodel.CIStr, + tableName pmodel.CIStr, ) (*model.TableInfo, error) { info := dom.InfoSchema() table, err := info.TableByName(context.Background(), dbName, tableName) diff --git a/br/pkg/restore/snap_client/BUILD.bazel b/br/pkg/restore/snap_client/BUILD.bazel index 6bf12771e5ce0..3dbb1260c46e0 100644 --- a/br/pkg/restore/snap_client/BUILD.bazel +++ b/br/pkg/restore/snap_client/BUILD.bazel @@ -40,6 +40,7 @@ go_library( "//pkg/domain/infosync", "//pkg/kv", "//pkg/meta", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/tablecodec", @@ -96,6 +97,7 @@ go_test( "//br/pkg/utils", "//br/pkg/utiltest", "//pkg/domain", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/session", diff --git a/br/pkg/restore/snap_client/client.go b/br/pkg/restore/snap_client/client.go index 0cf20cf42baee..d7c5a1d599bcb 100644 --- a/br/pkg/restore/snap_client/client.go +++ b/br/pkg/restore/snap_client/client.go @@ -52,7 +52,7 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" tidbutil "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/redact" kvutil "github.com/tikv/client-go/v2/util" diff --git a/br/pkg/restore/snap_client/client_test.go b/br/pkg/restore/snap_client/client_test.go index b43324d9fca64..380e4421b68fd 100644 --- a/br/pkg/restore/snap_client/client_test.go +++ b/br/pkg/restore/snap_client/client_test.go @@ -35,7 +35,8 @@ import ( importclient "github.com/pingcap/tidb/br/pkg/restore/internal/import_client" snapclient 
"github.com/pingcap/tidb/br/pkg/restore/snap_client" "github.com/pingcap/tidb/br/pkg/utiltest" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/types" @@ -53,7 +54,7 @@ func TestCreateTables(t *testing.T) { info, err := m.Domain.GetSnapshotInfoSchema(math.MaxUint64) require.NoError(t, err) - dbSchema, isExist := info.SchemaByName(model.NewCIStr("test")) + dbSchema, isExist := info.SchemaByName(pmodel.NewCIStr("test")) require.True(t, isExist) client.SetBatchDdlSize(1) @@ -65,10 +66,10 @@ func TestCreateTables(t *testing.T) { DB: dbSchema, Info: &model.TableInfo{ ID: int64(i), - Name: model.NewCIStr("test" + strconv.Itoa(i)), + Name: pmodel.NewCIStr("test" + strconv.Itoa(i)), Columns: []*model.ColumnInfo{{ ID: 1, - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), FieldType: *intField, State: model.StatePublic, }}, @@ -155,7 +156,7 @@ func TestCheckTargetClusterFresh(t *testing.T) { ctx := context.Background() require.NoError(t, client.CheckTargetClusterFresh(ctx)) - require.NoError(t, client.CreateDatabases(ctx, []*metautil.Database{{Info: &model.DBInfo{Name: model.NewCIStr("user_db")}}})) + require.NoError(t, client.CreateDatabases(ctx, []*metautil.Database{{Info: &model.DBInfo{Name: pmodel.NewCIStr("user_db")}}})) require.True(t, berrors.ErrRestoreNotFreshCluster.Equal(client.CheckTargetClusterFresh(ctx))) } @@ -172,7 +173,7 @@ func TestCheckTargetClusterFreshWithTable(t *testing.T) { ctx := context.Background() info, err := cluster.Domain.GetSnapshotInfoSchema(math.MaxUint64) require.NoError(t, err) - dbSchema, isExist := info.SchemaByName(model.NewCIStr("test")) + dbSchema, isExist := info.SchemaByName(pmodel.NewCIStr("test")) require.True(t, isExist) intField := types.NewFieldType(mysql.TypeLong) intField.SetCharset("binary") @@ -180,10 +181,10 @@ 
func TestCheckTargetClusterFreshWithTable(t *testing.T) { DB: dbSchema, Info: &model.TableInfo{ ID: int64(1), - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), Columns: []*model.ColumnInfo{{ ID: 1, - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), FieldType: *intField, State: model.StatePublic, }}, diff --git a/br/pkg/restore/snap_client/export_test.go b/br/pkg/restore/snap_client/export_test.go index 27f48efb8a4eb..74ddcf9ae8600 100644 --- a/br/pkg/restore/snap_client/export_test.go +++ b/br/pkg/restore/snap_client/export_test.go @@ -24,7 +24,7 @@ import ( importclient "github.com/pingcap/tidb/br/pkg/restore/internal/import_client" restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" tidbutil "github.com/pingcap/tidb/pkg/util" "golang.org/x/exp/slices" ) diff --git a/br/pkg/restore/snap_client/pipeline_items.go b/br/pkg/restore/snap_client/pipeline_items.go index d31b4db6be738..f10b2a25b9f3b 100644 --- a/br/pkg/restore/snap_client/pipeline_items.go +++ b/br/pkg/restore/snap_client/pipeline_items.go @@ -29,7 +29,7 @@ import ( "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" tidbutil "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/engine" pdhttp "github.com/tikv/pd/client/http" diff --git a/br/pkg/restore/snap_client/placement_rule_manager_test.go b/br/pkg/restore/snap_client/placement_rule_manager_test.go index 8ff29e6dc0aa6..c078ebd6e48c4 100644 --- a/br/pkg/restore/snap_client/placement_rule_manager_test.go +++ b/br/pkg/restore/snap_client/placement_rule_manager_test.go @@ -26,7 +26,8 @@ import ( snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client" restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" 
"github.com/pingcap/tidb/br/pkg/utiltest" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util/codec" "github.com/stretchr/testify/require" @@ -40,8 +41,8 @@ func generateTables() []*snapclient.CreatedTable { ID: 1, }, OldTable: &metautil.Table{ - DB: &model.DBInfo{Name: model.NewCIStr("test")}, - Info: &model.TableInfo{Name: model.NewCIStr("t1")}, + DB: &model.DBInfo{Name: pmodel.NewCIStr("test")}, + Info: &model.TableInfo{Name: pmodel.NewCIStr("t1")}, }, }, { @@ -49,8 +50,8 @@ func generateTables() []*snapclient.CreatedTable { ID: 100, }, OldTable: &metautil.Table{ - DB: &model.DBInfo{Name: model.NewCIStr("test")}, - Info: &model.TableInfo{Name: model.NewCIStr("t100")}, + DB: &model.DBInfo{Name: pmodel.NewCIStr("test")}, + Info: &model.TableInfo{Name: pmodel.NewCIStr("t100")}, }, }, } diff --git a/br/pkg/restore/snap_client/systable_restore.go b/br/pkg/restore/snap_client/systable_restore.go index a99d9925b1ab2..fdc1b88783967 100644 --- a/br/pkg/restore/snap_client/systable_restore.go +++ b/br/pkg/restore/snap_client/systable_restore.go @@ -16,7 +16,8 @@ import ( "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/bindinfo" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" filter "github.com/pingcap/tidb/pkg/util/table-filter" "go.uber.org/multierr" @@ -159,20 +160,20 @@ func (rc *SnapClient) restoreSystemSchema(ctx context.Context, f filter.Filter, // For fast querying whether a table exists and the temporary database of it. 
type database struct { ExistingTables map[string]*model.TableInfo - Name model.CIStr - TemporaryName model.CIStr + Name pmodel.CIStr + TemporaryName pmodel.CIStr } // getSystemDatabaseByName make a record of a system database, such as mysql and sys, from info schema by its name. func (rc *SnapClient) getSystemDatabaseByName(ctx context.Context, name string) (*database, bool, error) { infoSchema := rc.dom.InfoSchema() - schema, ok := infoSchema.SchemaByName(model.NewCIStr(name)) + schema, ok := infoSchema.SchemaByName(pmodel.NewCIStr(name)) if !ok { return nil, false, nil } db := &database{ ExistingTables: map[string]*model.TableInfo{}, - Name: model.NewCIStr(name), + Name: pmodel.NewCIStr(name), TemporaryName: utils.TemporaryDBName(name), } // It's OK to get all the tables from system tables. @@ -315,7 +316,7 @@ func CheckSysTableCompatibility(dom *domain.Domain, tables []*metautil.Table) er privilegeTablesInBackup = append(privilegeTablesInBackup, table) } } - sysDB := model.NewCIStr(mysql.SystemDB) + sysDB := pmodel.NewCIStr(mysql.SystemDB) for _, table := range privilegeTablesInBackup { ti, err := restore.GetTableSchema(dom, sysDB, table.Info.Name) if err != nil { diff --git a/br/pkg/restore/snap_client/systable_restore_test.go b/br/pkg/restore/snap_client/systable_restore_test.go index 4c0c62b1ba674..adfa4231e03af 100644 --- a/br/pkg/restore/snap_client/systable_restore_test.go +++ b/br/pkg/restore/snap_client/systable_restore_test.go @@ -25,7 +25,8 @@ import ( snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/br/pkg/utiltest" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/session" "github.com/stretchr/testify/require" @@ -40,17 +41,17 @@ func TestCheckSysTableCompatibility(t *testing.T) { info, err := 
cluster.Domain.GetSnapshotInfoSchema(math.MaxUint64) require.NoError(t, err) - dbSchema, isExist := info.SchemaByName(model.NewCIStr(mysql.SystemDB)) + dbSchema, isExist := info.SchemaByName(pmodel.NewCIStr(mysql.SystemDB)) require.True(t, isExist) tmpSysDB := dbSchema.Clone() tmpSysDB.Name = utils.TemporaryDBName(mysql.SystemDB) - sysDB := model.NewCIStr(mysql.SystemDB) - userTI, err := restore.GetTableSchema(cluster.Domain, sysDB, model.NewCIStr("user")) + sysDB := pmodel.NewCIStr(mysql.SystemDB) + userTI, err := restore.GetTableSchema(cluster.Domain, sysDB, pmodel.NewCIStr("user")) require.NoError(t, err) // user table in cluster have more columns(success) mockedUserTI := userTI.Clone() - userTI.Columns = append(userTI.Columns, &model.ColumnInfo{Name: model.NewCIStr("new-name")}) + userTI.Columns = append(userTI.Columns, &model.ColumnInfo{Name: pmodel.NewCIStr("new-name")}) err = snapclient.CheckSysTableCompatibility(cluster.Domain, []*metautil.Table{{ DB: tmpSysDB, Info: mockedUserTI, @@ -60,7 +61,7 @@ func TestCheckSysTableCompatibility(t *testing.T) { // user table in cluster have less columns(failed) mockedUserTI = userTI.Clone() - mockedUserTI.Columns = append(mockedUserTI.Columns, &model.ColumnInfo{Name: model.NewCIStr("new-name")}) + mockedUserTI.Columns = append(mockedUserTI.Columns, &model.ColumnInfo{Name: pmodel.NewCIStr("new-name")}) err = snapclient.CheckSysTableCompatibility(cluster.Domain, []*metautil.Table{{ DB: tmpSysDB, Info: mockedUserTI, @@ -94,12 +95,12 @@ func TestCheckSysTableCompatibility(t *testing.T) { require.NoError(t, err) // use the mysql.db table to test for column count mismatch. 
- dbTI, err := restore.GetTableSchema(cluster.Domain, sysDB, model.NewCIStr("db")) + dbTI, err := restore.GetTableSchema(cluster.Domain, sysDB, pmodel.NewCIStr("db")) require.NoError(t, err) // other system tables in cluster have more columns(failed) mockedDBTI := dbTI.Clone() - dbTI.Columns = append(dbTI.Columns, &model.ColumnInfo{Name: model.NewCIStr("new-name")}) + dbTI.Columns = append(dbTI.Columns, &model.ColumnInfo{Name: pmodel.NewCIStr("new-name")}) err = snapclient.CheckSysTableCompatibility(cluster.Domain, []*metautil.Table{{ DB: tmpSysDB, Info: mockedDBTI, diff --git a/br/pkg/restore/tiflashrec/BUILD.bazel b/br/pkg/restore/tiflashrec/BUILD.bazel index 44e9ece230e8a..07791c626ec1e 100644 --- a/br/pkg/restore/tiflashrec/BUILD.bazel +++ b/br/pkg/restore/tiflashrec/BUILD.bazel @@ -9,9 +9,9 @@ go_library( "//br/pkg/logutil", "//br/pkg/utils", "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/format", - "//pkg/parser/model", "@com_github_pingcap_log//:log", "@org_uber_go_zap//:zap", ], @@ -26,6 +26,7 @@ go_test( deps = [ ":tiflashrec", "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser/model", "@com_github_stretchr_testify//require", ], diff --git a/br/pkg/restore/tiflashrec/tiflash_recorder.go b/br/pkg/restore/tiflashrec/tiflash_recorder.go index 8add1bb8f3849..c87f0372f86a6 100644 --- a/br/pkg/restore/tiflashrec/tiflash_recorder.go +++ b/br/pkg/restore/tiflashrec/tiflash_recorder.go @@ -23,9 +23,9 @@ import ( "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" "go.uber.org/zap" ) diff --git a/br/pkg/restore/tiflashrec/tiflash_recorder_test.go b/br/pkg/restore/tiflashrec/tiflash_recorder_test.go index b729fd97ea4ec..532462f181eba 100644 --- a/br/pkg/restore/tiflashrec/tiflash_recorder_test.go 
+++ b/br/pkg/restore/tiflashrec/tiflash_recorder_test.go @@ -20,7 +20,8 @@ import ( "github.com/pingcap/tidb/br/pkg/restore/tiflashrec" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) @@ -136,7 +137,7 @@ func TestGenSql(t *testing.T) { tInfo := func(id int, name string) *model.TableInfo { return &model.TableInfo{ ID: int64(id), - Name: model.NewCIStr(name), + Name: pmodel.NewCIStr(name), } } fakeInfo := infoschema.MockInfoSchema([]*model.TableInfo{ @@ -175,7 +176,7 @@ func TestGenResetSql(t *testing.T) { tInfo := func(id int, name string) *model.TableInfo { return &model.TableInfo{ ID: int64(id), - Name: model.NewCIStr(name), + Name: pmodel.NewCIStr(name), } } fakeInfo := infoschema.MockInfoSchema([]*model.TableInfo{ diff --git a/br/pkg/restore/utils/BUILD.bazel b/br/pkg/restore/utils/BUILD.bazel index 8b0376d89c641..a40f0a883ae09 100644 --- a/br/pkg/restore/utils/BUILD.bazel +++ b/br/pkg/restore/utils/BUILD.bazel @@ -13,7 +13,7 @@ go_library( "//br/pkg/errors", "//br/pkg/logutil", "//br/pkg/rtree", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/tablecodec", "//pkg/util/codec", "//pkg/util/redact", @@ -41,6 +41,7 @@ go_test( "//br/pkg/errors", "//br/pkg/rtree", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/sessionctx/stmtctx", "//pkg/tablecodec", diff --git a/br/pkg/restore/utils/misc.go b/br/pkg/restore/utils/misc.go index ba146de5a53d4..2135206531ce6 100644 --- a/br/pkg/restore/utils/misc.go +++ b/br/pkg/restore/utils/misc.go @@ -15,7 +15,7 @@ package utils import ( - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/util/codec" ) diff --git a/br/pkg/restore/utils/rewrite_rule.go b/br/pkg/restore/utils/rewrite_rule.go index 5d9878fd3f689..053d5550766a1 100644 --- a/br/pkg/restore/utils/rewrite_rule.go +++ 
b/br/pkg/restore/utils/rewrite_rule.go @@ -24,7 +24,7 @@ import ( berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/rtree" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/redact" diff --git a/br/pkg/restore/utils/rewrite_rule_test.go b/br/pkg/restore/utils/rewrite_rule_test.go index dc8fcd68be30d..a8a985f1d0e20 100644 --- a/br/pkg/restore/utils/rewrite_rule_test.go +++ b/br/pkg/restore/utils/rewrite_rule_test.go @@ -25,7 +25,8 @@ import ( "github.com/pingcap/tidb/br/pkg/restore/utils" "github.com/pingcap/tidb/br/pkg/rtree" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util/codec" "github.com/stretchr/testify/require" @@ -299,22 +300,22 @@ func generateRewriteTableInfos() (newTableInfo, oldTableInfo *model.TableInfo) { Indices: []*model.IndexInfo{ { ID: 1, - Name: model.NewCIStr("i1"), + Name: pmodel.NewCIStr("i1"), }, { ID: 2, - Name: model.NewCIStr("i2"), + Name: pmodel.NewCIStr("i2"), }, }, Partition: &model.PartitionInfo{ Definitions: []model.PartitionDefinition{ { ID: 100, - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), }, { ID: 200, - Name: model.NewCIStr("p2"), + Name: pmodel.NewCIStr("p2"), }, }, }, @@ -324,22 +325,22 @@ func generateRewriteTableInfos() (newTableInfo, oldTableInfo *model.TableInfo) { Indices: []*model.IndexInfo{ { ID: 1, - Name: model.NewCIStr("i1"), + Name: pmodel.NewCIStr("i1"), }, { ID: 2, - Name: model.NewCIStr("i2"), + Name: pmodel.NewCIStr("i2"), }, }, Partition: &model.PartitionInfo{ Definitions: []model.PartitionDefinition{ { ID: 101, - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), }, { ID: 
201, - Name: model.NewCIStr("p2"), + Name: pmodel.NewCIStr("p2"), }, }, }, diff --git a/br/pkg/stream/BUILD.bazel b/br/pkg/stream/BUILD.bazel index 87f21f801044b..aff6b7ee2a8e2 100644 --- a/br/pkg/stream/BUILD.bazel +++ b/br/pkg/stream/BUILD.bazel @@ -26,7 +26,7 @@ go_library( "//pkg/ddl", "//pkg/kv", "//pkg/meta", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/tablecodec", "//pkg/util", "//pkg/util/codec", @@ -65,6 +65,7 @@ go_test( "//br/pkg/streamhelper", "//pkg/ddl", "//pkg/meta", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/model", "//pkg/parser/mysql", diff --git a/br/pkg/stream/rewrite_meta_rawkv.go b/br/pkg/stream/rewrite_meta_rawkv.go index 5cdfaa2975eba..e0b7ac6252958 100644 --- a/br/pkg/stream/rewrite_meta_rawkv.go +++ b/br/pkg/stream/rewrite_meta_rawkv.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" filter "github.com/pingcap/tidb/pkg/util/table-filter" "go.uber.org/zap" ) diff --git a/br/pkg/stream/rewrite_meta_rawkv_test.go b/br/pkg/stream/rewrite_meta_rawkv_test.go index 040304cafdf18..0c904ab18d489 100644 --- a/br/pkg/stream/rewrite_meta_rawkv_test.go +++ b/br/pkg/stream/rewrite_meta_rawkv_test.go @@ -10,8 +10,9 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/types" @@ -48,7 +49,7 @@ func MockEmptySchemasReplace(midr *mockInsertDeleteRange, dbMap map[UpstreamID]* func produceDBInfoValue(dbName string, dbID int64) ([]byte, error) { dbInfo := model.DBInfo{ ID: dbID, - Name: model.NewCIStr(dbName), + Name: pmodel.NewCIStr(dbName), } return 
json.Marshal(&dbInfo) } @@ -56,7 +57,7 @@ func produceDBInfoValue(dbName string, dbID int64) ([]byte, error) { func produceTableInfoValue(tableName string, tableID int64) ([]byte, error) { tableInfo := model.TableInfo{ ID: tableID, - Name: model.NewCIStr(tableName), + Name: pmodel.NewCIStr(tableName), } return json.Marshal(&tableInfo) @@ -351,11 +352,11 @@ func TestRewriteTableInfoForPartitionTable(t *testing.T) { // create tableinfo. pt1 := model.PartitionDefinition{ ID: pt1ID, - Name: model.NewCIStr(pt1Name), + Name: pmodel.NewCIStr(pt1Name), } pt2 := model.PartitionDefinition{ ID: pt2ID, - Name: model.NewCIStr(pt2Name), + Name: pmodel.NewCIStr(pt2Name), } pi := model.PartitionInfo{ @@ -367,7 +368,7 @@ func TestRewriteTableInfoForPartitionTable(t *testing.T) { tbl := model.TableInfo{ ID: tableID, - Name: model.NewCIStr(tableName), + Name: pmodel.NewCIStr(tableName), Partition: &pi, } value, err := json.Marshal(&tbl) @@ -451,11 +452,11 @@ func TestRewriteTableInfoForExchangePartition(t *testing.T) { // construct table t1 with the partition pi(pt1, pt2). pt1 := model.PartitionDefinition{ ID: pt1ID, - Name: model.NewCIStr(pt1Name), + Name: pmodel.NewCIStr(pt1Name), } pt2 := model.PartitionDefinition{ ID: pt2ID, - Name: model.NewCIStr(pt2Name), + Name: pmodel.NewCIStr(pt2Name), } pi := model.PartitionInfo{ @@ -465,7 +466,7 @@ func TestRewriteTableInfoForExchangePartition(t *testing.T) { pi.Definitions = append(pi.Definitions, pt1, pt2) t1 := model.TableInfo{ ID: tableID1, - Name: model.NewCIStr(tableName1), + Name: pmodel.NewCIStr(tableName1), Partition: &pi, } db1 := model.DBInfo{} @@ -473,7 +474,7 @@ func TestRewriteTableInfoForExchangePartition(t *testing.T) { // construct table t2 without partition. 
t2 := model.TableInfo{ ID: tableID2, - Name: model.NewCIStr(tableName2), + Name: pmodel.NewCIStr(tableName2), } db2 := model.DBInfo{} @@ -537,16 +538,16 @@ func TestRewriteTableInfoForTTLTable(t *testing.T) { tbl := model.TableInfo{ ID: tableID, - Name: model.NewCIStr(tableName), + Name: pmodel.NewCIStr(tableName), Columns: []*model.ColumnInfo{ { ID: colID, - Name: model.NewCIStr(colName), + Name: pmodel.NewCIStr(colName), FieldType: *types.NewFieldType(mysql.TypeTimestamp), }, }, TTLInfo: &model.TTLInfo{ - ColumnName: model.NewCIStr(colName), + ColumnName: pmodel.NewCIStr(colName), IntervalExprStr: "1", IntervalTimeUnit: int(ast.TimeUnitDay), Enable: true, diff --git a/br/pkg/stream/stream_mgr.go b/br/pkg/stream/stream_mgr.go index 4acdaa6faf650..d53a66b7f1416 100644 --- a/br/pkg/stream/stream_mgr.go +++ b/br/pkg/stream/stream_mgr.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util" filter "github.com/pingcap/tidb/pkg/util/table-filter" diff --git a/br/pkg/task/BUILD.bazel b/br/pkg/task/BUILD.bazel index 545409bd37629..89bbcfac685b3 100644 --- a/br/pkg/task/BUILD.bazel +++ b/br/pkg/task/BUILD.bazel @@ -53,6 +53,7 @@ go_library( "//pkg/domain", "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/sessionctx/stmtctx", @@ -127,6 +128,7 @@ go_test( "//br/pkg/utiltest", "//pkg/config", "//pkg/ddl", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/statistics/handle/util", diff --git a/br/pkg/task/common.go b/br/pkg/task/common.go index a38a90f596671..94ef67364c376 100644 --- a/br/pkg/task/common.go +++ b/br/pkg/task/common.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/tidb/br/pkg/metautil" "github.com/pingcap/tidb/br/pkg/storage" 
"github.com/pingcap/tidb/br/pkg/utils" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" filter "github.com/pingcap/tidb/pkg/util/table-filter" "github.com/spf13/cobra" diff --git a/br/pkg/task/config_test.go b/br/pkg/task/config_test.go index 85532f019863d..bfb08fff2afd6 100644 --- a/br/pkg/task/config_test.go +++ b/br/pkg/task/config_test.go @@ -29,7 +29,8 @@ import ( snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/utils" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/statistics/handle/util" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/stretchr/testify/require" @@ -205,12 +206,12 @@ func mockReadSchemasFromBackupMeta(t *testing.T, db2Tables map[string][]string) mockSchemas := make([]*backuppb.Schema, 0) var dbID int64 = 1 for db, tables := range db2Tables { - dbName := model.NewCIStr(db) + dbName := pmodel.NewCIStr(db) mockTblList := make([]*model.TableInfo, 0) tblBytesList, statsBytesList := make([][]byte, 0), make([][]byte, 0) for i, table := range tables { - tblName := model.NewCIStr(table) + tblName := pmodel.NewCIStr(table) mockTbl := &model.TableInfo{ ID: dbID*100 + int64(i), Name: tblName, diff --git a/br/pkg/task/restore.go b/br/pkg/task/restore.go index e0447b79c0a02..e0cc7b55b539c 100644 --- a/br/pkg/task/restore.go +++ b/br/pkg/task/restore.go @@ -35,7 +35,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/util/collate" "github.com/pingcap/tidb/pkg/util/engine" "github.com/spf13/cobra" @@ -1516,7 +1517,7 @@ func 
PreCheckTableClusterIndex( if job.Type == model.ActionCreateTable { tableInfo := job.BinlogInfo.TableInfo if tableInfo != nil { - oldTableInfo, err := restore.GetTableSchema(dom, model.NewCIStr(job.SchemaName), tableInfo.Name) + oldTableInfo, err := restore.GetTableSchema(dom, pmodel.NewCIStr(job.SchemaName), tableInfo.Name) // table exists in database if err == nil { if tableInfo.IsCommonHandle != oldTableInfo.IsCommonHandle { diff --git a/br/pkg/task/restore_test.go b/br/pkg/task/restore_test.go index 10ac59f7b0932..4713e5a540ab7 100644 --- a/br/pkg/task/restore_test.go +++ b/br/pkg/task/restore_test.go @@ -23,7 +23,8 @@ import ( "github.com/pingcap/tidb/br/pkg/task" utiltest "github.com/pingcap/tidb/br/pkg/utiltest" "github.com/pingcap/tidb/pkg/ddl" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/types" @@ -69,10 +70,10 @@ func TestPreCheckTableTiFlashReplicas(t *testing.T) { } tables[i] = &metautil.Table{ - DB: &model.DBInfo{Name: model.NewCIStr("test")}, + DB: &model.DBInfo{Name: pmodel.NewCIStr("test")}, Info: &model.TableInfo{ ID: int64(i), - Name: model.NewCIStr("test" + strconv.Itoa(i)), + Name: pmodel.NewCIStr("test" + strconv.Itoa(i)), TiFlashReplica: tiflashReplica, }, } @@ -113,7 +114,7 @@ func TestPreCheckTableClusterIndex(t *testing.T) { info, err := m.Domain.GetSnapshotInfoSchema(math.MaxUint64) require.NoError(t, err) - dbSchema, isExist := info.SchemaByName(model.NewCIStr("test")) + dbSchema, isExist := info.SchemaByName(pmodel.NewCIStr("test")) require.True(t, isExist) tables := make([]*metautil.Table, 4) @@ -124,10 +125,10 @@ func TestPreCheckTableClusterIndex(t *testing.T) { DB: dbSchema, Info: &model.TableInfo{ ID: int64(i), - Name: model.NewCIStr("test" + strconv.Itoa(i)), + Name: pmodel.NewCIStr("test" + strconv.Itoa(i)), Columns: 
[]*model.ColumnInfo{{ ID: 1, - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), FieldType: *intField, State: model.StatePublic, }}, @@ -153,7 +154,7 @@ func TestPreCheckTableClusterIndex(t *testing.T) { Query: "", BinlogInfo: &model.HistoryInfo{ TableInfo: &model.TableInfo{ - Name: model.NewCIStr("test1"), + Name: pmodel.NewCIStr("test1"), IsCommonHandle: true, }, }, @@ -281,9 +282,9 @@ func TestFilterDDLJobs(t *testing.T) { require.NoErrorf(t, err, "Finially flush backupmeta failed", err) infoSchema, err := s.Mock.Domain.GetSnapshotInfoSchema(ts) require.NoErrorf(t, err, "Error get snapshot info schema: %s", err) - dbInfo, ok := infoSchema.SchemaByName(model.NewCIStr("test_db")) + dbInfo, ok := infoSchema.SchemaByName(pmodel.NewCIStr("test_db")) require.Truef(t, ok, "DB info not exist") - tableInfo, err := infoSchema.TableByName(context.Background(), model.NewCIStr("test_db"), model.NewCIStr("test_table")) + tableInfo, err := infoSchema.TableByName(context.Background(), pmodel.NewCIStr("test_db"), pmodel.NewCIStr("test_table")) require.NoErrorf(t, err, "Error get table info: %s", err) tables := []*metautil.Table{{ DB: dbInfo, @@ -346,9 +347,9 @@ func TestFilterDDLJobsV2(t *testing.T) { infoSchema, err := s.Mock.Domain.GetSnapshotInfoSchema(ts) require.NoErrorf(t, err, "Error get snapshot info schema: %s", err) - dbInfo, ok := infoSchema.SchemaByName(model.NewCIStr("test_db")) + dbInfo, ok := infoSchema.SchemaByName(pmodel.NewCIStr("test_db")) require.Truef(t, ok, "DB info not exist") - tableInfo, err := infoSchema.TableByName(context.Background(), model.NewCIStr("test_db"), model.NewCIStr("test_table")) + tableInfo, err := infoSchema.TableByName(context.Background(), pmodel.NewCIStr("test_db"), pmodel.NewCIStr("test_table")) require.NoErrorf(t, err, "Error get table info: %s", err) tables := []*metautil.Table{{ DB: dbInfo, diff --git a/br/pkg/task/stream.go b/br/pkg/task/stream.go index 29e3177df7e0c..1704fbc832711 100644 --- a/br/pkg/task/stream.go +++ 
b/br/pkg/task/stream.go @@ -54,7 +54,7 @@ import ( "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/util/cdcutil" "github.com/spf13/pflag" "github.com/tikv/client-go/v2/oracle" diff --git a/br/pkg/utils/BUILD.bazel b/br/pkg/utils/BUILD.bazel index e00548ccabca0..d185e669169c4 100644 --- a/br/pkg/utils/BUILD.bazel +++ b/br/pkg/utils/BUILD.bazel @@ -28,6 +28,7 @@ go_library( "//br/pkg/logutil", "//pkg/errno", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", @@ -85,10 +86,11 @@ go_test( deps = [ "//br/pkg/errors", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/ast", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/types", + "//pkg/planner/core/resolve", "//pkg/testkit/testsetup", "//pkg/types", "//pkg/util/chunk", diff --git a/br/pkg/utils/db_test.go b/br/pkg/utils/db_test.go index a3d67a6a7667e..b5e08d99c7113 100644 --- a/br/pkg/utils/db_test.go +++ b/br/pkg/utils/db_test.go @@ -9,9 +9,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/sqlexec" @@ -20,7 +21,7 @@ import ( type mockRestrictedSQLExecutor struct { rows []chunk.Row - fields []*ast.ResultField + fields []*resolve.ResultField errHappen bool } @@ -28,11 +29,11 @@ func (m *mockRestrictedSQLExecutor) ParseWithParams(ctx context.Context, sql str return nil, nil } -func (m *mockRestrictedSQLExecutor) ExecRestrictedStmt(ctx context.Context, stmt ast.StmtNode, opts ...sqlexec.OptionFuncAlias) ([]chunk.Row, 
[]*ast.ResultField, error) { +func (m *mockRestrictedSQLExecutor) ExecRestrictedStmt(ctx context.Context, stmt ast.StmtNode, opts ...sqlexec.OptionFuncAlias) ([]chunk.Row, []*resolve.ResultField, error) { return nil, nil, nil } -func (m *mockRestrictedSQLExecutor) ExecRestrictedSQL(ctx context.Context, opts []sqlexec.OptionFuncAlias, sql string, args ...any) ([]chunk.Row, []*ast.ResultField, error) { +func (m *mockRestrictedSQLExecutor) ExecRestrictedSQL(ctx context.Context, opts []sqlexec.OptionFuncAlias, sql string, args ...any) ([]chunk.Row, []*resolve.ResultField, error) { if m.errHappen { return nil, nil, errors.New("injected error") } @@ -68,7 +69,7 @@ func TestGc(t *testing.T) { // | tikv | 172.16.6.46:3460 | gc.ratio-threshold | 1.1 | // | tikv | 172.16.6.47:3460 | gc.ratio-threshold | 1.1 | // +------+-------------------+--------------------+-------+ - fields := make([]*ast.ResultField, 4) + fields := make([]*resolve.ResultField, 4) tps := []*types.FieldType{ types.NewFieldType(mysql.TypeString), types.NewFieldType(mysql.TypeString), @@ -76,7 +77,7 @@ func TestGc(t *testing.T) { types.NewFieldType(mysql.TypeString), } for i := 0; i < len(tps); i++ { - rf := new(ast.ResultField) + rf := new(resolve.ResultField) rf.Column = new(model.ColumnInfo) rf.Column.FieldType = *tps[i] fields[i] = rf @@ -114,7 +115,7 @@ func TestRegionSplitInfo(t *testing.T) { // | tikv | 127.0.0.1:20161 | coprocessor.region-split-keys | 100000 | // +------+-------------------+-------------------------------+--------+ - fields := make([]*ast.ResultField, 4) + fields := make([]*resolve.ResultField, 4) tps := []*types.FieldType{ types.NewFieldType(mysql.TypeString), types.NewFieldType(mysql.TypeString), @@ -122,7 +123,7 @@ func TestRegionSplitInfo(t *testing.T) { types.NewFieldType(mysql.TypeString), } for i := 0; i < len(tps); i++ { - rf := new(ast.ResultField) + rf := new(resolve.ResultField) rf.Column = new(model.ColumnInfo) rf.Column.FieldType = *tps[i] fields[i] = rf diff --git 
a/br/pkg/utils/schema.go b/br/pkg/utils/schema.go index 3857abcc25bdb..47ea86dcc9370 100644 --- a/br/pkg/utils/schema.go +++ b/br/pkg/utils/schema.go @@ -6,7 +6,8 @@ import ( "fmt" "strings" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" ) @@ -31,7 +32,7 @@ func EncloseDBAndTable(database, table string) string { } // IsTemplateSysDB checks wheterh the dbname is temporary system database(__TiDB_BR_Temporary_mysql or __TiDB_BR_Temporary_sys). -func IsTemplateSysDB(dbname model.CIStr) bool { +func IsTemplateSysDB(dbname pmodel.CIStr) bool { return dbname.O == temporaryDBNamePrefix+mysql.SystemDB || dbname.O == temporaryDBNamePrefix+mysql.SysDB } @@ -42,12 +43,12 @@ func IsSysDB(dbLowerName string) bool { } // TemporaryDBName makes a 'private' database name. -func TemporaryDBName(db string) model.CIStr { - return model.NewCIStr(temporaryDBNamePrefix + db) +func TemporaryDBName(db string) pmodel.CIStr { + return pmodel.NewCIStr(temporaryDBNamePrefix + db) } // GetSysDBName get the original name of system DB -func GetSysDBName(tempDB model.CIStr) (string, bool) { +func GetSysDBName(tempDB pmodel.CIStr) (string, bool) { if ok := strings.HasPrefix(tempDB.O, temporaryDBNamePrefix); !ok { return tempDB.O, false } @@ -55,7 +56,7 @@ func GetSysDBName(tempDB model.CIStr) (string, bool) { } // GetSysDBCIStrName get the CIStr name of system DB -func GetSysDBCIStrName(tempDB model.CIStr) (model.CIStr, bool) { +func GetSysDBCIStrName(tempDB pmodel.CIStr) (pmodel.CIStr, bool) { if ok := strings.HasPrefix(tempDB.O, temporaryDBNamePrefix); !ok { return tempDB, false } diff --git a/br/pkg/version/BUILD.bazel b/br/pkg/version/BUILD.bazel index b9b0f7632ddba..81b31e133ddb2 100644 --- a/br/pkg/version/BUILD.bazel +++ b/br/pkg/version/BUILD.bazel @@ -9,7 +9,7 @@ go_library( "//br/pkg/errors", "//br/pkg/logutil", "//br/pkg/version/build", - 
"//pkg/parser/model", + "//pkg/meta/model", "//pkg/util/dbutil", "//pkg/util/engine", "@com_github_coreos_go_semver//semver", @@ -30,7 +30,7 @@ go_test( shard_count = 10, deps = [ "//br/pkg/version/build", - "//pkg/parser/model", + "//pkg/meta/model", "@com_github_coreos_go_semver//semver", "@com_github_data_dog_go_sqlmock//:go-sqlmock", "@com_github_pingcap_kvproto//pkg/metapb", diff --git a/br/pkg/version/version.go b/br/pkg/version/version.go index ec6be983891cf..4b0f3c34cdbad 100644 --- a/br/pkg/version/version.go +++ b/br/pkg/version/version.go @@ -17,7 +17,7 @@ import ( berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/version/build" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/util/dbutil" "github.com/pingcap/tidb/pkg/util/engine" pd "github.com/tikv/pd/client" diff --git a/br/pkg/version/version_test.go b/br/pkg/version/version_test.go index af611e2fff922..4e7a2966e7dac 100644 --- a/br/pkg/version/version_test.go +++ b/br/pkg/version/version_test.go @@ -13,7 +13,7 @@ import ( "github.com/coreos/go-semver/semver" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/br/pkg/version/build" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/stretchr/testify/require" pd "github.com/tikv/pd/client" ) diff --git a/br/tests/br_key_locked/BUILD.bazel b/br/tests/br_key_locked/BUILD.bazel index c160486e62af7..e1e62ab1a4210 100644 --- a/br/tests/br_key_locked/BUILD.bazel +++ b/br/tests/br_key_locked/BUILD.bazel @@ -13,7 +13,7 @@ go_library( "//br/pkg/task", "//pkg/config", "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/store/driver", "//pkg/tablecodec", "//pkg/util/codec", diff --git a/br/tests/br_key_locked/locker.go b/br/tests/br_key_locked/locker.go index f8e22c4f3d786..edba247f8ff7c 100644 --- a/br/tests/br_key_locked/locker.go +++ 
b/br/tests/br_key_locked/locker.go @@ -38,7 +38,7 @@ import ( "github.com/pingcap/tidb/br/pkg/task" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/store/driver" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/tikv/client-go/v2/oracle" diff --git a/cmd/importer/BUILD.bazel b/cmd/importer/BUILD.bazel index 46a6a04bbb301..bf0fc5857c225 100644 --- a/cmd/importer/BUILD.bazel +++ b/cmd/importer/BUILD.bazel @@ -16,9 +16,9 @@ go_library( visibility = ["//visibility:private"], deps = [ "//pkg/ddl", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/planner/core", "//pkg/statistics", diff --git a/cmd/importer/parser.go b/cmd/importer/parser.go index fc82045380c97..158bfda152215 100644 --- a/cmd/importer/parser.go +++ b/cmd/importer/parser.go @@ -22,9 +22,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/pingcap/tidb/pkg/ddl" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" _ "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/mock" diff --git a/cmd/importer/stats.go b/cmd/importer/stats.go index 6c71d8c48328a..b338aa7641385 100644 --- a/cmd/importer/stats.go +++ b/cmd/importer/stats.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" stats "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/statistics/handle/storage" "github.com/pingcap/tidb/pkg/statistics/handle/util" diff --git a/dumpling/export/BUILD.bazel b/dumpling/export/BUILD.bazel index 3bbbff08eeba5..211de117c7660 100644 --- a/dumpling/export/BUILD.bazel +++ 
b/dumpling/export/BUILD.bazel @@ -36,6 +36,7 @@ go_library( "//pkg/config", "//pkg/errno", "//pkg/infoschema/context", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/format", diff --git a/dumpling/export/sql.go b/dumpling/export/sql.go index 690ef65fe054f..db924d5309528 100644 --- a/dumpling/export/sql.go +++ b/dumpling/export/sql.go @@ -21,7 +21,8 @@ import ( "github.com/pingcap/tidb/dumpling/log" dbconfig "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" pd "github.com/tikv/pd/client/http" "go.uber.org/multierr" "go.uber.org/zap" @@ -1548,7 +1549,7 @@ func GetDBInfo(db *sql.Conn, tables map[string]map[string]struct{}) ([]*model.DB } last := len(schemas) - 1 if last < 0 || schemas[last].Name.O != tableSchema { - dbInfo := &model.DBInfo{Name: model.CIStr{O: tableSchema}} + dbInfo := &model.DBInfo{Name: pmodel.CIStr{O: tableSchema}} dbInfo.Deprecated.Tables = make([]*model.TableInfo, 0, len(tables[tableSchema])) schemas = append(schemas, dbInfo) last++ @@ -1560,14 +1561,14 @@ func GetDBInfo(db *sql.Conn, tables map[string]map[string]struct{}) ([]*model.DB for partitionName, partitionID := range ptm { partition.Definitions = append(partition.Definitions, model.PartitionDefinition{ ID: partitionID, - Name: model.CIStr{O: partitionName}, + Name: pmodel.CIStr{O: partitionName}, }) } } } schemas[last].Deprecated.Tables = append(schemas[last].Deprecated.Tables, &model.TableInfo{ ID: tidbTableID, - Name: model.CIStr{O: tableName}, + Name: pmodel.CIStr{O: tableName}, Partition: partition, }) return nil diff --git a/lightning/pkg/importer/BUILD.bazel b/lightning/pkg/importer/BUILD.bazel index af898abb2ca62..0dfb42e114c84 100644 --- a/lightning/pkg/importer/BUILD.bazel +++ b/lightning/pkg/importer/BUILD.bazel @@ -52,9 +52,9 @@ go_library( "//pkg/lightning/verification", 
"//pkg/lightning/worker", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/planner/core", "//pkg/session", @@ -150,6 +150,7 @@ go_test( "//pkg/lightning/worker", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/model", diff --git a/lightning/pkg/importer/check_info_test.go b/lightning/pkg/importer/check_info_test.go index 17c129f9e6aa6..ed02f15f23149 100644 --- a/lightning/pkg/importer/check_info_test.go +++ b/lightning/pkg/importer/check_info_test.go @@ -33,9 +33,9 @@ import ( "github.com/pingcap/tidb/pkg/lightning/config" "github.com/pingcap/tidb/pkg/lightning/mydump" "github.com/pingcap/tidb/pkg/lightning/worker" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" tmock "github.com/pingcap/tidb/pkg/util/mock" "github.com/stretchr/testify/require" diff --git a/lightning/pkg/importer/chunk_process.go b/lightning/pkg/importer/chunk_process.go index 802e7c15d4b6c..b196e59ca92c2 100644 --- a/lightning/pkg/importer/chunk_process.go +++ b/lightning/pkg/importer/chunk_process.go @@ -36,7 +36,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/mydump" verify "github.com/pingcap/tidb/pkg/lightning/verification" "github.com/pingcap/tidb/pkg/lightning/worker" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/store/driver/txn" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/lightning/pkg/importer/chunk_process_test.go b/lightning/pkg/importer/chunk_process_test.go index 81ab219b5da26..c539b353624dd 100644 --- a/lightning/pkg/importer/chunk_process_test.go +++ b/lightning/pkg/importer/chunk_process_test.go @@ -41,9 +41,9 @@ import ( 
"github.com/pingcap/tidb/pkg/lightning/log" "github.com/pingcap/tidb/pkg/lightning/mydump" "github.com/pingcap/tidb/pkg/lightning/worker" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" tmock "github.com/pingcap/tidb/pkg/util/mock" diff --git a/lightning/pkg/importer/dup_detect.go b/lightning/pkg/importer/dup_detect.go index e955e22ed3477..287863970e001 100644 --- a/lightning/pkg/importer/dup_detect.go +++ b/lightning/pkg/importer/dup_detect.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/config" "github.com/pingcap/tidb/pkg/lightning/duplicate" "github.com/pingcap/tidb/pkg/lightning/log" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/types" diff --git a/lightning/pkg/importer/get_pre_info.go b/lightning/pkg/importer/get_pre_info.go index 9d5dbf6979d73..09d45de143630 100644 --- a/lightning/pkg/importer/get_pre_info.go +++ b/lightning/pkg/importer/get_pre_info.go @@ -43,9 +43,9 @@ import ( "github.com/pingcap/tidb/pkg/lightning/mydump" "github.com/pingcap/tidb/pkg/lightning/verification" "github.com/pingcap/tidb/pkg/lightning/worker" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" _ "github.com/pingcap/tidb/pkg/planner/core" // to setup expression.EvalAstExpr. 
Otherwise we cannot parse the default value "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/types" diff --git a/lightning/pkg/importer/import.go b/lightning/pkg/importer/import.go index dfb441afdc8dc..ead280501d3ce 100644 --- a/lightning/pkg/importer/import.go +++ b/lightning/pkg/importer/import.go @@ -56,7 +56,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/tikv" "github.com/pingcap/tidb/pkg/lightning/worker" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/store/driver" diff --git a/lightning/pkg/importer/import_test.go b/lightning/pkg/importer/import_test.go index 0150aa05a9e74..1dbc692a4c13a 100644 --- a/lightning/pkg/importer/import_test.go +++ b/lightning/pkg/importer/import_test.go @@ -31,9 +31,9 @@ import ( "github.com/pingcap/tidb/pkg/lightning/errormanager" "github.com/pingcap/tidb/pkg/lightning/log" "github.com/pingcap/tidb/pkg/lightning/mydump" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/types" tmock "github.com/pingcap/tidb/pkg/util/mock" router "github.com/pingcap/tidb/pkg/util/table-router" diff --git a/lightning/pkg/importer/meta_manager_test.go b/lightning/pkg/importer/meta_manager_test.go index 8146d64c8c83a..e0c335e72cb82 100644 --- a/lightning/pkg/importer/meta_manager_test.go +++ b/lightning/pkg/importer/meta_manager_test.go @@ -32,9 +32,9 @@ import ( "github.com/pingcap/tidb/pkg/lightning/verification" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" 
"github.com/pingcap/tidb/pkg/store/mockstore" tmock "github.com/pingcap/tidb/pkg/util/mock" "github.com/stretchr/testify/require" diff --git a/lightning/pkg/importer/mock/BUILD.bazel b/lightning/pkg/importer/mock/BUILD.bazel index 5840e63976110..3724f015eb38b 100644 --- a/lightning/pkg/importer/mock/BUILD.bazel +++ b/lightning/pkg/importer/mock/BUILD.bazel @@ -10,6 +10,7 @@ go_library( "//lightning/pkg/importer/opts", "//pkg/errno", "//pkg/lightning/mydump", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/util/dbterror", "//pkg/util/filter", @@ -27,6 +28,7 @@ go_test( flaky = True, deps = [ "//lightning/pkg/importer", + "//pkg/meta/model", "//pkg/parser/model", "@com_github_stretchr_testify//require", ], diff --git a/lightning/pkg/importer/mock/mock.go b/lightning/pkg/importer/mock/mock.go index b032d2ac9c5ac..a5979998954cd 100644 --- a/lightning/pkg/importer/mock/mock.go +++ b/lightning/pkg/importer/mock/mock.go @@ -24,7 +24,8 @@ import ( ropts "github.com/pingcap/tidb/lightning/pkg/importer/opts" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/lightning/mydump" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/util/dbterror" "github.com/pingcap/tidb/pkg/util/filter" pdhttp "github.com/tikv/pd/client/http" @@ -217,7 +218,7 @@ func (t *TargetInfo) SetTableInfo(schemaName string, tableName string, tblInfo * func (t *TargetInfo) FetchRemoteDBModels(_ context.Context) ([]*model.DBInfo, error) { resultInfos := []*model.DBInfo{} for dbName := range t.dbTblInfoMap { - resultInfos = append(resultInfos, &model.DBInfo{Name: model.NewCIStr(dbName)}) + resultInfos = append(resultInfos, &model.DBInfo{Name: pmodel.NewCIStr(dbName)}) } return resultInfos, nil } diff --git a/lightning/pkg/importer/mock/mock_test.go b/lightning/pkg/importer/mock/mock_test.go index 061052dc7d13a..84cf2a88a4e76 100644 --- 
a/lightning/pkg/importer/mock/mock_test.go +++ b/lightning/pkg/importer/mock/mock_test.go @@ -20,7 +20,8 @@ import ( "testing" "github.com/pingcap/tidb/lightning/pkg/importer" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) @@ -163,16 +164,16 @@ func TestMockTargetInfoBasic(t *testing.T) { &TableInfo{ TableModel: &model.TableInfo{ ID: 1, - Name: model.NewCIStr("testtbl1"), + Name: pmodel.NewCIStr("testtbl1"), Columns: []*model.ColumnInfo{ { ID: 1, - Name: model.NewCIStr("c_1"), + Name: pmodel.NewCIStr("c_1"), Offset: 0, }, { ID: 2, - Name: model.NewCIStr("c_2"), + Name: pmodel.NewCIStr("c_2"), Offset: 1, }, }, diff --git a/lightning/pkg/importer/precheck_impl.go b/lightning/pkg/importer/precheck_impl.go index d36afc0254978..b0fc797eff2e7 100644 --- a/lightning/pkg/importer/precheck_impl.go +++ b/lightning/pkg/importer/precheck_impl.go @@ -39,7 +39,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/config" "github.com/pingcap/tidb/pkg/lightning/log" "github.com/pingcap/tidb/pkg/lightning/mydump" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/types" diff --git a/lightning/pkg/importer/table_import.go b/lightning/pkg/importer/table_import.go index ccc0fcc088b3b..658d17430ebc6 100644 --- a/lightning/pkg/importer/table_import.go +++ b/lightning/pkg/importer/table_import.go @@ -47,7 +47,7 @@ import ( verify "github.com/pingcap/tidb/pkg/lightning/verification" "github.com/pingcap/tidb/pkg/lightning/worker" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" diff --git 
a/lightning/pkg/importer/table_import_test.go b/lightning/pkg/importer/table_import_test.go index ba28b155a8586..aeb024863952f 100644 --- a/lightning/pkg/importer/table_import_test.go +++ b/lightning/pkg/importer/table_import_test.go @@ -52,9 +52,10 @@ import ( "github.com/pingcap/tidb/pkg/lightning/mydump" "github.com/pingcap/tidb/pkg/lightning/verification" "github.com/pingcap/tidb/pkg/lightning/worker" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/types" @@ -1559,12 +1560,12 @@ func (s *tableRestoreSuite) TestSchemaIsValid() { Columns: []*model.ColumnInfo{ { // colA has the default value - Name: model.NewCIStr("colA"), + Name: pmodel.NewCIStr("colA"), DefaultIsExpr: true, }, { // colB doesn't have the default value - Name: model.NewCIStr("colB"), + Name: pmodel.NewCIStr("colB"), FieldType: types.NewFieldTypeBuilder().SetType(0).SetFlag(1).Build(), }, }, @@ -1608,7 +1609,7 @@ func (s *tableRestoreSuite) TestSchemaIsValid() { Columns: []*model.ColumnInfo{ { // colB has the default value - Name: model.NewCIStr("colB"), + Name: pmodel.NewCIStr("colB"), DefaultIsExpr: true, }, }, @@ -1659,7 +1660,7 @@ func (s *tableRestoreSuite) TestSchemaIsValid() { Columns: []*model.ColumnInfo{ { // colB has the default value - Name: model.NewCIStr("colB"), + Name: pmodel.NewCIStr("colB"), DefaultIsExpr: true, }, }, @@ -1711,12 +1712,12 @@ func (s *tableRestoreSuite) TestSchemaIsValid() { Columns: []*model.ColumnInfo{ { // colB has the default value - Name: model.NewCIStr("colB"), + Name: pmodel.NewCIStr("colB"), DefaultIsExpr: true, }, { // colC doesn't have the default value - Name: model.NewCIStr("colC"), + Name: pmodel.NewCIStr("colC"), FieldType: 
types.NewFieldTypeBuilder().SetType(0).SetFlag(1).Build(), }, }, @@ -1767,12 +1768,12 @@ func (s *tableRestoreSuite) TestSchemaIsValid() { Columns: []*model.ColumnInfo{ { // colB doesn't have the default value - Name: model.NewCIStr("colB"), + Name: pmodel.NewCIStr("colB"), FieldType: types.NewFieldTypeBuilder().SetType(0).SetFlag(1).Build(), }, { // colC has the default value - Name: model.NewCIStr("colC"), + Name: pmodel.NewCIStr("colC"), DefaultIsExpr: true, }, }, @@ -1857,7 +1858,7 @@ func (s *tableRestoreSuite) TestSchemaIsValid() { Columns: []*model.ColumnInfo{ { // colB has the default value - Name: model.NewCIStr("colB"), + Name: pmodel.NewCIStr("colB"), DefaultIsExpr: true, }, }, @@ -1916,10 +1917,10 @@ func (s *tableRestoreSuite) TestSchemaIsValid() { Core: &model.TableInfo{ Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("colA"), + Name: pmodel.NewCIStr("colA"), }, { - Name: model.NewCIStr("colB"), + Name: pmodel.NewCIStr("colB"), }, }, }, @@ -1975,10 +1976,10 @@ func (s *tableRestoreSuite) TestSchemaIsValid() { Core: &model.TableInfo{ Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("colA"), + Name: pmodel.NewCIStr("colA"), }, { - Name: model.NewCIStr("colB"), + Name: pmodel.NewCIStr("colB"), }, }, }, @@ -2023,10 +2024,10 @@ func (s *tableRestoreSuite) TestSchemaIsValid() { Core: &model.TableInfo{ Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("colA"), + Name: pmodel.NewCIStr("colA"), }, { - Name: model.NewCIStr("colB"), + Name: pmodel.NewCIStr("colB"), }, }, }, @@ -2071,10 +2072,10 @@ func (s *tableRestoreSuite) TestSchemaIsValid() { Core: &model.TableInfo{ Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("colA"), + Name: pmodel.NewCIStr("colA"), }, { - Name: model.NewCIStr("colB"), + Name: pmodel.NewCIStr("colB"), }, }, }, @@ -2136,14 +2137,14 @@ func (s *tableRestoreSuite) TestSchemaIsValid() { Core: &model.TableInfo{ Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("colA"), + Name: pmodel.NewCIStr("colA"), }, { - 
Name: model.NewCIStr("colB"), + Name: pmodel.NewCIStr("colB"), DefaultIsExpr: true, }, { - Name: model.NewCIStr("colC"), + Name: pmodel.NewCIStr("colC"), }, }, }, @@ -2259,11 +2260,11 @@ func (s *tableRestoreSuite) TestGBKEncodedSchemaIsValid() { Core: &model.TableInfo{ Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("colA"), + Name: pmodel.NewCIStr("colA"), FieldType: types.NewFieldTypeBuilder().SetType(0).SetFlag(1).Build(), }, { - Name: model.NewCIStr("colB"), + Name: pmodel.NewCIStr("colB"), FieldType: types.NewFieldTypeBuilder().SetType(0).SetFlag(1).Build(), }, }, diff --git a/lightning/pkg/importer/tidb.go b/lightning/pkg/importer/tidb.go index a732bee093b5f..c14747f37cd4c 100644 --- a/lightning/pkg/importer/tidb.go +++ b/lightning/pkg/importer/tidb.go @@ -30,8 +30,8 @@ import ( "github.com/pingcap/tidb/pkg/lightning/log" "github.com/pingcap/tidb/pkg/lightning/metric" "github.com/pingcap/tidb/pkg/lightning/mydump" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/tikv/client-go/v2/util" diff --git a/lightning/pkg/importer/tidb_test.go b/lightning/pkg/importer/tidb_test.go index e10f3bede6c3b..2a2c60b960ad3 100644 --- a/lightning/pkg/importer/tidb_test.go +++ b/lightning/pkg/importer/tidb_test.go @@ -28,8 +28,8 @@ import ( "github.com/pingcap/tidb/pkg/lightning/checkpoints" "github.com/pingcap/tidb/pkg/lightning/metric" "github.com/pingcap/tidb/pkg/lightning/mydump" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" tmysql "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/util/mock" "github.com/pingcap/tidb/pkg/util/promutil" diff --git a/pkg/autoid_service/BUILD.bazel b/pkg/autoid_service/BUILD.bazel index 79e3f41701450..54054f5362fb0 100644 --- 
a/pkg/autoid_service/BUILD.bazel +++ b/pkg/autoid_service/BUILD.bazel @@ -11,9 +11,9 @@ go_library( "//pkg/kv", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/metrics", "//pkg/owner", - "//pkg/parser/model", "//pkg/util/etcd", "//pkg/util/logutil", "//pkg/util/mathutil", diff --git a/pkg/autoid_service/autoid.go b/pkg/autoid_service/autoid.go index dbbe7b8ee3353..64f1e216f2952 100644 --- a/pkg/autoid_service/autoid.go +++ b/pkg/autoid_service/autoid.go @@ -29,9 +29,9 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" autoid1 "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/owner" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/util/etcd" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/mathutil" diff --git a/pkg/bindinfo/BUILD.bazel b/pkg/bindinfo/BUILD.bazel index 5732b1a6f82d4..55b8c67a178c9 100644 --- a/pkg/bindinfo/BUILD.bazel +++ b/pkg/bindinfo/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "//pkg/parser/format", "//pkg/parser/mysql", "//pkg/parser/terror", + "//pkg/planner/core/resolve", "//pkg/sessionctx", "//pkg/sessionctx/sessionstates", "//pkg/sessionctx/stmtctx", @@ -71,6 +72,7 @@ go_test( "//pkg/bindinfo/norm", "//pkg/config", "//pkg/domain", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/auth", diff --git a/pkg/bindinfo/capture_test.go b/pkg/bindinfo/capture_test.go index dfda50c3d12ed..97cc2e51ca7ad 100644 --- a/pkg/bindinfo/capture_test.go +++ b/pkg/bindinfo/capture_test.go @@ -26,9 +26,10 @@ import ( "github.com/pingcap/tidb/pkg/bindinfo/norm" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/domain" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" 
"github.com/pingcap/tidb/pkg/testkit" utilparser "github.com/pingcap/tidb/pkg/util/parser" "github.com/pingcap/tidb/pkg/util/stmtsummary" @@ -289,7 +290,7 @@ func TestCapturePlanBaselineIgnoreTiFlash(t *testing.T) { // Create virtual tiflash replica info. domSession := domain.GetDomain(tk.Session()) is := domSession.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, diff --git a/pkg/bindinfo/tests/BUILD.bazel b/pkg/bindinfo/tests/BUILD.bazel index 0629e045c6169..c131284da79e7 100644 --- a/pkg/bindinfo/tests/BUILD.bazel +++ b/pkg/bindinfo/tests/BUILD.bazel @@ -15,6 +15,7 @@ go_test( "//pkg/bindinfo/internal", "//pkg/bindinfo/norm", "//pkg/domain", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/model", "//pkg/parser/terror", diff --git a/pkg/bindinfo/tests/bind_test.go b/pkg/bindinfo/tests/bind_test.go index 9dd948861bb3c..23abf63392fef 100644 --- a/pkg/bindinfo/tests/bind_test.go +++ b/pkg/bindinfo/tests/bind_test.go @@ -26,8 +26,9 @@ import ( "github.com/pingcap/tidb/pkg/bindinfo/internal" "github.com/pingcap/tidb/pkg/bindinfo/norm" "github.com/pingcap/tidb/pkg/domain" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/util" @@ -570,7 +571,7 @@ func TestBindingWithIsolationRead(t *testing.T) { // Create virtual tiflash replica info. 
dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, diff --git a/pkg/bindinfo/util.go b/pkg/bindinfo/util.go index b48d73911126d..050c606bc5af2 100644 --- a/pkg/bindinfo/util.go +++ b/pkg/bindinfo/util.go @@ -18,7 +18,7 @@ import ( "context" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/sqlexec" @@ -31,7 +31,7 @@ func exec(sctx sessionctx.Context, sql string, args ...any) (sqlexec.RecordSet, } // execRows is a helper function to execute sql and return rows and fields. -func execRows(sctx sessionctx.Context, sql string, args ...any) (rows []chunk.Row, fields []*ast.ResultField, err error) { +func execRows(sctx sessionctx.Context, sql string, args ...any) (rows []chunk.Row, fields []*resolve.ResultField, err error) { sqlExec := sctx.GetRestrictedSQLExecutor() return sqlExec.ExecRestrictedSQL(kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo), []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseCurSession}, sql, args...) 
diff --git a/pkg/ddl/BUILD.bazel b/pkg/ddl/BUILD.bazel index b49e72df37bbf..3ac253225d7c3 100644 --- a/pkg/ddl/BUILD.bazel +++ b/pkg/ddl/BUILD.bazel @@ -106,6 +106,7 @@ go_library( "//pkg/lightning/config", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/metrics", "//pkg/owner", "//pkg/parser", @@ -296,6 +297,7 @@ go_test( "//pkg/lightning/backend/external", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/auth", diff --git a/pkg/ddl/add_column.go b/pkg/ddl/add_column.go index 7b55caf5160ca..396c2cc04c2f9 100644 --- a/pkg/ddl/add_column.go +++ b/pkg/ddl/add_column.go @@ -31,10 +31,11 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" field_types "github.com/pingcap/tidb/pkg/parser/types" @@ -542,7 +543,7 @@ func columnDefToCol(ctx sessionctx.Context, offset int, colDef *ast.ColumnDef, o } col.GeneratedExprString = sb.String() col.GeneratedStored = v.Stored - _, dependColNames, err := findDependedColumnNames(model.NewCIStr(""), model.NewCIStr(""), colDef) + _, dependColNames, err := findDependedColumnNames(pmodel.NewCIStr(""), pmodel.NewCIStr(""), colDef) if err != nil { return nil, nil, errors.Trace(err) } diff --git a/pkg/ddl/backfilling.go b/pkg/ddl/backfilling.go index 49fa029d7e59a..978b398045c0e 100644 --- a/pkg/ddl/backfilling.go +++ b/pkg/ddl/backfilling.go @@ -34,8 +34,9 @@ import ( "github.com/pingcap/tidb/pkg/expression" exprctx "github.com/pingcap/tidb/pkg/expression/context" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" 
"github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -631,7 +632,7 @@ func loadDDLReorgVars(ctx context.Context, sessPool *sess.Pool) error { return ddlutil.LoadDDLReorgVars(ctx, sCtx) } -func makeupDecodeColMap(dbName model.CIStr, t table.Table) (map[int64]decoder.Column, error) { +func makeupDecodeColMap(dbName pmodel.CIStr, t table.Table) (map[int64]decoder.Column, error) { writableColInfos := make([]*model.ColumnInfo, 0, len(t.WritableCols())) for _, col := range t.WritableCols() { writableColInfos = append(writableColInfos, col.ColumnInfo) diff --git a/pkg/ddl/backfilling_dist_executor.go b/pkg/ddl/backfilling_dist_executor.go index 7afb0f770233d..bc37b650987c0 100644 --- a/pkg/ddl/backfilling_dist_executor.go +++ b/pkg/ddl/backfilling_dist_executor.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb/pkg/disttask/framework/taskexecutor/execute" "github.com/pingcap/tidb/pkg/lightning/backend/external" "github.com/pingcap/tidb/pkg/lightning/common" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" diff --git a/pkg/ddl/backfilling_dist_scheduler.go b/pkg/ddl/backfilling_dist_scheduler.go index 1f8a8ce205b67..77fda5858d2cd 100644 --- a/pkg/ddl/backfilling_dist_scheduler.go +++ b/pkg/ddl/backfilling_dist_scheduler.go @@ -37,7 +37,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/backend/local" "github.com/pingcap/tidb/pkg/lightning/config" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/store/helper" "github.com/pingcap/tidb/pkg/table" 
"github.com/pingcap/tidb/pkg/util/backoff" diff --git a/pkg/ddl/backfilling_dist_scheduler_test.go b/pkg/ddl/backfilling_dist_scheduler_test.go index 6e4f1f32c9264..e5d5c01b776e8 100644 --- a/pkg/ddl/backfilling_dist_scheduler_test.go +++ b/pkg/ddl/backfilling_dist_scheduler_test.go @@ -29,7 +29,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/lightning/backend/external" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" @@ -52,7 +53,7 @@ func TestBackfillingSchedulerLocalMode(t *testing.T) { "PARTITION p2 VALUES LESS THAN (1000),\n" + "PARTITION p3 VALUES LESS THAN MAXVALUE\n);") task := createAddIndexTask(t, dom, "test", "tp1", proto.Backfill, false) - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp1")) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp1")) require.NoError(t, err) tblInfo := tbl.Meta() @@ -283,9 +284,9 @@ func createAddIndexTask(t *testing.T, tblName string, taskType proto.TaskType, useGlobalSort bool) *proto.Task { - db, ok := dom.InfoSchema().SchemaByName(model.NewCIStr(dbName)) + db, ok := dom.InfoSchema().SchemaByName(pmodel.NewCIStr(dbName)) require.True(t, ok) - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr(dbName), model.NewCIStr(tblName)) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(dbName), pmodel.NewCIStr(tblName)) require.NoError(t, err) tblInfo := tbl.Meta() defaultSQLMode, err := mysql.GetSQLMode(mysql.DefaultSQLMode) diff --git a/pkg/ddl/backfilling_import_cloud.go b/pkg/ddl/backfilling_import_cloud.go index db7e04c31d8c1..a3bf8579f90a8 100644 --- 
a/pkg/ddl/backfilling_import_cloud.go +++ b/pkg/ddl/backfilling_import_cloud.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/backend/external" "github.com/pingcap/tidb/pkg/lightning/common" "github.com/pingcap/tidb/pkg/lightning/config" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/util/logutil" diff --git a/pkg/ddl/backfilling_operators.go b/pkg/ddl/backfilling_operators.go index 616432d80ef62..fa9c833178130 100644 --- a/pkg/ddl/backfilling_operators.go +++ b/pkg/ddl/backfilling_operators.go @@ -35,8 +35,8 @@ import ( "github.com/pingcap/tidb/pkg/disttask/operator" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/lightning/backend/external" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/resourcemanager/pool/workerpool" "github.com/pingcap/tidb/pkg/resourcemanager/util" diff --git a/pkg/ddl/backfilling_read_index.go b/pkg/ddl/backfilling_read_index.go index a902a81a8a040..ed9166a7627f4 100644 --- a/pkg/ddl/backfilling_read_index.go +++ b/pkg/ddl/backfilling_read_index.go @@ -30,8 +30,8 @@ import ( "github.com/pingcap/tidb/pkg/disttask/operator" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/lightning/backend/external" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/table" tidblogutil "github.com/pingcap/tidb/pkg/util/logutil" "github.com/prometheus/client_golang/prometheus" diff --git a/pkg/ddl/backfilling_scheduler.go b/pkg/ddl/backfilling_scheduler.go index d37f6ccb7c15a..dee04e0a8eaf6 100644 --- a/pkg/ddl/backfilling_scheduler.go +++ b/pkg/ddl/backfilling_scheduler.go @@ -27,7 +27,7 @@ import ( distsqlctx 
"github.com/pingcap/tidb/pkg/distsql/context" "github.com/pingcap/tidb/pkg/errctx" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" diff --git a/pkg/ddl/backfilling_test.go b/pkg/ddl/backfilling_test.go index a44be213dd7bc..0557315d20787 100644 --- a/pkg/ddl/backfilling_test.go +++ b/pkg/ddl/backfilling_test.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/ddl/ingest" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" contextutil "github.com/pingcap/tidb/pkg/util/context" "github.com/pingcap/tidb/pkg/util/mock" diff --git a/pkg/ddl/bdr.go b/pkg/ddl/bdr.go index 86c71473097d8..7bbf26d62ddcb 100644 --- a/pkg/ddl/bdr.go +++ b/pkg/ddl/bdr.go @@ -15,6 +15,7 @@ package ddl import ( + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/types" ) @@ -82,3 +83,37 @@ func deniedByBDRWhenModifyColumn(newFieldType, oldFieldType types.FieldType, opt return true } + +// DeniedByBDR checks whether the DDL is denied by BDR. +func DeniedByBDR(role ast.BDRRole, action model.ActionType, job *model.Job) (denied bool) { + ddlType, ok := model.ActionBDRMap[action] + switch role { + case ast.BDRRolePrimary: + if !ok { + return true + } + + // Can't add unique index on primary role. + if job != nil && (action == model.ActionAddIndex || action == model.ActionAddPrimaryKey) && + len(job.Args) >= 1 && job.Args[0].(bool) { + // job.Args[0] is unique when job.Type is ActionAddIndex or ActionAddPrimaryKey. 
+ return true + } + + if ddlType == model.SafeDDL || ddlType == model.UnmanagementDDL { + return false + } + case ast.BDRRoleSecondary: + if !ok { + return true + } + if ddlType == model.UnmanagementDDL { + return false + } + default: + // if user do not set bdr role, we will not deny any ddl as `none` + return false + } + + return true +} diff --git a/pkg/ddl/bdr_test.go b/pkg/ddl/bdr_test.go index baecc65a88e0e..1ec1a69be35cf 100644 --- a/pkg/ddl/bdr_test.go +++ b/pkg/ddl/bdr_test.go @@ -15,11 +15,14 @@ package ddl import ( + "fmt" "testing" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -124,3 +127,390 @@ func TestDeniedByBDRWhenModifyColumn(t *testing.T) { }) } } + +func TestDeniedByBDR(t *testing.T) { + testCases := []struct { + role ast.BDRRole + action model.ActionType + expected bool + }{ + // Roles for ActionCreateSchema + {ast.BDRRolePrimary, model.ActionCreateSchema, false}, + {ast.BDRRoleSecondary, model.ActionCreateSchema, true}, + {ast.BDRRoleNone, model.ActionCreateSchema, false}, + + // Roles for ActionDropSchema + {ast.BDRRolePrimary, model.ActionDropSchema, true}, + {ast.BDRRoleSecondary, model.ActionDropSchema, true}, + {ast.BDRRoleNone, model.ActionDropSchema, false}, + + // Roles for ActionCreateTable + {ast.BDRRolePrimary, model.ActionCreateTable, false}, + {ast.BDRRoleSecondary, model.ActionCreateTable, true}, + {ast.BDRRoleNone, model.ActionCreateTable, false}, + + // Roles for ActionDropTable + {ast.BDRRolePrimary, model.ActionDropTable, true}, + {ast.BDRRoleSecondary, model.ActionDropTable, true}, + {ast.BDRRoleNone, model.ActionDropTable, false}, + + // Roles for ActionAddColumn + {ast.BDRRolePrimary, model.ActionAddColumn, false}, + {ast.BDRRoleSecondary, model.ActionAddColumn, true}, + {ast.BDRRoleNone, model.ActionAddColumn, false}, + + // 
Roles for ActionDropColumn + {ast.BDRRolePrimary, model.ActionDropColumn, true}, + {ast.BDRRoleSecondary, model.ActionDropColumn, true}, + {ast.BDRRoleNone, model.ActionDropColumn, false}, + + // Roles for ActionAddIndex + {ast.BDRRolePrimary, model.ActionAddIndex, false}, + {ast.BDRRoleSecondary, model.ActionAddIndex, true}, + {ast.BDRRoleNone, model.ActionAddIndex, false}, + + // Roles for ActionDropIndex + {ast.BDRRolePrimary, model.ActionDropIndex, false}, + {ast.BDRRoleSecondary, model.ActionDropIndex, true}, + {ast.BDRRoleNone, model.ActionDropIndex, false}, + + // Roles for ActionAddForeignKey + {ast.BDRRolePrimary, model.ActionAddForeignKey, true}, + {ast.BDRRoleSecondary, model.ActionAddForeignKey, true}, + {ast.BDRRoleNone, model.ActionAddForeignKey, false}, + + // Roles for ActionDropForeignKey + {ast.BDRRolePrimary, model.ActionDropForeignKey, true}, + {ast.BDRRoleSecondary, model.ActionDropForeignKey, true}, + {ast.BDRRoleNone, model.ActionDropForeignKey, false}, + + // Roles for ActionTruncateTable + {ast.BDRRolePrimary, model.ActionTruncateTable, true}, + {ast.BDRRoleSecondary, model.ActionTruncateTable, true}, + {ast.BDRRoleNone, model.ActionTruncateTable, false}, + + // Roles for ActionModifyColumn + {ast.BDRRolePrimary, model.ActionModifyColumn, false}, + {ast.BDRRoleSecondary, model.ActionModifyColumn, true}, + {ast.BDRRoleNone, model.ActionModifyColumn, false}, + + // Roles for ActionRebaseAutoID + {ast.BDRRolePrimary, model.ActionRebaseAutoID, true}, + {ast.BDRRoleSecondary, model.ActionRebaseAutoID, true}, + {ast.BDRRoleNone, model.ActionRebaseAutoID, false}, + + // Roles for ActionRenameTable + {ast.BDRRolePrimary, model.ActionRenameTable, true}, + {ast.BDRRoleSecondary, model.ActionRenameTable, true}, + {ast.BDRRoleNone, model.ActionRenameTable, false}, + + // Roles for ActionSetDefaultValue + {ast.BDRRolePrimary, model.ActionSetDefaultValue, false}, + {ast.BDRRoleSecondary, model.ActionSetDefaultValue, true}, + {ast.BDRRoleNone, 
model.ActionSetDefaultValue, false}, + + // Roles for ActionShardRowID + {ast.BDRRolePrimary, model.ActionShardRowID, true}, + {ast.BDRRoleSecondary, model.ActionShardRowID, true}, + {ast.BDRRoleNone, model.ActionShardRowID, false}, + + // Roles for ActionModifyTableComment + {ast.BDRRolePrimary, model.ActionModifyTableComment, false}, + {ast.BDRRoleSecondary, model.ActionModifyTableComment, true}, + {ast.BDRRoleNone, model.ActionModifyTableComment, false}, + + // Roles for ActionRenameIndex + {ast.BDRRolePrimary, model.ActionRenameIndex, false}, + {ast.BDRRoleSecondary, model.ActionRenameIndex, true}, + {ast.BDRRoleNone, model.ActionRenameIndex, false}, + + // Roles for ActionAddTablePartition + {ast.BDRRolePrimary, model.ActionAddTablePartition, false}, + {ast.BDRRoleSecondary, model.ActionAddTablePartition, true}, + {ast.BDRRoleNone, model.ActionAddTablePartition, false}, + + // Roles for ActionDropTablePartition + {ast.BDRRolePrimary, model.ActionDropTablePartition, true}, + {ast.BDRRoleSecondary, model.ActionDropTablePartition, true}, + {ast.BDRRoleNone, model.ActionDropTablePartition, false}, + + // Roles for ActionCreateView + {ast.BDRRolePrimary, model.ActionCreateView, false}, + {ast.BDRRoleSecondary, model.ActionCreateView, true}, + {ast.BDRRoleNone, model.ActionCreateView, false}, + + // Roles for ActionModifyTableCharsetAndCollate + {ast.BDRRolePrimary, model.ActionModifyTableCharsetAndCollate, true}, + {ast.BDRRoleSecondary, model.ActionModifyTableCharsetAndCollate, true}, + {ast.BDRRoleNone, model.ActionModifyTableCharsetAndCollate, false}, + + // Roles for ActionTruncateTablePartition + {ast.BDRRolePrimary, model.ActionTruncateTablePartition, true}, + {ast.BDRRoleSecondary, model.ActionTruncateTablePartition, true}, + {ast.BDRRoleNone, model.ActionTruncateTablePartition, false}, + + // Roles for ActionDropView + {ast.BDRRolePrimary, model.ActionDropView, false}, + {ast.BDRRoleSecondary, model.ActionDropView, true}, + {ast.BDRRoleNone, 
model.ActionDropView, false}, + + // Roles for ActionRecoverTable + {ast.BDRRolePrimary, model.ActionRecoverTable, true}, + {ast.BDRRoleSecondary, model.ActionRecoverTable, true}, + {ast.BDRRoleNone, model.ActionRecoverTable, false}, + + // Roles for ActionModifySchemaCharsetAndCollate + {ast.BDRRolePrimary, model.ActionModifySchemaCharsetAndCollate, true}, + {ast.BDRRoleSecondary, model.ActionModifySchemaCharsetAndCollate, true}, + {ast.BDRRoleNone, model.ActionModifySchemaCharsetAndCollate, false}, + + // Roles for ActionLockTable + {ast.BDRRolePrimary, model.ActionLockTable, true}, + {ast.BDRRoleSecondary, model.ActionLockTable, true}, + {ast.BDRRoleNone, model.ActionLockTable, false}, + + // Roles for ActionUnlockTable + {ast.BDRRolePrimary, model.ActionUnlockTable, true}, + {ast.BDRRoleSecondary, model.ActionUnlockTable, true}, + {ast.BDRRoleNone, model.ActionUnlockTable, false}, + + // Roles for ActionRepairTable + {ast.BDRRolePrimary, model.ActionRepairTable, true}, + {ast.BDRRoleSecondary, model.ActionRepairTable, true}, + {ast.BDRRoleNone, model.ActionRepairTable, false}, + + // Roles for ActionSetTiFlashReplica + {ast.BDRRolePrimary, model.ActionSetTiFlashReplica, true}, + {ast.BDRRoleSecondary, model.ActionSetTiFlashReplica, true}, + {ast.BDRRoleNone, model.ActionSetTiFlashReplica, false}, + + // Roles for ActionUpdateTiFlashReplicaStatus + {ast.BDRRolePrimary, model.ActionUpdateTiFlashReplicaStatus, true}, + {ast.BDRRoleSecondary, model.ActionUpdateTiFlashReplicaStatus, true}, + {ast.BDRRoleNone, model.ActionUpdateTiFlashReplicaStatus, false}, + + // Roles for ActionAddPrimaryKey + {ast.BDRRolePrimary, model.ActionAddPrimaryKey, true}, + {ast.BDRRoleSecondary, model.ActionAddPrimaryKey, true}, + {ast.BDRRoleNone, model.ActionAddPrimaryKey, false}, + + // Roles for ActionDropPrimaryKey + {ast.BDRRolePrimary, model.ActionDropPrimaryKey, false}, + {ast.BDRRoleSecondary, model.ActionDropPrimaryKey, true}, + {ast.BDRRoleNone, model.ActionDropPrimaryKey, 
false}, + + // Roles for ActionCreateSequence + {ast.BDRRolePrimary, model.ActionCreateSequence, true}, + {ast.BDRRoleSecondary, model.ActionCreateSequence, true}, + {ast.BDRRoleNone, model.ActionCreateSequence, false}, + + // Roles for ActionAlterSequence + {ast.BDRRolePrimary, model.ActionAlterSequence, true}, + {ast.BDRRoleSecondary, model.ActionAlterSequence, true}, + {ast.BDRRoleNone, model.ActionAlterSequence, false}, + + // Roles for ActionDropSequence + {ast.BDRRolePrimary, model.ActionDropSequence, true}, + {ast.BDRRoleSecondary, model.ActionDropSequence, true}, + {ast.BDRRoleNone, model.ActionDropSequence, false}, + + // Roles for ActionModifyTableAutoIDCache + {ast.BDRRolePrimary, model.ActionModifyTableAutoIDCache, true}, + {ast.BDRRoleSecondary, model.ActionModifyTableAutoIDCache, true}, + {ast.BDRRoleNone, model.ActionModifyTableAutoIDCache, false}, + + // Roles for ActionRebaseAutoRandomBase + {ast.BDRRolePrimary, model.ActionRebaseAutoRandomBase, true}, + {ast.BDRRoleSecondary, model.ActionRebaseAutoRandomBase, true}, + {ast.BDRRoleNone, model.ActionRebaseAutoRandomBase, false}, + + // Roles for ActionAlterIndexVisibility + {ast.BDRRolePrimary, model.ActionAlterIndexVisibility, false}, + {ast.BDRRoleSecondary, model.ActionAlterIndexVisibility, true}, + {ast.BDRRoleNone, model.ActionAlterIndexVisibility, false}, + + // Roles for ActionExchangeTablePartition + {ast.BDRRolePrimary, model.ActionExchangeTablePartition, true}, + {ast.BDRRoleSecondary, model.ActionExchangeTablePartition, true}, + {ast.BDRRoleNone, model.ActionExchangeTablePartition, false}, + + // Roles for ActionAddCheckConstraint + {ast.BDRRolePrimary, model.ActionAddCheckConstraint, true}, + {ast.BDRRoleSecondary, model.ActionAddCheckConstraint, true}, + {ast.BDRRoleNone, model.ActionAddCheckConstraint, false}, + + // Roles for ActionDropCheckConstraint + {ast.BDRRolePrimary, model.ActionDropCheckConstraint, true}, + {ast.BDRRoleSecondary, model.ActionDropCheckConstraint, true}, + 
{ast.BDRRoleNone, model.ActionDropCheckConstraint, false}, + + // Roles for ActionAlterCheckConstraint + {ast.BDRRolePrimary, model.ActionAlterCheckConstraint, true}, + {ast.BDRRoleSecondary, model.ActionAlterCheckConstraint, true}, + {ast.BDRRoleNone, model.ActionAlterCheckConstraint, false}, + + // Roles for ActionRenameTables + {ast.BDRRolePrimary, model.ActionRenameTables, true}, + {ast.BDRRoleSecondary, model.ActionRenameTables, true}, + {ast.BDRRoleNone, model.ActionRenameTables, false}, + + // Roles for ActionAlterTableAttributes + {ast.BDRRolePrimary, model.ActionAlterTableAttributes, true}, + {ast.BDRRoleSecondary, model.ActionAlterTableAttributes, true}, + {ast.BDRRoleNone, model.ActionAlterTableAttributes, false}, + + // Roles for ActionAlterTablePartitionAttributes + {ast.BDRRolePrimary, model.ActionAlterTablePartitionAttributes, true}, + {ast.BDRRoleSecondary, model.ActionAlterTablePartitionAttributes, true}, + {ast.BDRRoleNone, model.ActionAlterTablePartitionAttributes, false}, + + // Roles for ActionCreatePlacementPolicy + {ast.BDRRolePrimary, model.ActionCreatePlacementPolicy, false}, + {ast.BDRRoleSecondary, model.ActionCreatePlacementPolicy, false}, + {ast.BDRRoleNone, model.ActionCreatePlacementPolicy, false}, + + // Roles for ActionAlterPlacementPolicy + {ast.BDRRolePrimary, model.ActionAlterPlacementPolicy, false}, + {ast.BDRRoleSecondary, model.ActionAlterPlacementPolicy, false}, + {ast.BDRRoleNone, model.ActionAlterPlacementPolicy, false}, + + // Roles for ActionDropPlacementPolicy + {ast.BDRRolePrimary, model.ActionDropPlacementPolicy, false}, + {ast.BDRRoleSecondary, model.ActionDropPlacementPolicy, false}, + {ast.BDRRoleNone, model.ActionDropPlacementPolicy, false}, + + // Roles for ActionAlterTablePartitionPlacement + {ast.BDRRolePrimary, model.ActionAlterTablePartitionPlacement, true}, + {ast.BDRRoleSecondary, model.ActionAlterTablePartitionPlacement, true}, + {ast.BDRRoleNone, model.ActionAlterTablePartitionPlacement, false}, + + // 
Roles for ActionModifySchemaDefaultPlacement + {ast.BDRRolePrimary, model.ActionModifySchemaDefaultPlacement, true}, + {ast.BDRRoleSecondary, model.ActionModifySchemaDefaultPlacement, true}, + {ast.BDRRoleNone, model.ActionModifySchemaDefaultPlacement, false}, + + // Roles for ActionAlterTablePlacement + {ast.BDRRolePrimary, model.ActionAlterTablePlacement, true}, + {ast.BDRRoleSecondary, model.ActionAlterTablePlacement, true}, + {ast.BDRRoleNone, model.ActionAlterTablePlacement, false}, + + // Roles for ActionAlterCacheTable + {ast.BDRRolePrimary, model.ActionAlterCacheTable, true}, + {ast.BDRRoleSecondary, model.ActionAlterCacheTable, true}, + {ast.BDRRoleNone, model.ActionAlterCacheTable, false}, + + // Roles for ActionAlterTableStatsOptions + {ast.BDRRolePrimary, model.ActionAlterTableStatsOptions, true}, + {ast.BDRRoleSecondary, model.ActionAlterTableStatsOptions, true}, + {ast.BDRRoleNone, model.ActionAlterTableStatsOptions, false}, + + // Roles for ActionAlterNoCacheTable + {ast.BDRRolePrimary, model.ActionAlterNoCacheTable, true}, + {ast.BDRRoleSecondary, model.ActionAlterNoCacheTable, true}, + {ast.BDRRoleNone, model.ActionAlterNoCacheTable, false}, + + // Roles for ActionCreateTables + {ast.BDRRolePrimary, model.ActionCreateTables, false}, + {ast.BDRRoleSecondary, model.ActionCreateTables, true}, + {ast.BDRRoleNone, model.ActionCreateTables, false}, + + // Roles for ActionMultiSchemaChange + {ast.BDRRolePrimary, model.ActionMultiSchemaChange, true}, + {ast.BDRRoleSecondary, model.ActionMultiSchemaChange, true}, + {ast.BDRRoleNone, model.ActionMultiSchemaChange, false}, + + // Roles for ActionFlashbackCluster + {ast.BDRRolePrimary, model.ActionFlashbackCluster, true}, + {ast.BDRRoleSecondary, model.ActionFlashbackCluster, true}, + {ast.BDRRoleNone, model.ActionFlashbackCluster, false}, + + // Roles for ActionRecoverSchema + {ast.BDRRolePrimary, model.ActionRecoverSchema, true}, + {ast.BDRRoleSecondary, model.ActionRecoverSchema, true}, + {ast.BDRRoleNone, 
model.ActionRecoverSchema, false}, + + // Roles for ActionReorganizePartition + {ast.BDRRolePrimary, model.ActionReorganizePartition, true}, + {ast.BDRRoleSecondary, model.ActionReorganizePartition, true}, + {ast.BDRRoleNone, model.ActionReorganizePartition, false}, + + // Roles for ActionAlterTTLInfo + {ast.BDRRolePrimary, model.ActionAlterTTLInfo, false}, + {ast.BDRRoleSecondary, model.ActionAlterTTLInfo, true}, + {ast.BDRRoleNone, model.ActionAlterTTLInfo, false}, + + // Roles for ActionAlterTTLRemove + {ast.BDRRolePrimary, model.ActionAlterTTLRemove, false}, + {ast.BDRRoleSecondary, model.ActionAlterTTLRemove, true}, + {ast.BDRRoleNone, model.ActionAlterTTLRemove, false}, + + // Roles for ActionCreateResourceGroup + {ast.BDRRolePrimary, model.ActionCreateResourceGroup, false}, + {ast.BDRRoleSecondary, model.ActionCreateResourceGroup, false}, + {ast.BDRRoleNone, model.ActionCreateResourceGroup, false}, + + // Roles for ActionAlterResourceGroup + {ast.BDRRolePrimary, model.ActionAlterResourceGroup, false}, + {ast.BDRRoleSecondary, model.ActionAlterResourceGroup, false}, + {ast.BDRRoleNone, model.ActionAlterResourceGroup, false}, + + // Roles for ActionDropResourceGroup + {ast.BDRRolePrimary, model.ActionDropResourceGroup, false}, + {ast.BDRRoleSecondary, model.ActionDropResourceGroup, false}, + {ast.BDRRoleNone, model.ActionDropResourceGroup, false}, + + // Roles for ActionAlterTablePartitioning + {ast.BDRRolePrimary, model.ActionAlterTablePartitioning, true}, + {ast.BDRRoleSecondary, model.ActionAlterTablePartitioning, true}, + {ast.BDRRoleNone, model.ActionAlterTablePartitioning, false}, + + // Roles for ActionRemovePartitioning + {ast.BDRRolePrimary, model.ActionRemovePartitioning, true}, + {ast.BDRRoleSecondary, model.ActionRemovePartitioning, true}, + {ast.BDRRoleNone, model.ActionRemovePartitioning, false}, + } + + for _, tc := range testCases { + assert.Equal(t, tc.expected, DeniedByBDR(tc.role, tc.action, nil), fmt.Sprintf("role: %v, action: %v", tc.role, 
tc.action)) + } + + // test special cases + testCases2 := []struct { + role ast.BDRRole + action model.ActionType + job *model.Job + expected bool + }{ + { + role: ast.BDRRolePrimary, + action: model.ActionAddPrimaryKey, + job: &model.Job{ + Type: model.ActionAddPrimaryKey, + Args: []any{true}, + }, + expected: true, + }, + { + role: ast.BDRRolePrimary, + action: model.ActionAddIndex, + job: &model.Job{ + Type: model.ActionAddIndex, + Args: []any{true}, + }, + expected: true, + }, + { + role: ast.BDRRolePrimary, + action: model.ActionAddIndex, + job: &model.Job{ + Type: model.ActionAddIndex, + Args: []any{false}, + }, + expected: false, + }, + } + + for _, tc := range testCases2 { + assert.Equal(t, tc.expected, DeniedByBDR(tc.role, tc.action, tc.job), fmt.Sprintf("role: %v, action: %v", tc.role, tc.action)) + } +} diff --git a/pkg/ddl/bench_test.go b/pkg/ddl/bench_test.go index f778e4a5c6ec6..bfd2f47a1a5b1 100644 --- a/pkg/ddl/bench_test.go +++ b/pkg/ddl/bench_test.go @@ -21,7 +21,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/ddl/copr" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/testkit" @@ -40,7 +41,7 @@ func BenchmarkExtractDatumByOffsets(b *testing.B) { for i := 0; i < 8; i++ { tk.MustExec("insert into t values (?, ?)", i, i) } - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(b, err) tblInfo := tbl.Meta() idxInfo := tblInfo.FindIndexByName("idx") @@ -80,7 +81,7 @@ func BenchmarkGenerateIndexKV(b *testing.B) { for i := 0; i < 8; i++ { tk.MustExec("insert into t values (?, ?)", i, i) } - tbl, err := 
dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(b, err) tblInfo := tbl.Meta() idxInfo := tblInfo.FindIndexByName("idx") diff --git a/pkg/ddl/cancel_test.go b/pkg/ddl/cancel_test.go index 97272226ecd91..c414f8cad0477 100644 --- a/pkg/ddl/cancel_test.go +++ b/pkg/ddl/cancel_test.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/ddl/testutil" "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/external" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" diff --git a/pkg/ddl/cluster.go b/pkg/ddl/cluster.go index 6dd816aebc939..93e8e4849abc9 100644 --- a/pkg/ddl/cluster.go +++ b/pkg/ddl/cluster.go @@ -34,8 +34,8 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" statsutil "github.com/pingcap/tidb/pkg/statistics/handle/util" diff --git a/pkg/ddl/cluster_test.go b/pkg/ddl/cluster_test.go index 9003d25646f3b..d7a6feeb50946 100644 --- a/pkg/ddl/cluster_test.go +++ b/pkg/ddl/cluster_test.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" diff --git a/pkg/ddl/column.go b/pkg/ddl/column.go index 
629e6f77d121c..3c02b2e48464a 100644 --- a/pkg/ddl/column.go +++ b/pkg/ddl/column.go @@ -33,8 +33,9 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" @@ -230,7 +231,7 @@ func checkDropColumn(jobCtx *jobContext, t *meta.Meta, job *model.Job) (*model.T return nil, nil, nil, false, errors.Trace(err) } - var colName model.CIStr + var colName pmodel.CIStr var ifExists bool // indexIDs is used to make sure we don't truncate args when decoding the rawArgs. var indexIDs []int64 @@ -259,7 +260,7 @@ func checkDropColumn(jobCtx *jobContext, t *meta.Meta, job *model.Job) (*model.T return tblInfo, colInfo, idxInfos, false, nil } -func isDroppableColumn(tblInfo *model.TableInfo, colName model.CIStr) error { +func isDroppableColumn(tblInfo *model.TableInfo, colName pmodel.CIStr) error { if ok, dep, isHidden := hasDependentByGeneratedColumn(tblInfo, colName); ok { if isHidden { return dbterror.ErrDependentByFunctionalIndex.GenWithStackByArgs(dep) @@ -294,7 +295,7 @@ func onSetDefaultValue(jobCtx *jobContext, t *meta.Meta, job *model.Job) (ver in return updateColumnDefaultValue(jobCtx, t, job, newCol, &newCol.Name) } -func setIdxIDName(idxInfo *model.IndexInfo, newID int64, newName model.CIStr) { +func setIdxIDName(idxInfo *model.IndexInfo, newID int64, newName pmodel.CIStr) { idxInfo.ID = newID idxInfo.Name = newName } @@ -328,7 +329,7 @@ func removeChangingColAndIdxs(tblInfo *model.TableInfo, changingColID int64) { } func replaceOldColumn(tblInfo *model.TableInfo, oldCol, changingCol *model.ColumnInfo, - newName model.CIStr) *model.ColumnInfo { + newName pmodel.CIStr) *model.ColumnInfo { 
tblInfo.MoveColumnInfo(changingCol.Offset, len(tblInfo.Columns)-1) changingCol = updateChangingCol(changingCol, newName, oldCol.Offset) tblInfo.Columns[oldCol.Offset] = changingCol @@ -359,7 +360,7 @@ func replaceOldIndexes(tblInfo *model.TableInfo, changingIdxs []*model.IndexInfo idxName := getChangingIndexOriginName(cIdx) for i, idx := range tblInfo.Indices { if strings.EqualFold(idxName, idx.Name.O) { - cIdx.Name = model.NewCIStr(idxName) + cIdx.Name = pmodel.NewCIStr(idxName) tblInfo.Indices[i] = cIdx break } @@ -369,7 +370,7 @@ func replaceOldIndexes(tblInfo *model.TableInfo, changingIdxs []*model.IndexInfo // updateNewIdxColsNameOffset updates the name&offset of the index column. func updateNewIdxColsNameOffset(changingIdxs []*model.IndexInfo, - oldName model.CIStr, changingCol *model.ColumnInfo) { + oldName pmodel.CIStr, changingCol *model.ColumnInfo) { for _, idx := range changingIdxs { for _, col := range idx.Columns { if col.Name.L == oldName.L { @@ -380,7 +381,7 @@ func updateNewIdxColsNameOffset(changingIdxs []*model.IndexInfo, } // filterIndexesToRemove filters out the indexes that can be removed. 
-func filterIndexesToRemove(changingIdxs []*model.IndexInfo, colName model.CIStr, tblInfo *model.TableInfo) []*model.IndexInfo { +func filterIndexesToRemove(changingIdxs []*model.IndexInfo, colName pmodel.CIStr, tblInfo *model.TableInfo) []*model.IndexInfo { indexesToRemove := make([]*model.IndexInfo, 0, len(changingIdxs)) for _, idx := range changingIdxs { var hasOtherChangingCol bool @@ -401,7 +402,7 @@ func filterIndexesToRemove(changingIdxs []*model.IndexInfo, colName model.CIStr, return indexesToRemove } -func updateChangingCol(col *model.ColumnInfo, newName model.CIStr, newOffset int) *model.ColumnInfo { +func updateChangingCol(col *model.ColumnInfo, newName pmodel.CIStr, newOffset int) *model.ColumnInfo { col.Name = newName col.ChangeStateInfo = nil col.Offset = newOffset @@ -995,7 +996,7 @@ func applyNewAutoRandomBits(jobCtx *jobContext, m *meta.Meta, dbInfo *model.DBIn // checkForNullValue ensure there are no null values of the column of this table. // `isDataTruncated` indicates whether the new field and the old field type are the same, in order to be compatible with mysql. 
-func checkForNullValue(ctx context.Context, sctx sessionctx.Context, isDataTruncated bool, schema, table model.CIStr, newCol *model.ColumnInfo, oldCols ...*model.ColumnInfo) error { +func checkForNullValue(ctx context.Context, sctx sessionctx.Context, isDataTruncated bool, schema, table pmodel.CIStr, newCol *model.ColumnInfo, oldCols ...*model.ColumnInfo) error { needCheckNullValue := false for _, oldCol := range oldCols { if oldCol.GetType() != mysql.TypeTimestamp && newCol.GetType() == mysql.TypeTimestamp { @@ -1036,7 +1037,7 @@ func checkForNullValue(ctx context.Context, sctx sessionctx.Context, isDataTrunc return nil } -func updateColumnDefaultValue(jobCtx *jobContext, t *meta.Meta, job *model.Job, newCol *model.ColumnInfo, oldColName *model.CIStr) (ver int64, _ error) { +func updateColumnDefaultValue(jobCtx *jobContext, t *meta.Meta, job *model.Job, newCol *model.ColumnInfo, oldColName *pmodel.CIStr) (ver int64, _ error) { tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) diff --git a/pkg/ddl/column_change_test.go b/pkg/ddl/column_change_test.go index f637ed5a89b69..830a8f088a2ee 100644 --- a/pkg/ddl/column_change_test.go +++ b/pkg/ddl/column_change_test.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/table" diff --git a/pkg/ddl/column_modify_test.go b/pkg/ddl/column_modify_test.go index c453e6370f503..db172fc47d15f 100644 --- a/pkg/ddl/column_modify_test.go +++ b/pkg/ddl/column_modify_test.go @@ -27,8 +27,9 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - 
"github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/store/mockstore" @@ -160,7 +161,7 @@ AddLoop: defer tk.MustExec("drop table test_on_update_c;") tk.MustExec("alter table test_on_update_c add column c3 timestamp null default '2017-02-11' on update current_timestamp;") is := domain.GetDomain(tk.Session()).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("test_on_update_c")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("test_on_update_c")) require.NoError(t, err) tblInfo := tbl.Meta() colC := tblInfo.Columns[2] @@ -170,7 +171,7 @@ AddLoop: tk.MustExec("create table test_on_update_d (c1 int, c2 datetime);") tk.MustExec("alter table test_on_update_d add column c3 datetime on update current_timestamp;") is = domain.GetDomain(tk.Session()).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("test_on_update_d")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("test_on_update_d")) require.NoError(t, err) tblInfo = tbl.Meta() colC = tblInfo.Columns[2] @@ -305,7 +306,7 @@ func TestChangeColumn(t *testing.T) { // for no default flag tk.MustExec("alter table t3 change d dd bigint not null") is := domain.GetDomain(tk.Session()).InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t3")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t3")) require.NoError(t, err) tblInfo := tbl.Meta() colD := tblInfo.Columns[2] @@ -313,7 +314,7 @@ func TestChangeColumn(t *testing.T) { // for the following definitions: 'not null', 'null', 'default value' and 'comment' tk.MustExec("alter table t3 change b b varchar(20) null default 'c' comment 'my comment'") 
is = domain.GetDomain(tk.Session()).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t3")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t3")) require.NoError(t, err) tblInfo = tbl.Meta() colB := tblInfo.Columns[1] @@ -325,7 +326,7 @@ func TestChangeColumn(t *testing.T) { tk.MustExec("alter table t3 add column c timestamp not null") tk.MustExec("alter table t3 change c c timestamp null default '2017-02-11' comment 'col c comment' on update current_timestamp") is = domain.GetDomain(tk.Session()).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t3")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t3")) require.NoError(t, err) tblInfo = tbl.Meta() colC := tblInfo.Columns[3] @@ -339,7 +340,7 @@ func TestChangeColumn(t *testing.T) { tk.MustExec("create table t (k char(10), v int, INDEX(k(7)));") tk.MustExec("alter table t change column k k tinytext") is = domain.GetDomain(tk.Session()).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) // for failing tests @@ -381,7 +382,7 @@ func TestVirtualColumnDDL(t *testing.T) { tk.MustExec("use test") tk.MustExec(`create global temporary table test_gv_ddl(a int, b int as (a+8) virtual, c int as (b + 2) stored) on commit delete rows;`) is := sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("test_gv_ddl")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("test_gv_ddl")) require.NoError(t, err) testCases := []struct { generatedExprString string @@ -405,7 +406,7 @@ func TestVirtualColumnDDL(t *testing.T) { // for local 
temporary table tk.MustExec(`create temporary table test_local_gv_ddl(a int, b int as (a+8) virtual, c int as (b + 2) stored);`) is = sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("test_local_gv_ddl")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("test_local_gv_ddl")) require.NoError(t, err) for i, column := range tbl.Meta().Columns { require.Equal(t, testCases[i].generatedExprString, column.GeneratedExprString) @@ -533,7 +534,7 @@ func TestColumnTypeChangeGenUniqueChangingName(t *testing.T) { if job.SchemaState == model.StateDeleteOnly && job.Type == model.ActionModifyColumn { var ( _newCol *model.ColumnInfo - _oldColName *model.CIStr + _oldColName *pmodel.CIStr _pos = &ast.ColumnPosition{} _modifyColumnTp byte _updatedAutoRandomBits uint64 @@ -587,7 +588,7 @@ func TestColumnTypeChangeGenUniqueChangingName(t *testing.T) { if (job.Query == query1 || job.Query == query2) && job.SchemaState == model.StateDeleteOnly && job.Type == model.ActionModifyColumn { var ( _newCol *model.ColumnInfo - _oldColName *model.CIStr + _oldColName *pmodel.CIStr _pos = &ast.ColumnPosition{} _modifyColumnTp byte _updatedAutoRandomBits uint64 diff --git a/pkg/ddl/column_test.go b/pkg/ddl/column_test.go index caa33dc883314..24508517a60bc 100644 --- a/pkg/ddl/column_test.go +++ b/pkg/ddl/column_test.go @@ -25,7 +25,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/store/mockstore" @@ -106,7 +107,7 @@ func testDropTable(tk *testkit.TestKit, t *testing.T, dbName, tblName string, do idi, _ := strconv.Atoi(tk.MustQuery("admin show ddl jobs 
1;").Rows()[0][0].(string)) id := int64(idi) require.NoError(t, dom.Reload()) - _, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr(dbName), model.NewCIStr(tblName)) + _, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(dbName), pmodel.NewCIStr(tblName)) require.Error(t, err) return id } diff --git a/pkg/ddl/column_type_change_test.go b/pkg/ddl/column_type_change_test.go index 42ef263952afe..2452809a72c77 100644 --- a/pkg/ddl/column_type_change_test.go +++ b/pkg/ddl/column_type_change_test.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/store/helper" diff --git a/pkg/ddl/constraint.go b/pkg/ddl/constraint.go index c1919aef1bc9a..d665a39bce834 100644 --- a/pkg/ddl/constraint.go +++ b/pkg/ddl/constraint.go @@ -23,9 +23,10 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/util/dbterror" @@ -198,7 +199,7 @@ func checkDropCheckConstraint(t *meta.Meta, job *model.Job) (*model.TableInfo, * return nil, nil, errors.Trace(err) } - var constrName model.CIStr + var constrName pmodel.CIStr err = job.DecodeArgs(&constrName) if err != nil { job.State = model.JobStateCancelled @@ -282,7 +283,7 @@ func checkAlterCheckConstraint(t *meta.Meta, job *model.Job) (*model.DBInfo, *mo var ( enforced bool - constrName model.CIStr + constrName pmodel.CIStr ) err = 
job.DecodeArgs(&constrName, &enforced) if err != nil { @@ -303,8 +304,8 @@ func allocateConstraintID(tblInfo *model.TableInfo) int64 { return tblInfo.MaxConstraintID } -func buildConstraintInfo(tblInfo *model.TableInfo, dependedCols []model.CIStr, constr *ast.Constraint, state model.SchemaState) (*model.ConstraintInfo, error) { - constraintName := model.NewCIStr(constr.Name) +func buildConstraintInfo(tblInfo *model.TableInfo, dependedCols []pmodel.CIStr, constr *ast.Constraint, state model.SchemaState) (*model.ConstraintInfo, error) { + constraintName := pmodel.NewCIStr(constr.Name) if err := checkTooLongConstraint(constraintName); err != nil { return nil, errors.Trace(err) } @@ -335,7 +336,7 @@ func buildConstraintInfo(tblInfo *model.TableInfo, dependedCols []model.CIStr, c return constraintInfo, nil } -func checkTooLongConstraint(constr model.CIStr) error { +func checkTooLongConstraint(constr pmodel.CIStr) error { if len(constr.L) > mysql.MaxConstraintIdentifierLen { return dbterror.ErrTooLongIdent.GenWithStackByArgs(constr) } @@ -400,13 +401,13 @@ func setNameForConstraintInfo(tableLowerName string, namesMap map[string]bool, i cnt++ constrName = fmt.Sprintf("%s%d", constraintPrefix, cnt) } - constrInfo.Name = model.NewCIStr(constrName) + constrInfo.Name = pmodel.NewCIStr(constrName) } } } // IsColumnDroppableWithCheckConstraint check whether the column in check-constraint whose dependent col is more than 1 -func IsColumnDroppableWithCheckConstraint(col model.CIStr, tblInfo *model.TableInfo) error { +func IsColumnDroppableWithCheckConstraint(col pmodel.CIStr, tblInfo *model.TableInfo) error { for _, cons := range tblInfo.Constraints { if len(cons.ConstraintCols) > 1 { for _, colName := range cons.ConstraintCols { @@ -420,7 +421,7 @@ func IsColumnDroppableWithCheckConstraint(col model.CIStr, tblInfo *model.TableI } // IsColumnRenameableWithCheckConstraint check whether the column is referenced in check-constraint -func IsColumnRenameableWithCheckConstraint(col 
model.CIStr, tblInfo *model.TableInfo) error { +func IsColumnRenameableWithCheckConstraint(col pmodel.CIStr, tblInfo *model.TableInfo) error { for _, cons := range tblInfo.Constraints { for _, colName := range cons.ConstraintCols { if colName.L == col.L { diff --git a/pkg/ddl/constraint_test.go b/pkg/ddl/constraint_test.go index 02047441f2974..65499b07857ef 100644 --- a/pkg/ddl/constraint_test.go +++ b/pkg/ddl/constraint_test.go @@ -20,7 +20,7 @@ import ( "time" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/testkit" diff --git a/pkg/ddl/copr/BUILD.bazel b/pkg/ddl/copr/BUILD.bazel index b9a4357ed53f9..de29cc1539250 100644 --- a/pkg/ddl/copr/BUILD.bazel +++ b/pkg/ddl/copr/BUILD.bazel @@ -10,6 +10,7 @@ go_library( "//pkg/expression", "//pkg/expression/context", "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/table/tables", "//pkg/types", @@ -27,6 +28,7 @@ go_test( deps = [ "//pkg/expression", "//pkg/expression/contextstatic", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/types", diff --git a/pkg/ddl/copr/copr_ctx.go b/pkg/ddl/copr/copr_ctx.go index 023f7bdbca034..b4471f1b35cb3 100644 --- a/pkg/ddl/copr/copr_ctx.go +++ b/pkg/ddl/copr/copr_ctx.go @@ -21,7 +21,8 @@ import ( exprctx "github.com/pingcap/tidb/pkg/expression/context" // make sure mock.MockInfoschema is initialized to make sure the test pass _ "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/types" ) @@ -122,7 +123,7 @@ func NewCopContextBase( } expColInfos, _, err := expression.ColumnInfos2ColumnsAndNames(exprCtx, - model.CIStr{} /* unused */, tblInfo.Name, colInfos, tblInfo) + 
pmodel.CIStr{} /* unused */, tblInfo.Name, colInfos, tblInfo) if err != nil { return nil, err } diff --git a/pkg/ddl/copr/copr_ctx_test.go b/pkg/ddl/copr/copr_ctx_test.go index a4d84d1130562..5dd6e5f9eb79b 100644 --- a/pkg/ddl/copr/copr_ctx_test.go +++ b/pkg/ddl/copr/copr_ctx_test.go @@ -20,7 +20,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/contextstatic" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/mock" @@ -34,7 +35,7 @@ func TestNewCopContextSingleIndex(t *testing.T) { mockColInfos = append(mockColInfos, &model.ColumnInfo{ ID: int64(i), Offset: i, - Name: model.NewCIStr(fmt.Sprintf("c%d", i)), + Name: pmodel.NewCIStr(fmt.Sprintf("c%d", i)), FieldType: *types.NewFieldType(1), State: model.StatePublic, }) @@ -68,18 +69,18 @@ func TestNewCopContextSingleIndex(t *testing.T) { var idxCols []*model.IndexColumn for _, cn := range tt.cols { idxCols = append(idxCols, &model.IndexColumn{ - Name: model.NewCIStr(cn), + Name: pmodel.NewCIStr(cn), Offset: findColByName(cn).Offset, }) } mockIdxInfo := &model.IndexInfo{ ID: int64(i), - Name: model.NewCIStr(fmt.Sprintf("i%d", i)), + Name: pmodel.NewCIStr(fmt.Sprintf("i%d", i)), Columns: idxCols, State: model.StatePublic, } mockTableInfo := &model.TableInfo{ - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), Columns: mockColInfos, Indices: []*model.IndexInfo{mockIdxInfo}, PKIsHandle: tt.pkType == pkTypePKHandle, @@ -92,11 +93,11 @@ func TestNewCopContextSingleIndex(t *testing.T) { mockTableInfo.Indices = append(mockTableInfo.Indices, &model.IndexInfo{ Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("c2"), + Name: pmodel.NewCIStr("c2"), Offset: 2, }, { - Name: model.NewCIStr("c4"), + Name: pmodel.NewCIStr("c4"), Offset: 4, }, }, diff --git 
a/pkg/ddl/create_table.go b/pkg/ddl/create_table.go index d25800058d032..e436336e85185 100644 --- a/pkg/ddl/create_table.go +++ b/pkg/ddl/create_table.go @@ -32,9 +32,10 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" field_types "github.com/pingcap/tidb/pkg/parser/types" "github.com/pingcap/tidb/pkg/sessionctx" @@ -372,7 +373,7 @@ func findTableIDFromInfoSchema(is infoschema.InfoSchema, schemaID int64, tableNa if !ok { return 0, infoschema.ErrDatabaseNotExists.GenWithStackByArgs("") } - tbl, err := is.TableByName(context.Background(), schema.Name, model.NewCIStr(tableName)) + tbl, err := is.TableByName(context.Background(), schema.Name, pmodel.NewCIStr(tableName)) if err != nil { return 0, err } @@ -458,7 +459,7 @@ func checkTableInfoValidWithStmt(ctx sessionctx.Context, tbInfo *model.TableInfo return nil } -func checkGeneratedColumn(ctx sessionctx.Context, schemaName model.CIStr, tableName model.CIStr, colDefs []*ast.ColumnDef) error { +func checkGeneratedColumn(ctx sessionctx.Context, schemaName pmodel.CIStr, tableName pmodel.CIStr, colDefs []*ast.ColumnDef) error { var colName2Generation = make(map[string]columnGenerationInDDL, len(colDefs)) var exists bool var autoIncrementColumn string @@ -796,7 +797,7 @@ func handleTableOptions(options []*ast.TableOption, tbInfo *model.TableInfo) err // TODO: Refine this error. 
return errors.New("table option auto_id_cache overflows int64") } - tbInfo.AutoIdCache = int64(op.UintValue) + tbInfo.AutoIDCache = int64(op.UintValue) case ast.TableOptionAutoRandomBase: tbInfo.AutoRandID = int64(op.UintValue) case ast.TableOptionComment: @@ -821,7 +822,7 @@ func handleTableOptions(options []*ast.TableOption, tbInfo *model.TableInfo) err // We don't handle charset and collate here since they're handled in `GetCharsetAndCollateInTableOption`. case ast.TableOptionPlacementPolicy: tbInfo.PlacementPolicyRef = &model.PolicyRefInfo{ - Name: model.NewCIStr(op.StrValue), + Name: pmodel.NewCIStr(op.StrValue), } case ast.TableOptionTTL, ast.TableOptionTTLEnable, ast.TableOptionTTLJobInterval: if ttlOptionsHandled { @@ -954,7 +955,7 @@ func setEmptyConstraintName(namesMap map[string]bool, constr *ast.Constraint) { } } -func checkConstraintNames(tableName model.CIStr, constraints []*ast.Constraint) error { +func checkConstraintNames(tableName pmodel.CIStr, constraints []*ast.Constraint) error { constrNames := map[string]bool{} fkNames := map[string]bool{} @@ -1132,7 +1133,7 @@ func BuildTableInfoWithLike(ctx sessionctx.Context, ident ast.Ident, referTblInf func renameCheckConstraint(tblInfo *model.TableInfo) { for _, cons := range tblInfo.Constraints { - cons.Name = model.NewCIStr("") + cons.Name = pmodel.NewCIStr("") cons.Table = tblInfo.Name } setNameForConstraintInfo(tblInfo.Name.L, map[string]bool{}, tblInfo.Constraints) @@ -1141,7 +1142,7 @@ func renameCheckConstraint(tblInfo *model.TableInfo) { // BuildTableInfo creates a TableInfo. func BuildTableInfo( ctx sessionctx.Context, - tableName model.CIStr, + tableName pmodel.CIStr, cols []*table.Column, constraints []*ast.Constraint, charset string, @@ -1164,7 +1165,7 @@ func BuildTableInfo( foreignKeyID := tbInfo.MaxForeignKeyID for _, constr := range constraints { // Build hidden columns if necessary. 
- hiddenCols, err := buildHiddenColumnInfoWithCheck(ctx, constr.Keys, model.NewCIStr(constr.Name), tbInfo, tblColumns) + hiddenCols, err := buildHiddenColumnInfoWithCheck(ctx, constr.Keys, pmodel.NewCIStr(constr.Name), tbInfo, tblColumns) if err != nil { return nil, err } @@ -1176,17 +1177,17 @@ func BuildTableInfo( tblColumns = append(tblColumns, table.ToColumn(hiddenCol)) } // Check clustered on non-primary key. - if constr.Option != nil && constr.Option.PrimaryKeyTp != model.PrimaryKeyTypeDefault && + if constr.Option != nil && constr.Option.PrimaryKeyTp != pmodel.PrimaryKeyTypeDefault && constr.Tp != ast.ConstraintPrimaryKey { return nil, dbterror.ErrUnsupportedClusteredSecondaryKey } if constr.Tp == ast.ConstraintForeignKey { - var fkName model.CIStr + var fkName pmodel.CIStr foreignKeyID++ if constr.Name != "" { - fkName = model.NewCIStr(constr.Name) + fkName = pmodel.NewCIStr(constr.Name) } else { - fkName = model.NewCIStr(fmt.Sprintf("fk_%d", foreignKeyID)) + fkName = pmodel.NewCIStr(fmt.Sprintf("fk_%d", foreignKeyID)) } if model.FindFKInfoByName(tbInfo.ForeignKeys, fkName.L) != nil { return nil, infoschema.ErrCannotAddForeign @@ -1258,16 +1259,16 @@ func BuildTableInfo( if ok, err := table.IsSupportedExpr(constr); !ok { return nil, err } - var dependedCols []model.CIStr + var dependedCols []pmodel.CIStr dependedColsMap := findDependentColsInExpr(constr.Expr) if !constr.InColumn { - dependedCols = make([]model.CIStr, 0, len(dependedColsMap)) + dependedCols = make([]pmodel.CIStr, 0, len(dependedColsMap)) for k := range dependedColsMap { if _, ok := existedColsMap[k]; !ok { // The table constraint depended on a non-existed column. return nil, dbterror.ErrTableCheckConstraintReferUnknown.GenWithStackByArgs(constr.Name, k) } - dependedCols = append(dependedCols, model.NewCIStr(k)) + dependedCols = append(dependedCols, pmodel.NewCIStr(k)) } } else { // Check the column-type constraint dependency. 
@@ -1283,7 +1284,7 @@ func BuildTableInfo( if _, ok := dependedColsMap[constr.InColumnName]; !ok { return nil, dbterror.ErrColumnCheckConstraintReferOther.GenWithStackByArgs(constr.Name) } - dependedCols = []model.CIStr{model.NewCIStr(constr.InColumnName)} + dependedCols = []pmodel.CIStr{pmodel.NewCIStr(constr.InColumnName)} } } // check auto-increment column @@ -1312,7 +1313,7 @@ func BuildTableInfo( idxInfo, err := BuildIndexInfo( ctx, tbInfo.Columns, - model.NewCIStr(indexName), + pmodel.NewCIStr(indexName), primary, unique, constr.Keys, @@ -1341,7 +1342,7 @@ func BuildTableInfo( func precheckBuildHiddenColumnInfo( indexPartSpecifications []*ast.IndexPartSpecification, - indexName model.CIStr, + indexName pmodel.CIStr, ) error { for i, idxPart := range indexPartSpecifications { if idxPart.Expr == nil { @@ -1360,7 +1361,7 @@ func precheckBuildHiddenColumnInfo( return nil } -func buildHiddenColumnInfoWithCheck(ctx sessionctx.Context, indexPartSpecifications []*ast.IndexPartSpecification, indexName model.CIStr, tblInfo *model.TableInfo, existCols []*table.Column) ([]*model.ColumnInfo, error) { +func buildHiddenColumnInfoWithCheck(ctx sessionctx.Context, indexPartSpecifications []*ast.IndexPartSpecification, indexName pmodel.CIStr, tblInfo *model.TableInfo, existCols []*table.Column) ([]*model.ColumnInfo, error) { if err := precheckBuildHiddenColumnInfo(indexPartSpecifications, indexName); err != nil { return nil, err } @@ -1368,13 +1369,13 @@ func buildHiddenColumnInfoWithCheck(ctx sessionctx.Context, indexPartSpecificati } // BuildHiddenColumnInfo builds hidden column info. 
-func BuildHiddenColumnInfo(ctx sessionctx.Context, indexPartSpecifications []*ast.IndexPartSpecification, indexName model.CIStr, tblInfo *model.TableInfo, existCols []*table.Column) ([]*model.ColumnInfo, error) { +func BuildHiddenColumnInfo(ctx sessionctx.Context, indexPartSpecifications []*ast.IndexPartSpecification, indexName pmodel.CIStr, tblInfo *model.TableInfo, existCols []*table.Column) ([]*model.ColumnInfo, error) { hiddenCols := make([]*model.ColumnInfo, 0, len(indexPartSpecifications)) for i, idxPart := range indexPartSpecifications { if idxPart.Expr == nil { continue } - idxPart.Column = &ast.ColumnName{Name: model.NewCIStr(fmt.Sprintf("%s_%s_%d", expressionIndexPrefix, indexName, i))} + idxPart.Column = &ast.ColumnName{Name: pmodel.NewCIStr(fmt.Sprintf("%s_%s_%d", expressionIndexPrefix, indexName, i))} // Check whether the hidden columns have existed. col := table.FindCol(existCols, idxPart.Column.Name.L) if col != nil { @@ -1502,7 +1503,7 @@ func isSingleIntPK(constr *ast.Constraint, lastCol *model.ColumnInfo) bool { // ShouldBuildClusteredIndex is used to determine whether the CREATE TABLE statement should build a clustered index table. func ShouldBuildClusteredIndex(ctx sessionctx.Context, opt *ast.IndexOption, isSingleIntPK bool) bool { - if opt == nil || opt.PrimaryKeyTp == model.PrimaryKeyTypeDefault { + if opt == nil || opt.PrimaryKeyTp == pmodel.PrimaryKeyTypeDefault { switch ctx.GetSessionVars().EnableClusteredIndex { case variable.ClusteredIndexDefModeOn: return true @@ -1512,7 +1513,7 @@ func ShouldBuildClusteredIndex(ctx sessionctx.Context, opt *ast.IndexOption, isS return false } } - return opt.PrimaryKeyTp == model.PrimaryKeyTypeClustered + return opt.PrimaryKeyTp == pmodel.PrimaryKeyTypeClustered } // BuildViewInfo builds a ViewInfo structure from an ast.CreateViewStmt. 
diff --git a/pkg/ddl/db_cache_test.go b/pkg/ddl/db_cache_test.go index 4c648d5f1ed3f..c19b1813367fa 100644 --- a/pkg/ddl/db_cache_test.go +++ b/pkg/ddl/db_cache_test.go @@ -20,8 +20,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/errno" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/external" "github.com/pingcap/tidb/pkg/util/dbterror" diff --git a/pkg/ddl/db_change_failpoints_test.go b/pkg/ddl/db_change_failpoints_test.go index 23059e004f5eb..b543833390114 100644 --- a/pkg/ddl/db_change_failpoints_test.go +++ b/pkg/ddl/db_change_failpoints_test.go @@ -25,8 +25,9 @@ import ( "github.com/pingcap/tidb/pkg/ddl" ddlutil "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/domain" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/external" "github.com/pingcap/tidb/pkg/util" @@ -67,7 +68,7 @@ func TestModifyColumnTypeArgs(t *testing.T) { var ( _newCol *model.ColumnInfo - _oldColName *model.CIStr + _oldColName *pmodel.CIStr _modifyColumnTp byte _updatedAutoRandomBits uint64 changingCol *model.ColumnInfo diff --git a/pkg/ddl/db_change_test.go b/pkg/ddl/db_change_test.go index 25a8249a5b9cc..1e9f595210a03 100644 --- a/pkg/ddl/db_change_test.go +++ b/pkg/ddl/db_change_test.go @@ -28,9 +28,9 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/session" sessiontypes 
"github.com/pingcap/tidb/pkg/session/types" diff --git a/pkg/ddl/db_integration_test.go b/pkg/ddl/db_integration_test.go index e675b1696e9af..ae397fc44ea9d 100644 --- a/pkg/ddl/db_integration_test.go +++ b/pkg/ddl/db_integration_test.go @@ -35,9 +35,10 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/session" @@ -515,7 +516,7 @@ func TestChangingTableCharset(t *testing.T) { ddlChecker.Disable() // Mock table info with charset is "". Old TiDB maybe create table with charset is "". - db, ok := dom.InfoSchema().SchemaByName(model.NewCIStr("test")) + db, ok := dom.InfoSchema().SchemaByName(pmodel.NewCIStr("test")) require.True(t, ok) tbl := external.GetTableByName(t, tk, "test", "t") tblInfo := tbl.Meta().Clone() @@ -757,7 +758,7 @@ func TestCaseInsensitiveCharsetAndCollate(t *testing.T) { tk.MustExec("create table t5(a varchar(20)) ENGINE=InnoDB DEFAULT CHARSET=UTF8MB4 COLLATE=UTF8MB4_GENERAL_CI;") tk.MustExec("insert into t5 values ('特克斯和凯科斯群岛')") - db, ok := dom.InfoSchema().SchemaByName(model.NewCIStr("test_charset_collate")) + db, ok := dom.InfoSchema().SchemaByName(pmodel.NewCIStr("test_charset_collate")) require.True(t, ok) tbl := external.GetTableByName(t, tk, "test_charset_collate", "t5") tblInfo := tbl.Meta().Clone() @@ -808,7 +809,7 @@ func TestZeroFillCreateTable(t *testing.T) { tk.MustExec("drop table if exists abc;") tk.MustExec("create table abc(y year, z tinyint(10) zerofill, primary key(y));") is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("abc")) + tbl, err := 
is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("abc")) require.NoError(t, err) var yearCol, zCol *model.ColumnInfo for _, col := range tbl.Meta().Columns { @@ -1024,7 +1025,7 @@ func TestResolveCharset(t *testing.T) { tk.MustExec(`CREATE TABLE resolve_charset (a varchar(255) DEFAULT NULL) DEFAULT CHARSET=latin1`) ctx := tk.Session() is := domain.GetDomain(ctx).InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("resolve_charset")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("resolve_charset")) require.NoError(t, err) require.Equal(t, "latin1", tbl.Cols()[0].GetCharset()) tk.MustExec("INSERT INTO resolve_charset VALUES('鰈')") @@ -1034,14 +1035,14 @@ func TestResolveCharset(t *testing.T) { tk.MustExec(`CREATE TABLE resolve_charset (a varchar(255) DEFAULT NULL) DEFAULT CHARSET=latin1`) is = domain.GetDomain(ctx).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("resolve_charset"), model.NewCIStr("resolve_charset")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("resolve_charset"), pmodel.NewCIStr("resolve_charset")) require.NoError(t, err) require.Equal(t, "latin1", tbl.Cols()[0].GetCharset()) require.Equal(t, "latin1", tbl.Meta().Charset) tk.MustExec(`CREATE TABLE resolve_charset1 (a varchar(255) DEFAULT NULL)`) is = domain.GetDomain(ctx).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("resolve_charset"), model.NewCIStr("resolve_charset1")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("resolve_charset"), pmodel.NewCIStr("resolve_charset1")) require.NoError(t, err) require.Equal(t, "binary", tbl.Cols()[0].GetCharset()) require.Equal(t, "binary", tbl.Meta().Charset) @@ -1130,7 +1131,7 @@ func TestAlterColumn(t *testing.T) { tk.MustQuery("select a from test_alter_column").Check(testkit.Rows("111")) ctx := tk.Session() is := 
domain.GetDomain(ctx).InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("test_alter_column")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("test_alter_column")) require.NoError(t, err) tblInfo := tbl.Meta() colA := tblInfo.Columns[0] @@ -1140,7 +1141,7 @@ func TestAlterColumn(t *testing.T) { tk.MustExec("insert into test_alter_column set b = 'b', c = 'bb'") tk.MustQuery("select a from test_alter_column").Check(testkit.Rows("111", "222")) is = domain.GetDomain(ctx).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("test_alter_column")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("test_alter_column")) require.NoError(t, err) tblInfo = tbl.Meta() colA = tblInfo.Columns[0] @@ -1150,7 +1151,7 @@ func TestAlterColumn(t *testing.T) { tk.MustExec("insert into test_alter_column set c = 'cc'") tk.MustQuery("select b from test_alter_column").Check(testkit.Rows("a", "b", "")) is = domain.GetDomain(ctx).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("test_alter_column")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("test_alter_column")) require.NoError(t, err) tblInfo = tbl.Meta() colC := tblInfo.Columns[2] @@ -1160,7 +1161,7 @@ func TestAlterColumn(t *testing.T) { tk.MustExec("insert into test_alter_column set a = 123") tk.MustQuery("select c from test_alter_column").Check(testkit.Rows("aa", "bb", "cc", "xx")) is = domain.GetDomain(ctx).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("test_alter_column")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("test_alter_column")) require.NoError(t, err) tblInfo = tbl.Meta() colC = tblInfo.Columns[2] @@ -1415,7 +1416,7 @@ func 
TestTreatOldVersionUTF8AsUTF8MB4(t *testing.T) { ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) // Mock old version table info with column charset is utf8. - db, ok := domain.GetDomain(tk.Session()).InfoSchema().SchemaByName(model.NewCIStr("test")) + db, ok := domain.GetDomain(tk.Session()).InfoSchema().SchemaByName(pmodel.NewCIStr("test")) tbl := external.GetTableByName(t, tk, "test", "t") tblInfo := tbl.Meta().Clone() tblInfo.Version = model.TableInfoVersion0 @@ -1705,7 +1706,7 @@ func TestChangingDBCharset(t *testing.T) { // Make sure the table schema is the new schema. err := dom.Reload() require.NoError(t, err) - dbInfo, ok := dom.InfoSchema().SchemaByName(model.NewCIStr(dbName)) + dbInfo, ok := dom.InfoSchema().SchemaByName(pmodel.NewCIStr(dbName)) require.Equal(t, true, ok) require.Equal(t, chs, dbInfo.Charset) require.Equal(t, coll, dbInfo.Collate) @@ -1888,7 +1889,7 @@ func TestAddExpressionIndex(t *testing.T) { tk.MustExec("alter table t add index idx((a+b));") tk.MustQuery("SELECT * FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE WHERE table_name = 't'").Check(testkit.Rows()) - tblInfo, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) columns := tblInfo.Meta().Columns require.Equal(t, 3, len(columns)) @@ -1897,7 +1898,7 @@ func TestAddExpressionIndex(t *testing.T) { tk.MustQuery("select * from t;").Check(testkit.Rows("1 2.1")) tk.MustExec("alter table t add index idx_multi((a+b),(a+1), b);") - tblInfo, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) columns = tblInfo.Meta().Columns require.Equal(t, 5, len(columns)) @@ -1907,7 +1908,7 @@ func 
TestAddExpressionIndex(t *testing.T) { tk.MustQuery("select * from t;").Check(testkit.Rows("1 2.1")) tk.MustExec("alter table t drop index idx;") - tblInfo, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) columns = tblInfo.Meta().Columns require.Equal(t, 4, len(columns)) @@ -1915,7 +1916,7 @@ func TestAddExpressionIndex(t *testing.T) { tk.MustQuery("select * from t;").Check(testkit.Rows("1 2.1")) tk.MustExec("alter table t drop index idx_multi;") - tblInfo, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) columns = tblInfo.Meta().Columns require.Equal(t, 2, len(columns)) @@ -2740,7 +2741,7 @@ func TestDropTemporaryTable(t *testing.T) { sessionVars := tk.Session().GetSessionVars() sessVarsTempTable := sessionVars.LocalTemporaryTables localTemporaryTable := sessVarsTempTable.(*infoschema.SessionTables) - tbl, exist := localTemporaryTable.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("a_local_temp_table_7")) + tbl, exist := localTemporaryTable.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("a_local_temp_table_7")) require.True(t, exist) tblInfo := tbl.Meta() tablePrefix := tablecodec.EncodeTablePrefix(tblInfo.ID) @@ -2842,7 +2843,7 @@ func TestTruncateLocalTemporaryTable(t *testing.T) { // truncate temporary table will clear session data localTemporaryTables := tk.Session().GetSessionVars().LocalTemporaryTables.(*infoschema.SessionTables) - tb1, exist := localTemporaryTables.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tb1, exist := localTemporaryTables.TableByName(context.Background(), 
pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) tbl1Info := tb1.Meta() tablePrefix := tablecodec.EncodeTablePrefix(tbl1Info.ID) endTablePrefix := tablecodec.EncodeTablePrefix(tbl1Info.ID + 1) @@ -3040,7 +3041,7 @@ func TestIssue52680(t *testing.T) { tk.MustQuery("select * from issue52680").Check(testkit.Rows("1", "2")) is := dom.InfoSchema() - ti, err := is.TableInfoByName(model.NewCIStr("test"), model.NewCIStr("issue52680")) + ti, err := is.TableInfoByName(pmodel.NewCIStr("test"), pmodel.NewCIStr("issue52680")) require.NoError(t, err) ddlutil.EmulatorGCDisable() @@ -3084,7 +3085,7 @@ func TestIssue52680(t *testing.T) { )) is = dom.InfoSchema() - ti1, err := is.TableInfoByName(model.NewCIStr("test"), model.NewCIStr("issue52680")) + ti1, err := is.TableInfoByName(pmodel.NewCIStr("test"), pmodel.NewCIStr("issue52680")) require.NoError(t, err) require.Equal(t, ti1.ID, ti.ID) diff --git a/pkg/ddl/db_table_test.go b/pkg/ddl/db_table_test.go index 8201a958b5fab..580b5013dfedc 100644 --- a/pkg/ddl/db_table_test.go +++ b/pkg/ddl/db_table_test.go @@ -32,8 +32,9 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/store/mockstore" @@ -314,10 +315,10 @@ func TestCreateTableWithInfo(t *testing.T) { require.NotNil(t, d) info := []*model.TableInfo{{ ID: 42042, // Note, we must ensure the table ID is globally unique! 
- Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), }} - require.NoError(t, d.BatchCreateTableWithInfo(tk.Session(), model.NewCIStr("test"), info, ddl.WithOnExist(ddl.OnExistError), ddl.WithIDAllocated(true))) + require.NoError(t, d.BatchCreateTableWithInfo(tk.Session(), pmodel.NewCIStr("test"), info, ddl.WithOnExist(ddl.OnExistError), ddl.WithIDAllocated(true))) tk.MustQuery("select tidb_table_id from information_schema.tables where table_name = 't'").Check(testkit.Rows("42042")) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) @@ -332,10 +333,10 @@ func TestCreateTableWithInfo(t *testing.T) { require.NoError(t, err) info = []*model.TableInfo{{ ID: 42, - Name: model.NewCIStr("tt"), + Name: pmodel.NewCIStr("tt"), }} tk.Session().SetValue(sessionctx.QueryString, "skip") - require.NoError(t, d.BatchCreateTableWithInfo(tk.Session(), model.NewCIStr("test"), info, ddl.WithOnExist(ddl.OnExistError))) + require.NoError(t, d.BatchCreateTableWithInfo(tk.Session(), pmodel.NewCIStr("test"), info, ddl.WithOnExist(ddl.OnExistError))) idGen, ok := tk.MustQuery("select tidb_table_id from information_schema.tables where table_name = 'tt'").Rows()[0][0].(string) require.True(t, ok) idGenNum, err := strconv.ParseInt(idGen, 10, 64) @@ -354,18 +355,18 @@ func TestBatchCreateTable(t *testing.T) { d := dom.DDLExecutor() infos := []*model.TableInfo{} infos = append(infos, &model.TableInfo{ - Name: model.NewCIStr("tables_1"), + Name: pmodel.NewCIStr("tables_1"), }) infos = append(infos, &model.TableInfo{ - Name: model.NewCIStr("tables_2"), + Name: pmodel.NewCIStr("tables_2"), }) infos = append(infos, &model.TableInfo{ - Name: model.NewCIStr("tables_3"), + Name: pmodel.NewCIStr("tables_3"), }) // correct name tk.Session().SetValue(sessionctx.QueryString, "skip") - err := d.BatchCreateTableWithInfo(tk.Session(), model.NewCIStr("test"), infos, ddl.WithOnExist(ddl.OnExistError)) + err := d.BatchCreateTableWithInfo(tk.Session(), pmodel.NewCIStr("test"), 
infos, ddl.WithOnExist(ddl.OnExistError)) require.NoError(t, err) tk.MustQuery("show tables like '%tables_%'").Check(testkit.Rows("tables_1", "tables_2", "tables_3")) @@ -378,23 +379,23 @@ func TestBatchCreateTable(t *testing.T) { // c.Assert(job[6], Matches, "[^,]+,[^,]+,[^,]+") // duplicated name - infos[1].Name = model.NewCIStr("tables_1") + infos[1].Name = pmodel.NewCIStr("tables_1") tk.Session().SetValue(sessionctx.QueryString, "skip") - err = d.BatchCreateTableWithInfo(tk.Session(), model.NewCIStr("test"), infos, ddl.WithOnExist(ddl.OnExistError)) + err = d.BatchCreateTableWithInfo(tk.Session(), pmodel.NewCIStr("test"), infos, ddl.WithOnExist(ddl.OnExistError)) require.True(t, terror.ErrorEqual(err, infoschema.ErrTableExists)) newinfo := &model.TableInfo{ - Name: model.NewCIStr("tables_4"), + Name: pmodel.NewCIStr("tables_4"), } { colNum := 2 cols := make([]*model.ColumnInfo, colNum) - viewCols := make([]model.CIStr, colNum) + viewCols := make([]pmodel.CIStr, colNum) var stmtBuffer bytes.Buffer stmtBuffer.WriteString("SELECT ") for i := range cols { col := &model.ColumnInfo{ - Name: model.NewCIStr(fmt.Sprintf("c%d", i+1)), + Name: pmodel.NewCIStr(fmt.Sprintf("c%d", i+1)), Offset: i, State: model.StatePublic, } @@ -404,12 +405,12 @@ func TestBatchCreateTable(t *testing.T) { } stmtBuffer.WriteString("1 FROM t") newinfo.Columns = cols - newinfo.View = &model.ViewInfo{Cols: viewCols, Security: model.SecurityDefiner, Algorithm: model.AlgorithmMerge, SelectStmt: stmtBuffer.String(), CheckOption: model.CheckOptionCascaded, Definer: &auth.UserIdentity{CurrentUser: true}} + newinfo.View = &model.ViewInfo{Cols: viewCols, Security: pmodel.SecurityDefiner, Algorithm: pmodel.AlgorithmMerge, SelectStmt: stmtBuffer.String(), CheckOption: pmodel.CheckOptionCascaded, Definer: &auth.UserIdentity{CurrentUser: true}} } tk.Session().SetValue(sessionctx.QueryString, "skip") tk.Session().SetValue(sessionctx.QueryString, "skip") - err = d.BatchCreateTableWithInfo(tk.Session(), 
model.NewCIStr("test"), []*model.TableInfo{newinfo}, ddl.WithOnExist(ddl.OnExistError)) + err = d.BatchCreateTableWithInfo(tk.Session(), pmodel.NewCIStr("test"), []*model.TableInfo{newinfo}, ddl.WithOnExist(ddl.OnExistError)) require.NoError(t, err) } @@ -426,12 +427,12 @@ func TestTableLock(t *testing.T) { tk.MustExec("lock tables t1 write") tk.MustExec("insert into t1 values(NULL)") tk.MustExec("unlock tables") - checkTableLock(t, tk, "test", "t1", model.TableLockNone) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockNone) tk.MustExec("lock tables t1 write") tk.MustExec("insert into t1 values(NULL)") tk.MustExec("unlock tables") - checkTableLock(t, tk, "test", "t1", model.TableLockNone) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockNone) tk.MustExec("drop table if exists t1") @@ -473,12 +474,12 @@ func TestTableLocksLostCommit(t *testing.T) { tk.MustExec("unlock tables") } -func checkTableLock(t *testing.T, tk *testkit.TestKit, dbName, tableName string, lockTp model.TableLockType) { +func checkTableLock(t *testing.T, tk *testkit.TestKit, dbName, tableName string, lockTp pmodel.TableLockType) { tb := external.GetTableByName(t, tk, dbName, tableName) dom := domain.GetDomain(tk.Session()) err := dom.Reload() require.NoError(t, err) - if lockTp != model.TableLockNone { + if lockTp != pmodel.TableLockNone { require.NotNil(t, tb.Meta().Lock) require.Equal(t, lockTp, tb.Meta().Lock.Tp) require.Equal(t, model.TableLockStatePublic, tb.Meta().Lock.State) @@ -554,31 +555,31 @@ func TestLockTables(t *testing.T) { // Test lock 1 table. tk.MustExec("lock tables t1 write") - checkTableLock(t, tk, "test", "t1", model.TableLockWrite) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockWrite) // still locked after truncate. tk.MustExec("truncate table t1") - checkTableLock(t, tk, "test", "t1", model.TableLockWrite) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockWrite) // should unlock the new table id. 
tk.MustExec("unlock tables") - checkTableLock(t, tk, "test", "t1", model.TableLockNone) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockNone) tk.MustExec("lock tables t1 read") - checkTableLock(t, tk, "test", "t1", model.TableLockRead) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockRead) tk.MustExec("lock tables t1 write") - checkTableLock(t, tk, "test", "t1", model.TableLockWrite) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockWrite) // Test lock multi tables. tk.MustExec("lock tables t1 write, t2 read") - checkTableLock(t, tk, "test", "t1", model.TableLockWrite) - checkTableLock(t, tk, "test", "t2", model.TableLockRead) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockWrite) + checkTableLock(t, tk, "test", "t2", pmodel.TableLockRead) tk.MustExec("lock tables t1 read, t2 write") - checkTableLock(t, tk, "test", "t1", model.TableLockRead) - checkTableLock(t, tk, "test", "t2", model.TableLockWrite) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockRead) + checkTableLock(t, tk, "test", "t2", pmodel.TableLockWrite) tk.MustExec("lock tables t2 write") - checkTableLock(t, tk, "test", "t2", model.TableLockWrite) - checkTableLock(t, tk, "test", "t1", model.TableLockNone) + checkTableLock(t, tk, "test", "t2", pmodel.TableLockWrite) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockNone) tk.MustExec("lock tables t1 write") - checkTableLock(t, tk, "test", "t1", model.TableLockWrite) - checkTableLock(t, tk, "test", "t2", model.TableLockNone) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockWrite) + checkTableLock(t, tk, "test", "t2", pmodel.TableLockNone) tk2 := testkit.NewTestKit(t, store) tk2.MustExec("use test") @@ -696,15 +697,15 @@ func TestLockTables(t *testing.T) { tk.MustExec("lock table t1 write, t2 write") tk2.MustGetDBError("lock tables t1 write, t2 read", infoschema.ErrTableLocked) tk2.MustExec("admin cleanup table lock t1,t2") - checkTableLock(t, tk, "test", "t1", model.TableLockNone) - checkTableLock(t, tk, "test", "t2", 
model.TableLockNone) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockNone) + checkTableLock(t, tk, "test", "t2", pmodel.TableLockNone) // cleanup unlocked table. tk2.MustExec("admin cleanup table lock t1,t2") - checkTableLock(t, tk, "test", "t1", model.TableLockNone) - checkTableLock(t, tk, "test", "t2", model.TableLockNone) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockNone) + checkTableLock(t, tk, "test", "t2", pmodel.TableLockNone) tk2.MustExec("lock tables t1 write, t2 read") - checkTableLock(t, tk2, "test", "t1", model.TableLockWrite) - checkTableLock(t, tk2, "test", "t2", model.TableLockRead) + checkTableLock(t, tk2, "test", "t1", pmodel.TableLockWrite) + checkTableLock(t, tk2, "test", "t2", pmodel.TableLockRead) tk.MustExec("unlock tables") tk2.MustExec("unlock tables") @@ -722,7 +723,7 @@ func TestTablesLockDelayClean(t *testing.T) { tk.MustExec("create table t2 (a int)") tk.MustExec("lock tables t1 write") - checkTableLock(t, tk, "test", "t1", model.TableLockWrite) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockWrite) config.UpdateGlobal(func(conf *config.Config) { conf.DelayCleanTableLock = 100 }) @@ -733,10 +734,10 @@ func TestTablesLockDelayClean(t *testing.T) { tk.Session().Close() }) time.Sleep(50 * time.Millisecond) - checkTableLock(t, tk, "test", "t1", model.TableLockWrite) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockWrite) wg.Wait() require.True(t, time.Since(startTime).Seconds() > 0.1) - checkTableLock(t, tk, "test", "t1", model.TableLockNone) + checkTableLock(t, tk, "test", "t1", pmodel.TableLockNone) config.UpdateGlobal(func(conf *config.Config) { conf.DelayCleanTableLock = 0 }) diff --git a/pkg/ddl/db_test.go b/pkg/ddl/db_test.go index f5244592daab4..4898e0322f33d 100644 --- a/pkg/ddl/db_test.go +++ b/pkg/ddl/db_test.go @@ -33,10 +33,11 @@ import ( "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" 
"github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" parsertypes "github.com/pingcap/tidb/pkg/parser/types" @@ -321,7 +322,7 @@ func TestForbidCacheTableForSystemTable(t *testing.T) { for _, one := range sysTables { err := tk.ExecToErr(fmt.Sprintf("alter table `%s` cache", one)) if db == "MySQL" || db == "SYS" { - tbl, err1 := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr(db), model.NewCIStr(one)) + tbl, err1 := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(db), pmodel.NewCIStr(one)) require.NoError(t, err1) if tbl.Meta().View != nil { require.ErrorIs(t, err, dbterror.ErrWrongObject) @@ -647,10 +648,10 @@ func TestSnapshotVersion(t *testing.T) { require.Equal(t, is.SchemaMetaVersion(), currSnapIs.SchemaMetaVersion()) // for GetSnapshotMeta - dbInfo, ok := currSnapIs.SchemaByName(model.NewCIStr("test2")) + dbInfo, ok := currSnapIs.SchemaByName(pmodel.NewCIStr("test2")) require.True(t, ok) - tbl, err := currSnapIs.TableByName(context.Background(), model.NewCIStr("test2"), model.NewCIStr("t")) + tbl, err := currSnapIs.TableByName(context.Background(), pmodel.NewCIStr("test2"), pmodel.NewCIStr("t")) require.NoError(t, err) m := dom.GetSnapshotMeta(snapTS) diff --git a/pkg/ddl/ddl.go b/pkg/ddl/ddl.go index d846296352fef..a671504d717bf 100644 --- a/pkg/ddl/ddl.go +++ b/pkg/ddl/ddl.go @@ -46,9 +46,10 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/owner" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" 
"github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" @@ -989,7 +990,7 @@ type RecoverSchemaInfo struct { LoadTablesOnExecute bool DropJobID int64 SnapshotTS uint64 - OldSchemaName model.CIStr + OldSchemaName pmodel.CIStr } // delayForAsyncCommit sleeps `SafeWindow + AllowedClockDrift` before a DDL job finishes. diff --git a/pkg/ddl/ddl_history.go b/pkg/ddl/ddl_history.go index 1455f39a2db76..4f4616e9918ac 100644 --- a/pkg/ddl/ddl_history.go +++ b/pkg/ddl/ddl_history.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessiontxn" "go.uber.org/zap" diff --git a/pkg/ddl/ddl_history_test.go b/pkg/ddl/ddl_history_test.go index d47dcc7b0cf24..b0d3a64c3827d 100644 --- a/pkg/ddl/ddl_history_test.go +++ b/pkg/ddl/ddl_history_test.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl/session" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" ) diff --git a/pkg/ddl/ddl_running_jobs.go b/pkg/ddl/ddl_running_jobs.go index d54c6582b54c6..a1b77d51416bb 100644 --- a/pkg/ddl/ddl_running_jobs.go +++ b/pkg/ddl/ddl_running_jobs.go @@ -24,7 +24,7 @@ import ( "strings" "sync" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/util/intest" ) diff --git a/pkg/ddl/ddl_running_jobs_test.go b/pkg/ddl/ddl_running_jobs_test.go index dce4fb630fef7..d8e5274ff2f6f 100644 --- a/pkg/ddl/ddl_running_jobs_test.go +++ b/pkg/ddl/ddl_running_jobs_test.go @@ -24,7 +24,7 @@ import ( "strings" "testing" - "github.com/pingcap/tidb/pkg/parser/model" + 
"github.com/pingcap/tidb/pkg/meta/model" "github.com/stretchr/testify/require" ) diff --git a/pkg/ddl/ddl_test.go b/pkg/ddl/ddl_test.go index 99e8169fd8e37..a96b1cd7ce893 100644 --- a/pkg/ddl/ddl_test.go +++ b/pkg/ddl/ddl_test.go @@ -22,10 +22,11 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" @@ -166,7 +167,7 @@ func TestFieldCase(t *testing.T) { colObjects := make([]*model.ColumnInfo, len(fields)) for i, name := range fields { colObjects[i] = &model.ColumnInfo{ - Name: model.NewCIStr(name), + Name: pmodel.NewCIStr(name), } } err := checkDuplicateColumn(colObjects) diff --git a/pkg/ddl/ddl_tiflash_api.go b/pkg/ddl/ddl_tiflash_api.go index 9813ce612f7e3..57fb2c6a1fdd8 100644 --- a/pkg/ddl/ddl_tiflash_api.go +++ b/pkg/ddl/ddl_tiflash_api.go @@ -34,7 +34,7 @@ import ( ddlutil "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/util" diff --git a/pkg/ddl/delete_range.go b/pkg/ddl/delete_range.go index bafa01c847977..6682803b94df9 100644 --- a/pkg/ddl/delete_range.go +++ b/pkg/ddl/delete_range.go @@ -28,7 +28,8 @@ import ( sess "github.com/pingcap/tidb/pkg/ddl/session" "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel 
"github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/tablecodec" @@ -383,7 +384,7 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, wrapper DelRangeExecWrap } } case model.ActionDropColumn: - var colName model.CIStr + var colName pmodel.CIStr var ifExists bool var indexIDs []int64 var partitionIDs []int64 diff --git a/pkg/ddl/executor.go b/pkg/ddl/executor.go index 9c6599dc1fb67..b7b6354551155 100644 --- a/pkg/ddl/executor.go +++ b/pkg/ddl/executor.go @@ -42,10 +42,11 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/privilege" @@ -142,14 +143,14 @@ type Executor interface { // in-place. If you want to keep using `info`, please call Clone() first. CreateTableWithInfo( ctx sessionctx.Context, - schema model.CIStr, + schema pmodel.CIStr, info *model.TableInfo, involvingRef []model.InvolvingSchemaInfo, cs ...CreateTableOption) error // BatchCreateTableWithInfo is like CreateTableWithInfo, but can handle multiple tables. 
BatchCreateTableWithInfo(ctx sessionctx.Context, - schema model.CIStr, + schema pmodel.CIStr, info []*model.TableInfo, cs ...CreateTableOption) error @@ -218,7 +219,7 @@ func (e *executor) CreateSchema(ctx sessionctx.Context, stmt *ast.CreateDatabase explicitCollation = true case ast.DatabaseOptionPlacementPolicy: placementPolicyRef = &model.PolicyRefInfo{ - Name: model.NewCIStr(val.Value), + Name: pmodel.NewCIStr(val.Value), } } } @@ -701,7 +702,7 @@ func (e *executor) AlterSchema(sctx sessionctx.Context, stmt *ast.AlterDatabaseS toCollate = info.Name isAlterCharsetAndCollate = true case ast.DatabaseOptionPlacementPolicy: - placementPolicyRef = &model.PolicyRefInfo{Name: model.NewCIStr(val.Value)} + placementPolicyRef = &model.PolicyRefInfo{Name: pmodel.NewCIStr(val.Value)} case ast.DatabaseSetTiFlashReplica: tiflashReplica = val.TiFlashReplica } @@ -807,35 +808,35 @@ func (e *executor) RecoverSchema(ctx sessionctx.Context, recoverSchemaInfo *Reco return errors.Trace(err) } -func checkTooLongSchema(schema model.CIStr) error { +func checkTooLongSchema(schema pmodel.CIStr) error { if utf8.RuneCountInString(schema.L) > mysql.MaxDatabaseNameLength { return dbterror.ErrTooLongIdent.GenWithStackByArgs(schema) } return nil } -func checkTooLongTable(table model.CIStr) error { +func checkTooLongTable(table pmodel.CIStr) error { if utf8.RuneCountInString(table.L) > mysql.MaxTableNameLength { return dbterror.ErrTooLongIdent.GenWithStackByArgs(table) } return nil } -func checkTooLongIndex(index model.CIStr) error { +func checkTooLongIndex(index pmodel.CIStr) error { if utf8.RuneCountInString(index.L) > mysql.MaxIndexIdentifierLen { return dbterror.ErrTooLongIdent.GenWithStackByArgs(index) } return nil } -func checkTooLongColumn(col model.CIStr) error { +func checkTooLongColumn(col pmodel.CIStr) error { if utf8.RuneCountInString(col.L) > mysql.MaxColumnNameLength { return dbterror.ErrTooLongIdent.GenWithStackByArgs(col) } return nil } -func checkTooLongForeignKey(fk 
model.CIStr) error { +func checkTooLongForeignKey(fk pmodel.CIStr) error { if utf8.RuneCountInString(fk.L) > mysql.MaxForeignKeyIdentifierLen { return dbterror.ErrTooLongIdent.GenWithStackByArgs(fk) } @@ -940,7 +941,7 @@ func checkInvisibleIndexOnPK(tblInfo *model.TableInfo) error { // checkGlobalIndex check if the index is allowed to have global index func checkGlobalIndex(ctx sessionctx.Context, tblInfo *model.TableInfo, indexInfo *model.IndexInfo) error { pi := tblInfo.GetPartitionInfo() - isPartitioned := pi != nil && pi.Type != model.PartitionTypeNone + isPartitioned := pi != nil && pi.Type != pmodel.PartitionTypeNone if indexInfo.Global { if !isPartitioned { // Makes no sense with LOCAL/GLOBAL index for non-partitioned tables, since we don't support @@ -1037,7 +1038,7 @@ func (e *executor) CreateTable(ctx sessionctx.Context, s *ast.CreateTableStmt) ( // WARNING: it may return a nil job, which means you don't need to submit any DDL job. func (e *executor) createTableWithInfoJob( ctx sessionctx.Context, - dbName model.CIStr, + dbName pmodel.CIStr, tbInfo *model.TableInfo, involvingRef []model.InvolvingSchemaInfo, onExist OnExist, @@ -1179,7 +1180,7 @@ func (e *executor) createTableWithInfoPost( func (e *executor) CreateTableWithInfo( ctx sessionctx.Context, - dbName model.CIStr, + dbName pmodel.CIStr, tbInfo *model.TableInfo, involvingRef []model.InvolvingSchemaInfo, cs ...CreateTableOption, @@ -1213,7 +1214,7 @@ func (e *executor) CreateTableWithInfo( } func (e *executor) BatchCreateTableWithInfo(ctx sessionctx.Context, - dbName model.CIStr, + dbName pmodel.CIStr, infos []*model.TableInfo, cs ...CreateTableOption, ) error { @@ -1757,9 +1758,9 @@ func (e *executor) AlterTable(ctx context.Context, sctx sessionctx.Context, stmt case ast.AlterTableDropColumn: err = e.DropColumn(sctx, ident, spec) case ast.AlterTableDropIndex: - err = e.dropIndex(sctx, ident, model.NewCIStr(spec.Name), spec.IfExists, false) + err = e.dropIndex(sctx, ident, 
pmodel.NewCIStr(spec.Name), spec.IfExists, false) case ast.AlterTableDropPrimaryKey: - err = e.dropIndex(sctx, ident, model.NewCIStr(mysql.PrimaryKeyName), spec.IfExists, false) + err = e.dropIndex(sctx, ident, pmodel.NewCIStr(mysql.PrimaryKeyName), spec.IfExists, false) case ast.AlterTableRenameIndex: err = e.RenameIndex(sctx, ident, spec) case ast.AlterTableDropPartition, ast.AlterTableDropFirstPartition: @@ -1778,7 +1779,7 @@ func (e *executor) AlterTable(ctx context.Context, sctx sessionctx.Context, stmt TableLocks: []ast.TableLock{ { Table: tName, - Type: model.TableLockReadOnly, + Type: pmodel.TableLockReadOnly, }, }, } @@ -1790,31 +1791,31 @@ func (e *executor) AlterTable(ctx context.Context, sctx sessionctx.Context, stmt constr := spec.Constraint switch spec.Constraint.Tp { case ast.ConstraintKey, ast.ConstraintIndex: - err = e.createIndex(sctx, ident, ast.IndexKeyTypeNone, model.NewCIStr(constr.Name), + err = e.createIndex(sctx, ident, ast.IndexKeyTypeNone, pmodel.NewCIStr(constr.Name), spec.Constraint.Keys, constr.Option, constr.IfNotExists) case ast.ConstraintUniq, ast.ConstraintUniqIndex, ast.ConstraintUniqKey: - err = e.createIndex(sctx, ident, ast.IndexKeyTypeUnique, model.NewCIStr(constr.Name), + err = e.createIndex(sctx, ident, ast.IndexKeyTypeUnique, pmodel.NewCIStr(constr.Name), spec.Constraint.Keys, constr.Option, false) // IfNotExists should be not applied case ast.ConstraintForeignKey: // NOTE: we do not handle `symbol` and `index_name` well in the parser and we do not check ForeignKey already exists, // so we just also ignore the `if not exists` check. 
- err = e.CreateForeignKey(sctx, ident, model.NewCIStr(constr.Name), spec.Constraint.Keys, spec.Constraint.Refer) + err = e.CreateForeignKey(sctx, ident, pmodel.NewCIStr(constr.Name), spec.Constraint.Keys, spec.Constraint.Refer) case ast.ConstraintPrimaryKey: - err = e.CreatePrimaryKey(sctx, ident, model.NewCIStr(constr.Name), spec.Constraint.Keys, constr.Option) + err = e.CreatePrimaryKey(sctx, ident, pmodel.NewCIStr(constr.Name), spec.Constraint.Keys, constr.Option) case ast.ConstraintFulltext: sctx.GetSessionVars().StmtCtx.AppendWarning(dbterror.ErrTableCantHandleFt) case ast.ConstraintCheck: if !variable.EnableCheckConstraint.Load() { sctx.GetSessionVars().StmtCtx.AppendWarning(errCheckConstraintIsOff) } else { - err = e.CreateCheckConstraint(sctx, ident, model.NewCIStr(constr.Name), spec.Constraint) + err = e.CreateCheckConstraint(sctx, ident, pmodel.NewCIStr(constr.Name), spec.Constraint) } default: // Nothing to do now. } case ast.AlterTableDropForeignKey: // NOTE: we do not check `if not exists` and `if exists` for ForeignKey now. 
- err = e.DropForeignKey(sctx, ident, model.NewCIStr(spec.Name)) + err = e.DropForeignKey(sctx, ident, pmodel.NewCIStr(spec.Name)) case ast.AlterTableModifyColumn: err = e.ModifyColumn(ctx, sctx, ident, spec) case ast.AlterTableChangeColumn: @@ -1867,7 +1868,7 @@ func (e *executor) AlterTable(ctx context.Context, sctx sessionctx.Context, stmt handledCharsetOrCollate = true case ast.TableOptionPlacementPolicy: placementPolicyRef = &model.PolicyRefInfo{ - Name: model.NewCIStr(opt.StrValue), + Name: pmodel.NewCIStr(opt.StrValue), } case ast.TableOptionEngine: case ast.TableOptionRowFormat: @@ -1908,13 +1909,13 @@ func (e *executor) AlterTable(ctx context.Context, sctx sessionctx.Context, stmt if !variable.EnableCheckConstraint.Load() { sctx.GetSessionVars().StmtCtx.AppendWarning(errCheckConstraintIsOff) } else { - err = e.AlterCheckConstraint(sctx, ident, model.NewCIStr(spec.Constraint.Name), spec.Constraint.Enforced) + err = e.AlterCheckConstraint(sctx, ident, pmodel.NewCIStr(spec.Constraint.Name), spec.Constraint.Enforced) } case ast.AlterTableDropCheck: if !variable.EnableCheckConstraint.Load() { sctx.GetSessionVars().StmtCtx.AppendWarning(errCheckConstraintIsOff) } else { - err = e.DropCheckConstraint(sctx, ident, model.NewCIStr(spec.Constraint.Name)) + err = e.DropCheckConstraint(sctx, ident, pmodel.NewCIStr(spec.Constraint.Name)) } case ast.AlterTableWithValidation: sctx.GetSessionVars().StmtCtx.AppendWarning(dbterror.ErrUnsupportedAlterTableWithValidation) @@ -2270,7 +2271,7 @@ func (e *executor) AddTablePartitions(ctx sessionctx.Context, ident ast.Ident, s if pi == nil { return errors.Trace(dbterror.ErrPartitionMgmtOnNonpartitioned) } - if pi.Type == model.PartitionTypeHash || pi.Type == model.PartitionTypeKey { + if pi.Type == pmodel.PartitionTypeHash || pi.Type == pmodel.PartitionTypeKey { // Add partition for hash/key is actually a reorganize partition // operation and not a metadata only change! 
switch spec.Tp { @@ -2288,7 +2289,7 @@ func (e *executor) AddTablePartitions(ctx sessionctx.Context, ident ast.Ident, s if err != nil { return errors.Trace(err) } - if pi.Type == model.PartitionTypeList { + if pi.Type == pmodel.PartitionTypeList { // TODO: make sure that checks in ddl_api and ddl_worker is the same. err = checkAddListPartitions(meta) if err != nil { @@ -2351,7 +2352,7 @@ func (e *executor) AddTablePartitions(ctx sessionctx.Context, ident ast.Ident, s // getReorganizedDefinitions return the definitions as they would look like after the REORGANIZE PARTITION is done. func getReorganizedDefinitions(pi *model.PartitionInfo, firstPartIdx, lastPartIdx int, idMap map[int]struct{}) []model.PartitionDefinition { tmpDefs := make([]model.PartitionDefinition, 0, len(pi.Definitions)+len(pi.AddingDefinitions)-len(idMap)) - if pi.Type == model.PartitionTypeList { + if pi.Type == pmodel.PartitionTypeList { replaced := false for i := range pi.Definitions { if _, ok := idMap[i]; ok { @@ -2403,12 +2404,12 @@ func getReplacedPartitionIDs(names []string, pi *model.PartitionInfo) (firstPart } } switch pi.Type { - case model.PartitionTypeRange: + case pmodel.PartitionTypeRange: if len(idMap) != (lastPartIdx - firstPartIdx + 1) { return 0, 0, nil, errors.Trace(dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs( "REORGANIZE PARTITION of RANGE; not adjacent partitions")) } - case model.PartitionTypeHash, model.PartitionTypeKey: + case pmodel.PartitionTypeHash, pmodel.PartitionTypeKey: if len(idMap) != len(pi.Definitions) { return 0, 0, nil, errors.Trace(dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs( "REORGANIZE PARTITION of HASH/RANGE; must reorganize all partitions")) @@ -2420,10 +2421,10 @@ func getReplacedPartitionIDs(names []string, pi *model.PartitionInfo) (firstPart func getPartitionInfoTypeNone() *model.PartitionInfo { return &model.PartitionInfo{ - Type: model.PartitionTypeNone, + Type: pmodel.PartitionTypeNone, Enable: true, Definitions: 
[]model.PartitionDefinition{{ - Name: model.NewCIStr("pFullTable"), + Name: pmodel.NewCIStr("pFullTable"), Comment: "Intermediate partition during ALTER TABLE ... PARTITION BY ...", }}, Num: 1, @@ -2499,8 +2500,8 @@ func (e *executor) ReorganizePartitions(ctx sessionctx.Context, ident ast.Ident, return dbterror.ErrPartitionMgmtOnNonpartitioned } switch pi.Type { - case model.PartitionTypeRange, model.PartitionTypeList: - case model.PartitionTypeHash, model.PartitionTypeKey: + case pmodel.PartitionTypeRange, pmodel.PartitionTypeList: + case pmodel.PartitionTypeHash, pmodel.PartitionTypeKey: if spec.Tp != ast.AlterTableCoalescePartitions && spec.Tp != ast.AlterTableAddPartitions { return errors.Trace(dbterror.ErrUnsupportedReorganizePartition) @@ -2573,13 +2574,13 @@ func (e *executor) RemovePartitioning(ctx sessionctx.Context, ident ast.Ident, s newSpec.Tp = spec.Tp defs := make([]*ast.PartitionDefinition, 1) defs[0] = &ast.PartitionDefinition{} - defs[0].Name = model.NewCIStr("CollapsedPartitions") + defs[0].Name = pmodel.NewCIStr("CollapsedPartitions") newSpec.PartDefinitions = defs partNames := make([]string, len(pi.Definitions)) for i := range pi.Definitions { partNames[i] = pi.Definitions[i].Name.L } - meta.Partition.Type = model.PartitionTypeNone + meta.Partition.Type = pmodel.PartitionTypeNone partInfo, err := BuildAddedPartitionInfo(ctx.GetExprCtx(), meta, newSpec) if err != nil { return errors.Trace(err) @@ -2626,7 +2627,7 @@ func checkReorgPartitionDefs(ctx sessionctx.Context, action model.ActionType, tb return errors.Trace(err) } if action == model.ActionReorganizePartition { - if pi.Type == model.PartitionTypeRange { + if pi.Type == pmodel.PartitionTypeRange { if lastPartIdx == len(pi.Definitions)-1 { // Last partition dropped, OK to change the end range // Also includes MAXVALUE @@ -2694,7 +2695,7 @@ func (e *executor) CoalescePartitions(sctx sessionctx.Context, ident ast.Ident, } switch pi.Type { - case model.PartitionTypeHash, model.PartitionTypeKey: 
+ case pmodel.PartitionTypeHash, pmodel.PartitionTypeKey: return e.hashPartitionManagement(sctx, ident, spec, pi) // Coalesce partition can only be used on hash/key partitions. @@ -2705,7 +2706,7 @@ func (e *executor) CoalescePartitions(sctx sessionctx.Context, ident ast.Ident, func (e *executor) hashPartitionManagement(sctx sessionctx.Context, ident ast.Ident, spec *ast.AlterTableSpec, pi *model.PartitionInfo) error { newSpec := *spec - newSpec.PartitionNames = make([]model.CIStr, len(pi.Definitions)) + newSpec.PartitionNames = make([]pmodel.CIStr, len(pi.Definitions)) for i := 0; i < len(pi.Definitions); i++ { // reorganize ALL partitions into the new number of partitions newSpec.PartitionNames[i] = pi.Definitions[i].Name @@ -3203,7 +3204,7 @@ func checkIsDroppableColumn(ctx sessionctx.Context, is infoschema.InfoSchema, sc } // checkDropColumnWithPartitionConstraint is used to check the partition constraint of the drop column. -func checkDropColumnWithPartitionConstraint(t table.Table, colName model.CIStr) error { +func checkDropColumnWithPartitionConstraint(t table.Table, colName pmodel.CIStr) error { if t.Meta().Partition == nil { return nil } @@ -3269,7 +3270,7 @@ func checkModifyCharsetAndCollation(toCharset, toCollate, origCharset, origColla return nil } -func (e *executor) getModifiableColumnJob(ctx context.Context, sctx sessionctx.Context, ident ast.Ident, originalColName model.CIStr, +func (e *executor) getModifiableColumnJob(ctx context.Context, sctx sessionctx.Context, ident ast.Ident, originalColName pmodel.CIStr, spec *ast.AlterTableSpec) (*model.Job, error) { is := e.infoCache.GetLatest() schema, ok := is.SchemaByName(ident.Schema) @@ -3509,8 +3510,8 @@ func (e *executor) AlterTableAutoIDCache(ctx sessionctx.Context, ident ast.Ident return errors.Trace(err) } tbInfo := tb.Meta() - if (newCache == 1 && tbInfo.AutoIdCache != 1) || - (newCache != 1 && tbInfo.AutoIdCache == 1) { + if (newCache == 1 && tbInfo.AutoIDCache != 1) || + (newCache != 1 && 
tbInfo.AutoIDCache == 1) { return fmt.Errorf("Can't Alter AUTO_ID_CACHE between 1 and non-1, the underlying implementation is different") } @@ -3519,7 +3520,7 @@ func (e *executor) AlterTableAutoIDCache(ctx sessionctx.Context, ident ast.Ident TableID: tb.Meta().ID, SchemaName: schema.Name.L, TableName: tb.Meta().Name.L, - Type: model.ActionModifyTableAutoIdCache, + Type: model.ActionModifyTableAutoIDCache, BinlogInfo: &model.HistoryInfo{}, Args: []any{newCache}, CDCWriteSource: ctx.GetSessionVars().CDCWriteSource, @@ -3597,7 +3598,7 @@ func shouldModifyTiFlashReplica(tbReplicaInfo *model.TiFlashReplicaInfo, replica } // addHypoTiFlashReplicaIntoCtx adds this hypothetical tiflash replica into this ctx. -func (*executor) setHypoTiFlashReplica(ctx sessionctx.Context, schemaName, tableName model.CIStr, replicaInfo *ast.TiFlashReplicaSpec) error { +func (*executor) setHypoTiFlashReplica(ctx sessionctx.Context, schemaName, tableName pmodel.CIStr, replicaInfo *ast.TiFlashReplicaSpec) error { sctx := ctx.GetSessionVars() if sctx.HypoTiFlashReplicas == nil { sctx.HypoTiFlashReplicas = make(map[string]map[string]struct{}) @@ -3746,7 +3747,7 @@ func (e *executor) AlterTableRemoveTTL(ctx sessionctx.Context, ident ast.Ident) return nil } -func isTableTiFlashSupported(dbName model.CIStr, tbl *model.TableInfo) error { +func isTableTiFlashSupported(dbName pmodel.CIStr, tbl *model.TableInfo) error { // Memory tables and system tables are not supported by TiFlash if util.IsMemOrSysDB(dbName.L) { return errors.Trace(dbterror.ErrUnsupportedTiFlashOperationForSysOrMemTable) @@ -4274,12 +4275,12 @@ func (e *executor) renameTable(ctx sessionctx.Context, oldIdent, newIdent ast.Id func (e *executor) renameTables(ctx sessionctx.Context, oldIdents, newIdents []ast.Ident, isAlterTable bool) error { is := e.infoCache.GetLatest() - oldTableNames := make([]*model.CIStr, 0, len(oldIdents)) - tableNames := make([]*model.CIStr, 0, len(oldIdents)) + oldTableNames := make([]*pmodel.CIStr, 0, 
len(oldIdents)) + tableNames := make([]*pmodel.CIStr, 0, len(oldIdents)) oldSchemaIDs := make([]int64, 0, len(oldIdents)) newSchemaIDs := make([]int64, 0, len(oldIdents)) tableIDs := make([]int64, 0, len(oldIdents)) - oldSchemaNames := make([]*model.CIStr, 0, len(oldIdents)) + oldSchemaNames := make([]*pmodel.CIStr, 0, len(oldIdents)) involveSchemaInfo := make([]model.InvolvingSchemaInfo, 0, len(oldIdents)*2) var schemas []*model.DBInfo @@ -4413,7 +4414,7 @@ func getIdentKey(ident ast.Ident) string { } // GetName4AnonymousIndex returns a valid name for anonymous index. -func GetName4AnonymousIndex(t table.Table, colName model.CIStr, idxName model.CIStr) model.CIStr { +func GetName4AnonymousIndex(t table.Table, colName pmodel.CIStr, idxName pmodel.CIStr) pmodel.CIStr { // `id` is used to indicated the index name's suffix. id := 2 l := len(t.Indices()) @@ -4424,14 +4425,14 @@ func GetName4AnonymousIndex(t table.Table, colName model.CIStr, idxName model.CI id = 3 } if strings.EqualFold(indexName.L, mysql.PrimaryKeyName) { - indexName = model.NewCIStr(fmt.Sprintf("%s_%d", colName.O, id)) + indexName = pmodel.NewCIStr(fmt.Sprintf("%s_%d", colName.O, id)) id = 3 } for i := 0; i < l; i++ { if t.Indices()[i].Meta().Name.L == indexName.L { - indexName = model.NewCIStr(fmt.Sprintf("%s_%d", colName.O, id)) + indexName = pmodel.NewCIStr(fmt.Sprintf("%s_%d", colName.O, id)) if err := checkTooLongIndex(indexName); err != nil { - indexName = GetName4AnonymousIndex(t, model.NewCIStr(colName.O[:30]), model.NewCIStr(fmt.Sprintf("%s_%d", colName.O[:30], 2))) + indexName = GetName4AnonymousIndex(t, pmodel.NewCIStr(colName.O[:30]), pmodel.NewCIStr(fmt.Sprintf("%s_%d", colName.O[:30], 2))) } i = -1 id++ @@ -4440,9 +4441,9 @@ func GetName4AnonymousIndex(t table.Table, colName model.CIStr, idxName model.CI return indexName } -func (e *executor) CreatePrimaryKey(ctx sessionctx.Context, ti ast.Ident, indexName model.CIStr, +func (e *executor) CreatePrimaryKey(ctx sessionctx.Context, ti 
ast.Ident, indexName pmodel.CIStr, indexPartSpecifications []*ast.IndexPartSpecification, indexOption *ast.IndexOption) error { - if indexOption != nil && indexOption.PrimaryKeyTp == model.PrimaryKeyTypeClustered { + if indexOption != nil && indexOption.PrimaryKeyTp == pmodel.PrimaryKeyTypeClustered { return dbterror.ErrUnsupportedModifyPrimaryKey.GenWithStack("Adding clustered primary key is not supported. " + "Please consider adding NONCLUSTERED primary key instead") } @@ -4455,7 +4456,7 @@ func (e *executor) CreatePrimaryKey(ctx sessionctx.Context, ti ast.Ident, indexN return dbterror.ErrTooLongIdent.GenWithStackByArgs(mysql.PrimaryKeyName) } - indexName = model.NewCIStr(mysql.PrimaryKeyName) + indexName = pmodel.NewCIStr(mysql.PrimaryKeyName) if indexInfo := t.Meta().FindIndexByName(indexName.L); indexInfo != nil || // If the table's PKIsHandle is true, it also means that this table has a primary key. t.Meta().PKIsHandle { @@ -4539,12 +4540,12 @@ func (e *executor) CreatePrimaryKey(ctx sessionctx.Context, ti ast.Ident, indexN func (e *executor) CreateIndex(ctx sessionctx.Context, stmt *ast.CreateIndexStmt) error { ident := ast.Ident{Schema: stmt.Table.Schema, Name: stmt.Table.Name} - return e.createIndex(ctx, ident, stmt.KeyType, model.NewCIStr(stmt.IndexName), + return e.createIndex(ctx, ident, stmt.KeyType, pmodel.NewCIStr(stmt.IndexName), stmt.IndexPartSpecifications, stmt.IndexOption, stmt.IfNotExists) } // addHypoIndexIntoCtx adds this index as a hypo-index into this ctx. 
-func (*executor) addHypoIndexIntoCtx(ctx sessionctx.Context, schemaName, tableName model.CIStr, indexInfo *model.IndexInfo) error { +func (*executor) addHypoIndexIntoCtx(ctx sessionctx.Context, schemaName, tableName pmodel.CIStr, indexInfo *model.IndexInfo) error { sctx := ctx.GetSessionVars() indexName := indexInfo.Name @@ -4565,7 +4566,7 @@ func (*executor) addHypoIndexIntoCtx(ctx sessionctx.Context, schemaName, tableNa return nil } -func (e *executor) createIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.IndexKeyType, indexName model.CIStr, +func (e *executor) createIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.IndexKeyType, indexName pmodel.CIStr, indexPartSpecifications []*ast.IndexPartSpecification, indexOption *ast.IndexOption, ifNotExists bool) error { // not support Spatial and FullText index if keyType == ast.IndexKeyTypeFullText || keyType == ast.IndexKeyTypeSpatial { @@ -4582,11 +4583,11 @@ func (e *executor) createIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast } // Deal with anonymous index. 
if len(indexName.L) == 0 { - colName := model.NewCIStr("expression_index") + colName := pmodel.NewCIStr("expression_index") if indexPartSpecifications[0].Column != nil { colName = indexPartSpecifications[0].Column.Name } - indexName = GetName4AnonymousIndex(t, colName, model.NewCIStr("")) + indexName = GetName4AnonymousIndex(t, colName, pmodel.NewCIStr("")) } if indexInfo := t.Meta().FindIndexByName(indexName.L); indexInfo != nil { @@ -4673,7 +4674,7 @@ func (e *executor) createIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast } } - if indexOption != nil && indexOption.Tp == model.IndexTypeHypo { // for hypo-index + if indexOption != nil && indexOption.Tp == pmodel.IndexTypeHypo { // for hypo-index indexInfo, err := BuildIndexInfo(ctx, tblInfo.Columns, indexName, false, unique, indexPartSpecifications, indexOption, model.StatePublic) if err != nil { @@ -4759,7 +4760,7 @@ func newReorgMetaFromVariables(job *model.Job, sctx sessionctx.Context) (*model. // LastReorgMetaFastReorgDisabled is used for test. 
var LastReorgMetaFastReorgDisabled bool -func buildFKInfo(fkName model.CIStr, keys []*ast.IndexPartSpecification, refer *ast.ReferenceDef, cols []*table.Column) (*model.FKInfo, error) { +func buildFKInfo(fkName pmodel.CIStr, keys []*ast.IndexPartSpecification, refer *ast.ReferenceDef, cols []*table.Column) (*model.FKInfo, error) { if len(keys) != len(refer.IndexPartSpecifications) { return nil, infoschema.ErrForeignKeyNotMatch.GenWithStackByArgs(fkName, "Key reference and table reference don't match") } @@ -4787,7 +4788,7 @@ func buildFKInfo(fkName model.CIStr, keys []*ast.IndexPartSpecification, refer * Name: fkName, RefSchema: refer.Table.Schema, RefTable: refer.Table.Name, - Cols: make([]model.CIStr, len(keys)), + Cols: make([]pmodel.CIStr, len(keys)), } if variable.EnableForeignKey.Load() { fkInfo.Version = model.FKVersion1 @@ -4808,12 +4809,12 @@ func buildFKInfo(fkName model.CIStr, keys []*ast.IndexPartSpecification, refer * // Check wrong reference options of foreign key on stored generated columns switch refer.OnUpdate.ReferOpt { - case model.ReferOptionCascade, model.ReferOptionSetNull, model.ReferOptionSetDefault: + case pmodel.ReferOptionCascade, pmodel.ReferOptionSetNull, pmodel.ReferOptionSetDefault: //nolint: gosec return nil, dbterror.ErrWrongFKOptionForGeneratedColumn.GenWithStackByArgs("ON UPDATE " + refer.OnUpdate.ReferOpt.String()) } switch refer.OnDelete.ReferOpt { - case model.ReferOptionSetNull, model.ReferOptionSetDefault: + case pmodel.ReferOptionSetNull, pmodel.ReferOptionSetDefault: //nolint: gosec return nil, dbterror.ErrWrongFKOptionForGeneratedColumn.GenWithStackByArgs("ON DELETE " + refer.OnDelete.ReferOpt.String()) } @@ -4822,11 +4823,11 @@ func buildFKInfo(fkName model.CIStr, keys []*ast.IndexPartSpecification, refer * // Check wrong reference options of foreign key on base columns of stored generated columns if _, ok := baseCols[col.Name.L]; ok { switch refer.OnUpdate.ReferOpt { - case model.ReferOptionCascade, 
model.ReferOptionSetNull, model.ReferOptionSetDefault: + case pmodel.ReferOptionCascade, pmodel.ReferOptionSetNull, pmodel.ReferOptionSetDefault: return nil, infoschema.ErrCannotAddForeign } switch refer.OnDelete.ReferOpt { - case model.ReferOptionCascade, model.ReferOptionSetNull, model.ReferOptionSetDefault: + case pmodel.ReferOptionCascade, pmodel.ReferOptionSetNull, pmodel.ReferOptionSetDefault: return nil, infoschema.ErrCannotAddForeign } } @@ -4835,13 +4836,13 @@ func buildFKInfo(fkName model.CIStr, keys []*ast.IndexPartSpecification, refer * if col == nil { return nil, dbterror.ErrKeyColumnDoesNotExits.GenWithStackByArgs(key.Column.Name) } - if mysql.HasNotNullFlag(col.GetFlag()) && (refer.OnDelete.ReferOpt == model.ReferOptionSetNull || refer.OnUpdate.ReferOpt == model.ReferOptionSetNull) { + if mysql.HasNotNullFlag(col.GetFlag()) && (refer.OnDelete.ReferOpt == pmodel.ReferOptionSetNull || refer.OnUpdate.ReferOpt == pmodel.ReferOptionSetNull) { return nil, infoschema.ErrForeignKeyColumnNotNull.GenWithStackByArgs(col.Name.O, fkName) } fkInfo.Cols[i] = key.Column.Name } - fkInfo.RefCols = make([]model.CIStr, len(refer.IndexPartSpecifications)) + fkInfo.RefCols = make([]pmodel.CIStr, len(refer.IndexPartSpecifications)) for i, key := range refer.IndexPartSpecifications { if err := checkTooLongColumn(key.Column.Name); err != nil { return nil, err @@ -4855,7 +4856,7 @@ func buildFKInfo(fkName model.CIStr, keys []*ast.IndexPartSpecification, refer * return fkInfo, nil } -func (e *executor) CreateForeignKey(ctx sessionctx.Context, ti ast.Ident, fkName model.CIStr, keys []*ast.IndexPartSpecification, refer *ast.ReferenceDef) error { +func (e *executor) CreateForeignKey(ctx sessionctx.Context, ti ast.Ident, fkName pmodel.CIStr, keys []*ast.IndexPartSpecification, refer *ast.ReferenceDef) error { is := e.infoCache.GetLatest() schema, ok := is.SchemaByName(ti.Schema) if !ok { @@ -4871,7 +4872,7 @@ func (e *executor) CreateForeignKey(ctx sessionctx.Context, ti 
ast.Ident, fkName } if fkName.L == "" { - fkName = model.NewCIStr(fmt.Sprintf("fk_%d", t.Meta().MaxForeignKeyID+1)) + fkName = pmodel.NewCIStr(fmt.Sprintf("fk_%d", t.Meta().MaxForeignKeyID+1)) } err = checkFKDupName(t.Meta(), fkName) if err != nil { @@ -4932,7 +4933,7 @@ func (e *executor) CreateForeignKey(ctx sessionctx.Context, ti ast.Ident, fkName return errors.Trace(err) } -func (e *executor) DropForeignKey(ctx sessionctx.Context, ti ast.Ident, fkName model.CIStr) error { +func (e *executor) DropForeignKey(ctx sessionctx.Context, ti ast.Ident, fkName pmodel.CIStr) error { is := e.infoCache.GetLatest() schema, ok := is.SchemaByName(ti.Schema) if !ok { @@ -4963,7 +4964,7 @@ func (e *executor) DropForeignKey(ctx sessionctx.Context, ti ast.Ident, fkName m func (e *executor) DropIndex(ctx sessionctx.Context, stmt *ast.DropIndexStmt) error { ti := ast.Ident{Schema: stmt.Table.Schema, Name: stmt.Table.Name} - err := e.dropIndex(ctx, ti, model.NewCIStr(stmt.IndexName), stmt.IfExists, stmt.IsHypo) + err := e.dropIndex(ctx, ti, pmodel.NewCIStr(stmt.IndexName), stmt.IfExists, stmt.IsHypo) if (infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableNotExists.Equal(err)) && stmt.IfExists { err = nil } @@ -4971,7 +4972,7 @@ func (e *executor) DropIndex(ctx sessionctx.Context, stmt *ast.DropIndexStmt) er } // dropHypoIndexFromCtx drops this hypo-index from this ctx. -func (*executor) dropHypoIndexFromCtx(ctx sessionctx.Context, schema, table, index model.CIStr, ifExists bool) error { +func (*executor) dropHypoIndexFromCtx(ctx sessionctx.Context, schema, table, index pmodel.CIStr, ifExists bool) error { sctx := ctx.GetSessionVars() if sctx.HypoIndexes != nil && sctx.HypoIndexes[schema.L] != nil && @@ -4988,7 +4989,7 @@ func (*executor) dropHypoIndexFromCtx(ctx sessionctx.Context, schema, table, ind // dropIndex drops the specified index. // isHypo is used to indicate whether this operation is for a hypo-index. 
-func (e *executor) dropIndex(ctx sessionctx.Context, ti ast.Ident, indexName model.CIStr, ifExists, isHypo bool) error { +func (e *executor) dropIndex(ctx sessionctx.Context, ti ast.Ident, indexName pmodel.CIStr, ifExists, isHypo bool) error { is := e.infoCache.GetLatest() schema, ok := is.SchemaByName(ti.Schema) if !ok { @@ -5054,7 +5055,7 @@ func (e *executor) dropIndex(ctx sessionctx.Context, ti ast.Ident, indexName mod } // CheckIsDropPrimaryKey checks if we will drop PK, there are many PK implementations so we provide a helper function. -func CheckIsDropPrimaryKey(indexName model.CIStr, indexInfo *model.IndexInfo, t table.Table) (bool, error) { +func CheckIsDropPrimaryKey(indexName pmodel.CIStr, indexInfo *model.IndexInfo, t table.Table) (bool, error) { var isPK bool if indexName.L == strings.ToLower(mysql.PrimaryKeyName) && // Before we fixed #14243, there might be a general index named `primary` but not a primary key. @@ -5108,9 +5109,9 @@ func validateCommentLength(ec errctx.Context, sqlMode mysql.SQLMode, name string func BuildAddedPartitionInfo(ctx expression.BuildContext, meta *model.TableInfo, spec *ast.AlterTableSpec) (*model.PartitionInfo, error) { numParts := uint64(0) switch meta.Partition.Type { - case model.PartitionTypeNone: + case pmodel.PartitionTypeNone: // OK - case model.PartitionTypeList: + case pmodel.PartitionTypeList: if len(spec.PartDefinitions) == 0 { return nil, ast.ErrPartitionsMustBeDefined.GenWithStackByArgs(meta.Partition.Type) } @@ -5119,7 +5120,7 @@ func BuildAddedPartitionInfo(ctx expression.BuildContext, meta *model.TableInfo, return nil, err } - case model.PartitionTypeRange: + case pmodel.PartitionTypeRange: if spec.Tp == ast.AlterTableAddLastPartition { err := buildAddedPartitionDefs(ctx, meta, spec) if err != nil { @@ -5131,7 +5132,7 @@ func BuildAddedPartitionInfo(ctx expression.BuildContext, meta *model.TableInfo, return nil, ast.ErrPartitionsMustBeDefined.GenWithStackByArgs(meta.Partition.Type) } } - case 
model.PartitionTypeHash, model.PartitionTypeKey: + case pmodel.PartitionTypeHash, pmodel.PartitionTypeKey: switch spec.Tp { case ast.AlterTableRemovePartitioning: numParts = 1 @@ -5542,7 +5543,7 @@ func (e *executor) DropSequence(ctx sessionctx.Context, stmt *ast.DropSequenceSt return e.dropTableObject(ctx, stmt.Sequences, stmt.IfExists, sequenceObject) } -func (e *executor) AlterIndexVisibility(ctx sessionctx.Context, ident ast.Ident, indexName model.CIStr, visibility ast.IndexVisibility) error { +func (e *executor) AlterIndexVisibility(ctx sessionctx.Context, ident ast.Ident, indexName pmodel.CIStr, visibility ast.IndexVisibility) error { schema, tb, err := e.getSchemaAndTableByIdent(ident) if err != nil { return err @@ -5662,7 +5663,7 @@ func (e *executor) AlterTablePartitionOptions(ctx sessionctx.Context, ident ast. switch op.Tp { case ast.TableOptionPlacementPolicy: policyRefInfo = &model.PolicyRefInfo{ - Name: model.NewCIStr(op.StrValue), + Name: pmodel.NewCIStr(op.StrValue), } default: return errors.Trace(errors.New("unknown partition option")) @@ -6080,7 +6081,7 @@ func (e *executor) AlterTableNoCache(ctx sessionctx.Context, ti ast.Ident) (err return e.DoDDLJob(ctx, job) } -func (e *executor) CreateCheckConstraint(ctx sessionctx.Context, ti ast.Ident, constrName model.CIStr, constr *ast.Constraint) error { +func (e *executor) CreateCheckConstraint(ctx sessionctx.Context, ti ast.Ident, constrName pmodel.CIStr, constr *ast.Constraint) error { schema, t, err := e.getSchemaAndTableByIdent(ti) if err != nil { return errors.Trace(err) @@ -6108,13 +6109,13 @@ func (e *executor) CreateCheckConstraint(ctx sessionctx.Context, ti ast.Ident, c } dependedColsMap := findDependentColsInExpr(constr.Expr) - dependedCols := make([]model.CIStr, 0, len(dependedColsMap)) + dependedCols := make([]pmodel.CIStr, 0, len(dependedColsMap)) for k := range dependedColsMap { if _, ok := existedColsMap[k]; !ok { // The table constraint depended on a non-existed column. 
return dbterror.ErrBadField.GenWithStackByArgs(k, "check constraint "+constr.Name+" expression") } - dependedCols = append(dependedCols, model.NewCIStr(k)) + dependedCols = append(dependedCols, pmodel.NewCIStr(k)) } // build constraint meta info. @@ -6153,7 +6154,7 @@ func (e *executor) CreateCheckConstraint(ctx sessionctx.Context, ti ast.Ident, c return errors.Trace(err) } -func (e *executor) DropCheckConstraint(ctx sessionctx.Context, ti ast.Ident, constrName model.CIStr) error { +func (e *executor) DropCheckConstraint(ctx sessionctx.Context, ti ast.Ident, constrName pmodel.CIStr) error { is := e.infoCache.GetLatest() schema, ok := is.SchemaByName(ti.Schema) if !ok { @@ -6186,7 +6187,7 @@ func (e *executor) DropCheckConstraint(ctx sessionctx.Context, ti ast.Ident, con return errors.Trace(err) } -func (e *executor) AlterCheckConstraint(ctx sessionctx.Context, ti ast.Ident, constrName model.CIStr, enforced bool) error { +func (e *executor) AlterCheckConstraint(ctx sessionctx.Context, ti ast.Ident, constrName pmodel.CIStr, enforced bool) error { is := e.infoCache.GetLatest() schema, ok := is.SchemaByName(ti.Schema) if !ok { diff --git a/pkg/ddl/executor_nokit_test.go b/pkg/ddl/executor_nokit_test.go index b6cff86b8b7b9..e1c5fb416b77b 100644 --- a/pkg/ddl/executor_nokit_test.go +++ b/pkg/ddl/executor_nokit_test.go @@ -20,7 +20,8 @@ import ( "strings" "testing" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) @@ -72,14 +73,14 @@ func TestMergeCreateTableJobsOfSameSchema(t *testing.T) { SchemaID: 1, Type: model.ActionCreateTable, BinlogInfo: &model.HistoryInfo{}, - Args: []any{&model.TableInfo{Name: model.CIStr{O: "t1", L: "t1"}}, false}, + Args: []any{&model.TableInfo{Name: pmodel.CIStr{O: "t1", L: "t1"}}, false}, Query: "create table db1.t1 (c1 int, c2 int)", }, false) job2 := NewJobWrapper(&model.Job{ SchemaID: 1, Type: 
model.ActionCreateTable, BinlogInfo: &model.HistoryInfo{}, - Args: []any{&model.TableInfo{Name: model.CIStr{O: "t2", L: "t2"}}, &model.TableInfo{}}, + Args: []any{&model.TableInfo{Name: pmodel.CIStr{O: "t2", L: "t2"}}, &model.TableInfo{}}, Query: "create table db1.t2 (c1 int, c2 int);", }, false) job, err := mergeCreateTableJobsOfSameSchema([]*JobWrapper{job1, job2}) @@ -101,10 +102,10 @@ func TestMergeCreateTableJobs(t *testing.T) { t.Run("non create table are not merged", func(t *testing.T) { jobWs := []*JobWrapper{ {Job: &model.Job{SchemaName: "db", Type: model.ActionCreateTable, - Args: []any{&model.TableInfo{Name: model.NewCIStr("t1")}, false}}}, + Args: []any{&model.TableInfo{Name: pmodel.NewCIStr("t1")}, false}}}, {Job: &model.Job{SchemaName: "db", Type: model.ActionAddColumn}}, {Job: &model.Job{SchemaName: "db", Type: model.ActionCreateTable, - Args: []any{&model.TableInfo{Name: model.NewCIStr("t2")}, false}}}, + Args: []any{&model.TableInfo{Name: pmodel.NewCIStr("t2")}, false}}}, } newWs, err := mergeCreateTableJobs(jobWs) require.NoError(t, err) @@ -122,9 +123,9 @@ func TestMergeCreateTableJobs(t *testing.T) { t.Run("jobs of pre allocated ids are not merged", func(t *testing.T) { jobWs := []*JobWrapper{ {Job: &model.Job{SchemaName: "db", Type: model.ActionCreateTable, - Args: []any{&model.TableInfo{Name: model.NewCIStr("t1")}, false}}, IDAllocated: true}, + Args: []any{&model.TableInfo{Name: pmodel.NewCIStr("t1")}, false}}, IDAllocated: true}, {Job: &model.Job{SchemaName: "db", Type: model.ActionCreateTable, - Args: []any{&model.TableInfo{Name: model.NewCIStr("t2")}, false}}}, + Args: []any{&model.TableInfo{Name: pmodel.NewCIStr("t2")}, false}}}, } newWs, err := mergeCreateTableJobs(jobWs) slices.SortFunc(newWs, func(a, b *JobWrapper) int { @@ -142,7 +143,7 @@ func TestMergeCreateTableJobs(t *testing.T) { {Job: &model.Job{SchemaName: "db", Type: model.ActionCreateTable, Args: []any{&model.TableInfo{ForeignKeys: []*model.FKInfo{{}}}, false}}}, {Job: 
&model.Job{SchemaName: "db", Type: model.ActionCreateTable, - Args: []any{&model.TableInfo{Name: model.NewCIStr("t2")}, false}}}, + Args: []any{&model.TableInfo{Name: pmodel.NewCIStr("t2")}, false}}}, } newWs, err := mergeCreateTableJobs(jobWs) slices.SortFunc(newWs, func(a, b *JobWrapper) int { @@ -158,9 +159,9 @@ func TestMergeCreateTableJobs(t *testing.T) { t.Run("jobs of different schema are not merged", func(t *testing.T) { jobWs := []*JobWrapper{ {Job: &model.Job{SchemaName: "db1", Type: model.ActionCreateTable, - Args: []any{&model.TableInfo{Name: model.NewCIStr("t1")}, false}}}, + Args: []any{&model.TableInfo{Name: pmodel.NewCIStr("t1")}, false}}}, {Job: &model.Job{SchemaName: "db2", Type: model.ActionCreateTable, - Args: []any{&model.TableInfo{Name: model.NewCIStr("t2")}, false}}}, + Args: []any{&model.TableInfo{Name: pmodel.NewCIStr("t2")}, false}}}, } newWs, err := mergeCreateTableJobs(jobWs) slices.SortFunc(newWs, func(a, b *JobWrapper) int { @@ -183,12 +184,12 @@ func TestMergeCreateTableJobs(t *testing.T) { for i := 0; i < cnt; i++ { tblName := fmt.Sprintf("t%d", i) jobWs = append(jobWs, NewJobWrapper(&model.Job{SchemaName: db, Type: model.ActionCreateTable, - Args: []any{&model.TableInfo{Name: model.NewCIStr(tblName)}, false}}, false)) + Args: []any{&model.TableInfo{Name: pmodel.NewCIStr(tblName)}, false}}, false)) } } jobWs = append(jobWs, NewJobWrapper(&model.Job{SchemaName: "dbx", Type: model.ActionAddColumn}, false)) jobWs = append(jobWs, NewJobWrapper(&model.Job{SchemaName: "dbxx", Type: model.ActionCreateTable, - Args: []any{&model.TableInfo{Name: model.NewCIStr("t1")}, false}}, true)) + Args: []any{&model.TableInfo{Name: pmodel.NewCIStr("t1")}, false}}, true)) jobWs = append(jobWs, NewJobWrapper(&model.Job{SchemaName: "dbxxx", Type: model.ActionCreateTable, Args: []any{&model.TableInfo{ForeignKeys: []*model.FKInfo{{}}}, false}}, false)) newWs, err := mergeCreateTableJobs(jobWs) diff --git a/pkg/ddl/executor_test.go b/pkg/ddl/executor_test.go 
index 16f7aa1ecff05..33f9db782ce2c 100644 --- a/pkg/ddl/executor_test.go +++ b/pkg/ddl/executor_test.go @@ -26,7 +26,8 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" sessiontypes "github.com/pingcap/tidb/pkg/session/types" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/testkit" @@ -259,7 +260,7 @@ func TestHandleLockTable(t *testing.T) { se := tk.Session().(sessionctx.Context) require.False(t, se.HasLockedTables()) - checkTableLocked := func(tblID int64, tp model.TableLockType) { + checkTableLocked := func(tblID int64, tp pmodel.TableLockType) { locked, lockType := se.CheckTableLocked(tblID) require.True(t, locked) require.Equal(t, tp, lockType) @@ -282,28 +283,28 @@ func TestHandleLockTable(t *testing.T) { t.Run("ddl success", func(t *testing.T) { se.ReleaseAllTableLocks() require.False(t, se.HasLockedTables()) - se.AddTableLock([]model.TableLockTpInfo{{SchemaID: 1, TableID: 1, Tp: model.TableLockRead}}) + se.AddTableLock([]model.TableLockTpInfo{{SchemaID: 1, TableID: 1, Tp: pmodel.TableLockRead}}) ddl.HandleLockTablesOnSuccessSubmit(tk.Session(), jobW) require.Len(t, se.GetAllTableLocks(), 2) - checkTableLocked(1, model.TableLockRead) - checkTableLocked(2, model.TableLockRead) + checkTableLocked(1, pmodel.TableLockRead) + checkTableLocked(2, pmodel.TableLockRead) ddl.HandleLockTablesOnFinish(se, jobW, nil) require.Len(t, se.GetAllTableLocks(), 1) - checkTableLocked(2, model.TableLockRead) + checkTableLocked(2, pmodel.TableLockRead) }) t.Run("ddl fail", func(t *testing.T) { se.ReleaseAllTableLocks() require.False(t, se.HasLockedTables()) - se.AddTableLock([]model.TableLockTpInfo{{SchemaID: 1, TableID: 1, Tp: model.TableLockRead}}) + se.AddTableLock([]model.TableLockTpInfo{{SchemaID: 1, TableID: 1, Tp: pmodel.TableLockRead}}) 
ddl.HandleLockTablesOnSuccessSubmit(tk.Session(), jobW) require.Len(t, se.GetAllTableLocks(), 2) - checkTableLocked(1, model.TableLockRead) - checkTableLocked(2, model.TableLockRead) + checkTableLocked(1, pmodel.TableLockRead) + checkTableLocked(2, pmodel.TableLockRead) ddl.HandleLockTablesOnFinish(se, jobW, errors.New("test error")) require.Len(t, se.GetAllTableLocks(), 1) - checkTableLocked(1, model.TableLockRead) + checkTableLocked(1, pmodel.TableLockRead) }) } diff --git a/pkg/ddl/fail_test.go b/pkg/ddl/fail_test.go index e961aadcaff07..2781b974247a3 100644 --- a/pkg/ddl/fail_test.go +++ b/pkg/ddl/fail_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" "github.com/stretchr/testify/require" diff --git a/pkg/ddl/foreign_key.go b/pkg/ddl/foreign_key.go index 6317e57a8b039..65a5f01c3ae24 100644 --- a/pkg/ddl/foreign_key.go +++ b/pkg/ddl/foreign_key.go @@ -22,8 +22,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -98,7 +99,7 @@ func onDropForeignKey(jobCtx *jobContext, t *meta.Meta, job *model.Job) (ver int return ver, errors.Trace(err) } - var fkName model.CIStr + var fkName pmodel.CIStr err = job.DecodeArgs(&fkName) if err != nil { job.State = model.JobStateCancelled @@ -107,7 +108,7 @@ func onDropForeignKey(jobCtx *jobContext, t *meta.Meta, job *model.Job) (ver int return dropForeignKey(jobCtx, t, job, tblInfo, fkName) } -func dropForeignKey(jobCtx *jobContext, t *meta.Meta, job 
*model.Job, tblInfo *model.TableInfo, fkName model.CIStr) (ver int64, err error) { +func dropForeignKey(jobCtx *jobContext, t *meta.Meta, job *model.Job, tblInfo *model.TableInfo, fkName pmodel.CIStr) (ver int64, err error) { var fkInfo *model.FKInfo for _, fk := range tblInfo.ForeignKeys { if fk.Name.L == fkName.L { @@ -424,7 +425,7 @@ func checkDropTableHasForeignKeyReferredInOwner(infoCache *infoschema.InfoCache, } func checkTruncateTableHasForeignKeyReferredInOwner(infoCache *infoschema.InfoCache, job *model.Job, tblInfo *model.TableInfo, fkCheck bool) error { - referredFK, err := checkTableHasForeignKeyReferredInOwner(infoCache, job.SchemaName, job.TableName, []ast.Ident{{Name: tblInfo.Name, Schema: model.NewCIStr(job.SchemaName)}}, fkCheck) + referredFK, err := checkTableHasForeignKeyReferredInOwner(infoCache, job.SchemaName, job.TableName, []ast.Ident{{Name: tblInfo.Name, Schema: pmodel.NewCIStr(job.SchemaName)}}, fkCheck) if err != nil { return err } @@ -457,7 +458,7 @@ func checkIndexNeededInForeignKey(is infoschema.InfoSchema, dbName string, tbInf } remainIdxs = append(remainIdxs, idx) } - checkFn := func(cols []model.CIStr) error { + checkFn := func(cols []pmodel.CIStr) error { if !model.IsIndexPrefixCovered(tbInfo, idxInfo, cols...) 
{ return nil } @@ -563,7 +564,7 @@ func (h *foreignKeyHelper) getLoadedTables() []schemaIDAndTableInfo { return tableList } -func (h *foreignKeyHelper) getTableFromStorage(is infoschema.InfoSchema, t *meta.Meta, schema, table model.CIStr) (result schemaIDAndTableInfo, _ error) { +func (h *foreignKeyHelper) getTableFromStorage(is infoschema.InfoSchema, t *meta.Meta, schema, table pmodel.CIStr) (result schemaIDAndTableInfo, _ error) { k := schemaAndTable{schema: schema.L, table: table.L} if info, ok := h.loaded[k]; ok { return info, nil @@ -585,7 +586,7 @@ func (h *foreignKeyHelper) getTableFromStorage(is infoschema.InfoSchema, t *meta return result, nil } -func checkDatabaseHasForeignKeyReferred(ctx context.Context, is infoschema.InfoSchema, schema model.CIStr, fkCheck bool) error { +func checkDatabaseHasForeignKeyReferred(ctx context.Context, is infoschema.InfoSchema, schema pmodel.CIStr, fkCheck bool) error { if !fkCheck { return nil } @@ -619,14 +620,14 @@ func checkDatabaseHasForeignKeyReferredInOwner(jobCtx *jobContext, job *model.Jo return nil } is := jobCtx.infoCache.GetLatest() - err = checkDatabaseHasForeignKeyReferred(jobCtx.ctx, is, model.NewCIStr(job.SchemaName), fkCheck) + err = checkDatabaseHasForeignKeyReferred(jobCtx.ctx, is, pmodel.NewCIStr(job.SchemaName), fkCheck) if err != nil { job.State = model.JobStateCancelled } return errors.Trace(err) } -func checkFKDupName(tbInfo *model.TableInfo, fkName model.CIStr) error { +func checkFKDupName(tbInfo *model.TableInfo, fkName pmodel.CIStr) error { for _, fkInfo := range tbInfo.ForeignKeys { if fkName.L == fkInfo.Name.L { return dbterror.ErrFkDupName.GenWithStackByArgs(fkName.O) diff --git a/pkg/ddl/foreign_key_test.go b/pkg/ddl/foreign_key_test.go index 5441e3df1da62..2a7d2227e6df2 100644 --- a/pkg/ddl/foreign_key_test.go +++ b/pkg/ddl/foreign_key_test.go @@ -23,7 +23,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/ddl" - "github.com/pingcap/tidb/pkg/parser/model" + 
"github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/table" @@ -33,17 +34,17 @@ import ( "github.com/stretchr/testify/require" ) -func testCreateForeignKey(t *testing.T, d ddl.ExecutorForTest, ctx sessionctx.Context, dbInfo *model.DBInfo, tblInfo *model.TableInfo, fkName string, keys []string, refTable string, refKeys []string, onDelete model.ReferOptionType, onUpdate model.ReferOptionType) *model.Job { - FKName := model.NewCIStr(fkName) - Keys := make([]model.CIStr, len(keys)) +func testCreateForeignKey(t *testing.T, d ddl.ExecutorForTest, ctx sessionctx.Context, dbInfo *model.DBInfo, tblInfo *model.TableInfo, fkName string, keys []string, refTable string, refKeys []string, onDelete pmodel.ReferOptionType, onUpdate pmodel.ReferOptionType) *model.Job { + FKName := pmodel.NewCIStr(fkName) + Keys := make([]pmodel.CIStr, len(keys)) for i, key := range keys { - Keys[i] = model.NewCIStr(key) + Keys[i] = pmodel.NewCIStr(key) } - RefTable := model.NewCIStr(refTable) - RefKeys := make([]model.CIStr, len(refKeys)) + RefTable := pmodel.NewCIStr(refTable) + RefKeys := make([]pmodel.CIStr, len(refKeys)) for i, key := range refKeys { - RefKeys[i] = model.NewCIStr(key) + RefKeys[i] = pmodel.NewCIStr(key) } fkInfo := &model.FKInfo{ @@ -81,7 +82,7 @@ func testDropForeignKey(t *testing.T, ctx sessionctx.Context, d ddl.ExecutorForT TableName: tblInfo.Name.L, Type: model.ActionDropForeignKey, BinlogInfo: &model.HistoryInfo{}, - Args: []any{model.NewCIStr(foreignKeyName)}, + Args: []any{pmodel.NewCIStr(foreignKeyName)}, } ctx.SetValue(sessionctx.QueryString, "skip") err := d.DoDDLJobWrapper(ctx, ddl.NewJobWrapper(job, true)) @@ -115,10 +116,10 @@ func TestForeignKey(t *testing.T) { require.NoError(t, err) tblInfo.Indices = append(tblInfo.Indices, &model.IndexInfo{ ID: 1, - Name: model.NewCIStr("idx_fk"), - Table: model.NewCIStr("t"), + 
Name: pmodel.NewCIStr("idx_fk"), + Table: pmodel.NewCIStr("t"), Columns: []*model.IndexColumn{{ - Name: model.NewCIStr("c1"), + Name: pmodel.NewCIStr("c1"), Offset: 0, Length: types.UnspecifiedLength, }}, @@ -151,7 +152,7 @@ func TestForeignKey(t *testing.T) { }) ctx := testkit.NewTestKit(t, store).Session() - job := testCreateForeignKey(t, de, ctx, dbInfo, tblInfo, "c1_fk", []string{"c1"}, "t2", []string{"c1"}, model.ReferOptionCascade, model.ReferOptionSetNull) + job := testCreateForeignKey(t, de, ctx, dbInfo, tblInfo, "c1_fk", []string{"c1"}, "t2", []string{"c1"}, pmodel.ReferOptionCascade, pmodel.ReferOptionSetNull) testCheckJobDone(t, store, job.ID, true) require.NoError(t, err) mu.Lock() diff --git a/pkg/ddl/generated_column.go b/pkg/ddl/generated_column.go index 505db773e0956..1f2e06484e1d3 100644 --- a/pkg/ddl/generated_column.go +++ b/pkg/ddl/generated_column.go @@ -21,8 +21,9 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" @@ -122,7 +123,7 @@ func findPositionRelativeColumn(cols []*table.Column, pos *ast.ColumnPosition) ( // findDependedColumnNames returns a set of string, which indicates // the names of the columns that are depended by colDef. 
-func findDependedColumnNames(schemaName model.CIStr, tableName model.CIStr, colDef *ast.ColumnDef) (generated bool, colsMap map[string]struct{}, err error) { +func findDependedColumnNames(schemaName pmodel.CIStr, tableName pmodel.CIStr, colDef *ast.ColumnDef) (generated bool, colsMap map[string]struct{}, err error) { colsMap = make(map[string]struct{}) for _, option := range colDef.Options { if option.Tp == ast.ColumnOptionGenerated { @@ -151,7 +152,7 @@ func FindColumnNamesInExpr(expr ast.ExprNode) []*ast.ColumnName { } // hasDependentByGeneratedColumn checks whether there are other columns depend on this column or not. -func hasDependentByGeneratedColumn(tblInfo *model.TableInfo, colName model.CIStr) (bool, string, bool) { +func hasDependentByGeneratedColumn(tblInfo *model.TableInfo, colName pmodel.CIStr) (bool, string, bool) { for _, col := range tblInfo.Columns { for dep := range col.Dependences { if dep == colName.L { @@ -197,7 +198,7 @@ func (c *generatedColumnChecker) Leave(inNode ast.Node) (node ast.Node, ok bool) // 3. check if the modified expr contains non-deterministic functions // 4. check whether new column refers to any auto-increment columns. // 5. check if the new column is indexed or stored -func checkModifyGeneratedColumn(sctx sessionctx.Context, schemaName model.CIStr, tbl table.Table, oldCol, newCol *table.Column, newColDef *ast.ColumnDef, pos *ast.ColumnPosition) error { +func checkModifyGeneratedColumn(sctx sessionctx.Context, schemaName pmodel.CIStr, tbl table.Table, oldCol, newCol *table.Column, newColDef *ast.ColumnDef, pos *ast.ColumnPosition) error { // rule 1. 
oldColIsStored := !oldCol.IsGenerated() || oldCol.GeneratedStored newColIsStored := !newCol.IsGenerated() || newCol.GeneratedStored diff --git a/pkg/ddl/index.go b/pkg/ddl/index.go index ade5a5b9fa4f2..34e7861e4fed1 100644 --- a/pkg/ddl/index.go +++ b/pkg/ddl/index.go @@ -46,10 +46,11 @@ import ( "github.com/pingcap/tidb/pkg/lightning/backend" litconfig "github.com/pingcap/tidb/pkg/lightning/config" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" @@ -325,7 +326,7 @@ func calcBytesLengthForDecimal(m int) int { func BuildIndexInfo( ctx sessionctx.Context, allTableColumns []*model.ColumnInfo, - indexName model.CIStr, + indexName pmodel.CIStr, isPrimary bool, isUnique bool, indexPartSpecifications []*ast.IndexPartSpecification, @@ -356,16 +357,16 @@ func BuildIndexInfo( if indexOption.Visibility == ast.IndexVisibilityInvisible { idxInfo.Invisible = true } - if indexOption.Tp == model.IndexTypeInvalid { + if indexOption.Tp == pmodel.IndexTypeInvalid { // Use btree as default index type. - idxInfo.Tp = model.IndexTypeBtree + idxInfo.Tp = pmodel.IndexTypeBtree } else { idxInfo.Tp = indexOption.Tp } idxInfo.Global = indexOption.Global } else { // Use btree as default index type. - idxInfo.Tp = model.IndexTypeBtree + idxInfo.Tp = pmodel.IndexTypeBtree } return idxInfo, nil @@ -416,7 +417,7 @@ func DropIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) { } // ValidateRenameIndex checks if index name is ok to be renamed. 
-func ValidateRenameIndex(from, to model.CIStr, tbl *model.TableInfo) (ignore bool, err error) { +func ValidateRenameIndex(from, to pmodel.CIStr, tbl *model.TableInfo) (ignore bool, err error) { if fromIdx := tbl.FindIndexByName(from.L); fromIdx == nil { return false, errors.Trace(infoschema.ErrKeyNotExists.GenWithStackByArgs(from.O, tbl.Name)) } @@ -459,7 +460,7 @@ func onRenameIndex(jobCtx *jobContext, t *meta.Meta, job *model.Job) (ver int64, return ver, nil } -func validateAlterIndexVisibility(ctx sessionctx.Context, indexName model.CIStr, invisible bool, tbl *model.TableInfo) (bool, error) { +func validateAlterIndexVisibility(ctx sessionctx.Context, indexName pmodel.CIStr, invisible bool, tbl *model.TableInfo) (bool, error) { var idx *model.IndexInfo if idx = tbl.FindIndexByName(indexName.L); idx == nil || idx.State != model.StatePublic { return false, errors.Trace(infoschema.ErrKeyNotExists.GenWithStackByArgs(indexName.O, tbl.Name)) @@ -493,7 +494,7 @@ func onAlterIndexVisibility(jobCtx *jobContext, t *meta.Meta, job *model.Job) (v return ver, nil } -func setIndexVisibility(tblInfo *model.TableInfo, name model.CIStr, invisible bool) { +func setIndexVisibility(tblInfo *model.TableInfo, name pmodel.CIStr, invisible bool) { for _, idx := range tblInfo.Indices { if idx.Name.L == name.L || (isTempIdxInfo(idx, tblInfo) && getChangingIndexOriginName(idx) == name.O) { idx.Invisible = invisible @@ -530,7 +531,7 @@ func checkPrimaryKeyNotNull(jobCtx *jobContext, w *worker, t *meta.Meta, job *mo return nil, nil } - err = modifyColsFromNull2NotNull(w, dbInfo, tblInfo, nullCols, &model.ColumnInfo{Name: model.NewCIStr("")}, false) + err = modifyColsFromNull2NotNull(w, dbInfo, tblInfo, nullCols, &model.ColumnInfo{Name: pmodel.NewCIStr("")}, false) if err == nil { return nil, nil } @@ -571,7 +572,7 @@ func moveAndUpdateHiddenColumnsToPublic(tblInfo *model.TableInfo, idxInfo *model func decodeAddIndexArgs(job *model.Job) ( uniques []bool, - indexNames []model.CIStr, + 
indexNames []pmodel.CIStr, indexPartSpecifications [][]*ast.IndexPartSpecification, indexOptions []*ast.IndexOption, hiddenCols [][]*model.ColumnInfo, @@ -579,7 +580,7 @@ func decodeAddIndexArgs(job *model.Job) ( ) { var ( unique bool - indexName model.CIStr + indexName pmodel.CIStr indexPartSpecification []*ast.IndexPartSpecification indexOption *ast.IndexOption hiddenCol []*model.ColumnInfo @@ -587,7 +588,7 @@ func decodeAddIndexArgs(job *model.Job) ( err = job.DecodeArgs(&unique, &indexName, &indexPartSpecification, &indexOption, &hiddenCol) if err == nil { return []bool{unique}, - []model.CIStr{indexName}, + []pmodel.CIStr{indexName}, [][]*ast.IndexPartSpecification{indexPartSpecification}, []*ast.IndexOption{indexOption}, [][]*model.ColumnInfo{hiddenCol}, @@ -619,7 +620,7 @@ func (w *worker) onCreateIndex(jobCtx *jobContext, t *meta.Meta, job *model.Job, } uniques := make([]bool, 1) - indexNames := make([]model.CIStr, 1) + indexNames := make([]pmodel.CIStr, 1) indexPartSpecifications := make([][]*ast.IndexPartSpecification, 1) indexOption := make([]*ast.IndexOption, 1) var sqlMode mysql.SQLMode @@ -1233,7 +1234,7 @@ func checkDropIndex(infoCache *infoschema.InfoCache, t *meta.Meta, job *model.Jo return nil, nil, false, errors.Trace(err) } - indexNames := make([]model.CIStr, 1) + indexNames := make([]pmodel.CIStr, 1) ifExists := make([]bool, 1) if err = job.DecodeArgs(&indexNames[0], &ifExists[0]); err != nil { if err = job.DecodeArgs(&indexNames, &ifExists); err != nil { @@ -1289,8 +1290,8 @@ func checkInvisibleIndexesOnPK(tblInfo *model.TableInfo, indexInfos []*model.Ind return nil } -func checkRenameIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, model.CIStr, model.CIStr, error) { - var from, to model.CIStr +func checkRenameIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, pmodel.CIStr, pmodel.CIStr, error) { + var from, to pmodel.CIStr schemaID := job.SchemaID tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { @@ 
-1314,9 +1315,9 @@ func checkRenameIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, model.CIS return tblInfo, from, to, errors.Trace(err) } -func checkAlterIndexVisibility(t *meta.Meta, job *model.Job) (*model.TableInfo, model.CIStr, bool, error) { +func checkAlterIndexVisibility(t *meta.Meta, job *model.Job) (*model.TableInfo, pmodel.CIStr, bool, error) { var ( - indexName model.CIStr + indexName pmodel.CIStr invisible bool ) @@ -2552,7 +2553,7 @@ type changingIndex struct { // FindRelatedIndexesToChange finds the indexes that covering the given column. // The normal one will be overridden by the temp one. -func FindRelatedIndexesToChange(tblInfo *model.TableInfo, colName model.CIStr) []changingIndex { +func FindRelatedIndexesToChange(tblInfo *model.TableInfo, colName pmodel.CIStr) []changingIndex { // In multi-schema change jobs that contains several "modify column" sub-jobs, there may be temp indexes for another temp index. // To prevent reorganizing too many indexes, we should create the temp indexes that are really necessary. 
var normalIdxInfos, tempIdxInfos []changingIndex @@ -2592,7 +2593,7 @@ func isTempIdxInfo(idxInfo *model.IndexInfo, tblInfo *model.TableInfo) bool { return false } -func findIdxCol(idxInfo *model.IndexInfo, colName model.CIStr) int { +func findIdxCol(idxInfo *model.IndexInfo, colName pmodel.CIStr) int { for offset, idxCol := range idxInfo.Columns { if idxCol.Name.L == colName.L { return offset @@ -2601,7 +2602,7 @@ func findIdxCol(idxInfo *model.IndexInfo, colName model.CIStr) int { return -1 } -func renameIndexes(tblInfo *model.TableInfo, from, to model.CIStr) { +func renameIndexes(tblInfo *model.TableInfo, from, to pmodel.CIStr) { for _, idx := range tblInfo.Indices { if idx.Name.L == from.L { idx.Name = to @@ -2612,7 +2613,7 @@ func renameIndexes(tblInfo *model.TableInfo, from, to model.CIStr) { } } -func renameHiddenColumns(tblInfo *model.TableInfo, from, to model.CIStr) { +func renameHiddenColumns(tblInfo *model.TableInfo, from, to pmodel.CIStr) { for _, col := range tblInfo.Columns { if col.Hidden && getExpressionIndexOriginName(col) == from.O { col.Name.L = strings.Replace(col.Name.L, from.L, to.L, 1) diff --git a/pkg/ddl/index_change_test.go b/pkg/ddl/index_change_test.go index 882d2fecffe23..c65f3e5105cbc 100644 --- a/pkg/ddl/index_change_test.go +++ b/pkg/ddl/index_change_test.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" diff --git a/pkg/ddl/index_cop.go b/pkg/ddl/index_cop.go index 547ee400d1a41..25ce530bff4d8 100644 --- a/pkg/ddl/index_cop.go +++ b/pkg/ddl/index_cop.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/tidb/pkg/expression" exprctx "github.com/pingcap/tidb/pkg/expression/context" "github.com/pingcap/tidb/pkg/kv" - 
"github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/ddl/index_cop_test.go b/pkg/ddl/index_cop_test.go index 7c2f8fb2c15d3..87239a6121404 100644 --- a/pkg/ddl/index_cop_test.go +++ b/pkg/ddl/index_cop_test.go @@ -23,7 +23,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/ddl/copr" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/types" @@ -37,7 +38,7 @@ func TestAddIndexFetchRowsFromCoprocessor(t *testing.T) { tk.MustExec("use test") testFetchRows := func(db, tb, idx string) ([]kv.Handle, [][]types.Datum) { - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr(db), model.NewCIStr(tb)) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(db), pmodel.NewCIStr(tb)) require.NoError(t, err) tblInfo := tbl.Meta() idxInfo := tblInfo.FindIndexByName(idx) diff --git a/pkg/ddl/index_merge_tmp.go b/pkg/ddl/index_merge_tmp.go index c0001250b6c22..89ca8159ace3d 100644 --- a/pkg/ddl/index_merge_tmp.go +++ b/pkg/ddl/index_merge_tmp.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl/logutil" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" driver "github.com/pingcap/tidb/pkg/store/driver/txn" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" diff --git a/pkg/ddl/index_modify_test.go b/pkg/ddl/index_modify_test.go index e0a38c17919f6..395007fb4b8d4 100644 --- a/pkg/ddl/index_modify_test.go +++ b/pkg/ddl/index_modify_test.go @@ -30,7 +30,7 @@ import ( testddlutil 
"github.com/pingcap/tidb/pkg/ddl/testutil" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/sessiontxn" diff --git a/pkg/ddl/index_test.go b/pkg/ddl/index_test.go index ba89046bf6fa7..6c5386015c690 100644 --- a/pkg/ddl/index_test.go +++ b/pkg/ddl/index_test.go @@ -18,8 +18,9 @@ import ( "encoding/json" "testing" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) @@ -27,7 +28,7 @@ func TestDecodeAddIndexArgsCompatibility(t *testing.T) { cases := []struct { raw json.RawMessage uniques []bool - indexNames []model.CIStr + indexNames []pmodel.CIStr indexPartSpecifications [][]*ast.IndexPartSpecification indexOptions []*ast.IndexOption hiddenCols [][]*model.ColumnInfo @@ -44,16 +45,16 @@ null, [], false]`), uniques: []bool{true}, - indexNames: []model.CIStr{ + indexNames: []pmodel.CIStr{ {O: "t", L: "t"}, }, indexPartSpecifications: [][]*ast.IndexPartSpecification{ { { Column: &ast.ColumnName{ - Schema: model.CIStr{O: "", L: ""}, - Table: model.CIStr{O: "", L: ""}, - Name: model.CIStr{O: "a", L: "a"}, + Schema: pmodel.CIStr{O: "", L: ""}, + Table: pmodel.CIStr{O: "", L: ""}, + Name: pmodel.CIStr{O: "a", L: "a"}, }, Length: -1, Desc: false, @@ -61,9 +62,9 @@ false]`), }, { Column: &ast.ColumnName{ - Schema: model.CIStr{O: "", L: ""}, - Table: model.CIStr{O: "", L: ""}, - Name: model.CIStr{O: "b", L: "b"}, + Schema: pmodel.CIStr{O: "", L: ""}, + Table: pmodel.CIStr{O: "", L: ""}, + Name: pmodel.CIStr{O: "b", L: "b"}, }, Length: -1, Desc: false, @@ -91,16 +92,16 @@ false]`), [[],[]], [false,false]]`), uniques: []bool{false, true}, - indexNames: []model.CIStr{ + indexNames: 
[]pmodel.CIStr{ {O: "t", L: "t"}, {O: "t1", L: "t1"}, }, indexPartSpecifications: [][]*ast.IndexPartSpecification{ { { Column: &ast.ColumnName{ - Schema: model.CIStr{O: "", L: ""}, - Table: model.CIStr{O: "", L: ""}, - Name: model.CIStr{O: "a", L: "a"}, + Schema: pmodel.CIStr{O: "", L: ""}, + Table: pmodel.CIStr{O: "", L: ""}, + Name: pmodel.CIStr{O: "a", L: "a"}, }, Length: -1, Desc: false, @@ -108,9 +109,9 @@ false]`), }, { Column: &ast.ColumnName{ - Schema: model.CIStr{O: "", L: ""}, - Table: model.CIStr{O: "", L: ""}, - Name: model.CIStr{O: "b", L: "b"}, + Schema: pmodel.CIStr{O: "", L: ""}, + Table: pmodel.CIStr{O: "", L: ""}, + Name: pmodel.CIStr{O: "b", L: "b"}, }, Length: -1, Desc: false, @@ -120,9 +121,9 @@ false]`), { { Column: &ast.ColumnName{ - Schema: model.CIStr{O: "", L: ""}, - Table: model.CIStr{O: "", L: ""}, - Name: model.CIStr{O: "a", L: "a"}, + Schema: pmodel.CIStr{O: "", L: ""}, + Table: pmodel.CIStr{O: "", L: ""}, + Name: pmodel.CIStr{O: "a", L: "a"}, }, Length: -1, Desc: false, diff --git a/pkg/ddl/ingest/BUILD.bazel b/pkg/ddl/ingest/BUILD.bazel index d9cffd0c1a3a8..00b6068beec3c 100644 --- a/pkg/ddl/ingest/BUILD.bazel +++ b/pkg/ddl/ingest/BUILD.bazel @@ -33,7 +33,7 @@ go_library( "//pkg/lightning/config", "//pkg/lightning/log", "//pkg/meta", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/sessionctx", @@ -79,7 +79,7 @@ go_test( "//pkg/ddl/session", "//pkg/ddl/testutil", "//pkg/errno", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/testkit", "//pkg/testkit/testfailpoint", "//tests/realtikvtest", diff --git a/pkg/ddl/ingest/backend.go b/pkg/ddl/ingest/backend.go index 5098192551fb1..7158c3fdda072 100644 --- a/pkg/ddl/ingest/backend.go +++ b/pkg/ddl/ingest/backend.go @@ -30,7 +30,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/common" lightning "github.com/pingcap/tidb/pkg/lightning/config" "github.com/pingcap/tidb/pkg/lightning/log" - "github.com/pingcap/tidb/pkg/parser/model" + 
"github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/table" diff --git a/pkg/ddl/ingest/integration_test.go b/pkg/ddl/ingest/integration_test.go index e47e955b9450c..9c282baecf811 100644 --- a/pkg/ddl/ingest/integration_test.go +++ b/pkg/ddl/ingest/integration_test.go @@ -25,7 +25,7 @@ import ( ingesttestutil "github.com/pingcap/tidb/pkg/ddl/ingest/testutil" "github.com/pingcap/tidb/pkg/ddl/testutil" "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" "github.com/pingcap/tidb/tests/realtikvtest" diff --git a/pkg/ddl/ingest/util.go b/pkg/ddl/ingest/util.go index 9c5eff3ea2122..878a20c56d625 100644 --- a/pkg/ddl/ingest/util.go +++ b/pkg/ddl/ingest/util.go @@ -18,7 +18,7 @@ import ( "github.com/pingcap/errors" ddlutil "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/lightning/common" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/terror" ) diff --git a/pkg/ddl/integration_test.go b/pkg/ddl/integration_test.go index 581e6bad3d5e2..fe7337c7a1104 100644 --- a/pkg/ddl/integration_test.go +++ b/pkg/ddl/integration_test.go @@ -17,7 +17,7 @@ package ddl_test import ( "testing" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" "github.com/stretchr/testify/require" diff --git a/pkg/ddl/job_scheduler.go b/pkg/ddl/job_scheduler.go index 9528dd103eeb2..d12066a6527bc 100644 --- a/pkg/ddl/job_scheduler.go +++ b/pkg/ddl/job_scheduler.go @@ -41,9 +41,9 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + 
"github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/owner" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" pumpcli "github.com/pingcap/tidb/pkg/tidb-binlog/pump_client" diff --git a/pkg/ddl/job_scheduler_testkit_test.go b/pkg/ddl/job_scheduler_testkit_test.go index bc5d11d3d90bf..d1057a5fb0d85 100644 --- a/pkg/ddl/job_scheduler_testkit_test.go +++ b/pkg/ddl/job_scheduler_testkit_test.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/ddl/serverstate" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" "github.com/pingcap/tidb/pkg/util" diff --git a/pkg/ddl/job_submitter.go b/pkg/ddl/job_submitter.go index 5a548c899a717..6faf107d146a3 100644 --- a/pkg/ddl/job_submitter.go +++ b/pkg/ddl/job_submitter.go @@ -32,10 +32,10 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/owner" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/util" @@ -324,11 +324,11 @@ func (s *JobSubmitter) addBatchDDLJobs2Table(jobWs []*JobWrapper) error { if job.CDCWriteSource == 0 && bdrRole != string(ast.BDRRoleNone) { if job.Type == model.ActionMultiSchemaChange && job.MultiSchemaInfo != nil { for _, subJob := range job.MultiSchemaInfo.SubJobs { - if ast.DeniedByBDR(ast.BDRRole(bdrRole), subJob.Type, job) { + if DeniedByBDR(ast.BDRRole(bdrRole), subJob.Type, job) { return dbterror.ErrBDRRestrictedDDL.FastGenByArgs(bdrRole) } } - } 
else if ast.DeniedByBDR(ast.BDRRole(bdrRole), job.Type, job) { + } else if DeniedByBDR(ast.BDRRole(bdrRole), job.Type, job) { return dbterror.ErrBDRRestrictedDDL.FastGenByArgs(bdrRole) } } diff --git a/pkg/ddl/job_submitter_test.go b/pkg/ddl/job_submitter_test.go index 2a1ab915646ad..32ba5f002d0fe 100644 --- a/pkg/ddl/job_submitter_test.go +++ b/pkg/ddl/job_submitter_test.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" diff --git a/pkg/ddl/job_worker.go b/pkg/ddl/job_worker.go index d6da0b8a33309..06f1fd2e2397e 100644 --- a/pkg/ddl/job_worker.go +++ b/pkg/ddl/job_worker.go @@ -35,9 +35,9 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/binloginfo" @@ -884,7 +884,7 @@ func (w *worker) runOneJobStep( ver, err = w.onShardRowID(jobCtx, t, job) case model.ActionModifyTableComment: ver, err = onModifyTableComment(jobCtx, t, job) - case model.ActionModifyTableAutoIdCache: + case model.ActionModifyTableAutoIDCache: ver, err = onModifyTableAutoIDCache(jobCtx, t, job) case model.ActionAddTablePartition: ver, err = w.onAddTablePartition(jobCtx, t, job) diff --git a/pkg/ddl/job_worker_test.go b/pkg/ddl/job_worker_test.go index 66e3e26227744..e4bf2eb8d4e8b 100644 --- a/pkg/ddl/job_worker_test.go +++ b/pkg/ddl/job_worker_test.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/ddl" 
- "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" diff --git a/pkg/ddl/mock.go b/pkg/ddl/mock.go index 11e1de3669a88..d668618ce6df0 100644 --- a/pkg/ddl/mock.go +++ b/pkg/ddl/mock.go @@ -18,9 +18,9 @@ import ( "context" "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" ) diff --git a/pkg/ddl/mock/BUILD.bazel b/pkg/ddl/mock/BUILD.bazel index a58b2710ee4e7..3d0a7170c9979 100644 --- a/pkg/ddl/mock/BUILD.bazel +++ b/pkg/ddl/mock/BUILD.bazel @@ -9,7 +9,7 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/ddl/mock", visibility = ["//visibility:public"], deps = [ - "//pkg/parser/model", + "//pkg/meta/model", "@org_uber_go_mock//gomock", ], ) diff --git a/pkg/ddl/mock/systable_manager_mock.go b/pkg/ddl/mock/systable_manager_mock.go index 2b959163b0016..175e4cf1e1e4c 100644 --- a/pkg/ddl/mock/systable_manager_mock.go +++ b/pkg/ddl/mock/systable_manager_mock.go @@ -13,7 +13,7 @@ import ( context "context" reflect "reflect" - model "github.com/pingcap/tidb/pkg/parser/model" + model "github.com/pingcap/tidb/pkg/meta/model" gomock "go.uber.org/mock/gomock" ) diff --git a/pkg/ddl/modify_column.go b/pkg/ddl/modify_column.go index 600333670e6b7..2665fdd9c7846 100644 --- a/pkg/ddl/modify_column.go +++ b/pkg/ddl/modify_column.go @@ -31,12 +31,13 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" "github.com/pingcap/tidb/pkg/parser/format" - 
"github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -51,7 +52,7 @@ import ( type modifyingColInfo struct { newCol *model.ColumnInfo - oldColName *model.CIStr + oldColName *pmodel.CIStr modifyColumnTp byte updatedAutoRandomBits uint64 changingCol *model.ColumnInfo @@ -114,7 +115,7 @@ func (w *worker) onModifyColumn(jobCtx *jobContext, t *meta.Meta, job *model.Job changingCol := modifyInfo.changingCol if changingCol == nil { - newColName := model.NewCIStr(genChangingColumnUniqueName(tblInfo, oldCol)) + newColName := pmodel.NewCIStr(genChangingColumnUniqueName(tblInfo, oldCol)) if mysql.HasPriKeyFlag(oldCol.GetFlag()) { job.State = model.JobStateCancelled msg := "this column has primary key flag" @@ -141,7 +142,7 @@ func (w *worker) onModifyColumn(jobCtx *jobContext, t *meta.Meta, job *model.Job // We create a temp index for each normal index. 
tmpIdx := info.IndexInfo.Clone() tmpIdxName := genChangingIndexUniqueName(tblInfo, info.IndexInfo) - setIdxIDName(tmpIdx, newIdxID, model.NewCIStr(tmpIdxName)) + setIdxIDName(tmpIdx, newIdxID, pmodel.NewCIStr(tmpIdxName)) SetIdxColNameOffset(tmpIdx.Columns[info.Offset], changingCol) tblInfo.Indices = append(tblInfo.Indices, tmpIdx) } else { @@ -351,7 +352,7 @@ func adjustTableInfoAfterModifyColumn( return nil } -func updateFKInfoWhenModifyColumn(tblInfo *model.TableInfo, oldCol, newCol model.CIStr) { +func updateFKInfoWhenModifyColumn(tblInfo *model.TableInfo, oldCol, newCol pmodel.CIStr) { if oldCol.L == newCol.L { return } @@ -364,7 +365,7 @@ func updateFKInfoWhenModifyColumn(tblInfo *model.TableInfo, oldCol, newCol model } } -func updateTTLInfoWhenModifyColumn(tblInfo *model.TableInfo, oldCol, newCol model.CIStr) { +func updateTTLInfoWhenModifyColumn(tblInfo *model.TableInfo, oldCol, newCol pmodel.CIStr) { if oldCol.L == newCol.L { return } @@ -417,7 +418,7 @@ func adjustForeignKeyChildTableInfoAfterModifyColumn(infoCache *infoschema.InfoC func (w *worker) doModifyColumnTypeWithData( jobCtx *jobContext, t *meta.Meta, job *model.Job, dbInfo *model.DBInfo, tblInfo *model.TableInfo, changingCol, oldCol *model.ColumnInfo, - colName model.CIStr, pos *ast.ColumnPosition, rmIdxIDs []int64) (ver int64, _ error) { + colName pmodel.CIStr, pos *ast.ColumnPosition, rmIdxIDs []int64) (ver int64, _ error) { var err error originalState := changingCol.State targetCol := changingCol.Clone() @@ -618,7 +619,7 @@ func doReorgWorkForModifyColumn(w *worker, jobCtx *jobContext, t *meta.Meta, job } func adjustTableInfoAfterModifyColumnWithData(tblInfo *model.TableInfo, pos *ast.ColumnPosition, - oldCol, changingCol *model.ColumnInfo, newName model.CIStr, changingIdxs []*model.IndexInfo) (err error) { + oldCol, changingCol *model.ColumnInfo, newName pmodel.CIStr, changingIdxs []*model.IndexInfo) (err error) { if pos != nil && pos.RelativeColumn != nil && oldCol.Name.L == 
pos.RelativeColumn.Name.L { // For cases like `modify column b after b`, it should report this error. return errors.Trace(infoschema.ErrColumnNotExists.GenWithStackByArgs(oldCol.Name, tblInfo.Name)) @@ -642,7 +643,7 @@ func adjustTableInfoAfterModifyColumnWithData(tblInfo *model.TableInfo, pos *ast return nil } -func checkModifyColumnWithGeneratedColumnsConstraint(allCols []*table.Column, oldColName model.CIStr) error { +func checkModifyColumnWithGeneratedColumnsConstraint(allCols []*table.Column, oldColName pmodel.CIStr) error { for _, col := range allCols { if col.GeneratedExpr == nil { continue @@ -666,7 +667,7 @@ func GetModifiableColumnJob( sctx sessionctx.Context, is infoschema.InfoSchema, // WARN: is maybe nil here. ident ast.Ident, - originalColName model.CIStr, + originalColName pmodel.CIStr, schema *model.DBInfo, t table.Table, spec *ast.AlterTableSpec, diff --git a/pkg/ddl/modify_column_test.go b/pkg/ddl/modify_column_test.go index 450be74cda387..eb2081f114b77 100644 --- a/pkg/ddl/modify_column_test.go +++ b/pkg/ddl/modify_column_test.go @@ -25,8 +25,9 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/sessiontxn" @@ -93,7 +94,7 @@ func TestModifyColumnReorgInfo(t *testing.T) { currJob = job var ( _newCol *model.ColumnInfo - _oldColName *model.CIStr + _oldColName *pmodel.CIStr _pos = &ast.ColumnPosition{} _modifyColumnTp byte _updatedAutoRandomBits uint64 diff --git a/pkg/ddl/multi_schema_change.go b/pkg/ddl/multi_schema_change.go index 4edf6b171de44..db675ac8bda26 100644 --- a/pkg/ddl/multi_schema_change.go +++ b/pkg/ddl/multi_schema_change.go @@ -16,8 +16,9 @@ package ddl import ( 
"github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/table" @@ -203,19 +204,19 @@ func fillMultiSchemaInfo(info *model.MultiSchemaInfo, job *model.Job) (err error pos := job.Args[1].(*ast.ColumnPosition) info.AddColumns = append(info.AddColumns, col.Name) for colName := range col.Dependences { - info.RelativeColumns = append(info.RelativeColumns, model.CIStr{L: colName, O: colName}) + info.RelativeColumns = append(info.RelativeColumns, pmodel.CIStr{L: colName, O: colName}) } if pos != nil && pos.Tp == ast.ColumnPositionAfter { info.PositionColumns = append(info.PositionColumns, pos.RelativeColumn.Name) } case model.ActionDropColumn: - colName := job.Args[0].(model.CIStr) + colName := job.Args[0].(pmodel.CIStr) info.DropColumns = append(info.DropColumns, colName) case model.ActionDropIndex, model.ActionDropPrimaryKey: - indexName := job.Args[0].(model.CIStr) + indexName := job.Args[0].(pmodel.CIStr) info.DropIndexes = append(info.DropIndexes, indexName) case model.ActionAddIndex, model.ActionAddPrimaryKey: - indexName := job.Args[1].(model.CIStr) + indexName := job.Args[1].(pmodel.CIStr) indexPartSpecifications := job.Args[2].([]*ast.IndexPartSpecification) info.AddIndexes = append(info.AddIndexes, indexName) for _, indexPartSpecification := range indexPartSpecifications { @@ -224,18 +225,18 @@ func fillMultiSchemaInfo(info *model.MultiSchemaInfo, job *model.Job) (err error if hiddenCols, ok := job.Args[4].([]*model.ColumnInfo); ok { for _, c := range hiddenCols { for depColName := range c.Dependences { - info.RelativeColumns = append(info.RelativeColumns, model.NewCIStr(depColName)) + info.RelativeColumns = append(info.RelativeColumns, pmodel.NewCIStr(depColName)) } } } case 
model.ActionRenameIndex: - from := job.Args[0].(model.CIStr) - to := job.Args[1].(model.CIStr) + from := job.Args[0].(pmodel.CIStr) + to := job.Args[1].(pmodel.CIStr) info.AddIndexes = append(info.AddIndexes, to) info.DropIndexes = append(info.DropIndexes, from) case model.ActionModifyColumn: newCol := *job.Args[0].(**model.ColumnInfo) - oldColName := job.Args[1].(model.CIStr) + oldColName := job.Args[1].(pmodel.CIStr) pos := job.Args[2].(*ast.ColumnPosition) if newCol.Name.L != oldColName.L { info.AddColumns = append(info.AddColumns, newCol.Name) @@ -250,7 +251,7 @@ func fillMultiSchemaInfo(info *model.MultiSchemaInfo, job *model.Job) (err error col := job.Args[0].(*table.Column) info.ModifyColumns = append(info.ModifyColumns, col.Name) case model.ActionAlterIndexVisibility: - idxName := job.Args[0].(model.CIStr) + idxName := job.Args[0].(pmodel.CIStr) info.AlterIndexes = append(info.AlterIndexes, idxName) case model.ActionRebaseAutoID, model.ActionModifyTableComment, model.ActionModifyTableCharsetAndCollate: case model.ActionAddForeignKey: @@ -269,7 +270,7 @@ func checkOperateSameColAndIdx(info *model.MultiSchemaInfo) error { modifyCols := make(map[string]struct{}) modifyIdx := make(map[string]struct{}) - checkColumns := func(colNames []model.CIStr, addToModifyCols bool) error { + checkColumns := func(colNames []pmodel.CIStr, addToModifyCols bool) error { for _, colName := range colNames { name := colName.L if _, ok := modifyCols[name]; ok { @@ -282,7 +283,7 @@ func checkOperateSameColAndIdx(info *model.MultiSchemaInfo) error { return nil } - checkIndexes := func(idxNames []model.CIStr, addToModifyIdx bool) error { + checkIndexes := func(idxNames []pmodel.CIStr, addToModifyIdx bool) error { for _, idxName := range idxNames { name := idxName.L if _, ok := modifyIdx[name]; ok { @@ -345,7 +346,7 @@ func mergeAddIndex(info *model.MultiSchemaInfo) { } var unique []bool - var indexNames []model.CIStr + var indexNames []pmodel.CIStr var indexPartSpecifications 
[][]*ast.IndexPartSpecification var indexOption []*ast.IndexOption var hiddenCols [][]*model.ColumnInfo @@ -354,7 +355,7 @@ func mergeAddIndex(info *model.MultiSchemaInfo) { for _, subJob := range info.SubJobs { if subJob.Type == model.ActionAddIndex { unique = append(unique, subJob.Args[0].(bool)) - indexNames = append(indexNames, subJob.Args[1].(model.CIStr)) + indexNames = append(indexNames, subJob.Args[1].(pmodel.CIStr)) indexPartSpecifications = append(indexPartSpecifications, subJob.Args[2].([]*ast.IndexPartSpecification)) indexOption = append(indexOption, subJob.Args[3].(*ast.IndexOption)) hiddenCols = append(hiddenCols, subJob.Args[4].([]*model.ColumnInfo)) diff --git a/pkg/ddl/multi_schema_change_test.go b/pkg/ddl/multi_schema_change_test.go index 6812af5c41567..ca4d2fe3c4bd5 100644 --- a/pkg/ddl/multi_schema_change_test.go +++ b/pkg/ddl/multi_schema_change_test.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" diff --git a/pkg/ddl/mv_index_test.go b/pkg/ddl/mv_index_test.go index 9fe51ece6d5c2..cc633cbfa49ea 100644 --- a/pkg/ddl/mv_index_test.go +++ b/pkg/ddl/mv_index_test.go @@ -20,7 +20,7 @@ import ( "testing" "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" ) diff --git a/pkg/ddl/partition.go b/pkg/ddl/partition.go index bc90bfc024419..32b60954682d7 100644 --- a/pkg/ddl/partition.go +++ b/pkg/ddl/partition.go @@ -36,12 +36,13 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" 
"github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/opcode" "github.com/pingcap/tidb/pkg/parser/terror" @@ -369,7 +370,7 @@ func checkAddListPartitions(tblInfo *model.TableInfo) error { // (needs reorganize partition instead). func checkAddPartitionValue(meta *model.TableInfo, part *model.PartitionInfo) error { switch meta.Partition.Type { - case model.PartitionTypeRange: + case pmodel.PartitionTypeRange: if len(meta.Partition.Columns) == 0 { newDefs, oldDefs := part.Definitions, meta.Partition.Definitions rangeValue := oldDefs[len(oldDefs)-1].LessThan[0] @@ -400,7 +401,7 @@ func checkAddPartitionValue(meta *model.TableInfo, part *model.PartitionInfo) er currentRangeValue = nextRangeValue } } - case model.PartitionTypeList: + case pmodel.PartitionTypeList: err := checkAddListPartitions(meta) if err != nil { return err @@ -518,9 +519,9 @@ func buildTablePartitionInfo(ctx sessionctx.Context, s *ast.PartitionOptions, tb var enable bool switch s.Tp { - case model.PartitionTypeRange: + case pmodel.PartitionTypeRange: enable = true - case model.PartitionTypeList: + case pmodel.PartitionTypeList: // Partition by list is enabled only when tidb_enable_list_partition is 'ON'. enable = ctx.GetSessionVars().EnableListTablePartition if enable { @@ -529,7 +530,7 @@ func buildTablePartitionInfo(ctx sessionctx.Context, s *ast.PartitionOptions, tb return err } } - case model.PartitionTypeHash, model.PartitionTypeKey: + case pmodel.PartitionTypeHash, pmodel.PartitionTypeKey: // Partition by hash and key is enabled by default. 
if s.Sub != nil { // Subpartitioning only allowed with Range or List @@ -539,10 +540,10 @@ func buildTablePartitionInfo(ctx sessionctx.Context, s *ast.PartitionOptions, tb if s.Linear { ctx.GetSessionVars().StmtCtx.AppendWarning(dbterror.ErrUnsupportedCreatePartition.FastGen(fmt.Sprintf("LINEAR %s is not supported, using non-linear %s instead", s.Tp.String(), s.Tp.String()))) } - if s.Tp == model.PartitionTypeHash || len(s.ColumnNames) != 0 { + if s.Tp == pmodel.PartitionTypeHash || len(s.ColumnNames) != 0 { enable = true } - if s.Tp == model.PartitionTypeKey && len(s.ColumnNames) == 0 { + if s.Tp == pmodel.PartitionTypeKey && len(s.ColumnNames) == 0 { enable = true } } @@ -574,11 +575,11 @@ func buildTablePartitionInfo(ctx sessionctx.Context, s *ast.PartitionOptions, tb } pi.Expr = buf.String() } else if s.ColumnNames != nil { - pi.Columns = make([]model.CIStr, 0, len(s.ColumnNames)) + pi.Columns = make([]pmodel.CIStr, 0, len(s.ColumnNames)) for _, cn := range s.ColumnNames { pi.Columns = append(pi.Columns, cn.Name) } - if pi.Type == model.PartitionTypeKey && len(s.ColumnNames) == 0 { + if pi.Type == pmodel.PartitionTypeKey && len(s.ColumnNames) == 0 { if tbInfo.PKIsHandle { pi.Columns = append(pi.Columns, tbInfo.GetPkName()) pi.IsEmptyColumns = true @@ -750,10 +751,10 @@ func isValidKeyPartitionColType(fieldType types.FieldType) bool { } } -func isColTypeAllowedAsPartitioningCol(partType model.PartitionType, fieldType types.FieldType) bool { +func isColTypeAllowedAsPartitioningCol(partType pmodel.PartitionType, fieldType types.FieldType) bool { // For key partition, the permitted partition field types can be all field types except // BLOB, JSON, Geometry - if partType == model.PartitionTypeKey { + if partType == pmodel.PartitionTypeKey { return isValidKeyPartitionColType(fieldType) } // The permitted data types are shown in the following list: @@ -776,7 +777,7 @@ func isColTypeAllowedAsPartitioningCol(partType model.PartitionType, fieldType t // will return nil 
if error occurs, i.e. not an INTERVAL partitioned table func getPartitionIntervalFromTable(ctx expression.BuildContext, tbInfo *model.TableInfo) *ast.PartitionInterval { if tbInfo.Partition == nil || - tbInfo.Partition.Type != model.PartitionTypeRange { + tbInfo.Partition.Type != pmodel.PartitionTypeRange { return nil } if len(tbInfo.Partition.Columns) > 1 { @@ -896,7 +897,7 @@ func getPartitionIntervalFromTable(ctx expression.BuildContext, tbInfo *model.Ta } partitionMethod := ast.PartitionMethod{ - Tp: model.PartitionTypeRange, + Tp: pmodel.PartitionTypeRange, Interval: &interval, } partOption := &ast.PartitionOptions{PartitionMethod: partitionMethod} @@ -1030,7 +1031,7 @@ func generatePartitionDefinitionsFromInterval(ctx expression.BuildContext, partO if partOptions.Interval == nil { return nil } - if tbInfo.Partition.Type != model.PartitionTypeRange { + if tbInfo.Partition.Type != pmodel.PartitionTypeRange { return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("INTERVAL partitioning, only allowed on RANGE partitioning") } if len(partOptions.ColumnNames) > 1 || len(tbInfo.Partition.Columns) > 1 { @@ -1105,7 +1106,7 @@ func generatePartitionDefinitionsFromInterval(ctx expression.BuildContext, partO partExpr = ast.NewValueExpr(min, "", "") } partOptions.Definitions = append(partOptions.Definitions, &ast.PartitionDefinition{ - Name: model.NewCIStr("P_NULL"), + Name: pmodel.NewCIStr("P_NULL"), Clause: &ast.PartitionDefinitionClauseLessThan{ Exprs: []ast.ExprNode{partExpr}, }, @@ -1119,7 +1120,7 @@ func generatePartitionDefinitionsFromInterval(ctx expression.BuildContext, partO if partOptions.Interval.MaxValPart { partOptions.Definitions = append(partOptions.Definitions, &ast.PartitionDefinition{ - Name: model.NewCIStr("P_MAXVALUE"), + Name: pmodel.NewCIStr("P_MAXVALUE"), Clause: &ast.PartitionDefinitionClauseLessThan{ Exprs: []ast.ExprNode{&ast.MaxValueExpr{}}, }, @@ -1305,7 +1306,7 @@ func GeneratePartDefsFromInterval(ctx expression.BuildContext, tp 
ast.AlterTable } } else { currExpr = &ast.FuncCallExpr{ - FnName: model.NewCIStr("DATE_ADD"), + FnName: pmodel.NewCIStr("DATE_ADD"), Args: []ast.ExprNode{ startExpr, currExpr, @@ -1364,7 +1365,7 @@ func GeneratePartDefsFromInterval(ctx expression.BuildContext, tp ast.AlterTable } } partDefs = append(partDefs, &ast.PartitionDefinition{ - Name: model.NewCIStr(partName), + Name: pmodel.NewCIStr(partName), Clause: &ast.PartitionDefinitionClauseLessThan{ Exprs: []ast.ExprNode{currExpr}, }, @@ -1388,7 +1389,7 @@ func GeneratePartDefsFromInterval(ctx expression.BuildContext, tp ast.AlterTable // buildPartitionDefinitionsInfo build partition definitions info without assign partition id. tbInfo will be constant func buildPartitionDefinitionsInfo(ctx expression.BuildContext, defs []*ast.PartitionDefinition, tbInfo *model.TableInfo, numParts uint64) (partitions []model.PartitionDefinition, err error) { switch tbInfo.Partition.Type { - case model.PartitionTypeNone: + case pmodel.PartitionTypeNone: if len(defs) != 1 { return nil, dbterror.ErrUnsupportedPartitionType } @@ -1396,11 +1397,11 @@ func buildPartitionDefinitionsInfo(ctx expression.BuildContext, defs []*ast.Part if comment, set := defs[0].Comment(); set { partitions[0].Comment = comment } - case model.PartitionTypeRange: + case pmodel.PartitionTypeRange: partitions, err = buildRangePartitionDefinitions(ctx, defs, tbInfo) - case model.PartitionTypeHash, model.PartitionTypeKey: + case pmodel.PartitionTypeHash, pmodel.PartitionTypeKey: partitions, err = buildHashPartitionDefinitions(defs, tbInfo, numParts) - case model.PartitionTypeList: + case pmodel.PartitionTypeList: partitions, err = buildListPartitionDefinitions(ctx, defs, tbInfo) default: err = dbterror.ErrUnsupportedPartitionType @@ -1423,7 +1424,7 @@ func setPartitionPlacementFromOptions(partition *model.PartitionDefinition, opti for _, opt := range options { if opt.Tp == ast.TableOptionPlacementPolicy { partition.PlacementPolicyRef = &model.PolicyRefInfo{ - Name: 
model.NewCIStr(opt.StrValue), + Name: pmodel.NewCIStr(opt.StrValue), } } } @@ -1471,7 +1472,7 @@ func buildHashPartitionDefinitions(defs []*ast.PartitionDefinition, tbInfo *mode } } else { // Use the default - definitions[i].Name = model.NewCIStr(fmt.Sprintf("p%d", i)) + definitions[i].Name = pmodel.NewCIStr(fmt.Sprintf("p%d", i)) } } return definitions, nil @@ -1485,7 +1486,7 @@ func buildListPartitionDefinitions(ctx expression.BuildContext, defs []*ast.Part return nil, dbterror.ErrWrongPartitionName.GenWithStack("partition column name cannot be found") } for _, def := range defs { - if err := def.Clause.Validate(model.PartitionTypeList, len(tbInfo.Partition.Columns)); err != nil { + if err := def.Clause.Validate(pmodel.PartitionTypeList, len(tbInfo.Partition.Columns)); err != nil { return nil, err } clause := def.Clause.(*ast.PartitionDefinitionClauseIn) @@ -1580,7 +1581,7 @@ func buildRangePartitionDefinitions(ctx expression.BuildContext, defs []*ast.Par return nil, dbterror.ErrWrongPartitionName.GenWithStack("partition column name cannot be found") } for _, def := range defs { - if err := def.Clause.Validate(model.PartitionTypeRange, len(tbInfo.Partition.Columns)); err != nil { + if err := def.Clause.Validate(pmodel.PartitionTypeRange, len(tbInfo.Partition.Columns)); err != nil { return nil, err } clause := def.Clause.(*ast.PartitionDefinitionClauseLessThan) @@ -1755,7 +1756,7 @@ func checkAndOverridePartitionID(newTableInfo, oldTableInfo *model.TableInfo) er return dbterror.ErrRepairTableFail.GenWithStackByArgs("Partition type should be the same") } // Check whether partitionType is hash partition. 
- if newTableInfo.Partition.Type == model.PartitionTypeHash { + if newTableInfo.Partition.Type == pmodel.PartitionTypeHash { if newTableInfo.Partition.Num != oldTableInfo.Partition.Num { return dbterror.ErrRepairTableFail.GenWithStackByArgs("Hash partition num should be the same") } @@ -2012,7 +2013,7 @@ func getRangeValue(ctx expression.BuildContext, str string, unsigned bool) (any, // CheckDropTablePartition checks if the partition exists and does not allow deleting the last existing partition in the table. func CheckDropTablePartition(meta *model.TableInfo, partLowerNames []string) error { pi := meta.Partition - if pi.Type != model.PartitionTypeRange && pi.Type != model.PartitionTypeList { + if pi.Type != pmodel.PartitionTypeRange && pi.Type != pmodel.PartitionTypeList { return dbterror.ErrOnlyOnRangeListPartition.GenWithStackByArgs("DROP") } @@ -2185,12 +2186,12 @@ func (w *worker) onDropTablePartition(jobCtx *jobContext, t *meta.Meta, job *mod return ver, err } // ALTER TABLE ... PARTITION BY - if partInfo.Type != model.PartitionTypeNone { + if partInfo.Type != pmodel.PartitionTypeNone { // Also remove anything with the new table id physicalTableIDs = append(physicalTableIDs, partInfo.NewTableID) // Reset if it was normal table before - if tblInfo.Partition.Type == model.PartitionTypeNone || - tblInfo.Partition.DDLType == model.PartitionTypeNone { + if tblInfo.Partition.Type == pmodel.PartitionTypeNone || + tblInfo.Partition.DDLType == pmodel.PartitionTypeNone { tblInfo.Partition = nil } else { tblInfo.Partition.ClearReorgIntermediateInfo() @@ -3434,7 +3435,7 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, t *meta.Meta, job *mo return ver, errors.Trace(err) } tblInfo.ID = partInfo.NewTableID - if partInfo.DDLType != model.PartitionTypeNone { + if partInfo.DDLType != pmodel.PartitionTypeNone { // if partitioned before, then also add the old table ID, // otherwise it will be the already included first partition physicalTableIDs = 
append(physicalTableIDs, oldTblID) @@ -4030,7 +4031,7 @@ func checkExchangePartitionRecordValidation(w *worker, ptbl, ntbl table.Table, p pi := pt.Partition switch pi.Type { - case model.PartitionTypeHash: + case pmodel.PartitionTypeHash: if pi.Num == 1 { checkNt = false } else { @@ -4047,7 +4048,7 @@ func checkExchangePartitionRecordValidation(w *worker, ptbl, ntbl table.Table, p paramList = append(paramList, pi.Num, index) } } - case model.PartitionTypeRange: + case pmodel.PartitionTypeRange: // Table has only one partition and has the maximum value if len(pi.Definitions) == 1 && strings.EqualFold(pi.Definitions[index].LessThan[0], partitionMaxValue) { checkNt = false @@ -4063,7 +4064,7 @@ func checkExchangePartitionRecordValidation(w *worker, ptbl, ntbl table.Table, p paramList = append(paramList, params...) } } - case model.PartitionTypeList: + case pmodel.PartitionTypeList: if len(pi.Columns) == 0 { conds := buildCheckSQLConditionForListPartition(pi, index) buf.WriteString(conds) @@ -4346,7 +4347,7 @@ func checkPartitionKeysConstraint(pi *model.PartitionInfo, indexColumns []*model partCols []*model.ColumnInfo err error ) - if pi.Type == model.PartitionTypeNone { + if pi.Type == pmodel.PartitionTypeNone { return true, nil } // The expr will be an empty string if the partition is defined by: @@ -4747,8 +4748,8 @@ func AppendPartitionInfo(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, // This also solves the issue with comments within comments that would happen for // PLACEMENT POLICY options. 
defaultPartitionDefinitions := true - if partitionInfo.Type == model.PartitionTypeHash || - partitionInfo.Type == model.PartitionTypeKey { + if partitionInfo.Type == pmodel.PartitionTypeHash || + partitionInfo.Type == pmodel.PartitionTypeKey { for i, def := range partitionInfo.Definitions { if def.Name.O != fmt.Sprintf("p%d", i) { defaultPartitionDefinitions = false @@ -4761,7 +4762,7 @@ func AppendPartitionInfo(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, } if defaultPartitionDefinitions { - if partitionInfo.Type == model.PartitionTypeHash { + if partitionInfo.Type == pmodel.PartitionTypeHash { fmt.Fprintf(buf, "\nPARTITION BY HASH (%s) PARTITIONS %d", partitionInfo.Expr, partitionInfo.Num) } else { buf.WriteString("\nPARTITION BY KEY (") @@ -4777,7 +4778,7 @@ func AppendPartitionInfo(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, // partitionInfo.Type == model.PartitionTypeRange || partitionInfo.Type == model.PartitionTypeList // || partitionInfo.Type == model.PartitionTypeKey // Notice that MySQL uses two spaces between LIST and COLUMNS... 
- if partitionInfo.Type == model.PartitionTypeKey { + if partitionInfo.Type == pmodel.PartitionTypeKey { fmt.Fprintf(buf, "\nPARTITION BY %s (", partitionInfo.Type.String()) } else { fmt.Fprintf(buf, "\nPARTITION BY %s COLUMNS(", partitionInfo.Type.String()) @@ -4802,13 +4803,13 @@ func AppendPartitionDefs(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, } fmt.Fprintf(buf, "PARTITION %s", stringutil.Escape(def.Name.O, sqlMode)) // PartitionTypeHash and PartitionTypeKey do not have any VALUES definition - if partitionInfo.Type == model.PartitionTypeRange { + if partitionInfo.Type == pmodel.PartitionTypeRange { lessThans := make([]string, len(def.LessThan)) for idx, v := range def.LessThan { lessThans[idx] = hexIfNonPrint(v) } fmt.Fprintf(buf, " VALUES LESS THAN (%s)", strings.Join(lessThans, ",")) - } else if partitionInfo.Type == model.PartitionTypeList { + } else if partitionInfo.Type == pmodel.PartitionTypeList { if len(def.InValues) == 0 { fmt.Fprintf(buf, " DEFAULT") } else if len(def.InValues) == 1 && @@ -4889,11 +4890,11 @@ func checkPartitionDefinitionConstraints(ctx sessionctx.Context, tbInfo *model.T } switch tbInfo.Partition.Type { - case model.PartitionTypeRange: + case pmodel.PartitionTypeRange: err = checkPartitionByRange(ctx, tbInfo) - case model.PartitionTypeHash, model.PartitionTypeKey: + case pmodel.PartitionTypeHash, pmodel.PartitionTypeKey: err = checkPartitionByHash(ctx, tbInfo) - case model.PartitionTypeList: + case pmodel.PartitionTypeList: err = checkPartitionByList(ctx, tbInfo) } return errors.Trace(err) diff --git a/pkg/ddl/partition_test.go b/pkg/ddl/partition_test.go index 29a0b7ec8b4ac..8afb434ba70d0 100644 --- a/pkg/ddl/partition_test.go +++ b/pkg/ddl/partition_test.go @@ -21,7 +21,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" 
"github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/testkit" @@ -50,11 +51,11 @@ func TestDropAndTruncatePartition(t *testing.T) { func buildTableInfoWithPartition(t *testing.T, store kv.Storage) (*model.TableInfo, []int64) { tbl := &model.TableInfo{ - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), } tbl.MaxColumnID++ col := &model.ColumnInfo{ - Name: model.NewCIStr("c"), + Name: pmodel.NewCIStr("c"), Offset: 0, State: model.StatePublic, FieldType: *types.NewFieldType(mysql.TypeLong), @@ -70,33 +71,33 @@ func buildTableInfoWithPartition(t *testing.T, store kv.Storage) (*model.TableIn partIDs, err := genGlobalIDs(store, 5) require.NoError(t, err) partInfo := &model.PartitionInfo{ - Type: model.PartitionTypeRange, + Type: pmodel.PartitionTypeRange, Expr: tbl.Columns[0].Name.L, Enable: true, Definitions: []model.PartitionDefinition{ { ID: partIDs[0], - Name: model.NewCIStr("p0"), + Name: pmodel.NewCIStr("p0"), LessThan: []string{"100"}, }, { ID: partIDs[1], - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), LessThan: []string{"200"}, }, { ID: partIDs[2], - Name: model.NewCIStr("p2"), + Name: pmodel.NewCIStr("p2"), LessThan: []string{"300"}, }, { ID: partIDs[3], - Name: model.NewCIStr("p3"), + Name: pmodel.NewCIStr("p3"), LessThan: []string{"400"}, }, { ID: partIDs[4], - Name: model.NewCIStr("p4"), + Name: pmodel.NewCIStr("p4"), LessThan: []string{"500"}, }, }, @@ -240,7 +241,7 @@ func TestReorganizePartitionRollback(t *testing.T) { " PARTITION `p3` VALUES LESS THAN (8000000),\n" + " PARTITION `p4` VALUES LESS THAN (10000000),\n" + " PARTITION `p5` VALUES LESS THAN (MAXVALUE))")) - tbl, err := do.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) require.NotNil(t, tbl.Meta().Partition) require.Nil(t, 
tbl.Meta().Partition.AddingDefinitions) diff --git a/pkg/ddl/placement/BUILD.bazel b/pkg/ddl/placement/BUILD.bazel index 32914e98ab5a1..56938bf82e106 100644 --- a/pkg/ddl/placement/BUILD.bazel +++ b/pkg/ddl/placement/BUILD.bazel @@ -13,7 +13,7 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/ddl/placement", visibility = ["//visibility:public"], deps = [ - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/tablecodec", "//pkg/util/codec", "@com_github_pingcap_failpoint//:failpoint", @@ -40,6 +40,7 @@ go_test( deps = [ "//pkg/kv", "//pkg/meta", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/store/mockstore", "//pkg/tablecodec", diff --git a/pkg/ddl/placement/bundle.go b/pkg/ddl/placement/bundle.go index 427bbdf2a7d98..ee84ddd3c00f7 100644 --- a/pkg/ddl/placement/bundle.go +++ b/pkg/ddl/placement/bundle.go @@ -26,7 +26,7 @@ import ( "strings" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util/codec" pd "github.com/tikv/pd/client/http" diff --git a/pkg/ddl/placement/bundle_test.go b/pkg/ddl/placement/bundle_test.go index 0f75bf50fd69f..204ca23ccdc6c 100644 --- a/pkg/ddl/placement/bundle_test.go +++ b/pkg/ddl/placement/bundle_test.go @@ -21,7 +21,7 @@ import ( "testing" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util/codec" "github.com/stretchr/testify/require" diff --git a/pkg/ddl/placement/meta_bundle_test.go b/pkg/ddl/placement/meta_bundle_test.go index 04a71c5af5263..6967f86d95e2d 100644 --- a/pkg/ddl/placement/meta_bundle_test.go +++ b/pkg/ddl/placement/meta_bundle_test.go @@ -24,7 +24,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl/placement" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + 
"github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util/codec" @@ -46,7 +47,7 @@ func createMetaBundleSuite() *metaBundleSuite { s := new(metaBundleSuite) s.policy1 = &model.PolicyInfo{ ID: 11, - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), PlacementSettings: &model.PlacementSettings{ PrimaryRegion: "r1", Regions: "r1,r2", @@ -55,7 +56,7 @@ func createMetaBundleSuite() *metaBundleSuite { } s.policy2 = &model.PolicyInfo{ ID: 12, - Name: model.NewCIStr("p2"), + Name: pmodel.NewCIStr("p2"), PlacementSettings: &model.PlacementSettings{ PrimaryRegion: "r2", Regions: "r1,r2", @@ -64,7 +65,7 @@ func createMetaBundleSuite() *metaBundleSuite { } s.policy3 = &model.PolicyInfo{ ID: 13, - Name: model.NewCIStr("p3"), + Name: pmodel.NewCIStr("p3"), PlacementSettings: &model.PlacementSettings{ LeaderConstraints: "[+region=bj]", }, @@ -72,58 +73,58 @@ func createMetaBundleSuite() *metaBundleSuite { } s.tbl1 = &model.TableInfo{ ID: 101, - Name: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("t1"), PlacementPolicyRef: &model.PolicyRefInfo{ ID: 11, - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), }, Partition: &model.PartitionInfo{ Definitions: []model.PartitionDefinition{ { ID: 1000, - Name: model.NewCIStr("par0"), + Name: pmodel.NewCIStr("par0"), }, { ID: 1001, - Name: model.NewCIStr("par1"), - PlacementPolicyRef: &model.PolicyRefInfo{ID: 12, Name: model.NewCIStr("p2")}, + Name: pmodel.NewCIStr("par1"), + PlacementPolicyRef: &model.PolicyRefInfo{ID: 12, Name: pmodel.NewCIStr("p2")}, }, { ID: 1002, - Name: model.NewCIStr("par2"), + Name: pmodel.NewCIStr("par2"), }, }, }, } s.tbl2 = &model.TableInfo{ ID: 102, - Name: model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2"), Partition: &model.PartitionInfo{ Definitions: []model.PartitionDefinition{ { ID: 1000, - Name: model.NewCIStr("par0"), - 
PlacementPolicyRef: &model.PolicyRefInfo{ID: 11, Name: model.NewCIStr("p1")}, + Name: pmodel.NewCIStr("par0"), + PlacementPolicyRef: &model.PolicyRefInfo{ID: 11, Name: pmodel.NewCIStr("p1")}, }, { ID: 1001, - Name: model.NewCIStr("par1"), + Name: pmodel.NewCIStr("par1"), }, { ID: 1002, - Name: model.NewCIStr("par2"), + Name: pmodel.NewCIStr("par2"), }, }, }, } s.tbl3 = &model.TableInfo{ ID: 103, - Name: model.NewCIStr("t3"), - PlacementPolicyRef: &model.PolicyRefInfo{ID: 13, Name: model.NewCIStr("p3")}, + Name: pmodel.NewCIStr("t3"), + PlacementPolicyRef: &model.PolicyRefInfo{ID: 13, Name: pmodel.NewCIStr("p3")}, } s.tbl4 = &model.TableInfo{ ID: 104, - Name: model.NewCIStr("t4"), + Name: pmodel.NewCIStr("t4"), } return s } diff --git a/pkg/ddl/placement_policy.go b/pkg/ddl/placement_policy.go index 7a1aae089efd1..8b180e3ba8e7e 100644 --- a/pkg/ddl/placement_policy.go +++ b/pkg/ddl/placement_policy.go @@ -24,8 +24,9 @@ import ( "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/sessiontxn" @@ -116,7 +117,7 @@ func getPolicyInfo(t *meta.Meta, policyID int64) (*model.PolicyInfo, error) { return policy, nil } -func getPlacementPolicyByName(infoCache *infoschema.InfoCache, t *meta.Meta, policyName model.CIStr) (*model.PolicyInfo, error) { +func getPlacementPolicyByName(infoCache *infoschema.InfoCache, t *meta.Meta, policyName pmodel.CIStr) (*model.PolicyInfo, error) { currVer, err := t.GetSchemaVersion() if err != nil { return nil, err @@ -497,7 +498,7 @@ func GetRangePlacementPolicyName(ctx context.Context, rangeBundleID string) (str return "", nil } -func buildPolicyInfo(name model.CIStr, options 
[]*ast.PlacementOption) (*model.PolicyInfo, error) { +func buildPolicyInfo(name pmodel.CIStr, options []*ast.PlacementOption) (*model.PolicyInfo, error) { policyInfo := &model.PolicyInfo{PlacementSettings: &model.PlacementSettings{}} policyInfo.Name = name for _, opt := range options { diff --git a/pkg/ddl/placement_policy_ddl_test.go b/pkg/ddl/placement_policy_ddl_test.go index 379e5c62f6c1c..f1968c5b2f8f5 100644 --- a/pkg/ddl/placement_policy_ddl_test.go +++ b/pkg/ddl/placement_policy_ddl_test.go @@ -22,7 +22,8 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" @@ -32,7 +33,7 @@ import ( func testPlacementPolicyInfo(t *testing.T, store kv.Storage, name string, settings *model.PlacementSettings) *model.PolicyInfo { policy := &model.PolicyInfo{ - Name: model.NewCIStr(name), + Name: pmodel.NewCIStr(name), PlacementSettings: settings, } genIDs, err := genGlobalIDs(store, 1) @@ -156,12 +157,12 @@ func testTableInfoWithPartition(t *testing.T, store kv.Storage, name string, num require.NoError(t, err) pid := genIDs[0] tblInfo.Partition = &model.PartitionInfo{ - Type: model.PartitionTypeRange, + Type: pmodel.PartitionTypeRange, Expr: tblInfo.Columns[0].Name.L, Enable: true, Definitions: []model.PartitionDefinition{{ ID: pid, - Name: model.NewCIStr("p0"), + Name: pmodel.NewCIStr("p0"), LessThan: []string{"maxvalue"}, }}, } diff --git a/pkg/ddl/placement_policy_test.go b/pkg/ddl/placement_policy_test.go index ddbb7e4a6543d..98065a9ea1e94 100644 --- a/pkg/ddl/placement_policy_test.go +++ b/pkg/ddl/placement_policy_test.go @@ -32,7 +32,8 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" 
"github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/store/gcworker" "github.com/pingcap/tidb/pkg/testkit" @@ -79,7 +80,7 @@ func (c *bundleCheck) check(t *testing.T, is infoschema.InfoSchema) { } func checkExistTableBundlesInPD(t *testing.T, do *domain.Domain, dbName string, tbName string) { - tblInfo, err := do.InfoSchema().TableByName(context.Background(), model.NewCIStr(dbName), model.NewCIStr(tbName)) + tblInfo, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(dbName), pmodel.NewCIStr(tbName)) require.NoError(t, err) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) @@ -276,27 +277,27 @@ PARTITION p1 VALUES LESS THAN (1000)) `) defer tk.MustExec("drop table if exists tp") - oldPolicy, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p")) + oldPolicy, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p")) oldPolicy = oldPolicy.Clone() require.True(t, ok) // create a non exist policy for _, onExist := range []ddl.OnExist{ddl.OnExistReplace, ddl.OnExistIgnore, ddl.OnExistError} { newPolicy := oldPolicy.Clone() - newPolicy.Name = model.NewCIStr("p2") + newPolicy.Name = pmodel.NewCIStr("p2") newPolicy.Followers = 2 newPolicy.LearnerConstraints = "[+zone=z2]" tk.Session().SetValue(sessionctx.QueryString, "skip") err := dom.DDLExecutor().CreatePlacementPolicyWithInfo(tk.Session(), newPolicy.Clone(), onExist) require.NoError(t, err) // old policy should not be changed - found, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p")) + found, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p")) require.True(t, ok) checkPolicyEquals(t, oldPolicy, found) checkExistTableBundlesInPD(t, dom, "test", "tp") // new created policy - found, ok = dom.InfoSchema().PolicyByName(model.NewCIStr("p2")) + found, ok = 
dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p2")) require.True(t, ok) // ID of the created policy should be reassigned require.NotEqual(t, newPolicy.ID, found.ID) @@ -312,7 +313,7 @@ PARTITION p1 VALUES LESS THAN (1000)) err := dom.DDLExecutor().CreatePlacementPolicyWithInfo(tk.Session(), newPolicy.Clone(), ddl.OnExistError) require.Error(t, err) require.True(t, infoschema.ErrPlacementPolicyExists.Equal(err)) - found, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p")) + found, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p")) require.True(t, ok) checkPolicyEquals(t, oldPolicy, found) checkExistTableBundlesInPD(t, dom, "test", "tp") @@ -323,7 +324,7 @@ PARTITION p1 VALUES LESS THAN (1000)) tk.Session().SetValue(sessionctx.QueryString, "skip") err = dom.DDLExecutor().CreatePlacementPolicyWithInfo(tk.Session(), newPolicy.Clone(), ddl.OnExistIgnore) require.NoError(t, err) - found, ok = dom.InfoSchema().PolicyByName(model.NewCIStr("p")) + found, ok = dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p")) require.True(t, ok) checkPolicyEquals(t, oldPolicy, found) checkExistTableBundlesInPD(t, dom, "test", "tp") @@ -336,7 +337,7 @@ PARTITION p1 VALUES LESS THAN (1000)) tk.Session().SetValue(sessionctx.QueryString, "skip") err = dom.DDLExecutor().CreatePlacementPolicyWithInfo(tk.Session(), newPolicy.Clone(), ddl.OnExistReplace) require.NoError(t, err) - found, ok = dom.InfoSchema().PolicyByName(model.NewCIStr("p")) + found, ok = dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p")) require.True(t, ok) // when replace a policy the old policy's id should not be changed newPolicy.ID = oldPolicy.ID @@ -389,7 +390,7 @@ func testGetPolicyByNameFromIS(t *testing.T, ctx sessionctx.Context, policy stri // Make sure the table schema is the new schema. 
err := dom.Reload() require.NoError(t, err) - po, ok := dom.InfoSchema().PolicyByName(model.NewCIStr(policy)) + po, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr(policy)) require.Equal(t, true, ok) return po } @@ -556,7 +557,7 @@ func TestAlterPlacementPolicy(t *testing.T) { );`) defer tk.MustExec("drop table if exists tp") - policy, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("x")) + policy, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("x")) require.True(t, ok) // test for normal cases @@ -727,7 +728,7 @@ func TestCreateTableWithPlacementPolicy(t *testing.T) { } func getClonedTable(dom *domain.Domain, dbName string, tableName string) (*model.TableInfo, error) { - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr(dbName), model.NewCIStr(tableName)) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(dbName), pmodel.NewCIStr(tableName)) if err != nil { return nil, err } @@ -740,7 +741,7 @@ func getClonedTable(dom *domain.Domain, dbName string, tableName string) (*model } func getClonedDatabase(dom *domain.Domain, dbName string) (*model.DBInfo, bool) { - db, ok := dom.InfoSchema().SchemaByName(model.NewCIStr(dbName)) + db, ok := dom.InfoSchema().SchemaByName(pmodel.NewCIStr(dbName)) if !ok { return nil, ok } @@ -769,7 +770,7 @@ func TestCreateTableWithInfoPlacement(t *testing.T) { tbl, err := getClonedTable(dom, "test", "t1") require.NoError(t, err) - policy, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p1")) + policy, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p1")) require.True(t, ok) require.Equal(t, policy.ID, tbl.PlacementPolicyRef.ID) @@ -777,7 +778,7 @@ func TestCreateTableWithInfoPlacement(t *testing.T) { tk.MustExec("drop placement policy p1") tk.MustExec("create placement policy p1 followers=2") tk.Session().SetValue(sessionctx.QueryString, "skip") - require.Nil(t, dom.DDLExecutor().CreateTableWithInfo(tk.Session(), model.NewCIStr("test2"), tbl, nil, 
ddl.WithOnExist(ddl.OnExistError))) + require.Nil(t, dom.DDLExecutor().CreateTableWithInfo(tk.Session(), pmodel.NewCIStr("test2"), tbl, nil, ddl.WithOnExist(ddl.OnExistError))) tk.MustQuery("show create table t1").Check(testkit.Rows("t1 CREATE TABLE `t1` (\n" + " `a` int(11) DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) @@ -789,16 +790,16 @@ func TestCreateTableWithInfoPlacement(t *testing.T) { // The ref id for new table should be the new policy id tbl2, err := getClonedTable(dom, "test2", "t1") require.NoError(t, err) - policy2, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p1")) + policy2, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p1")) require.True(t, ok) require.Equal(t, policy2.ID, tbl2.PlacementPolicyRef.ID) require.True(t, policy2.ID != policy.ID) // Test policy not exists - tbl2.Name = model.NewCIStr("t3") - tbl2.PlacementPolicyRef.Name = model.NewCIStr("pxx") + tbl2.Name = pmodel.NewCIStr("t3") + tbl2.PlacementPolicyRef.Name = pmodel.NewCIStr("pxx") tk.Session().SetValue(sessionctx.QueryString, "skip") - err = dom.DDLExecutor().CreateTableWithInfo(tk.Session(), model.NewCIStr("test2"), tbl2, nil, ddl.WithOnExist(ddl.OnExistError)) + err = dom.DDLExecutor().CreateTableWithInfo(tk.Session(), pmodel.NewCIStr("test2"), tbl2, nil, ddl.WithOnExist(ddl.OnExistError)) require.Equal(t, "[schema:8239]Unknown placement policy 'pxx'", err.Error()) } @@ -819,12 +820,12 @@ func TestCreateSchemaWithInfoPlacement(t *testing.T) { db, ok := getClonedDatabase(dom, "test2") require.True(t, ok) - policy, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p1")) + policy, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p1")) require.True(t, ok) require.Equal(t, policy.ID, db.PlacementPolicyRef.ID) db2 := db.Clone() - db2.Name = model.NewCIStr("test3") + db2.Name = pmodel.NewCIStr("test3") tk.MustExec("alter database test2 placement policy='default'") tk.MustExec("drop placement policy p1") tk.MustExec("create placement 
policy p1 followers=2") @@ -837,14 +838,14 @@ func TestCreateSchemaWithInfoPlacement(t *testing.T) { // The ref id for new table should be the new policy id db2, ok = getClonedDatabase(dom, "test3") require.True(t, ok) - policy2, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p1")) + policy2, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p1")) require.True(t, ok) require.Equal(t, policy2.ID, db2.PlacementPolicyRef.ID) require.True(t, policy2.ID != policy.ID) // Test policy not exists - db2.Name = model.NewCIStr("test4") - db2.PlacementPolicyRef.Name = model.NewCIStr("p2") + db2.Name = pmodel.NewCIStr("test4") + db2.PlacementPolicyRef.Name = pmodel.NewCIStr("p2") tk.Session().SetValue(sessionctx.QueryString, "skip") err := dom.DDLExecutor().CreateSchemaWithInfo(tk.Session(), db2, ddl.OnExistError) require.Equal(t, "[schema:8239]Unknown placement policy 'p2'", err.Error()) @@ -966,7 +967,7 @@ func testGetPolicyByName(t *testing.T, ctx sessionctx.Context, name string, must // Make sure the table schema is the new schema. 
err := dom.Reload() require.NoError(t, err) - po, ok := dom.InfoSchema().PolicyByName(model.NewCIStr(name)) + po, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr(name)) if mustExist { require.Equal(t, true, ok) } @@ -1105,7 +1106,7 @@ func TestAlterTablePartitionWithPlacementPolicy(t *testing.T) { tk.MustQuery("SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, PARTITION_NAME, TIDB_PLACEMENT_POLICY_NAME FROM information_schema.Partitions WHERE TABLE_SCHEMA='test' AND TABLE_NAME = 't1' AND PARTITION_NAME = 'p0'").Check(testkit.Rows(`def test t1 p0 x`)) checkExistTableBundlesInPD(t, dom, "test", "t1") - policyX, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("x")) + policyX, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("x")) require.True(t, ok) ptDef := testGetPartitionDefinitionsByName(t, tk.Session(), "test", "t1", "p0") require.NotNil(t, ptDef) @@ -1119,7 +1120,7 @@ func testGetPartitionDefinitionsByName(t *testing.T, ctx sessionctx.Context, db // Make sure the table schema is the new schema. 
err := dom.Reload() require.NoError(t, err) - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr(db), model.NewCIStr(table)) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(db), pmodel.NewCIStr(table)) require.NoError(t, err) require.NotNil(t, tbl) var ptDef model.PartitionDefinition @@ -1253,7 +1254,7 @@ func TestDatabasePlacement(t *testing.T) { tk.MustExec("create placement policy p2 primary_region='r2' regions='r1,r2'") defer tk.MustExec("drop placement policy p2") - policy1, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p1")) + policy1, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p1")) require.True(t, ok) tk.MustExec(`create database db2`) @@ -1262,7 +1263,7 @@ func TestDatabasePlacement(t *testing.T) { "db2 CREATE DATABASE `db2` /*!40100 DEFAULT CHARACTER SET utf8mb4 */", )) - policy2, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p2")) + policy2, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p2")) require.True(t, ok) // alter with policy @@ -1271,7 +1272,7 @@ func TestDatabasePlacement(t *testing.T) { "db2 CREATE DATABASE `db2` /*!40100 DEFAULT CHARACTER SET utf8mb4 */ /*T![placement] PLACEMENT POLICY=`p1` */", )) - db, ok := dom.InfoSchema().SchemaByName(model.NewCIStr("db2")) + db, ok := dom.InfoSchema().SchemaByName(pmodel.NewCIStr("db2")) require.True(t, ok) require.Equal(t, policy1.ID, db.PlacementPolicyRef.ID) @@ -1280,7 +1281,7 @@ func TestDatabasePlacement(t *testing.T) { "db2 CREATE DATABASE `db2` /*!40100 DEFAULT CHARACTER SET utf8mb4 */ /*T![placement] PLACEMENT POLICY=`p2` */", )) - db, ok = dom.InfoSchema().SchemaByName(model.NewCIStr("db2")) + db, ok = dom.InfoSchema().SchemaByName(pmodel.NewCIStr("db2")) require.True(t, ok) require.Equal(t, policy2.ID, db.PlacementPolicyRef.ID) @@ -1290,7 +1291,7 @@ func TestDatabasePlacement(t *testing.T) { "db2 CREATE DATABASE `db2` /*!40100 DEFAULT CHARACTER SET utf8mb4 */", )) - db, ok = 
dom.InfoSchema().SchemaByName(model.NewCIStr("db2")) + db, ok = dom.InfoSchema().SchemaByName(pmodel.NewCIStr("db2")) require.True(t, ok) require.Nil(t, db.PlacementPolicyRef) @@ -1344,7 +1345,7 @@ func TestDropDatabaseGCPlacement(t *testing.T) { )`) is := dom.InfoSchema() - tt, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tt, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tk.MustExec("drop database db2") @@ -1401,7 +1402,7 @@ func TestDropTableGCPlacement(t *testing.T) { defer tk.MustExec("drop table if exists t2") is := dom.InfoSchema() - t1, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + t1, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) tk.MustExec("drop table t2") @@ -1436,7 +1437,7 @@ func TestAlterTablePlacement(t *testing.T) { tk.MustExec("create placement policy p1 primary_region='r1' regions='r1'") defer tk.MustExec("drop placement policy p1") - policy, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p1")) + policy, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p1")) require.True(t, ok) tk.MustExec(`CREATE TABLE tp (id INT) PARTITION BY RANGE (id) ( @@ -1463,7 +1464,7 @@ func TestAlterTablePlacement(t *testing.T) { "(PARTITION `p0` VALUES LESS THAN (100),\n" + " PARTITION `p1` VALUES LESS THAN (1000))")) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) require.Equal(t, policy.ID, tb.Meta().PlacementPolicyRef.ID) checkExistTableBundlesInPD(t, dom, "test", "tp") @@ -1536,9 +1537,9 @@ func TestDropTablePartitionGCPlacement(t *testing.T) { defer tk.MustExec("drop table if exists t2") is := dom.InfoSchema() - t1, err := 
is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + t1, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) - t2, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + t2, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) tk.MustExec("alter table t2 drop partition p0") @@ -1599,7 +1600,7 @@ func TestAlterTablePartitionPlacement(t *testing.T) { tk.MustExec("create placement policy p1 primary_region='r1' regions='r1'") defer tk.MustExec("drop placement policy p1") - policy, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p1")) + policy, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p1")) require.True(t, ok) tk.MustExec(`CREATE TABLE tp (id INT) placement policy p0 PARTITION BY RANGE (id) ( @@ -1626,7 +1627,7 @@ func TestAlterTablePartitionPlacement(t *testing.T) { "(PARTITION `p0` VALUES LESS THAN (100) /*T![placement] PLACEMENT POLICY=`p1` */,\n" + " PARTITION `p1` VALUES LESS THAN (1000))")) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) require.Equal(t, policy.ID, tb.Meta().Partition.Definitions[0].PlacementPolicyRef.ID) checkExistTableBundlesInPD(t, dom, "test", "tp") @@ -1686,7 +1687,7 @@ func TestAddPartitionWithPlacement(t *testing.T) { tk.MustExec("create placement policy p2 primary_region='r2' regions='r2'") defer tk.MustExec("drop placement policy p2") - policy2, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p2")) + policy2, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p2")) require.True(t, ok) tk.MustExec(`CREATE TABLE tp (id INT) PLACEMENT POLICY p1 PARTITION BY RANGE (id) ( @@ -1721,7 +1722,7 @@ func TestAddPartitionWithPlacement(t 
*testing.T) { " PARTITION `p4` VALUES LESS THAN (1000000))")) checkExistTableBundlesInPD(t, dom, "test", "tp") - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) require.Equal(t, policy2.ID, tb.Meta().Partition.Definitions[2].PlacementPolicyRef.ID) @@ -1769,10 +1770,10 @@ func TestTruncateTableWithPlacement(t *testing.T) { tk.MustExec("create placement policy p2 primary_region='r2' regions='r2'") defer tk.MustExec("drop placement policy p2") - policy1, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p1")) + policy1, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p1")) require.True(t, ok) - policy2, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p2")) + policy2, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p2")) require.True(t, ok) tk.MustExec(`CREATE TABLE t1 (id INT) placement policy p1`) @@ -1784,7 +1785,7 @@ func TestTruncateTableWithPlacement(t *testing.T) { " `id` int(11) DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![placement] PLACEMENT POLICY=`p1` */")) - t1, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + t1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) checkExistTableBundlesInPD(t, dom, "test", "t1") @@ -1793,7 +1794,7 @@ func TestTruncateTableWithPlacement(t *testing.T) { "t1 CREATE TABLE `t1` (\n" + " `id` int(11) DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![placement] PLACEMENT POLICY=`p1` */")) - newT1, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + newT1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) 
require.NoError(t, err) require.True(t, newT1.Meta().ID != t1.Meta().ID) checkExistTableBundlesInPD(t, dom, "test", "t1") @@ -1807,7 +1808,7 @@ func TestTruncateTableWithPlacement(t *testing.T) { );`) defer tk.MustExec("drop table tp") - tp, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + tp, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) require.Equal(t, policy1.ID, tp.Meta().PlacementPolicyRef.ID) require.Equal(t, policy2.ID, tp.Meta().Partition.Definitions[1].PlacementPolicyRef.ID) @@ -1822,7 +1823,7 @@ func TestTruncateTableWithPlacement(t *testing.T) { checkExistTableBundlesInPD(t, dom, "test", "tp") tk.MustExec("TRUNCATE TABLE tp") - newTp, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + newTp, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) require.True(t, newTp.Meta().ID != tp.Meta().ID) require.Equal(t, policy1.ID, newTp.Meta().PlacementPolicyRef.ID) @@ -1900,13 +1901,13 @@ func TestTruncateTablePartitionWithPlacement(t *testing.T) { tk.MustExec("create placement policy p3 primary_region='r3' regions='r3'") defer tk.MustExec("drop placement policy p3") - policy1, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p1")) + policy1, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p1")) require.True(t, ok) - policy2, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p2")) + policy2, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p2")) require.True(t, ok) - policy3, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p3")) + policy3, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p3")) require.True(t, ok) // test for partitioned table @@ -1918,7 +1919,7 @@ func TestTruncateTablePartitionWithPlacement(t *testing.T) { );`) defer tk.MustExec("drop table tp") - tp, err 
:= dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + tp, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) checkOldPartitions := make([]model.PartitionDefinition, 0, 2) @@ -1933,7 +1934,7 @@ func TestTruncateTablePartitionWithPlacement(t *testing.T) { } tk.MustExec("ALTER TABLE tp TRUNCATE partition p1,p3") - newTp, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + newTp, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) require.Equal(t, tp.Meta().ID, newTp.Meta().ID) require.Equal(t, policy1.ID, newTp.Meta().PlacementPolicyRef.ID) @@ -1961,7 +1962,7 @@ func TestTruncateTablePartitionWithPlacement(t *testing.T) { // add new partition will not override bundle waiting for GC tk.MustExec("alter table tp add partition (partition p4 values less than(1000000))") - newTp2, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + newTp2, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) require.Equal(t, 5, len(newTp2.Meta().Partition.Definitions)) checkWaitingGCPartitionBundlesInPD(t, dom, checkOldPartitions) @@ -2023,7 +2024,7 @@ func TestDropTableWithPlacement(t *testing.T) { );`) defer tk.MustExec("drop table if exists tp") - tp, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + tp, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) checkExistTableBundlesInPD(t, dom, "test", "tp") tk.MustExec("drop table tp") @@ -2069,10 +2070,10 @@ func TestDropPartitionWithPlacement(t *testing.T) { tk.MustExec("create placement policy p3 primary_region='r3' 
regions='r3'") defer tk.MustExec("drop placement policy p3") - policy1, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p1")) + policy1, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p1")) require.True(t, ok) - policy3, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p3")) + policy3, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p3")) require.True(t, ok) // test for partitioned table @@ -2084,7 +2085,7 @@ func TestDropPartitionWithPlacement(t *testing.T) { );`) defer tk.MustExec("drop table tp") - tp, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + tp, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) checkOldPartitions := make([]model.PartitionDefinition, 0, 2) @@ -2099,7 +2100,7 @@ func TestDropPartitionWithPlacement(t *testing.T) { } tk.MustExec("ALTER TABLE tp DROP partition p1,p3") - newTp, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + newTp, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) require.Equal(t, tp.Meta().ID, newTp.Meta().ID) require.Equal(t, policy1.ID, newTp.Meta().PlacementPolicyRef.ID) @@ -2113,7 +2114,7 @@ func TestDropPartitionWithPlacement(t *testing.T) { // add new partition will not override bundle waiting for GC tk.MustExec("alter table tp add partition (partition p4 values less than(1000000))") - newTp2, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + newTp2, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) require.Equal(t, 3, len(newTp2.Meta().Partition.Definitions)) checkWaitingGCPartitionBundlesInPD(t, dom, checkOldPartitions) @@ -2147,14 +2148,14 @@ func TestExchangePartitionWithPlacement(t *testing.T) 
{ tk.MustExec("create placement policy pp2 primary_region='r2' regions='r2'") tk.MustExec("create placement policy pp3 primary_region='r3' regions='r3'") - policy1, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("pp1")) + policy1, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("pp1")) require.True(t, ok) tk.MustExec(`CREATE TABLE t1 (id INT) placement policy pp1`) tk.MustExec(`CREATE TABLE t2 (id INT)`) tk.MustExec(`CREATE TABLE t3 (id INT) placement policy pp3`) - t1, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + t1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) t1ID := t1.Meta().ID @@ -2164,7 +2165,7 @@ func TestExchangePartitionWithPlacement(t *testing.T) { PARTITION p3 VALUES LESS THAN (10000) )`) - tp, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + tp, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) tpID := tp.Meta().ID par0ID := tp.Meta().Partition.Definitions[0].ID @@ -2183,12 +2184,12 @@ func TestExchangePartitionWithPlacement(t *testing.T) { "(PARTITION `p1` VALUES LESS THAN (100) /*T![placement] PLACEMENT POLICY=`pp1` */,\n" + " PARTITION `p2` VALUES LESS THAN (1000) /*T![placement] PLACEMENT POLICY=`pp2` */,\n" + " PARTITION `p3` VALUES LESS THAN (10000))")) - tp, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + tp, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) require.Equal(t, tpID, tp.Meta().ID) require.Equal(t, t1ID, tp.Meta().Partition.Definitions[0].ID) require.NotNil(t, tp.Meta().Partition.Definitions[0].PlacementPolicyRef) - t1, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), 
model.NewCIStr("t1")) + t1, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) require.Equal(t, par0ID, t1.Meta().ID) require.Equal(t, policy1.ID, t1.Meta().PlacementPolicyRef.ID) diff --git a/pkg/ddl/placement_sql_test.go b/pkg/ddl/placement_sql_test.go index e3478712bd847..522351c17b309 100644 --- a/pkg/ddl/placement_sql_test.go +++ b/pkg/ddl/placement_sql_test.go @@ -24,7 +24,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/domain/infosync" mysql "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" @@ -55,7 +56,7 @@ PARTITION BY RANGE (c) ( is := dom.InfoSchema() - tb, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tb, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) partDefs := tb.Meta().GetPartitionInfo().Definitions @@ -187,7 +188,7 @@ func TestCreateSchemaWithPlacement(t *testing.T) { tk.MustQuery("SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TIDB_PLACEMENT_POLICY_NAME FROM information_schema.Tables WHERE TABLE_SCHEMA='SchemaPolicyPlacementTest' AND TABLE_NAME = 'UsePolicy'").Check(testkit.Rows(`def SchemaPolicyPlacementTest UsePolicy PolicyTableTest`)) is := dom.InfoSchema() - db, ok := is.SchemaByName(model.NewCIStr("SchemaPolicyPlacementTest")) + db, ok := is.SchemaByName(pmodel.NewCIStr("SchemaPolicyPlacementTest")) require.True(t, ok) require.NotNil(t, db.PlacementPolicyRef) require.Equal(t, "PolicySchemaTest", db.PlacementPolicyRef.Name.O) @@ -310,7 +311,7 @@ func TestPlacementMode(t *testing.T) { defer tk.MustExec("drop table if exists t2") tk.MustQuery("show warnings").Check(testkit.Rows()) - 
existPolicy, ok := dom.InfoSchema().PolicyByName(model.NewCIStr("p1")) + existPolicy, ok := dom.InfoSchema().PolicyByName(pmodel.NewCIStr("p1")) require.True(t, ok) // invalid values @@ -348,7 +349,7 @@ func TestPlacementMode(t *testing.T) { // create placement policy in ignore mode (policy name not exists) newPolicy = existPolicy.Clone() - newPolicy.Name = model.NewCIStr("p3") + newPolicy.Name = pmodel.NewCIStr("p3") newPolicy.Followers = 8 tk.Session().SetValue(sessionctx.QueryString, "skip") err = dom.DDLExecutor().CreatePlacementPolicyWithInfo(tk.Session(), newPolicy, ddl.OnExistError) @@ -533,9 +534,9 @@ func TestPlacementMode(t *testing.T) { tbl, err := getClonedTableFromDomain("test", "t1", dom) require.NoError(t, err) require.NotNil(t, tbl.PlacementPolicyRef) - tbl.Name = model.NewCIStr("t2") + tbl.Name = pmodel.NewCIStr("t2") tk.Session().SetValue(sessionctx.QueryString, "skip") - err = dom.DDLExecutor().CreateTableWithInfo(tk.Session(), model.NewCIStr("test"), tbl, nil, ddl.WithOnExist(ddl.OnExistError)) + err = dom.DDLExecutor().CreateTableWithInfo(tk.Session(), pmodel.NewCIStr("test"), tbl, nil, ddl.WithOnExist(ddl.OnExistError)) require.NoError(t, err) tk.MustQuery("show create table t2").Check(testkit.Rows("t2 CREATE TABLE `t2` (\n" + " `id` int(11) DEFAULT NULL\n" + @@ -546,10 +547,10 @@ func TestPlacementMode(t *testing.T) { tbl, err = getClonedTableFromDomain("test", "t1", dom) require.NoError(t, err) require.NotNil(t, tbl.PlacementPolicyRef) - tbl.Name = model.NewCIStr("t2") - tbl.PlacementPolicyRef.Name = model.NewCIStr("pxx") + tbl.Name = pmodel.NewCIStr("t2") + tbl.PlacementPolicyRef.Name = pmodel.NewCIStr("pxx") tk.Session().SetValue(sessionctx.QueryString, "skip") - err = dom.DDLExecutor().CreateTableWithInfo(tk.Session(), model.NewCIStr("test"), tbl, nil, ddl.WithOnExist(ddl.OnExistError)) + err = dom.DDLExecutor().CreateTableWithInfo(tk.Session(), pmodel.NewCIStr("test"), tbl, nil, ddl.WithOnExist(ddl.OnExistError)) require.NoError(t, err) 
tk.MustQuery("show create table t2").Check(testkit.Rows("t2 CREATE TABLE `t2` (\n" + " `id` int(11) DEFAULT NULL\n" + @@ -560,7 +561,7 @@ func TestPlacementMode(t *testing.T) { db1, ok := getClonedDatabaseFromDomain("db1", dom) require.True(t, ok) require.NotNil(t, db1.PlacementPolicyRef) - db1.Name = model.NewCIStr("db2") + db1.Name = pmodel.NewCIStr("db2") tk.Session().SetValue(sessionctx.QueryString, "skip") err = dom.DDLExecutor().CreateSchemaWithInfo(tk.Session(), db1, ddl.OnExistError) require.NoError(t, err) @@ -571,8 +572,8 @@ func TestPlacementMode(t *testing.T) { db1, ok = getClonedDatabaseFromDomain("db1", dom) require.True(t, ok) require.NotNil(t, db1.PlacementPolicyRef) - db1.Name = model.NewCIStr("db2") - db1.PlacementPolicyRef.Name = model.NewCIStr("pxx") + db1.Name = pmodel.NewCIStr("db2") + db1.PlacementPolicyRef.Name = pmodel.NewCIStr("pxx") tk.Session().SetValue(sessionctx.QueryString, "skip") err = dom.DDLExecutor().CreateSchemaWithInfo(tk.Session(), db1, ddl.OnExistError) require.NoError(t, err) @@ -580,7 +581,7 @@ func TestPlacementMode(t *testing.T) { } func checkTiflashReplicaSet(t *testing.T, do *domain.Domain, db, tb string, cnt uint64) { - tbl, err := do.InfoSchema().TableByName(context.Background(), model.NewCIStr(db), model.NewCIStr(tb)) + tbl, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(db), pmodel.NewCIStr(tb)) require.NoError(t, err) tiflashReplica := tbl.Meta().TiFlashReplica @@ -727,7 +728,7 @@ func TestPlacementTiflashCheck(t *testing.T) { } func getClonedTableFromDomain(dbName string, tableName string, dom *domain.Domain) (*model.TableInfo, error) { - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr(dbName), model.NewCIStr(tableName)) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(dbName), pmodel.NewCIStr(tableName)) if err != nil { return nil, err } @@ -740,7 +741,7 @@ func getClonedTableFromDomain(dbName string, tableName string, dom 
*domain.Domai } func getClonedDatabaseFromDomain(dbName string, dom *domain.Domain) (*model.DBInfo, bool) { - db, ok := dom.InfoSchema().SchemaByName(model.NewCIStr(dbName)) + db, ok := dom.InfoSchema().SchemaByName(pmodel.NewCIStr(dbName)) if !ok { return nil, ok } diff --git a/pkg/ddl/reorg.go b/pkg/ddl/reorg.go index 8d2df7ca83e4a..13b068172de61 100644 --- a/pkg/ddl/reorg.go +++ b/pkg/ddl/reorg.go @@ -36,8 +36,8 @@ import ( "github.com/pingcap/tidb/pkg/expression/contextstatic" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/ddl/reorg_partition_test.go b/pkg/ddl/reorg_partition_test.go index 1425642551e7d..36c7cd16012b4 100644 --- a/pkg/ddl/reorg_partition_test.go +++ b/pkg/ddl/reorg_partition_test.go @@ -25,7 +25,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl/logutil" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/table" @@ -190,9 +191,9 @@ func TestReorgPartitionConcurrent(t *testing.T) { tk.MustExec(`admin check table t`) writeOnlyInfoSchema := sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema() require.Equal(t, int64(1), writeOnlyInfoSchema.SchemaMetaVersion()-deleteOnlyInfoSchema.SchemaMetaVersion()) - deleteOnlyTbl, err := deleteOnlyInfoSchema.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr("t")) + deleteOnlyTbl, err := deleteOnlyInfoSchema.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr("t")) require.NoError(t, err) - writeOnlyTbl, 
err := writeOnlyInfoSchema.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr("t")) + writeOnlyTbl, err := writeOnlyInfoSchema.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr("t")) require.NoError(t, err) writeOnlyParts := writeOnlyTbl.Meta().Partition writeOnlyTbl.Meta().Partition = deleteOnlyTbl.Meta().Partition @@ -233,13 +234,13 @@ func TestReorgPartitionConcurrent(t *testing.T) { deleteReorgInfoSchema := sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema() require.Equal(t, int64(1), deleteReorgInfoSchema.SchemaMetaVersion()-writeReorgInfoSchema.SchemaMetaVersion()) tk.MustExec(`insert into t values (16, "16", 16)`) - oldTbl, err := writeReorgInfoSchema.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr("t")) + oldTbl, err := writeReorgInfoSchema.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr("t")) require.NoError(t, err) partDef := oldTbl.Meta().Partition.Definitions[1] require.Equal(t, "p1", partDef.Name.O) rows := getNumRowsFromPartitionDefs(t, tk, oldTbl, oldTbl.Meta().Partition.Definitions[1:2]) require.Equal(t, 5, rows) - currTbl, err := deleteReorgInfoSchema.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr("t")) + currTbl, err := deleteReorgInfoSchema.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr("t")) require.NoError(t, err) currPart := currTbl.Meta().Partition currTbl.Meta().Partition = oldTbl.Meta().Partition @@ -277,7 +278,7 @@ func TestReorgPartitionConcurrent(t *testing.T) { tk.MustExec(`admin check table t`) newInfoSchema := sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema() require.Equal(t, int64(1), newInfoSchema.SchemaMetaVersion()-deleteReorgInfoSchema.SchemaMetaVersion()) - oldTbl, err = deleteReorgInfoSchema.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr("t")) + oldTbl, err = 
deleteReorgInfoSchema.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr("t")) require.NoError(t, err) partDef = oldTbl.Meta().Partition.Definitions[1] require.Equal(t, "p1a", partDef.Name.O) @@ -295,7 +296,7 @@ func TestReorgPartitionConcurrent(t *testing.T) { " PARTITION `p1a` VALUES LESS THAN (15),\n" + " PARTITION `p1b` VALUES LESS THAN (20),\n" + " PARTITION `pMax` VALUES LESS THAN (MAXVALUE))")) - newTbl, err := deleteReorgInfoSchema.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr("t")) + newTbl, err := deleteReorgInfoSchema.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr("t")) require.NoError(t, err) newPart := newTbl.Meta().Partition newTbl.Meta().Partition = oldTbl.Meta().Partition @@ -394,7 +395,7 @@ func TestReorgPartitionFailConcurrent(t *testing.T) { go backgroundExec(store, schemaName, "alter table t reorganize partition p1a,p1b into (partition p1a values less than (14), partition p1b values less than (17), partition p1c values less than (20))", alterErr) wait <- true infoSchema := sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema() - tbl, err := infoSchema.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr("t")) + tbl, err := infoSchema.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr("t")) require.NoError(t, err) require.Equal(t, 0, getNumRowsFromPartitionDefs(t, tk, tbl, tbl.Meta().Partition.AddingDefinitions)) tk.MustExec(`delete from t where a = 14`) @@ -402,7 +403,7 @@ func TestReorgPartitionFailConcurrent(t *testing.T) { tk.MustExec(`admin check table t`) wait <- true wait <- true - tbl, err = infoSchema.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr("t")) + tbl, err = infoSchema.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr("t")) require.NoError(t, err) require.Equal(t, 5, getNumRowsFromPartitionDefs(t, tk, tbl, 
tbl.Meta().Partition.AddingDefinitions)) tk.MustExec(`delete from t where a = 15`) @@ -529,7 +530,7 @@ func TestReorgPartitionRollback(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/mockUpdateVersionAndTableInfoErr")) ctx := tk.Session() is := domain.GetDomain(ctx).InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr("t")) require.NoError(t, err) noNewTablesAfter(t, tk, ctx, tbl) require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/reorgPartitionAfterDataCopy", `return(true)`)) @@ -553,7 +554,7 @@ func TestReorgPartitionRollback(t *testing.T) { " PARTITION `p1` VALUES LESS THAN (20),\n" + " PARTITION `pMax` VALUES LESS THAN (MAXVALUE))")) - tbl, err = is.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr("t")) require.NoError(t, err) noNewTablesAfter(t, tk, ctx, tbl) } diff --git a/pkg/ddl/repair_table_test.go b/pkg/ddl/repair_table_test.go index bffd508172aca..4d074db2ce889 100644 --- a/pkg/ddl/repair_table_test.go +++ b/pkg/ddl/repair_table_test.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/testkit" diff --git a/pkg/ddl/resource_group.go b/pkg/ddl/resource_group.go index 20849d6ca520e..b714b5b7093c5 100644 --- a/pkg/ddl/resource_group.go +++ b/pkg/ddl/resource_group.go @@ -27,8 +27,9 @@ import ( "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/meta" + 
"github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" rg "github.com/pingcap/tidb/pkg/resourcegroup" "github.com/pingcap/tidb/pkg/util/dbterror" kvutil "github.com/tikv/client-go/v2/util" @@ -262,16 +263,16 @@ func SetDirectResourceGroupRunawayOption(resourceGroupSettings *model.ResourceGr } settings := resourceGroupSettings.Runaway switch opt.Tp { - case ast.RunawayRule: + case pmodel.RunawayRule: // because execute time won't be too long, we use `time` pkg which does not support to parse unit 'd'. dur, err := time.ParseDuration(opt.RuleOption.ExecElapsed) if err != nil { return err } settings.ExecElapsedTimeMs = uint64(dur.Milliseconds()) - case ast.RunawayAction: + case pmodel.RunawayAction: settings.Action = opt.ActionOption.Type - case ast.RunawayWatch: + case pmodel.RunawayWatch: settings.WatchType = opt.WatchOption.Type if dur := opt.WatchOption.Duration; len(dur) > 0 { dur, err := time.ParseDuration(dur) diff --git a/pkg/ddl/resourcegroup/BUILD.bazel b/pkg/ddl/resourcegroup/BUILD.bazel index 3c18dcc7a024e..90fc95e3aa38d 100644 --- a/pkg/ddl/resourcegroup/BUILD.bazel +++ b/pkg/ddl/resourcegroup/BUILD.bazel @@ -9,6 +9,7 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/ddl/resourcegroup", visibility = ["//visibility:public"], deps = [ + "//pkg/meta/model", "//pkg/parser/model", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_kvproto//pkg/resource_manager", diff --git a/pkg/ddl/resourcegroup/group.go b/pkg/ddl/resourcegroup/group.go index 18bb42a602541..f9e0b129f995e 100644 --- a/pkg/ddl/resourcegroup/group.go +++ b/pkg/ddl/resourcegroup/group.go @@ -16,7 +16,8 @@ package resourcegroup import ( rmpb "github.com/pingcap/kvproto/pkg/resource_manager" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" ) // MaxGroupNameLength is max 
length of the name of a resource group @@ -44,11 +45,11 @@ func NewGroupFromOptions(groupName string, options *model.ResourceGroupSettings) return nil, ErrInvalidResourceGroupRunawayExecElapsedTime } runaway.Rule.ExecElapsedTimeMs = options.Runaway.ExecElapsedTimeMs - if options.Runaway.Action == model.RunawayActionNone { + if options.Runaway.Action == pmodel.RunawayActionNone { return nil, ErrUnknownResourceGroupRunawayAction } runaway.Action = rmpb.RunawayAction(options.Runaway.Action) - if options.Runaway.WatchType != model.WatchNone { + if options.Runaway.WatchType != pmodel.WatchNone { runaway.Watch = &rmpb.RunawayWatch{} runaway.Watch.Type = rmpb.RunawayWatchType(options.Runaway.WatchType) runaway.Watch.LastingDurationMs = options.Runaway.WatchDurationMs diff --git a/pkg/ddl/restart_test.go b/pkg/ddl/restart_test.go index f0cb18bbb4425..9edc884ff992f 100644 --- a/pkg/ddl/restart_test.go +++ b/pkg/ddl/restart_test.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" diff --git a/pkg/ddl/rollingback.go b/pkg/ddl/rollingback.go index 0676e708a4917..a5d1972043aef 100644 --- a/pkg/ddl/rollingback.go +++ b/pkg/ddl/rollingback.go @@ -21,8 +21,9 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/ddl/ingest" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -59,7 +60,7 @@ func convertAddIdxJob2RollbackJob( }) originalState := allIndexInfos[0].State - idxNames := 
make([]model.CIStr, 0, len(allIndexInfos)) + idxNames := make([]pmodel.CIStr, 0, len(allIndexInfos)) ifExists := make([]bool, 0, len(allIndexInfos)) for _, indexInfo := range allIndexInfos { if indexInfo.Primary { @@ -112,7 +113,7 @@ func convertNotReorgAddIdxJob2RollbackJob(jobCtx *jobContext, t *meta.Meta, job } unique := make([]bool, 1) - indexName := make([]model.CIStr, 1) + indexName := make([]pmodel.CIStr, 1) indexPartSpecifications := make([][]*ast.IndexPartSpecification, 1) indexOption := make([]*ast.IndexOption, 1) @@ -509,7 +510,7 @@ func convertJob2RollbackJob(w *worker, jobCtx *jobContext, t *meta.Meta, job *mo model.ActionRenameTable, model.ActionRenameTables, model.ActionModifyTableCharsetAndCollate, model.ActionModifySchemaCharsetAndCollate, model.ActionRepairTable, - model.ActionModifyTableAutoIdCache, model.ActionAlterIndexVisibility, + model.ActionModifyTableAutoIDCache, model.ActionAlterIndexVisibility, model.ActionModifySchemaDefaultPlacement, model.ActionRecoverSchema: ver, err = cancelOnlyNotHandledJob(job, model.StateNone) case model.ActionMultiSchemaChange: diff --git a/pkg/ddl/rollingback_test.go b/pkg/ddl/rollingback_test.go index 7e10f00072c4c..a6fe116e5a55a 100644 --- a/pkg/ddl/rollingback_test.go +++ b/pkg/ddl/rollingback_test.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/ddl" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/external" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" diff --git a/pkg/ddl/sanity_check.go b/pkg/ddl/sanity_check.go index 9f8086beefedd..871b89ca79c3c 100644 --- a/pkg/ddl/sanity_check.go +++ b/pkg/ddl/sanity_check.go @@ -23,9 +23,10 @@ import ( "github.com/pingcap/tidb/pkg/ddl/logutil" sess "github.com/pingcap/tidb/pkg/ddl/session" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" 
"github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/mathutil" @@ -145,7 +146,7 @@ func expectedDeleteRangeCnt(ctx delRangeCntCtx, job *model.Job) (int, error) { } return mathutil.Max(len(partitionIDs), 1), nil case model.ActionDropColumn: - var colName model.CIStr + var colName pmodel.CIStr var ifExists bool var indexIDs []int64 var partitionIDs []int64 diff --git a/pkg/ddl/schema.go b/pkg/ddl/schema.go index 5e8551a43cec1..629d2013cc481 100644 --- a/pkg/ddl/schema.go +++ b/pkg/ddl/schema.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) func onCreateSchema(jobCtx *jobContext, t *meta.Meta, job *model.Job) (ver int64, _ error) { diff --git a/pkg/ddl/schema_test.go b/pkg/ddl/schema_test.go index 39e4ed6d70274..29806bea8bd74 100644 --- a/pkg/ddl/schema_test.go +++ b/pkg/ddl/schema_test.go @@ -27,7 +27,8 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" @@ -79,7 +80,7 @@ func testCheckTableState(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, t // testTableInfo creates a test table with num int columns and with no index. 
func testTableInfo(store kv.Storage, name string, num int) (*model.TableInfo, error) { tblInfo := &model.TableInfo{ - Name: model.NewCIStr(name), + Name: pmodel.NewCIStr(name), } genIDs, err := genGlobalIDs(store, 1) @@ -91,7 +92,7 @@ func testTableInfo(store kv.Storage, name string, num int) (*model.TableInfo, er cols := make([]*model.ColumnInfo, num) for i := range cols { col := &model.ColumnInfo{ - Name: model.NewCIStr(fmt.Sprintf("c%d", i+1)), + Name: pmodel.NewCIStr(fmt.Sprintf("c%d", i+1)), Offset: i, DefaultValue: i + 1, State: model.StatePublic, @@ -122,7 +123,7 @@ func genGlobalIDs(store kv.Storage, count int) ([]int64, error) { func testSchemaInfo(store kv.Storage, name string) (*model.DBInfo, error) { dbInfo := &model.DBInfo{ - Name: model.NewCIStr(name), + Name: pmodel.NewCIStr(name), } genIDs, err := genGlobalIDs(store, 1) diff --git a/pkg/ddl/schema_version.go b/pkg/ddl/schema_version.go index 84421e1c25114..7b472b7224cd0 100644 --- a/pkg/ddl/schema_version.go +++ b/pkg/ddl/schema_version.go @@ -22,8 +22,9 @@ import ( "github.com/pingcap/tidb/pkg/ddl/logutil" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/util/mathutil" "go.uber.org/zap" ) @@ -96,7 +97,7 @@ func SetSchemaDiffForRenameTable(diff *model.SchemaDiff, job *model.Job) error { func SetSchemaDiffForRenameTables(diff *model.SchemaDiff, job *model.Job) error { var ( oldSchemaIDs, newSchemaIDs, tableIDs []int64 - tableNames, oldSchemaNames []*model.CIStr + tableNames, oldSchemaNames []*pmodel.CIStr ) err := job.DecodeArgs(&oldSchemaIDs, &newSchemaIDs, &tableNames, &tableIDs, &oldSchemaNames) if err != nil { diff --git a/pkg/ddl/schematracker/BUILD.bazel b/pkg/ddl/schematracker/BUILD.bazel index 2a2a3a19a41f1..de05d2d58abcb 100644 --- a/pkg/ddl/schematracker/BUILD.bazel 
+++ b/pkg/ddl/schematracker/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "//pkg/infoschema", "//pkg/kv", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/owner", "//pkg/parser/ast", "//pkg/parser/charset", @@ -51,9 +52,11 @@ go_test( "//pkg/executor", "//pkg/infoschema", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/model", + "//pkg/planner/core/resolve", "//pkg/sessionctx", "//pkg/util/chunk", "//pkg/util/mock", diff --git a/pkg/ddl/schematracker/checker.go b/pkg/ddl/schematracker/checker.go index fe0ad5ca50059..4742209f46799 100644 --- a/pkg/ddl/schematracker/checker.go +++ b/pkg/ddl/schematracker/checker.go @@ -29,9 +29,10 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/owner" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -87,7 +88,7 @@ func (d *Checker) CreateTestDB(ctx sessionctx.Context) { d.tracker.CreateTestDB(ctx) } -func (d *Checker) checkDBInfo(ctx sessionctx.Context, dbName model.CIStr) { +func (d *Checker) checkDBInfo(ctx sessionctx.Context, dbName pmodel.CIStr) { if d.closed.Load() { return } @@ -120,7 +121,7 @@ func (d *Checker) checkDBInfo(ctx sessionctx.Context, dbName model.CIStr) { } } -func (d *Checker) checkTableInfo(ctx sessionctx.Context, dbName, tableName model.CIStr) { +func (d *Checker) checkTableInfo(ctx sessionctx.Context, dbName, tableName pmodel.CIStr) { if d.closed.Load() { return } @@ -466,13 +467,13 @@ func (d *Checker) CreateSchemaWithInfo(ctx sessionctx.Context, info *model.DBInf } // CreateTableWithInfo implements the DDL interface. 
-func (*Checker) CreateTableWithInfo(_ sessionctx.Context, _ model.CIStr, _ *model.TableInfo, _ []model.InvolvingSchemaInfo, _ ...ddl.CreateTableOption) error { +func (*Checker) CreateTableWithInfo(_ sessionctx.Context, _ pmodel.CIStr, _ *model.TableInfo, _ []model.InvolvingSchemaInfo, _ ...ddl.CreateTableOption) error { //TODO implement me panic("implement me") } // BatchCreateTableWithInfo implements the DDL interface. -func (*Checker) BatchCreateTableWithInfo(_ sessionctx.Context, _ model.CIStr, _ []*model.TableInfo, _ ...ddl.CreateTableOption) error { +func (*Checker) BatchCreateTableWithInfo(_ sessionctx.Context, _ pmodel.CIStr, _ []*model.TableInfo, _ ...ddl.CreateTableOption) error { //TODO implement me panic("implement me") } diff --git a/pkg/ddl/schematracker/dm_tracker.go b/pkg/ddl/schematracker/dm_tracker.go index 5c01159af7092..5e38d10559a8b 100644 --- a/pkg/ddl/schematracker/dm_tracker.go +++ b/pkg/ddl/schematracker/dm_tracker.go @@ -25,9 +25,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" field_types "github.com/pingcap/tidb/pkg/parser/types" "github.com/pingcap/tidb/pkg/sessionctx" @@ -92,7 +93,7 @@ func (d *SchemaTracker) CreateSchema(ctx sessionctx.Context, stmt *ast.CreateDat // CreateTestDB creates the `test` database, which is the default behavior of TiDB. func (d *SchemaTracker) CreateTestDB(ctx sessionctx.Context) { _ = d.CreateSchema(ctx, &ast.CreateDatabaseStmt{ - Name: model.NewCIStr("test"), + Name: pmodel.NewCIStr("test"), }) } @@ -232,7 +233,7 @@ func (d *SchemaTracker) CreateTable(ctx sessionctx.Context, s *ast.CreateTableSt // CreateTableWithInfo implements the DDL interface. 
func (d *SchemaTracker) CreateTableWithInfo( _ sessionctx.Context, - dbName model.CIStr, + dbName pmodel.CIStr, info *model.TableInfo, _ []model.InvolvingSchemaInfo, cs ...ddl.CreateTableOption, @@ -365,11 +366,11 @@ func (d *SchemaTracker) DropView(_ sessionctx.Context, stmt *ast.DropTableStmt) // CreateIndex implements the DDL interface. func (d *SchemaTracker) CreateIndex(ctx sessionctx.Context, stmt *ast.CreateIndexStmt) error { ident := ast.Ident{Schema: stmt.Table.Schema, Name: stmt.Table.Name} - return d.createIndex(ctx, ident, stmt.KeyType, model.NewCIStr(stmt.IndexName), + return d.createIndex(ctx, ident, stmt.KeyType, pmodel.NewCIStr(stmt.IndexName), stmt.IndexPartSpecifications, stmt.IndexOption, stmt.IfNotExists) } -func (d *SchemaTracker) putTableIfNoError(err error, dbName model.CIStr, tbInfo *model.TableInfo) { +func (d *SchemaTracker) putTableIfNoError(err error, dbName pmodel.CIStr, tbInfo *model.TableInfo) { if err != nil { return } @@ -381,7 +382,7 @@ func (d *SchemaTracker) createIndex( ctx sessionctx.Context, ti ast.Ident, keyType ast.IndexKeyType, - indexName model.CIStr, + indexName pmodel.CIStr, indexPartSpecifications []*ast.IndexPartSpecification, indexOption *ast.IndexOption, ifNotExists bool, @@ -398,11 +399,11 @@ func (d *SchemaTracker) createIndex( // Deal with anonymous index. if len(indexName.L) == 0 { - colName := model.NewCIStr("expression_index") + colName := pmodel.NewCIStr("expression_index") if indexPartSpecifications[0].Column != nil { colName = indexPartSpecifications[0].Column.Name } - indexName = ddl.GetName4AnonymousIndex(t, colName, model.NewCIStr("")) + indexName = ddl.GetName4AnonymousIndex(t, colName, pmodel.NewCIStr("")) } if indexInfo := tblInfo.FindIndexByName(indexName.L); indexInfo != nil { @@ -448,7 +449,7 @@ func (d *SchemaTracker) createIndex( // DropIndex implements the DDL interface. 
func (d *SchemaTracker) DropIndex(ctx sessionctx.Context, stmt *ast.DropIndexStmt) error { ti := ast.Ident{Schema: stmt.Table.Schema, Name: stmt.Table.Name} - err := d.dropIndex(ctx, ti, model.NewCIStr(stmt.IndexName), stmt.IfExists) + err := d.dropIndex(ctx, ti, pmodel.NewCIStr(stmt.IndexName), stmt.IfExists) if (infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableNotExists.Equal(err)) && stmt.IfExists { err = nil } @@ -456,7 +457,7 @@ func (d *SchemaTracker) DropIndex(ctx sessionctx.Context, stmt *ast.DropIndexStm } // dropIndex is shared by DropIndex and AlterTable. -func (d *SchemaTracker) dropIndex(_ sessionctx.Context, ti ast.Ident, indexName model.CIStr, ifExists bool) (err error) { +func (d *SchemaTracker) dropIndex(_ sessionctx.Context, ti ast.Ident, indexName pmodel.CIStr, ifExists bool) (err error) { tblInfo, err := d.TableClonedByName(ti.Schema, ti.Name) if err != nil { return infoschema.ErrTableNotExists.GenWithStackByArgs(ti.Schema, ti.Name) @@ -694,7 +695,7 @@ func (d *SchemaTracker) handleModifyColumn( ctx context.Context, sctx sessionctx.Context, ident ast.Ident, - originalColName model.CIStr, + originalColName pmodel.CIStr, spec *ast.AlterTableSpec, ) (err error) { tblInfo, err := d.TableClonedByName(ident.Schema, ident.Name) @@ -838,7 +839,7 @@ func (d *SchemaTracker) dropTablePartitions(_ sessionctx.Context, ident ast.Iden func (d *SchemaTracker) createPrimaryKey( ctx sessionctx.Context, ti ast.Ident, - indexName model.CIStr, + indexName pmodel.CIStr, indexPartSpecifications []*ast.IndexPartSpecification, indexOption *ast.IndexOption, ) (err error) { @@ -849,7 +850,7 @@ func (d *SchemaTracker) createPrimaryKey( defer d.putTableIfNoError(err, ti.Schema, tblInfo) - indexName = model.NewCIStr(mysql.PrimaryKeyName) + indexName = pmodel.NewCIStr(mysql.PrimaryKeyName) if indexInfo := tblInfo.FindIndexByName(indexName.L); indexInfo != nil || // If the table's PKIsHandle is true, it also means that this table has a primary key. 
tblInfo.PKIsHandle { @@ -925,9 +926,9 @@ func (d *SchemaTracker) AlterTable(ctx context.Context, sctx sessionctx.Context, case ast.AlterTableDropColumn: err = d.dropColumn(sctx, ident, spec) case ast.AlterTableDropIndex: - err = d.dropIndex(sctx, ident, model.NewCIStr(spec.Name), spec.IfExists) + err = d.dropIndex(sctx, ident, pmodel.NewCIStr(spec.Name), spec.IfExists) case ast.AlterTableDropPrimaryKey: - err = d.dropIndex(sctx, ident, model.NewCIStr(mysql.PrimaryKeyName), spec.IfExists) + err = d.dropIndex(sctx, ident, pmodel.NewCIStr(mysql.PrimaryKeyName), spec.IfExists) case ast.AlterTableRenameIndex: err = d.renameIndex(sctx, ident, spec) case ast.AlterTableDropPartition: @@ -936,13 +937,13 @@ func (d *SchemaTracker) AlterTable(ctx context.Context, sctx sessionctx.Context, constr := spec.Constraint switch spec.Constraint.Tp { case ast.ConstraintKey, ast.ConstraintIndex: - err = d.createIndex(sctx, ident, ast.IndexKeyTypeNone, model.NewCIStr(constr.Name), + err = d.createIndex(sctx, ident, ast.IndexKeyTypeNone, pmodel.NewCIStr(constr.Name), spec.Constraint.Keys, constr.Option, constr.IfNotExists) case ast.ConstraintUniq, ast.ConstraintUniqIndex, ast.ConstraintUniqKey: - err = d.createIndex(sctx, ident, ast.IndexKeyTypeUnique, model.NewCIStr(constr.Name), + err = d.createIndex(sctx, ident, ast.IndexKeyTypeUnique, pmodel.NewCIStr(constr.Name), spec.Constraint.Keys, constr.Option, false) // IfNotExists should be not applied case ast.ConstraintPrimaryKey: - err = d.createPrimaryKey(sctx, ident, model.NewCIStr(constr.Name), spec.Constraint.Keys, constr.Option) + err = d.createPrimaryKey(sctx, ident, pmodel.NewCIStr(constr.Name), spec.Constraint.Keys, constr.Option) case ast.ConstraintForeignKey, ast.ConstraintFulltext, ast.ConstraintCheck: @@ -1182,7 +1183,7 @@ func (*SchemaTracker) AlterResourceGroup(_ sessionctx.Context, _ *ast.AlterResou } // BatchCreateTableWithInfo implements the DDL interface, it will call CreateTableWithInfo for each table. 
-func (d *SchemaTracker) BatchCreateTableWithInfo(ctx sessionctx.Context, schema model.CIStr, info []*model.TableInfo, cs ...ddl.CreateTableOption) error { +func (d *SchemaTracker) BatchCreateTableWithInfo(ctx sessionctx.Context, schema pmodel.CIStr, info []*model.TableInfo, cs ...ddl.CreateTableOption) error { for _, tableInfo := range info { if err := d.CreateTableWithInfo(ctx, schema, tableInfo, nil, cs...); err != nil { return err diff --git a/pkg/ddl/schematracker/dm_tracker_test.go b/pkg/ddl/schematracker/dm_tracker_test.go index ca0ec21499173..89b47bc8b21a5 100644 --- a/pkg/ddl/schematracker/dm_tracker_test.go +++ b/pkg/ddl/schematracker/dm_tracker_test.go @@ -28,9 +28,11 @@ import ( "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/mock" @@ -103,7 +105,7 @@ func execAlter(t *testing.T, tracker schematracker.SchemaTracker, sql string) { } func mustTableByName(t *testing.T, tracker schematracker.SchemaTracker, schema, table string) *model.TableInfo { - tblInfo, err := tracker.TableByName(context.Background(), model.NewCIStr(schema), model.NewCIStr(table)) + tblInfo, err := tracker.TableByName(context.Background(), pmodel.NewCIStr(schema), pmodel.NewCIStr(table)) require.NoError(t, err) return tblInfo } @@ -200,7 +202,7 @@ func TestIndexLength(t *testing.T) { ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin" checkShowCreateTable(t, tblInfo, expected) - err := tracker.DeleteTable(model.NewCIStr("test"), model.NewCIStr("t")) + err := tracker.DeleteTable(pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) 
require.NoError(t, err) sql = "create table test.t(a text, b text charset ascii, c blob);" @@ -482,11 +484,11 @@ func (m mockRestrictedSQLExecutor) ParseWithParams(ctx context.Context, sql stri return nil, nil } -func (m mockRestrictedSQLExecutor) ExecRestrictedStmt(ctx context.Context, stmt ast.StmtNode, opts ...sqlexec.OptionFuncAlias) ([]chunk.Row, []*ast.ResultField, error) { +func (m mockRestrictedSQLExecutor) ExecRestrictedStmt(ctx context.Context, stmt ast.StmtNode, opts ...sqlexec.OptionFuncAlias) ([]chunk.Row, []*resolve.ResultField, error) { return nil, nil, nil } -func (m mockRestrictedSQLExecutor) ExecRestrictedSQL(ctx context.Context, opts []sqlexec.OptionFuncAlias, sql string, args ...any) ([]chunk.Row, []*ast.ResultField, error) { +func (m mockRestrictedSQLExecutor) ExecRestrictedSQL(ctx context.Context, opts []sqlexec.OptionFuncAlias, sql string, args ...any) ([]chunk.Row, []*resolve.ResultField, error) { return nil, nil, nil } diff --git a/pkg/ddl/schematracker/info_store.go b/pkg/ddl/schematracker/info_store.go index dad0f5a159f7d..52454640b8af6 100644 --- a/pkg/ddl/schematracker/info_store.go +++ b/pkg/ddl/schematracker/info_store.go @@ -18,7 +18,8 @@ import ( "context" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" ) @@ -40,7 +41,7 @@ func NewInfoStore(lowerCaseTableNames int) *InfoStore { } } -func (i *InfoStore) ciStr2Key(name model.CIStr) string { +func (i *InfoStore) ciStr2Key(name pmodel.CIStr) string { if i.lowerCaseTableNames == 0 { return name.O } @@ -48,7 +49,7 @@ func (i *InfoStore) ciStr2Key(name model.CIStr) string { } // SchemaByName returns the DBInfo of given name. nil if not found. 
-func (i *InfoStore) SchemaByName(name model.CIStr) *model.DBInfo { +func (i *InfoStore) SchemaByName(name pmodel.CIStr) *model.DBInfo { key := i.ciStr2Key(name) return i.dbs[key] } @@ -63,7 +64,7 @@ func (i *InfoStore) PutSchema(dbInfo *model.DBInfo) { } // DeleteSchema deletes the schema from InfoSchema. Returns true when the schema exists, false otherwise. -func (i *InfoStore) DeleteSchema(name model.CIStr) bool { +func (i *InfoStore) DeleteSchema(name pmodel.CIStr) bool { key := i.ciStr2Key(name) _, ok := i.dbs[key] if !ok { @@ -75,7 +76,7 @@ func (i *InfoStore) DeleteSchema(name model.CIStr) bool { } // TableByName returns the TableInfo. It will also return the error like an infoschema. -func (i *InfoStore) TableByName(_ context.Context, schema, table model.CIStr) (*model.TableInfo, error) { +func (i *InfoStore) TableByName(_ context.Context, schema, table pmodel.CIStr) (*model.TableInfo, error) { schemaKey := i.ciStr2Key(schema) tables, ok := i.tables[schemaKey] if !ok { @@ -91,7 +92,7 @@ func (i *InfoStore) TableByName(_ context.Context, schema, table model.CIStr) (* } // TableClonedByName is like TableByName, plus it will clone the TableInfo. -func (i *InfoStore) TableClonedByName(schema, table model.CIStr) (*model.TableInfo, error) { +func (i *InfoStore) TableClonedByName(schema, table pmodel.CIStr) (*model.TableInfo, error) { tbl, err := i.TableByName(context.Background(), schema, table) if err != nil { return nil, err @@ -100,7 +101,7 @@ func (i *InfoStore) TableClonedByName(schema, table model.CIStr) (*model.TableIn } // PutTable puts a TableInfo, it will overwrite the old one. If the schema doesn't exist, it will return ErrDatabaseNotExists. 
-func (i *InfoStore) PutTable(schemaName model.CIStr, tblInfo *model.TableInfo) error { +func (i *InfoStore) PutTable(schemaName pmodel.CIStr, tblInfo *model.TableInfo) error { schemaKey := i.ciStr2Key(schemaName) tables, ok := i.tables[schemaKey] if !ok { @@ -113,7 +114,7 @@ func (i *InfoStore) PutTable(schemaName model.CIStr, tblInfo *model.TableInfo) e // DeleteTable deletes the TableInfo, it will return ErrDatabaseNotExists or ErrTableNotExists when schema or table does // not exist. -func (i *InfoStore) DeleteTable(schema, table model.CIStr) error { +func (i *InfoStore) DeleteTable(schema, table pmodel.CIStr) error { schemaKey := i.ciStr2Key(schema) tables, ok := i.tables[schemaKey] if !ok { @@ -139,7 +140,7 @@ func (i *InfoStore) AllSchemaNames() []string { } // AllTableNamesOfSchema return all table names of a schema. -func (i *InfoStore) AllTableNamesOfSchema(schema model.CIStr) ([]string, error) { +func (i *InfoStore) AllTableNamesOfSchema(schema pmodel.CIStr) ([]string, error) { schemaKey := i.ciStr2Key(schema) tables, ok := i.tables[schemaKey] if !ok { @@ -160,19 +161,19 @@ type InfoStoreAdaptor struct { } // SchemaByName implements the InfoSchema interface. -func (i InfoStoreAdaptor) SchemaByName(schema model.CIStr) (*model.DBInfo, bool) { +func (i InfoStoreAdaptor) SchemaByName(schema pmodel.CIStr) (*model.DBInfo, bool) { dbInfo := i.inner.SchemaByName(schema) return dbInfo, dbInfo != nil } // TableExists implements the InfoSchema interface. -func (i InfoStoreAdaptor) TableExists(schema, table model.CIStr) bool { +func (i InfoStoreAdaptor) TableExists(schema, table pmodel.CIStr) bool { tableInfo, _ := i.inner.TableByName(context.Background(), schema, table) return tableInfo != nil } // TableByName implements the InfoSchema interface. 
-func (i InfoStoreAdaptor) TableByName(ctx context.Context, schema, table model.CIStr) (t table.Table, err error) { +func (i InfoStoreAdaptor) TableByName(ctx context.Context, schema, table pmodel.CIStr) (t table.Table, err error) { tableInfo, err := i.inner.TableByName(ctx, schema, table) if err != nil { return nil, err @@ -181,6 +182,6 @@ func (i InfoStoreAdaptor) TableByName(ctx context.Context, schema, table model.C } // TableInfoByName implements the InfoSchema interface. -func (i InfoStoreAdaptor) TableInfoByName(schema, table model.CIStr) (*model.TableInfo, error) { +func (i InfoStoreAdaptor) TableInfoByName(schema, table pmodel.CIStr) (*model.TableInfo, error) { return i.inner.TableByName(context.Background(), schema, table) } diff --git a/pkg/ddl/schematracker/info_store_test.go b/pkg/ddl/schematracker/info_store_test.go index c62151e6113f2..7a8be23f21cd6 100644 --- a/pkg/ddl/schematracker/info_store_test.go +++ b/pkg/ddl/schematracker/info_store_test.go @@ -20,15 +20,16 @@ import ( "testing" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) func TestInfoStoreLowerCaseTableNames(t *testing.T) { - dbName := model.NewCIStr("DBName") - lowerDBName := model.NewCIStr("dbname") - tableName := model.NewCIStr("TableName") - lowerTableName := model.NewCIStr("tablename") + dbName := pmodel.NewCIStr("DBName") + lowerDBName := pmodel.NewCIStr("dbname") + tableName := pmodel.NewCIStr("TableName") + lowerTableName := pmodel.NewCIStr("tablename") dbInfo := &model.DBInfo{Name: dbName} tableInfo := &model.TableInfo{Name: tableName} @@ -57,7 +58,7 @@ func TestInfoStoreLowerCaseTableNames(t *testing.T) { schemaNames := is.AllSchemaNames() require.Equal(t, []string{dbName.O}, schemaNames) - _, err = is.AllTableNamesOfSchema(model.NewCIStr("wrong-db")) + _, err = 
is.AllTableNamesOfSchema(pmodel.NewCIStr("wrong-db")) require.Error(t, err) tableNames, err := is.AllTableNamesOfSchema(dbName) require.NoError(t, err) @@ -85,7 +86,7 @@ func TestInfoStoreLowerCaseTableNames(t *testing.T) { schemaNames = is.AllSchemaNames() require.Equal(t, []string{dbName.L}, schemaNames) - _, err = is.AllTableNamesOfSchema(model.NewCIStr("wrong-db")) + _, err = is.AllTableNamesOfSchema(pmodel.NewCIStr("wrong-db")) require.Error(t, err) tableNames, err = is.AllTableNamesOfSchema(dbName) require.NoError(t, err) @@ -94,10 +95,10 @@ func TestInfoStoreLowerCaseTableNames(t *testing.T) { func TestInfoStoreDeleteTables(t *testing.T) { is := NewInfoStore(0) - dbName1 := model.NewCIStr("DBName1") - dbName2 := model.NewCIStr("DBName2") - tableName1 := model.NewCIStr("TableName1") - tableName2 := model.NewCIStr("TableName2") + dbName1 := pmodel.NewCIStr("DBName1") + dbName2 := pmodel.NewCIStr("DBName2") + tableName1 := pmodel.NewCIStr("TableName1") + tableName2 := pmodel.NewCIStr("TableName2") dbInfo1 := &model.DBInfo{Name: dbName1} dbInfo2 := &model.DBInfo{Name: dbName2} tableInfo1 := &model.TableInfo{Name: tableName1} diff --git a/pkg/ddl/sequence.go b/pkg/ddl/sequence.go index 9eff78a2cc0ff..22c5092110b66 100644 --- a/pkg/ddl/sequence.go +++ b/pkg/ddl/sequence.go @@ -21,8 +21,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/util/dbterror" "github.com/pingcap/tidb/pkg/util/mathutil" ) diff --git a/pkg/ddl/sequence_test.go b/pkg/ddl/sequence_test.go index 2f5aeb833186e..3f94edc1dd391 100644 --- a/pkg/ddl/sequence_test.go +++ b/pkg/ddl/sequence_test.go @@ -19,8 +19,8 @@ import ( "time" mysql "github.com/pingcap/tidb/pkg/errno" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - 
"github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/testkit" diff --git a/pkg/ddl/split_region.go b/pkg/ddl/split_region.go index e5eb0134bb2bf..a921ae1245348 100644 --- a/pkg/ddl/split_region.go +++ b/pkg/ddl/split_region.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl/logutil" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/ddl/stat_test.go b/pkg/ddl/stat_test.go index b46350cbe1132..777e33a322296 100644 --- a/pkg/ddl/stat_test.go +++ b/pkg/ddl/stat_test.go @@ -24,8 +24,9 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" sessiontypes "github.com/pingcap/tidb/pkg/session/types" @@ -100,9 +101,9 @@ func buildCreateIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bo TableID: tblInfo.ID, Type: model.ActionAddIndex, BinlogInfo: &model.HistoryInfo{}, - Args: []any{unique, model.NewCIStr(indexName), + Args: []any{unique, pmodel.NewCIStr(indexName), []*ast.IndexPartSpecification{{ - Column: &ast.ColumnName{Name: model.NewCIStr(colName)}, + Column: &ast.ColumnName{Name: pmodel.NewCIStr(colName)}, Length: types.UnspecifiedLength}}}, ReorgMeta: &model.DDLReorgMeta{ // Add index job must have this field. 
SQLMode: mysql.SQLMode(0), diff --git a/pkg/ddl/systable/BUILD.bazel b/pkg/ddl/systable/BUILD.bazel index c0ddb9bfc63d0..a70523546864b 100644 --- a/pkg/ddl/systable/BUILD.bazel +++ b/pkg/ddl/systable/BUILD.bazel @@ -11,7 +11,7 @@ go_library( deps = [ "//pkg/ddl/logutil", "//pkg/ddl/session", - "//pkg/parser/model", + "//pkg/meta/model", "@com_github_pingcap_errors//:errors", "@org_uber_go_zap//:zap", ], @@ -30,7 +30,7 @@ go_test( "//pkg/ddl/mock", "//pkg/ddl/session", "//pkg/domain", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/store/mockstore", "//pkg/testkit", "@com_github_ngaut_pools//:pools", diff --git a/pkg/ddl/systable/manager.go b/pkg/ddl/systable/manager.go index 2dc24d813c0ae..04d8275bc78b9 100644 --- a/pkg/ddl/systable/manager.go +++ b/pkg/ddl/systable/manager.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/ddl/session" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) var ( diff --git a/pkg/ddl/systable/manager_test.go b/pkg/ddl/systable/manager_test.go index 2145694ad0fc9..fca1ef808ef91 100644 --- a/pkg/ddl/systable/manager_test.go +++ b/pkg/ddl/systable/manager_test.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl/session" "github.com/pingcap/tidb/pkg/ddl/systable" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" diff --git a/pkg/ddl/table.go b/pkg/ddl/table.go index b32e9e6e83134..af6d984d54e2d 100644 --- a/pkg/ddl/table.go +++ b/pkg/ddl/table.go @@ -32,9 +32,10 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - 
"github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" field_types "github.com/pingcap/tidb/pkg/parser/types" "github.com/pingcap/tidb/pkg/sessionctx/variable" statsutil "github.com/pingcap/tidb/pkg/statistics/handle/util" @@ -668,7 +669,7 @@ func onModifyTableAutoIDCache(jobCtx *jobContext, t *meta.Meta, job *model.Job) return 0, errors.Trace(err) } - tblInfo.AutoIdCache = cache + tblInfo.AutoIDCache = cache ver, err := updateVersionAndTableInfo(jobCtx, t, job, tblInfo, true) if err != nil { return ver, errors.Trace(err) @@ -736,8 +737,8 @@ func verifyNoOverflowShardBits(s *sess.Pool, tbl table.Table, shardRowIDBits uin func onRenameTable(jobCtx *jobContext, t *meta.Meta, job *model.Job) (ver int64, _ error) { var oldSchemaID int64 - var oldSchemaName model.CIStr - var tableName model.CIStr + var oldSchemaName pmodel.CIStr + var tableName pmodel.CIStr if err := job.DecodeArgs(&oldSchemaID, &tableName, &oldSchemaName); err != nil { // Invalid arguments, cancel this job. 
job.State = model.JobStateCancelled @@ -781,10 +782,10 @@ func onRenameTable(jobCtx *jobContext, t *meta.Meta, job *model.Job) (ver int64, func onRenameTables(jobCtx *jobContext, t *meta.Meta, job *model.Job) (ver int64, _ error) { oldSchemaIDs := []int64{} newSchemaIDs := []int64{} - tableNames := []*model.CIStr{} + tableNames := []*pmodel.CIStr{} tableIDs := []int64{} - oldSchemaNames := []*model.CIStr{} - oldTableNames := []*model.CIStr{} + oldSchemaNames := []*pmodel.CIStr{} + oldTableNames := []*pmodel.CIStr{} if err := job.DecodeArgs(&oldSchemaIDs, &newSchemaIDs, &tableNames, &tableIDs, &oldSchemaNames, &oldTableNames); err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) @@ -821,7 +822,7 @@ func onRenameTables(jobCtx *jobContext, t *meta.Meta, job *model.Job) (ver int64 return ver, nil } -func checkAndRenameTables(t *meta.Meta, job *model.Job, tblInfo *model.TableInfo, oldSchemaID, newSchemaID int64, oldSchemaName, tableName *model.CIStr) (ver int64, _ error) { +func checkAndRenameTables(t *meta.Meta, job *model.Job, tblInfo *model.TableInfo, oldSchemaID, newSchemaID int64, oldSchemaName, tableName *pmodel.CIStr) (ver int64, _ error) { err := t.DropTableOrView(oldSchemaID, tblInfo.ID) if err != nil { job.State = model.JobStateCancelled @@ -872,7 +873,7 @@ func checkAndRenameTables(t *meta.Meta, job *model.Job, tblInfo *model.TableInfo return ver, nil } -func adjustForeignKeyChildTableInfoAfterRenameTable(infoCache *infoschema.InfoCache, t *meta.Meta, job *model.Job, fkh *foreignKeyHelper, tblInfo *model.TableInfo, oldSchemaName, oldTableName, newTableName model.CIStr, newSchemaID int64) error { +func adjustForeignKeyChildTableInfoAfterRenameTable(infoCache *infoschema.InfoCache, t *meta.Meta, job *model.Job, fkh *foreignKeyHelper, tblInfo *model.TableInfo, oldSchemaName, oldTableName, newTableName pmodel.CIStr, newSchemaID int64) error { if !variable.EnableForeignKey.Load() || newTableName.L == oldTableName.L { return nil } @@ 
-946,7 +947,7 @@ func finishJobRenameTable(jobCtx *jobContext, t *meta.Meta, job *model.Job) (int } func finishJobRenameTables(jobCtx *jobContext, t *meta.Meta, job *model.Job, - tableNames []*model.CIStr, tableIDs, newSchemaIDs []int64) (int64, error) { + tableNames []*pmodel.CIStr, tableIDs, newSchemaIDs []int64) (int64, error) { tblSchemaIDs := make(map[int64]int64, len(tableIDs)) for i := range tableIDs { tblSchemaIDs[tableIDs[i]] = newSchemaIDs[i] @@ -1260,7 +1261,7 @@ func checkTableNotExistsFromInfoSchema(is infoschema.InfoSchema, schemaID int64, if !ok { return infoschema.ErrDatabaseNotExists.GenWithStackByArgs("") } - if is.TableExists(schema.Name, model.NewCIStr(tableName)) { + if is.TableExists(schema.Name, pmodel.NewCIStr(tableName)) { return infoschema.ErrTableExists.GenWithStackByArgs(tableName) } return nil diff --git a/pkg/ddl/table_lock.go b/pkg/ddl/table_lock.go index 65f722732e200..a2643bf937f67 100644 --- a/pkg/ddl/table_lock.go +++ b/pkg/ddl/table_lock.go @@ -18,7 +18,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/util/dbterror" ) @@ -124,8 +125,8 @@ func lockTable(tbInfo *model.TableInfo, idx int, arg *LockTablesArg) error { if tbInfo.Lock.State == model.TableLockStatePreLock { return nil } - if (tbInfo.Lock.Tp == model.TableLockRead && arg.LockTables[idx].Tp == model.TableLockRead) || - (tbInfo.Lock.Tp == model.TableLockReadOnly && arg.LockTables[idx].Tp == model.TableLockReadOnly) { + if (tbInfo.Lock.Tp == pmodel.TableLockRead && arg.LockTables[idx].Tp == pmodel.TableLockRead) || + (tbInfo.Lock.Tp == pmodel.TableLockReadOnly && arg.LockTables[idx].Tp == pmodel.TableLockReadOnly) { sessionIndex := findSessionInfoIndex(tbInfo.Lock.Sessions, arg.SessionInfo) // repeat lock. 
if sessionIndex >= 0 { @@ -141,15 +142,15 @@ func lockTable(tbInfo *model.TableInfo, idx int, arg *LockTablesArg) error { } // checkTableLocked uses to check whether table was locked. -func checkTableLocked(tbInfo *model.TableInfo, lockTp model.TableLockType, sessionInfo model.SessionInfo) error { +func checkTableLocked(tbInfo *model.TableInfo, lockTp pmodel.TableLockType, sessionInfo model.SessionInfo) error { if !tbInfo.IsLocked() { return nil } if tbInfo.Lock.State == model.TableLockStatePreLock { return nil } - if (tbInfo.Lock.Tp == model.TableLockRead && lockTp == model.TableLockRead) || - (tbInfo.Lock.Tp == model.TableLockReadOnly && lockTp == model.TableLockReadOnly) { + if (tbInfo.Lock.Tp == pmodel.TableLockRead && lockTp == pmodel.TableLockRead) || + (tbInfo.Lock.Tp == pmodel.TableLockReadOnly && lockTp == pmodel.TableLockReadOnly) { return nil } sessionIndex := findSessionInfoIndex(tbInfo.Lock.Sessions, sessionInfo) @@ -159,7 +160,7 @@ func checkTableLocked(tbInfo *model.TableInfo, lockTp model.TableLockType, sessi return nil } // If no other session locked this table, and it is not a read only lock (session unrelated). 
- if len(tbInfo.Lock.Sessions) == 1 && tbInfo.Lock.Tp != model.TableLockReadOnly { + if len(tbInfo.Lock.Sessions) == 1 && tbInfo.Lock.Tp != pmodel.TableLockReadOnly { return nil } } diff --git a/pkg/ddl/table_modify_test.go b/pkg/ddl/table_modify_test.go index 03f14eddbe53b..7547d0551bd9b 100644 --- a/pkg/ddl/table_modify_test.go +++ b/pkg/ddl/table_modify_test.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/terror" sessiontypes "github.com/pingcap/tidb/pkg/session/types" "github.com/pingcap/tidb/pkg/sessiontxn" diff --git a/pkg/ddl/table_test.go b/pkg/ddl/table_test.go index 7b0d4268b877a..1f96e5afef2d7 100644 --- a/pkg/ddl/table_test.go +++ b/pkg/ddl/table_test.go @@ -28,8 +28,9 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/table" @@ -46,8 +47,8 @@ func testRenameTable( ctx sessionctx.Context, d ddl.ExecutorForTest, newSchemaID, oldSchemaID int64, - oldSchemaName model.CIStr, - newSchemaName model.CIStr, + oldSchemaName pmodel.CIStr, + newSchemaName pmodel.CIStr, tblInfo *model.TableInfo, ) *model.Job { job := &model.Job{ @@ -72,7 +73,7 @@ func testRenameTable( return job } -func testRenameTables(t *testing.T, ctx sessionctx.Context, d ddl.ExecutorForTest, oldSchemaIDs, newSchemaIDs []int64, newTableNames []*model.CIStr, oldTableIDs []int64, oldSchemaNames, oldTableNames []*model.CIStr) *model.Job { +func testRenameTables(t *testing.T, ctx sessionctx.Context, d ddl.ExecutorForTest, 
oldSchemaIDs, newSchemaIDs []int64, newTableNames []*pmodel.CIStr, oldTableIDs []int64, oldSchemaNames, oldTableNames []*pmodel.CIStr) *model.Job { job := &model.Job{ Type: model.ActionRenameTables, BinlogInfo: &model.HistoryInfo{}, @@ -97,9 +98,9 @@ func testLockTable( d ddl.ExecutorForTest, uuid string, newSchemaID int64, - schemaName model.CIStr, + schemaName pmodel.CIStr, tblInfo *model.TableInfo, - lockTp model.TableLockType, + lockTp pmodel.TableLockType, ) *model.Job { arg := &ddl.LockTablesArg{ LockTables: []model.TableLockTpInfo{{SchemaID: newSchemaID, TableID: tblInfo.ID, Tp: lockTp}}, @@ -127,7 +128,7 @@ func testLockTable( return job } -func checkTableLockedTest(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, tblInfo *model.TableInfo, serverID string, sessionID uint64, lockTp model.TableLockType) { +func checkTableLockedTest(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, tblInfo *model.TableInfo, serverID string, sessionID uint64, lockTp pmodel.TableLockType) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { tt := meta.NewMeta(txn) @@ -251,10 +252,10 @@ func TestTable(t *testing.T) { testCheckTableState(t, store, dbInfo1, tblInfo, model.StatePublic) testCheckJobDone(t, store, job.ID, true) - job = testLockTable(t, ctx, de, d.GetID(), dbInfo1.ID, dbInfo1.Name, tblInfo, model.TableLockWrite) + job = testLockTable(t, ctx, de, d.GetID(), dbInfo1.ID, dbInfo1.Name, tblInfo, pmodel.TableLockWrite) testCheckTableState(t, store, dbInfo1, tblInfo, model.StatePublic) testCheckJobDone(t, store, job.ID, true) - checkTableLockedTest(t, store, dbInfo1, tblInfo, d.GetID(), ctx.GetSessionVars().ConnectionID, model.TableLockWrite) + checkTableLockedTest(t, store, dbInfo1, tblInfo, d.GetID(), ctx.GetSessionVars().ConnectionID, pmodel.TableLockWrite) // for alter cache table job = testAlterCacheTable(t, ctx, de, dbInfo1.ID, dbInfo1.Name, 
tblInfo) testCheckTableState(t, store, dbInfo1, tblInfo, model.StatePublic) @@ -379,7 +380,7 @@ func testAlterCacheTable( ctx sessionctx.Context, d ddl.ExecutorForTest, newSchemaID int64, - newSchemaName model.CIStr, + newSchemaName pmodel.CIStr, tblInfo *model.TableInfo, ) *model.Job { job := &model.Job{ @@ -406,7 +407,7 @@ func testAlterNoCacheTable( ctx sessionctx.Context, d ddl.ExecutorForTest, newSchemaID int64, - newSchemaName model.CIStr, + newSchemaName pmodel.CIStr, tblInfo *model.TableInfo, ) *model.Job { job := &model.Job{ @@ -452,7 +453,7 @@ func TestRenameTables(t *testing.T) { require.NoError(t, err) newTblInfos = append(newTblInfos, tblInfo) } - job := testRenameTables(t, ctx, de, []int64{dbInfo.ID, dbInfo.ID}, []int64{dbInfo.ID, dbInfo.ID}, []*model.CIStr{&newTblInfos[0].Name, &newTblInfos[1].Name}, []int64{tblInfos[0].ID, tblInfos[1].ID}, []*model.CIStr{&dbInfo.Name, &dbInfo.Name}, []*model.CIStr{&tblInfos[0].Name, &tblInfos[1].Name}) + job := testRenameTables(t, ctx, de, []int64{dbInfo.ID, dbInfo.ID}, []int64{dbInfo.ID, dbInfo.ID}, []*pmodel.CIStr{&newTblInfos[0].Name, &newTblInfos[1].Name}, []int64{tblInfos[0].ID, tblInfos[1].ID}, []*pmodel.CIStr{&dbInfo.Name, &dbInfo.Name}, []*pmodel.CIStr{&tblInfos[0].Name, &tblInfos[1].Name}) historyJob, err := ddl.GetHistoryJobByID(testkit.NewTestKit(t, store).Session(), job.ID) require.NoError(t, err) @@ -477,15 +478,15 @@ func TestCreateTables(t *testing.T) { infos = append(infos, &model.TableInfo{ ID: genIDs[0], - Name: model.NewCIStr("s1"), + Name: pmodel.NewCIStr("s1"), }) infos = append(infos, &model.TableInfo{ ID: genIDs[1], - Name: model.NewCIStr("s2"), + Name: pmodel.NewCIStr("s2"), }) infos = append(infos, &model.TableInfo{ ID: genIDs[2], - Name: model.NewCIStr("s3"), + Name: pmodel.NewCIStr("s3"), }) job := &model.Job{ diff --git a/pkg/ddl/tests/adminpause/BUILD.bazel b/pkg/ddl/tests/adminpause/BUILD.bazel index c11bac8d47f86..144bb1872474a 100644 --- a/pkg/ddl/tests/adminpause/BUILD.bazel +++ 
b/pkg/ddl/tests/adminpause/BUILD.bazel @@ -13,7 +13,7 @@ go_library( "//pkg/ddl", "//pkg/ddl/logutil", "//pkg/domain", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/testkit", ], ) @@ -36,7 +36,7 @@ go_test( "//pkg/ddl/testutil", "//pkg/domain", "//pkg/errno", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/testkit", "//pkg/testkit/testfailpoint", "//pkg/testkit/testsetup", diff --git a/pkg/ddl/tests/adminpause/ddl_stmt_cases.go b/pkg/ddl/tests/adminpause/ddl_stmt_cases.go index e592b19e19e86..cf70377fd7741 100644 --- a/pkg/ddl/tests/adminpause/ddl_stmt_cases.go +++ b/pkg/ddl/tests/adminpause/ddl_stmt_cases.go @@ -17,7 +17,7 @@ package adminpause import ( "sync" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" ) diff --git a/pkg/ddl/tests/adminpause/pause_cancel_test.go b/pkg/ddl/tests/adminpause/pause_cancel_test.go index 3a4c33c3d3fd9..d958b6e4d41e1 100644 --- a/pkg/ddl/tests/adminpause/pause_cancel_test.go +++ b/pkg/ddl/tests/adminpause/pause_cancel_test.go @@ -24,7 +24,7 @@ import ( testddlutil "github.com/pingcap/tidb/pkg/ddl/testutil" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" "github.com/pingcap/tidb/pkg/util/sqlexec" diff --git a/pkg/ddl/tests/adminpause/pause_negative_test.go b/pkg/ddl/tests/adminpause/pause_negative_test.go index eb1383778ba1f..2eb0d3a190a6f 100644 --- a/pkg/ddl/tests/adminpause/pause_negative_test.go +++ b/pkg/ddl/tests/adminpause/pause_negative_test.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" 
"github.com/pingcap/tidb/pkg/testkit/testfailpoint" "github.com/pingcap/tidb/pkg/util/sqlexec" diff --git a/pkg/ddl/tests/adminpause/pause_resume_test.go b/pkg/ddl/tests/adminpause/pause_resume_test.go index ba3a6d26175ef..a73873e765b3b 100644 --- a/pkg/ddl/tests/adminpause/pause_resume_test.go +++ b/pkg/ddl/tests/adminpause/pause_resume_test.go @@ -26,7 +26,7 @@ import ( testddlutil "github.com/pingcap/tidb/pkg/ddl/testutil" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" "github.com/pingcap/tidb/pkg/util/sqlexec" diff --git a/pkg/ddl/tests/fastcreatetable/BUILD.bazel b/pkg/ddl/tests/fastcreatetable/BUILD.bazel index 88429b3a0829d..bbd4dadbd2c3d 100644 --- a/pkg/ddl/tests/fastcreatetable/BUILD.bazel +++ b/pkg/ddl/tests/fastcreatetable/BUILD.bazel @@ -12,7 +12,7 @@ go_test( deps = [ "//pkg/config", "//pkg/ddl", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/server", "//pkg/testkit", "//pkg/testkit/testfailpoint", diff --git a/pkg/ddl/tests/fastcreatetable/fastcreatetable_test.go b/pkg/ddl/tests/fastcreatetable/fastcreatetable_test.go index 1b20945b6e9ac..ce2277a17ebe5 100644 --- a/pkg/ddl/tests/fastcreatetable/fastcreatetable_test.go +++ b/pkg/ddl/tests/fastcreatetable/fastcreatetable_test.go @@ -19,7 +19,7 @@ import ( "time" "github.com/pingcap/tidb/pkg/ddl" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/server" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" diff --git a/pkg/ddl/tests/fk/BUILD.bazel b/pkg/ddl/tests/fk/BUILD.bazel index 27509bace3f07..de6741571cc12 100644 --- a/pkg/ddl/tests/fk/BUILD.bazel +++ b/pkg/ddl/tests/fk/BUILD.bazel @@ -16,6 +16,7 @@ go_test( "//pkg/infoschema", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", 
"//pkg/parser/auth", "//pkg/parser/model", "//pkg/sessiontxn", diff --git a/pkg/ddl/tests/fk/foreign_key_test.go b/pkg/ddl/tests/fk/foreign_key_test.go index 0a7f56d5f5f74..5fd96928ec5bf 100644 --- a/pkg/ddl/tests/fk/foreign_key_test.go +++ b/pkg/ddl/tests/fk/foreign_key_test.go @@ -25,8 +25,9 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/util/dbterror" @@ -51,21 +52,21 @@ func TestCreateTableWithForeignKeyMetaInfo(t *testing.T) { tb1ReferredFKs := getTableInfoReferredForeignKeys(t, dom, "test", "t1") require.Equal(t, 1, len(tb1ReferredFKs)) require.Equal(t, model.ReferredFKInfo{ - Cols: []model.CIStr{model.NewCIStr("id")}, - ChildSchema: model.NewCIStr("test2"), - ChildTable: model.NewCIStr("t2"), - ChildFKName: model.NewCIStr("fk_b"), + Cols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + ChildSchema: pmodel.NewCIStr("test2"), + ChildTable: pmodel.NewCIStr("t2"), + ChildFKName: pmodel.NewCIStr("fk_b"), }, *tb1ReferredFKs[0]) tb2ReferredFKs := getTableInfoReferredForeignKeys(t, dom, "test2", "t2") require.Equal(t, 0, len(tb2ReferredFKs)) require.Equal(t, 1, len(tb2Info.ForeignKeys)) require.Equal(t, model.FKInfo{ ID: 1, - Name: model.NewCIStr("fk_b"), - RefSchema: model.NewCIStr("test"), - RefTable: model.NewCIStr("t1"), - RefCols: []model.CIStr{model.NewCIStr("id")}, - Cols: []model.CIStr{model.NewCIStr("b")}, + Name: pmodel.NewCIStr("fk_b"), + RefSchema: pmodel.NewCIStr("test"), + RefTable: pmodel.NewCIStr("t1"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("b")}, OnDelete: 2, OnUpdate: 1, State: model.StatePublic, @@ -85,21 +86,21 @@ func 
TestCreateTableWithForeignKeyMetaInfo(t *testing.T) { tb2ReferredFKs = getTableInfoReferredForeignKeys(t, dom, "test2", "t2") require.Equal(t, 1, len(tb2ReferredFKs)) require.Equal(t, model.ReferredFKInfo{ - Cols: []model.CIStr{model.NewCIStr("id")}, - ChildSchema: model.NewCIStr("test2"), - ChildTable: model.NewCIStr("t3"), - ChildFKName: model.NewCIStr("fk_b"), + Cols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + ChildSchema: pmodel.NewCIStr("test2"), + ChildTable: pmodel.NewCIStr("t3"), + ChildFKName: pmodel.NewCIStr("fk_b"), }, *tb2ReferredFKs[0]) tb3ReferredFKs := getTableInfoReferredForeignKeys(t, dom, "test2", "t3") require.Equal(t, 0, len(tb3ReferredFKs)) require.Equal(t, 1, len(tb3Info.ForeignKeys)) require.Equal(t, model.FKInfo{ ID: 1, - Name: model.NewCIStr("fk_b"), - RefSchema: model.NewCIStr("test2"), - RefTable: model.NewCIStr("t2"), - RefCols: []model.CIStr{model.NewCIStr("id")}, - Cols: []model.CIStr{model.NewCIStr("b")}, + Name: pmodel.NewCIStr("fk_b"), + RefSchema: pmodel.NewCIStr("test2"), + RefTable: pmodel.NewCIStr("t2"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("b")}, OnDelete: 4, OnUpdate: 3, State: model.StatePublic, @@ -116,18 +117,18 @@ func TestCreateTableWithForeignKeyMetaInfo(t *testing.T) { tb5ReferredFKs := getTableInfoReferredForeignKeys(t, dom, "test2", "t5") require.Equal(t, 1, len(tb5ReferredFKs)) require.Equal(t, model.ReferredFKInfo{ - Cols: []model.CIStr{model.NewCIStr("id")}, - ChildSchema: model.NewCIStr("test2"), - ChildTable: model.NewCIStr("t5"), - ChildFKName: model.NewCIStr("fk_1"), + Cols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + ChildSchema: pmodel.NewCIStr("test2"), + ChildTable: pmodel.NewCIStr("t5"), + ChildFKName: pmodel.NewCIStr("fk_1"), }, *tb5ReferredFKs[0]) require.Equal(t, model.FKInfo{ ID: 1, - Name: model.NewCIStr("fk_1"), - RefSchema: model.NewCIStr("test2"), - RefTable: model.NewCIStr("t5"), - RefCols: []model.CIStr{model.NewCIStr("id")}, - Cols: 
[]model.CIStr{model.NewCIStr("a")}, + Name: pmodel.NewCIStr("fk_1"), + RefSchema: pmodel.NewCIStr("test2"), + RefTable: pmodel.NewCIStr("t5"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("a")}, State: model.StatePublic, Version: 1, }, *tb5Info.ForeignKeys[0]) @@ -161,21 +162,21 @@ func TestCreateTableWithForeignKeyMetaInfo2(t *testing.T) { tb1ReferredFKs := getTableInfoReferredForeignKeys(t, dom, "test", "t1") require.Equal(t, 1, len(tb1ReferredFKs)) require.Equal(t, model.ReferredFKInfo{ - Cols: []model.CIStr{model.NewCIStr("id")}, - ChildSchema: model.NewCIStr("test2"), - ChildTable: model.NewCIStr("t2"), - ChildFKName: model.NewCIStr("fk_b"), + Cols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + ChildSchema: pmodel.NewCIStr("test2"), + ChildTable: pmodel.NewCIStr("t2"), + ChildFKName: pmodel.NewCIStr("fk_b"), }, *tb1ReferredFKs[0]) tb2ReferredFKs := getTableInfoReferredForeignKeys(t, dom, "test2", "t2") require.Equal(t, 0, len(tb2ReferredFKs)) require.Equal(t, 1, len(tb2Info.ForeignKeys)) require.Equal(t, model.FKInfo{ ID: 1, - Name: model.NewCIStr("fk_b"), - RefSchema: model.NewCIStr("test"), - RefTable: model.NewCIStr("t1"), - RefCols: []model.CIStr{model.NewCIStr("id")}, - Cols: []model.CIStr{model.NewCIStr("b")}, + Name: pmodel.NewCIStr("fk_b"), + RefSchema: pmodel.NewCIStr("test"), + RefTable: pmodel.NewCIStr("t1"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("b")}, OnDelete: 2, OnUpdate: 1, State: model.StatePublic, @@ -193,27 +194,27 @@ func TestCreateTableWithForeignKeyMetaInfo2(t *testing.T) { tb1ReferredFKs = getTableInfoReferredForeignKeys(t, dom, "test", "t1") require.Equal(t, 2, len(tb1ReferredFKs)) require.Equal(t, model.ReferredFKInfo{ - Cols: []model.CIStr{model.NewCIStr("id")}, - ChildSchema: model.NewCIStr("test"), - ChildTable: model.NewCIStr("t3"), - ChildFKName: model.NewCIStr("fk_a"), + Cols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + ChildSchema: 
pmodel.NewCIStr("test"), + ChildTable: pmodel.NewCIStr("t3"), + ChildFKName: pmodel.NewCIStr("fk_a"), }, *tb1ReferredFKs[0]) require.Equal(t, model.ReferredFKInfo{ - Cols: []model.CIStr{model.NewCIStr("id")}, - ChildSchema: model.NewCIStr("test2"), - ChildTable: model.NewCIStr("t2"), - ChildFKName: model.NewCIStr("fk_b"), + Cols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + ChildSchema: pmodel.NewCIStr("test2"), + ChildTable: pmodel.NewCIStr("t2"), + ChildFKName: pmodel.NewCIStr("fk_b"), }, *tb1ReferredFKs[1]) tb3ReferredFKs := getTableInfoReferredForeignKeys(t, dom, "test", "t3") require.Equal(t, 0, len(tb3ReferredFKs)) require.Equal(t, 2, len(tb3Info.ForeignKeys)) require.Equal(t, model.FKInfo{ ID: 1, - Name: model.NewCIStr("fk_a"), - RefSchema: model.NewCIStr("test"), - RefTable: model.NewCIStr("t1"), - RefCols: []model.CIStr{model.NewCIStr("id")}, - Cols: []model.CIStr{model.NewCIStr("a")}, + Name: pmodel.NewCIStr("fk_a"), + RefSchema: pmodel.NewCIStr("test"), + RefTable: pmodel.NewCIStr("t1"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("a")}, OnDelete: 2, OnUpdate: 1, State: model.StatePublic, @@ -221,11 +222,11 @@ func TestCreateTableWithForeignKeyMetaInfo2(t *testing.T) { }, *tb3Info.ForeignKeys[0]) require.Equal(t, model.FKInfo{ ID: 2, - Name: model.NewCIStr("fk_a2"), - RefSchema: model.NewCIStr("test2"), - RefTable: model.NewCIStr("t2"), - RefCols: []model.CIStr{model.NewCIStr("id")}, - Cols: []model.CIStr{model.NewCIStr("a")}, + Name: pmodel.NewCIStr("fk_a2"), + RefSchema: pmodel.NewCIStr("test2"), + RefTable: pmodel.NewCIStr("t2"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("a")}, State: model.StatePublic, Version: 1, }, *tb3Info.ForeignKeys[1]) @@ -243,21 +244,21 @@ func TestCreateTableWithForeignKeyMetaInfo2(t *testing.T) { tb1ReferredFKs = getTableInfoReferredForeignKeys(t, dom, "test", "t1") require.Equal(t, 1, len(tb1ReferredFKs)) require.Equal(t, 
model.ReferredFKInfo{ - Cols: []model.CIStr{model.NewCIStr("id")}, - ChildSchema: model.NewCIStr("test"), - ChildTable: model.NewCIStr("t3"), - ChildFKName: model.NewCIStr("fk_a"), + Cols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + ChildSchema: pmodel.NewCIStr("test"), + ChildTable: pmodel.NewCIStr("t3"), + ChildFKName: pmodel.NewCIStr("fk_a"), }, *tb1ReferredFKs[0]) tb3ReferredFKs = getTableInfoReferredForeignKeys(t, dom, "test", "t3") require.Equal(t, 0, len(tb3ReferredFKs)) require.Equal(t, 2, len(tb3Info.ForeignKeys)) require.Equal(t, model.FKInfo{ ID: 1, - Name: model.NewCIStr("fk_a"), - RefSchema: model.NewCIStr("test"), - RefTable: model.NewCIStr("t1"), - RefCols: []model.CIStr{model.NewCIStr("id")}, - Cols: []model.CIStr{model.NewCIStr("a")}, + Name: pmodel.NewCIStr("fk_a"), + RefSchema: pmodel.NewCIStr("test"), + RefTable: pmodel.NewCIStr("t1"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("a")}, OnDelete: 2, OnUpdate: 1, State: model.StatePublic, @@ -265,11 +266,11 @@ func TestCreateTableWithForeignKeyMetaInfo2(t *testing.T) { }, *tb3Info.ForeignKeys[0]) require.Equal(t, model.FKInfo{ ID: 2, - Name: model.NewCIStr("fk_a2"), - RefSchema: model.NewCIStr("test2"), - RefTable: model.NewCIStr("t2"), - RefCols: []model.CIStr{model.NewCIStr("id")}, - Cols: []model.CIStr{model.NewCIStr("a")}, + Name: pmodel.NewCIStr("fk_a2"), + RefSchema: pmodel.NewCIStr("test2"), + RefTable: pmodel.NewCIStr("t2"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("a")}, State: model.StatePublic, Version: 1, }, *tb3Info.ForeignKeys[1]) @@ -365,18 +366,18 @@ func TestRenameTableWithForeignKeyMetaInfo(t *testing.T) { require.Equal(t, 1, len(tblInfo.ForeignKeys)) require.Equal(t, 1, len(tbReferredFKs)) require.Equal(t, model.ReferredFKInfo{ - Cols: []model.CIStr{model.NewCIStr("id")}, - ChildSchema: model.NewCIStr("test2"), - ChildTable: model.NewCIStr("t2"), - ChildFKName: model.NewCIStr("fk"), + Cols: 
[]pmodel.CIStr{pmodel.NewCIStr("id")}, + ChildSchema: pmodel.NewCIStr("test2"), + ChildTable: pmodel.NewCIStr("t2"), + ChildFKName: pmodel.NewCIStr("fk"), }, *tbReferredFKs[0]) require.Equal(t, model.FKInfo{ ID: 1, - Name: model.NewCIStr("fk"), - RefSchema: model.NewCIStr("test2"), - RefTable: model.NewCIStr("t2"), - RefCols: []model.CIStr{model.NewCIStr("id")}, - Cols: []model.CIStr{model.NewCIStr("a")}, + Name: pmodel.NewCIStr("fk"), + RefSchema: pmodel.NewCIStr("test2"), + RefTable: pmodel.NewCIStr("t2"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("a")}, State: model.StatePublic, Version: 1, }, *tblInfo.ForeignKeys[0]) @@ -397,21 +398,21 @@ func TestRenameTableWithForeignKeyMetaInfo(t *testing.T) { tb1ReferredFKs := getTableInfoReferredForeignKeys(t, dom, "test", "t1") require.Equal(t, 1, len(tb1ReferredFKs)) require.Equal(t, model.ReferredFKInfo{ - Cols: []model.CIStr{model.NewCIStr("id")}, - ChildSchema: model.NewCIStr("test2"), - ChildTable: model.NewCIStr("tt2"), - ChildFKName: model.NewCIStr("fk_b"), + Cols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + ChildSchema: pmodel.NewCIStr("test2"), + ChildTable: pmodel.NewCIStr("tt2"), + ChildFKName: pmodel.NewCIStr("fk_b"), }, *tb1ReferredFKs[0]) tb2ReferredFKs := getTableInfoReferredForeignKeys(t, dom, "test2", "tt2") require.Equal(t, 0, len(tb2ReferredFKs)) require.Equal(t, 1, len(tb2Info.ForeignKeys)) require.Equal(t, model.FKInfo{ ID: 1, - Name: model.NewCIStr("fk_b"), - RefSchema: model.NewCIStr("test"), - RefTable: model.NewCIStr("t1"), - RefCols: []model.CIStr{model.NewCIStr("id")}, - Cols: []model.CIStr{model.NewCIStr("b")}, + Name: pmodel.NewCIStr("fk_b"), + RefSchema: pmodel.NewCIStr("test"), + RefTable: pmodel.NewCIStr("t1"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("b")}, State: model.StatePublic, Version: 1, }, *tb2Info.ForeignKeys[0]) @@ -429,10 +430,10 @@ func TestRenameTableWithForeignKeyMetaInfo(t *testing.T) 
{ require.Equal(t, model.ActionRenameTable, diff.Type) require.Equal(t, 0, len(diff.AffectedOpts)) require.Equal(t, model.ReferredFKInfo{ - Cols: []model.CIStr{model.NewCIStr("id")}, - ChildSchema: model.NewCIStr("test2"), - ChildTable: model.NewCIStr("tt2"), - ChildFKName: model.NewCIStr("fk_b"), + Cols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + ChildSchema: pmodel.NewCIStr("test2"), + ChildTable: pmodel.NewCIStr("tt2"), + ChildFKName: pmodel.NewCIStr("fk_b"), }, *tb1ReferredFKs[0]) tbl2Info := getTableInfo(t, dom, "test2", "tt2") tb2ReferredFKs = getTableInfoReferredForeignKeys(t, dom, "test2", "tt2") @@ -440,11 +441,11 @@ func TestRenameTableWithForeignKeyMetaInfo(t *testing.T) { require.Equal(t, 1, len(tbl2Info.ForeignKeys)) require.Equal(t, model.FKInfo{ ID: 1, - Name: model.NewCIStr("fk_b"), - RefSchema: model.NewCIStr("test3"), - RefTable: model.NewCIStr("tt1"), - RefCols: []model.CIStr{model.NewCIStr("id")}, - Cols: []model.CIStr{model.NewCIStr("b")}, + Name: pmodel.NewCIStr("fk_b"), + RefSchema: pmodel.NewCIStr("test3"), + RefTable: pmodel.NewCIStr("tt1"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("b")}, State: model.StatePublic, Version: 1, }, *tbl2Info.ForeignKeys[0]) @@ -958,7 +959,7 @@ func getTableInfo(t *testing.T, dom *domain.Domain, db, tb string) *model.TableI err := dom.Reload() require.NoError(t, err) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr(db), model.NewCIStr(tb)) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr(db), pmodel.NewCIStr(tb)) require.NoError(t, err) _, exist := is.TableByID(context.Background(), tbl.Meta().ID) require.True(t, exist) @@ -1464,16 +1465,16 @@ func TestRenameTablesWithForeignKey(t *testing.T) { require.Equal(t, 1, len(tt1ReferredFKs)) require.Equal(t, 1, len(tt2ReferredFKs)) require.Equal(t, model.ReferredFKInfo{ - Cols: []model.CIStr{model.NewCIStr("id")}, - ChildSchema: model.NewCIStr("test2"), - 
ChildTable: model.NewCIStr("tt2"), - ChildFKName: model.NewCIStr("fk"), + Cols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + ChildSchema: pmodel.NewCIStr("test2"), + ChildTable: pmodel.NewCIStr("tt2"), + ChildFKName: pmodel.NewCIStr("fk"), }, *tt1ReferredFKs[0]) require.Equal(t, model.ReferredFKInfo{ - Cols: []model.CIStr{model.NewCIStr("id")}, - ChildSchema: model.NewCIStr("test1"), - ChildTable: model.NewCIStr("tt1"), - ChildFKName: model.NewCIStr("fk"), + Cols: []pmodel.CIStr{pmodel.NewCIStr("id")}, + ChildSchema: pmodel.NewCIStr("test1"), + ChildTable: pmodel.NewCIStr("tt1"), + ChildFKName: pmodel.NewCIStr("fk"), }, *tt2ReferredFKs[0]) // check show create table information diff --git a/pkg/ddl/tests/indexmerge/BUILD.bazel b/pkg/ddl/tests/indexmerge/BUILD.bazel index ede375edfed6e..71f1344442fdf 100644 --- a/pkg/ddl/tests/indexmerge/BUILD.bazel +++ b/pkg/ddl/tests/indexmerge/BUILD.bazel @@ -19,7 +19,7 @@ go_test( "//pkg/errno", "//pkg/kv", "//pkg/meta/autoid", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/tablecodec", "//pkg/testkit", "//pkg/testkit/testfailpoint", diff --git a/pkg/ddl/tests/indexmerge/merge_test.go b/pkg/ddl/tests/indexmerge/merge_test.go index 1504c50fb27b1..d8f16032df284 100644 --- a/pkg/ddl/tests/indexmerge/merge_test.go +++ b/pkg/ddl/tests/indexmerge/merge_test.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl/testutil" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" diff --git a/pkg/ddl/tests/partition/BUILD.bazel b/pkg/ddl/tests/partition/BUILD.bazel index 714e27cca252a..9eab00ff199f3 100644 --- a/pkg/ddl/tests/partition/BUILD.bazel +++ b/pkg/ddl/tests/partition/BUILD.bazel @@ -17,6 +17,7 @@ go_test( "//pkg/domain", "//pkg/errno", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/ast", 
"//pkg/parser/model", "//pkg/parser/mysql", diff --git a/pkg/ddl/tests/partition/db_partition_test.go b/pkg/ddl/tests/partition/db_partition_test.go index bc189f6e0c742..20646e1da046a 100644 --- a/pkg/ddl/tests/partition/db_partition_test.go +++ b/pkg/ddl/tests/partition/db_partition_test.go @@ -33,8 +33,9 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/session" @@ -98,11 +99,11 @@ func TestCreateTableWithPartition(t *testing.T) { );`) ctx := tk.Session() is := domain.GetDomain(ctx).InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tp")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tp")) require.NoError(t, err) require.NotNil(t, tbl.Meta().Partition) part := tbl.Meta().Partition - require.Equal(t, model.PartitionTypeRange, part.Type) + require.Equal(t, pmodel.PartitionTypeRange, part.Type) require.Equal(t, "`a`", part.Expr) for _, pdef := range part.Definitions { require.Greater(t, pdef.ID, int64(0)) @@ -932,7 +933,7 @@ func TestCreateTableWithListPartition(t *testing.T) { tblInfo := tbl.Meta() require.NotNil(t, tblInfo.Partition) require.True(t, tblInfo.Partition.Enable) - require.Equal(t, model.PartitionTypeList, tblInfo.Partition.Type) + require.Equal(t, pmodel.PartitionTypeList, tblInfo.Partition.Type) } } @@ -1143,7 +1144,7 @@ func TestCreateTableWithListColumnsPartition(t *testing.T) { tblInfo := tbl.Meta() require.NotNil(t, tblInfo.Partition) require.Equal(t, true, tblInfo.Partition.Enable) - require.True(t, tblInfo.Partition.Type == model.PartitionTypeList) + require.True(t, tblInfo.Partition.Type 
== pmodel.PartitionTypeList) } } @@ -1165,10 +1166,10 @@ func TestAlterTableTruncatePartitionByList(t *testing.T) { tbl := external.GetTableByName(t, tk, "test", "t") require.NotNil(t, tbl.Meta().Partition) part := tbl.Meta().Partition - require.True(t, part.Type == model.PartitionTypeList) + require.True(t, part.Type == pmodel.PartitionTypeList) require.Len(t, part.Definitions, 3) require.Equal(t, [][]string{{"3"}, {"4"}}, part.Definitions[1].InValues) - require.Equal(t, model.NewCIStr("p1"), part.Definitions[1].Name) + require.Equal(t, pmodel.NewCIStr("p1"), part.Definitions[1].Name) require.False(t, part.Definitions[1].ID == oldTbl.Meta().Partition.Definitions[1].ID) sql := "alter table t truncate partition p10;" @@ -1197,10 +1198,10 @@ func TestAlterTableTruncatePartitionByListColumns(t *testing.T) { tbl := external.GetTableByName(t, tk, "test", "t") require.NotNil(t, tbl.Meta().Partition) part := tbl.Meta().Partition - require.True(t, part.Type == model.PartitionTypeList) + require.True(t, part.Type == pmodel.PartitionTypeList) require.Len(t, part.Definitions, 3) require.Equal(t, [][]string{{"3", `'a'`}, {"4", `'b'`}}, part.Definitions[1].InValues) - require.Equal(t, model.NewCIStr("p1"), part.Definitions[1].Name) + require.Equal(t, pmodel.NewCIStr("p1"), part.Definitions[1].Name) require.False(t, part.Definitions[1].ID == oldTbl.Meta().Partition.Definitions[1].ID) sql := "alter table t truncate partition p10;" @@ -2258,7 +2259,7 @@ func TestTruncatePartitionAndDropTable(t *testing.T) { result.Check(testkit.Rows("10")) ctx := tk.Session() is := domain.GetDomain(ctx).InfoSchema() - oldTblInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t3")) + oldTblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t3")) require.NoError(t, err) // Only one partition id test is taken here. 
tk.MustExec("truncate table t3;") @@ -2293,7 +2294,7 @@ func TestTruncatePartitionAndDropTable(t *testing.T) { result = tk.MustQuery("select count(*) from t4; ") result.Check(testkit.Rows("10")) is = domain.GetDomain(ctx).InfoSchema() - oldTblInfo, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t4")) + oldTblInfo, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t4")) require.NoError(t, err) // Only one partition id test is taken here. oldPID = oldTblInfo.Meta().Partition.Definitions[1].ID @@ -2317,13 +2318,13 @@ func TestTruncatePartitionAndDropTable(t *testing.T) { partition p5 values less than (2015) );`) is = domain.GetDomain(ctx).InfoSchema() - oldTblInfo, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t5")) + oldTblInfo, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t5")) require.NoError(t, err) oldPID = oldTblInfo.Meta().Partition.Definitions[0].ID tk.MustExec("truncate table t5;") is = domain.GetDomain(ctx).InfoSchema() - newTblInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t5")) + newTblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t5")) require.NoError(t, err) newPID := newTblInfo.Meta().Partition.Definitions[0].ID require.True(t, oldPID != newPID) @@ -2339,14 +2340,14 @@ func TestTruncatePartitionAndDropTable(t *testing.T) { partition by hash( month(signed) ) partitions 12;`) is = domain.GetDomain(ctx).InfoSchema() - oldTblInfo, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("clients")) + oldTblInfo, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("clients")) require.NoError(t, err) oldDefs := oldTblInfo.Meta().Partition.Definitions // Test truncate `hash partitioned table` reassigns new partitionIDs. 
tk.MustExec("truncate table clients;") is = domain.GetDomain(ctx).InfoSchema() - newTblInfo, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("clients")) + newTblInfo, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("clients")) require.NoError(t, err) newDefs := newTblInfo.Meta().Partition.Definitions for i := 0; i < len(oldDefs); i++ { @@ -2500,7 +2501,7 @@ func testPartitionAddIndex(tk *testkit.TestKit, t *testing.T, key string) { tk.MustExec("alter table partition_add_idx add index idx2 (id, hired)") ctx := tk.Session() is := domain.GetDomain(ctx).InfoSchema() - tt, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("partition_add_idx")) + tt, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("partition_add_idx")) require.NoError(t, err) var idx1 table.Index for _, idx := range tt.Indices() { @@ -3263,7 +3264,7 @@ func TestAlterLastIntervalPartition(t *testing.T) { first partition less than ('2023-01-01') last partition less than ('2023-01-03');`) ctx := tk.Session() - tbl, err := domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) pd := tbl.Meta().Partition.Definitions require.Equal(t, 3, len(pd)) @@ -3272,7 +3273,7 @@ func TestAlterLastIntervalPartition(t *testing.T) { require.Equal(t, "'2023-01-03 00:00:00'", pd[2].LessThan[0]) tk.MustExec("alter table t last partition less than ('2024-01-04')") tk.MustExec("alter table t last partition less than ('2025-01-01 00:00:00')") - tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), 
pmodel.NewCIStr("t")) require.NoError(t, err) pd = tbl.Meta().Partition.Definitions require.Equal(t, 732, len(pd)) @@ -3288,7 +3289,7 @@ func TestAlterLastIntervalPartition(t *testing.T) { interval (2 day) first partition less than ('2023-01-01') last partition less than ('2023-01-05');`) - tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) pd = tbl.Meta().Partition.Definitions require.Equal(t, 3, len(pd)) @@ -3297,7 +3298,7 @@ func TestAlterLastIntervalPartition(t *testing.T) { require.Equal(t, "'2023-01-05 00:00:00'", pd[2].LessThan[0]) tk.MustExec("alter table t2 last partition less than ('2023-01-09')") tk.MustExec("alter table t2 last partition less than ('2023-01-11 00:00:00')") - tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) pd = tbl.Meta().Partition.Definitions require.Equal(t, 6, len(pd)) @@ -3314,7 +3315,7 @@ func TestAlterLastIntervalPartition(t *testing.T) { interval (2 day) first partition less than ('2023-01-01 12:01:02') last partition less than ('2023-01-05 12:01:02');`) - tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t3")) + tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t3")) require.NoError(t, err) pd = tbl.Meta().Partition.Definitions require.Equal(t, 3, len(pd)) @@ -3322,7 +3323,7 @@ func TestAlterLastIntervalPartition(t *testing.T) { require.Equal(t, "'2023-01-03 12:01:02'", pd[1].LessThan[0]) require.Equal(t, "'2023-01-05 12:01:02'", 
pd[2].LessThan[0]) tk.MustExec("alter table t3 last partition less than ('2023-01-09 12:01:02')") - tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t3")) + tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t3")) require.NoError(t, err) pd = tbl.Meta().Partition.Definitions require.Equal(t, 5, len(pd)) @@ -3338,7 +3339,7 @@ func TestAlterLastIntervalPartition(t *testing.T) { interval (48 hour) first partition less than ('2023-01-01') last partition less than ('2023-01-05');`) - tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t4")) + tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t4")) require.NoError(t, err) pd = tbl.Meta().Partition.Definitions require.Equal(t, 3, len(pd)) @@ -3346,7 +3347,7 @@ func TestAlterLastIntervalPartition(t *testing.T) { require.Equal(t, "'2023-01-03 00:00:00'", pd[1].LessThan[0]) require.Equal(t, "'2023-01-05 00:00:00'", pd[2].LessThan[0]) tk.MustExec("alter table t4 last partition less than ('2023-01-09 00:00:00')") - tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t4")) + tbl, err = domain.GetDomain(ctx).InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t4")) require.NoError(t, err) pd = tbl.Meta().Partition.Definitions require.Equal(t, 5, len(pd)) diff --git a/pkg/ddl/tests/serial/BUILD.bazel b/pkg/ddl/tests/serial/BUILD.bazel index e8c736ec88ebf..0566b1e160acb 100644 --- a/pkg/ddl/tests/serial/BUILD.bazel +++ b/pkg/ddl/tests/serial/BUILD.bazel @@ -21,6 +21,7 @@ go_test( "//pkg/kv", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/session", diff --git 
a/pkg/ddl/tests/serial/serial_test.go b/pkg/ddl/tests/serial/serial_test.go index a548e1bc74bbc..2004c65fe6823 100644 --- a/pkg/ddl/tests/serial/serial_test.go +++ b/pkg/ddl/tests/serial/serial_test.go @@ -34,7 +34,8 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -119,7 +120,7 @@ func TestCreateTableWithLike(t *testing.T) { tk.MustQuery("select * from t1").Check(testkit.Rows("1 11")) tk.MustQuery("select * from t2").Check(testkit.Rows("1 12")) is := domain.GetDomain(tk.Session()).InfoSchema() - tbl1, err := is.TableByName(context.Background(), model.NewCIStr("ctwl_db"), model.NewCIStr("t1")) + tbl1, err := is.TableByName(context.Background(), pmodel.NewCIStr("ctwl_db"), pmodel.NewCIStr("t1")) require.NoError(t, err) tbl1Info := tbl1.Meta() require.Nil(t, tbl1Info.ForeignKeys) @@ -127,7 +128,7 @@ func TestCreateTableWithLike(t *testing.T) { col := tbl1Info.Columns[0] hasNotNull := mysql.HasNotNullFlag(col.GetFlag()) require.True(t, hasNotNull) - tbl2, err := is.TableByName(context.Background(), model.NewCIStr("ctwl_db"), model.NewCIStr("t2")) + tbl2, err := is.TableByName(context.Background(), pmodel.NewCIStr("ctwl_db"), pmodel.NewCIStr("t2")) require.NoError(t, err) tbl2Info := tbl2.Meta() require.Nil(t, tbl2Info.ForeignKeys) @@ -141,7 +142,7 @@ func TestCreateTableWithLike(t *testing.T) { tk.MustExec("insert into t1 set c2=11") tk.MustQuery("select * from t1").Check(testkit.Rows("1 11")) is = domain.GetDomain(tk.Session()).InfoSchema() - tbl1, err = is.TableByName(context.Background(), model.NewCIStr("ctwl_db1"), model.NewCIStr("t1")) + tbl1, err = is.TableByName(context.Background(), pmodel.NewCIStr("ctwl_db1"), 
pmodel.NewCIStr("t1")) require.NoError(t, err) require.Nil(t, tbl1.Meta().ForeignKeys) @@ -277,7 +278,7 @@ func TestCreateTableWithLikeAtTemporaryMode(t *testing.T) { tk.MustExec(`create global temporary table test_gv_ddl_temp like test_gv_ddl on commit delete rows;`) defer tk.MustExec("drop table if exists test_gv_ddl_temp, test_gv_ddl") is := sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema() - table, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("test_gv_ddl")) + table, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("test_gv_ddl")) require.NoError(t, err) testCases := []struct { generatedExprString string @@ -306,7 +307,7 @@ func TestCreateTableWithLikeAtTemporaryMode(t *testing.T) { defer tk.MustExec("drop table if exists test_foreign_key, t1") tk.MustExec("create global temporary table test_foreign_key_temp like test_foreign_key on commit delete rows") is = sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema() - table, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("test_foreign_key_temp")) + table, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("test_foreign_key_temp")) require.NoError(t, err) tableInfo := table.Meta() require.Equal(t, 0, len(tableInfo.ForeignKeys)) @@ -390,7 +391,7 @@ func TestCreateTableWithLikeAtTemporaryMode(t *testing.T) { tk.MustExec("create table foreign_key_table2 (c int,d int,foreign key (d) references foreign_key_table1 (b))") tk.MustExec("create temporary table foreign_key_tmp like foreign_key_table2") is = sessiontxn.GetTxnManager(tk.Session()).GetTxnInfoSchema() - table, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("foreign_key_tmp")) + table, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("foreign_key_tmp")) require.NoError(t, err) tableInfo = table.Meta() require.Equal(t, 0, 
len(tableInfo.ForeignKeys)) @@ -1240,7 +1241,7 @@ func TestGetReverseKey(t *testing.T) { // Get table ID for split. is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("db_get"), model.NewCIStr("test_get")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("db_get"), pmodel.NewCIStr("test_get")) require.NoError(t, err) // Split the table. tableStart := tablecodec.GenTableRecordPrefix(tbl.Meta().ID) diff --git a/pkg/ddl/tests/tiflash/BUILD.bazel b/pkg/ddl/tests/tiflash/BUILD.bazel index e3d7289f33f94..5f675fa46b68b 100644 --- a/pkg/ddl/tests/tiflash/BUILD.bazel +++ b/pkg/ddl/tests/tiflash/BUILD.bazel @@ -18,6 +18,7 @@ go_test( "//pkg/domain", "//pkg/domain/infosync", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/session", "//pkg/sessionctx", diff --git a/pkg/ddl/tests/tiflash/ddl_tiflash_test.go b/pkg/ddl/tests/tiflash/ddl_tiflash_test.go index a5578277cf6c2..decd9912450cf 100644 --- a/pkg/ddl/tests/tiflash/ddl_tiflash_test.go +++ b/pkg/ddl/tests/tiflash/ddl_tiflash_test.go @@ -35,7 +35,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/store/gcworker" @@ -147,7 +148,7 @@ func (s *tiflashContext) CheckFlashback(tk *testkit.TestKit, t *testing.T) { time.Sleep(ddl.PollTiFlashInterval * 3) CheckTableAvailable(s.dom, t, 1, []string{}) - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb) if tb.Meta().Partition != nil { @@ -281,7 +282,7 @@ func 
TestTiFlashReplicaPartitionTableNormal(t *testing.T) { tk.MustExec("drop table if exists ddltiflash") tk.MustExec("create table ddltiflash(z int) PARTITION BY RANGE(z) (PARTITION p0 VALUES LESS THAN (10),PARTITION p1 VALUES LESS THAN (20), PARTITION p2 VALUES LESS THAN (30))") - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) replica := tb.Meta().TiFlashReplica require.Nil(t, replica) @@ -294,7 +295,7 @@ func TestTiFlashReplicaPartitionTableNormal(t *testing.T) { // Should get schema again CheckTableAvailable(s.dom, t, 1, []string{}) - tb2, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb2, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb2) pi := tb2.Meta().GetPartitionInfo() @@ -334,7 +335,7 @@ func TestTiFlashReplicaPartitionTableBlock(t *testing.T) { }() tk.MustExec(fmt.Sprintf("ALTER TABLE ddltiflash ADD PARTITION (PARTITION pn VALUES LESS THAN (%v))", lessThan)) - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) pi := tb.Meta().GetPartitionInfo() require.NotNil(t, pi) @@ -372,14 +373,14 @@ func TestTiFlashReplicaAvailable(t *testing.T) { CheckTableAvailableWithTableName(s.dom, t, 1, []string{}, "test", "ddltiflash2") s.CheckFlashback(tk, t) - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), 
pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) r, ok := s.tiflash.GetPlacementRule(infosync.MakeRuleID(tb.Meta().ID)) require.NotNil(t, r) require.True(t, ok) tk.MustExec("alter table ddltiflash set tiflash replica 0") time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable) - tb, err = s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err = s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) replica := tb.Meta().TiFlashReplica require.Nil(t, replica) @@ -481,7 +482,7 @@ func TestTiFlashFlashbackCluster(t *testing.T) { } func CheckTableAvailableWithTableName(dom *domain.Domain, t *testing.T, count uint64, labels []string, db string, table string) { - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr(db), model.NewCIStr(table)) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(db), pmodel.NewCIStr(table)) require.NoError(t, err) replica := tb.Meta().TiFlashReplica require.NotNil(t, replica) @@ -495,7 +496,7 @@ func CheckTableAvailable(dom *domain.Domain, t *testing.T, count uint64, labels } func CheckTableNoReplica(dom *domain.Domain, t *testing.T, db string, table string) { - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr(db), model.NewCIStr(table)) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(db), pmodel.NewCIStr(table)) require.NoError(t, err) replica := tb.Meta().TiFlashReplica require.Nil(t, replica) @@ -559,7 +560,7 @@ func TestSetPlacementRuleNormal(t *testing.T) { tk.MustExec("drop table if exists ddltiflash") tk.MustExec("create table ddltiflash(z int)") tk.MustExec("alter table ddltiflash set tiflash replica 1 location labels 'a','b'") - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := 
s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) expectRule := infosync.MakeNewRule(tb.Meta().ID, 1, []string{"a", "b"}) res := s.tiflash.CheckPlacementRule(expectRule) @@ -614,7 +615,7 @@ func TestSetPlacementRuleWithGCWorker(t *testing.T) { tk.MustExec("drop table if exists ddltiflash_gc") tk.MustExec("create table ddltiflash_gc(z int)") tk.MustExec("alter table ddltiflash_gc set tiflash replica 1 location labels 'a','b'") - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash_gc")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash_gc")) require.NoError(t, err) expectRule := infosync.MakeNewRule(tb.Meta().ID, 1, []string{"a", "b"}) @@ -645,7 +646,7 @@ func TestSetPlacementRuleFail(t *testing.T) { s.tiflash.PdSwitch(true) }() tk.MustExec("alter table ddltiflash set tiflash replica 1") - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) expectRule := infosync.MakeNewRule(tb.Meta().ID, 1, []string{}) @@ -753,7 +754,7 @@ func TestTiFlashBackoff(t *testing.T) { // 1, 1.5, 2.25, 3.375, 5.5625 // (1), 1, 1, 2, 3, 5 time.Sleep(ddl.PollTiFlashInterval * 5) - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb) require.False(t, tb.Meta().TiFlashReplica.Available) @@ -762,7 +763,7 @@ func TestTiFlashBackoff(t *testing.T) { require.NoError(t, 
failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/PollTiFlashReplicaStatusReplaceCurAvailableValue")) time.Sleep(ddl.PollTiFlashInterval * 3) - tb, err = s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err = s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb) require.True(t, tb.Meta().TiFlashReplica.Available) @@ -870,7 +871,7 @@ func TestTiFlashBatchRateLimiter(t *testing.T) { check := func(expected int, total int) { cnt := 0 for i := 0; i < total; i++ { - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("tiflash_ddl_limit"), model.NewCIStr(fmt.Sprintf("t%v", i))) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("tiflash_ddl_limit"), pmodel.NewCIStr(fmt.Sprintf("t%v", i))) require.NoError(t, err) if tb.Meta().TiFlashReplica != nil { cnt++ @@ -972,7 +973,7 @@ func TestTiFlashProgress(t *testing.T) { tk.MustExec("create database tiflash_d") tk.MustExec("create table tiflash_d.t(z int)") tk.MustExec("alter table tiflash_d.t set tiflash replica 1") - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("tiflash_d"), model.NewCIStr("t")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("tiflash_d"), pmodel.NewCIStr("t")) require.NoError(t, err) require.NotNil(t, tb) mustExist := func(tid int64) { @@ -992,13 +993,13 @@ func TestTiFlashProgress(t *testing.T) { tk.MustExec("truncate table tiflash_d.t") mustAbsent(tb.Meta().ID) - tb, _ = s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("tiflash_d"), model.NewCIStr("t")) + tb, _ = s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("tiflash_d"), pmodel.NewCIStr("t")) infosync.UpdateTiFlashProgressCache(tb.Meta().ID, 5.0) tk.MustExec("alter table tiflash_d.t set tiflash replica 0") 
mustAbsent(tb.Meta().ID) tk.MustExec("alter table tiflash_d.t set tiflash replica 1") - tb, _ = s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("tiflash_d"), model.NewCIStr("t")) + tb, _ = s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("tiflash_d"), pmodel.NewCIStr("t")) infosync.UpdateTiFlashProgressCache(tb.Meta().ID, 5.0) tk.MustExec("drop table tiflash_d.t") mustAbsent(tb.Meta().ID) @@ -1015,7 +1016,7 @@ func TestTiFlashProgressForPartitionTable(t *testing.T) { tk.MustExec("create database tiflash_d") tk.MustExec("create table tiflash_d.t(z int) PARTITION BY RANGE(z) (PARTITION p0 VALUES LESS THAN (10))") tk.MustExec("alter table tiflash_d.t set tiflash replica 1") - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("tiflash_d"), model.NewCIStr("t")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("tiflash_d"), pmodel.NewCIStr("t")) require.NoError(t, err) require.NotNil(t, tb) mustExist := func(tid int64) { @@ -1035,13 +1036,13 @@ func TestTiFlashProgressForPartitionTable(t *testing.T) { tk.MustExec("truncate table tiflash_d.t") mustAbsent(tb.Meta().Partition.Definitions[0].ID) - tb, _ = s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("tiflash_d"), model.NewCIStr("t")) + tb, _ = s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("tiflash_d"), pmodel.NewCIStr("t")) infosync.UpdateTiFlashProgressCache(tb.Meta().Partition.Definitions[0].ID, 5.0) tk.MustExec("alter table tiflash_d.t set tiflash replica 0") mustAbsent(tb.Meta().Partition.Definitions[0].ID) tk.MustExec("alter table tiflash_d.t set tiflash replica 1") - tb, _ = s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("tiflash_d"), model.NewCIStr("t")) + tb, _ = s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("tiflash_d"), pmodel.NewCIStr("t")) infosync.UpdateTiFlashProgressCache(tb.Meta().Partition.Definitions[0].ID, 5.0) 
tk.MustExec("drop table tiflash_d.t") mustAbsent(tb.Meta().Partition.Definitions[0].ID) @@ -1080,7 +1081,7 @@ func TestTiFlashFailureProgressAfterAvailable(t *testing.T) { time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable * 3) CheckTableAvailable(s.dom, t, 1, []string{}) - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb) // after available, progress should can be updated. @@ -1131,7 +1132,7 @@ func TestTiFlashProgressAfterAvailable(t *testing.T) { time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable * 3) CheckTableAvailable(s.dom, t, 1, []string{}) - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb) // after available, progress should can be updated. @@ -1160,7 +1161,7 @@ func TestTiFlashProgressAfterAvailableForPartitionTable(t *testing.T) { time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable * 3) CheckTableAvailable(s.dom, t, 1, []string{}) - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb) // after available, progress should can be updated. 
@@ -1189,7 +1190,7 @@ func TestTiFlashProgressCache(t *testing.T) { time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable * 3) CheckTableAvailable(s.dom, t, 1, []string{}) - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb) infosync.UpdateTiFlashProgressCache(tb.Meta().ID, 0) @@ -1224,7 +1225,7 @@ func TestTiFlashProgressAvailableList(t *testing.T) { // After available, reset TiFlash sync status. for i := 0; i < tableCount; i++ { var err error - tbls[i], err = s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr(tableNames[i])) + tbls[i], err = s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr(tableNames[i])) require.NoError(t, err) require.NotNil(t, tbls[i]) s.tiflash.ResetSyncStatus(int(tbls[i].Meta().ID), false) @@ -1282,7 +1283,7 @@ func TestTiFlashAvailableAfterResetReplica(t *testing.T) { CheckTableAvailable(s.dom, t, 2, []string{}) tk.MustExec("alter table ddltiflash set tiflash replica 0") - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb) require.Nil(t, tb.Meta().TiFlashReplica) @@ -1297,7 +1298,7 @@ func TestTiFlashPartitionNotAvailable(t *testing.T) { tk.MustExec("drop table if exists ddltiflash") tk.MustExec("create table ddltiflash(z int) PARTITION BY RANGE(z) (PARTITION p0 VALUES LESS THAN (10))") - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), 
pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb) @@ -1305,7 +1306,7 @@ func TestTiFlashPartitionNotAvailable(t *testing.T) { s.tiflash.ResetSyncStatus(int(tb.Meta().Partition.Definitions[0].ID), false) time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable * 3) - tb, err = s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err = s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb) replica := tb.Meta().TiFlashReplica @@ -1315,7 +1316,7 @@ func TestTiFlashPartitionNotAvailable(t *testing.T) { s.tiflash.ResetSyncStatus(int(tb.Meta().Partition.Definitions[0].ID), true) time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable * 3) - tb, err = s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err = s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb) replica = tb.Meta().TiFlashReplica @@ -1343,7 +1344,7 @@ func TestTiFlashAvailableAfterAddPartition(t *testing.T) { time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable * 3) CheckTableAvailable(s.dom, t, 1, []string{}) - tb, err := s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ddltiflash")) + tb, err := s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) require.NotNil(t, tb) @@ -1359,7 +1360,7 @@ func TestTiFlashAvailableAfterAddPartition(t *testing.T) { tk.MustExec("ALTER TABLE ddltiflash ADD PARTITION (PARTITION pn VALUES LESS THAN (20))") time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable * 3) CheckTableAvailable(s.dom, t, 1, []string{}) - tb, err = s.dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), 
model.NewCIStr("ddltiflash")) + tb, err = s.dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ddltiflash")) require.NoError(t, err) pi := tb.Meta().GetPartitionInfo() require.NotNil(t, pi) @@ -1429,7 +1430,7 @@ func TestTiFlashReorgPartition(t *testing.T) { // Add the tiflash stores as peers for the new regions, to fullfil the check // in checkPartitionReplica pdCli := s.store.(tikv.Storage).GetRegionCache().PDClient() - var dummy []model.CIStr + var dummy []pmodel.CIStr partInfo := &model.PartitionInfo{} _ = job.DecodeArgs(&dummy, &partInfo) ctx := context.Background() diff --git a/pkg/ddl/testutil/BUILD.bazel b/pkg/ddl/testutil/BUILD.bazel index cee0cfee17fcc..0aa782f2e1a93 100644 --- a/pkg/ddl/testutil/BUILD.bazel +++ b/pkg/ddl/testutil/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "//pkg/disttask/operator", "//pkg/domain", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/session", "//pkg/session/types", diff --git a/pkg/ddl/testutil/testutil.go b/pkg/ddl/testutil/testutil.go index 584543cec19bf..9d516c25eff99 100644 --- a/pkg/ddl/testutil/testutil.go +++ b/pkg/ddl/testutil/testutil.go @@ -23,7 +23,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl/logutil" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/session" sessiontypes "github.com/pingcap/tidb/pkg/session/types" "github.com/pingcap/tidb/pkg/sessiontxn" @@ -71,7 +72,7 @@ func ExecMultiSQLInGoroutine(s kv.Storage, dbName string, multiSQL []string, don // ExtractAllTableHandles extracts all handles of a given table. 
func ExtractAllTableHandles(se sessiontypes.Session, dbName, tbName string) ([]int64, error) { dom := domain.GetDomain(se) - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr(dbName), model.NewCIStr(tbName)) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(dbName), pmodel.NewCIStr(tbName)) if err != nil { return nil, err } @@ -91,7 +92,7 @@ func ExtractAllTableHandles(se sessiontypes.Session, dbName, tbName string) ([]i // FindIdxInfo is to get IndexInfo by index name. func FindIdxInfo(dom *domain.Domain, dbName, tbName, idxName string) *model.IndexInfo { - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr(dbName), model.NewCIStr(tbName)) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(dbName), pmodel.NewCIStr(tbName)) if err != nil { logutil.DDLLogger().Warn("cannot find table", zap.String("dbName", dbName), zap.String("tbName", tbName)) return nil diff --git a/pkg/ddl/tiflash_replica_test.go b/pkg/ddl/tiflash_replica_test.go index bb122027fdf05..35b57c031a463 100644 --- a/pkg/ddl/tiflash_replica_test.go +++ b/pkg/ddl/tiflash_replica_test.go @@ -30,8 +30,9 @@ import ( "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/server" "github.com/pingcap/tidb/pkg/tablecodec" @@ -187,7 +188,7 @@ func TestInfoSchemaForTiFlashReplica(t *testing.T) { tk.MustExec("alter table t set tiflash replica 2 location labels 'a','b';") tk.MustQuery("select TABLE_SCHEMA,TABLE_NAME,REPLICA_COUNT,LOCATION_LABELS,AVAILABLE,PROGRESS from information_schema.tiflash_replica").Check(testkit.Rows("test t 2 a,b 0 0")) dom := domain.GetDomain(tk.Session()) - tbl, err := 
dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica.Available = true updateTableMeta(t, store, tbl.Meta().DBID, tbl.Meta()) @@ -240,7 +241,7 @@ func TestSetTableFlashReplicaForSystemTable(t *testing.T) { for _, one := range sysTables { _, err := tk.Exec(fmt.Sprintf("alter table `%s` set tiflash replica 1", one)) if db == "MySQL" || db == "SYS" { - tbl, err1 := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr(db), model.NewCIStr(one)) + tbl, err1 := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(db), pmodel.NewCIStr(one)) require.NoError(t, err1) if tbl.Meta().View != nil { require.ErrorIs(t, err, dbterror.ErrWrongObject) @@ -411,7 +412,7 @@ func TestTruncateTable2(t *testing.T) { tk.MustExec("create table truncate_table (c1 int, c2 int)") tk.MustExec("insert truncate_table values (1, 1), (2, 2)") is := domain.GetDomain(tk.Session()).InfoSchema() - oldTblInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("truncate_table")) + oldTblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("truncate_table")) require.NoError(t, err) oldTblID := oldTblInfo.Meta().ID @@ -421,7 +422,7 @@ func TestTruncateTable2(t *testing.T) { tk.MustQuery("select * from truncate_table").Check(testkit.Rows("3 3", "4 4")) is = domain.GetDomain(tk.Session()).InfoSchema() - newTblInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("truncate_table")) + newTblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("truncate_table")) require.NoError(t, err) require.Greater(t, newTblInfo.Meta().ID, oldTblID) diff --git a/pkg/ddl/ttl.go b/pkg/ddl/ttl.go index 1a616448b5ee6..7bb18c33e66d1 100644 --- a/pkg/ddl/ttl.go 
+++ b/pkg/ddl/ttl.go @@ -20,9 +20,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessiontxn" @@ -98,7 +99,7 @@ func onTTLInfoChange(jobCtx *jobContext, t *meta.Meta, job *model.Job) (ver int6 return ver, nil } -func checkTTLInfoValid(ctx sessionctx.Context, schema model.CIStr, tblInfo *model.TableInfo) error { +func checkTTLInfoValid(ctx sessionctx.Context, schema pmodel.CIStr, tblInfo *model.TableInfo) error { if err := checkTTLIntervalExpr(tblInfo.TTLInfo); err != nil { return err } @@ -129,7 +130,7 @@ func checkTTLInfoColumnType(tblInfo *model.TableInfo) error { // checkTTLTableSuitable returns whether this table is suitable to be a TTL table // A temporary table or a parent table referenced by a foreign key cannot be TTL table -func checkTTLTableSuitable(ctx sessionctx.Context, schema model.CIStr, tblInfo *model.TableInfo) error { +func checkTTLTableSuitable(ctx sessionctx.Context, schema pmodel.CIStr, tblInfo *model.TableInfo) error { if tblInfo.TempTableType != model.TempTableNone { return dbterror.ErrTempTableNotAllowedWithTTL } diff --git a/pkg/ddl/ttl_test.go b/pkg/ddl/ttl_test.go index 7e56f0199009a..4d1593c0020cf 100644 --- a/pkg/ddl/ttl_test.go +++ b/pkg/ddl/ttl_test.go @@ -17,8 +17,9 @@ package ddl import ( "testing" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/assert" ) @@ -45,13 +46,13 @@ func Test_getTTLInfoInOptions(t *testing.T) { []*ast.TableOption{ { Tp: ast.TableOptionTTL, - ColumnName: &ast.ColumnName{Name: 
model.NewCIStr("test_column")}, + ColumnName: &ast.ColumnName{Name: pmodel.NewCIStr("test_column")}, Value: ast.NewValueExpr(5, "", ""), TimeUnitValue: &ast.TimeUnitExpr{Unit: ast.TimeUnitYear}, }, }, &model.TTLInfo{ - ColumnName: model.NewCIStr("test_column"), + ColumnName: pmodel.NewCIStr("test_column"), IntervalExprStr: "5", IntervalTimeUnit: int(ast.TimeUnitYear), Enable: true, @@ -69,13 +70,13 @@ func Test_getTTLInfoInOptions(t *testing.T) { }, { Tp: ast.TableOptionTTL, - ColumnName: &ast.ColumnName{Name: model.NewCIStr("test_column")}, + ColumnName: &ast.ColumnName{Name: pmodel.NewCIStr("test_column")}, Value: ast.NewValueExpr(5, "", ""), TimeUnitValue: &ast.TimeUnitExpr{Unit: ast.TimeUnitYear}, }, }, &model.TTLInfo{ - ColumnName: model.NewCIStr("test_column"), + ColumnName: pmodel.NewCIStr("test_column"), IntervalExprStr: "5", IntervalTimeUnit: int(ast.TimeUnitYear), Enable: false, @@ -93,7 +94,7 @@ func Test_getTTLInfoInOptions(t *testing.T) { }, { Tp: ast.TableOptionTTL, - ColumnName: &ast.ColumnName{Name: model.NewCIStr("test_column")}, + ColumnName: &ast.ColumnName{Name: pmodel.NewCIStr("test_column")}, Value: ast.NewValueExpr(5, "", ""), TimeUnitValue: &ast.TimeUnitExpr{Unit: ast.TimeUnitYear}, }, @@ -103,7 +104,7 @@ func Test_getTTLInfoInOptions(t *testing.T) { }, }, &model.TTLInfo{ - ColumnName: model.NewCIStr("test_column"), + ColumnName: pmodel.NewCIStr("test_column"), IntervalExprStr: "5", IntervalTimeUnit: int(ast.TimeUnitYear), Enable: true, @@ -117,7 +118,7 @@ func Test_getTTLInfoInOptions(t *testing.T) { []*ast.TableOption{ { Tp: ast.TableOptionTTL, - ColumnName: &ast.ColumnName{Name: model.NewCIStr("test_column")}, + ColumnName: &ast.ColumnName{Name: pmodel.NewCIStr("test_column")}, Value: ast.NewValueExpr(5, "", ""), TimeUnitValue: &ast.TimeUnitExpr{Unit: ast.TimeUnitYear}, }, @@ -127,7 +128,7 @@ func Test_getTTLInfoInOptions(t *testing.T) { }, }, &model.TTLInfo{ - ColumnName: model.NewCIStr("test_column"), + ColumnName: 
pmodel.NewCIStr("test_column"), IntervalExprStr: "5", IntervalTimeUnit: int(ast.TimeUnitYear), Enable: true, diff --git a/pkg/ddl/util/BUILD.bazel b/pkg/ddl/util/BUILD.bazel index 115d40aa55774..2d49cf8e28ff0 100644 --- a/pkg/ddl/util/BUILD.bazel +++ b/pkg/ddl/util/BUILD.bazel @@ -15,8 +15,8 @@ go_library( "//pkg/ddl/logutil", "//pkg/infoschema/context", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", - "//pkg/parser/model", "//pkg/parser/terror", "//pkg/sessionctx", "//pkg/sessionctx/variable", diff --git a/pkg/ddl/util/dead_table_lock_checker.go b/pkg/ddl/util/dead_table_lock_checker.go index 24ea730d3a014..b319140cfdb23 100644 --- a/pkg/ddl/util/dead_table_lock_checker.go +++ b/pkg/ddl/util/dead_table_lock_checker.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/ddl/logutil" infoschema "github.com/pingcap/tidb/pkg/infoschema/context" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" clientv3 "go.etcd.io/etcd/client/v3" "go.uber.org/zap" ) diff --git a/pkg/ddl/util/schema_change_notifier.go b/pkg/ddl/util/schema_change_notifier.go index 714ec0fccac59..9509c94b870ae 100644 --- a/pkg/ddl/util/schema_change_notifier.go +++ b/pkg/ddl/util/schema_change_notifier.go @@ -14,7 +14,7 @@ package util -import "github.com/pingcap/tidb/pkg/parser/model" +import "github.com/pingcap/tidb/pkg/meta/model" // SchemaChangeEvent stands for a schema change event. DDL will generate one event or multiple events (only for multi-schema change DDL). // The caller should check the Type field of SchemaChange and call the corresponding getter function to retrieve the needed information. 
diff --git a/pkg/ddl/util/util.go b/pkg/ddl/util/util.go index 6f088e0646854..0d9ecc3520826 100644 --- a/pkg/ddl/util/util.go +++ b/pkg/ddl/util/util.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/ddl/logutil" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" diff --git a/pkg/distsql/BUILD.bazel b/pkg/distsql/BUILD.bazel index 8ea4d56d2b0c7..cd8b4ea89ed78 100644 --- a/pkg/distsql/BUILD.bazel +++ b/pkg/distsql/BUILD.bazel @@ -18,8 +18,8 @@ go_library( "//pkg/expression", "//pkg/infoschema/context", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/planner/util", @@ -71,8 +71,8 @@ go_test( "//pkg/distsql/context", "//pkg/errctx", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/charset", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/resourcegroup", "//pkg/sessionctx", diff --git a/pkg/distsql/request_builder.go b/pkg/distsql/request_builder.go index 9f0b8b24a2a18..9e6bfe16af4b0 100644 --- a/pkg/distsql/request_builder.go +++ b/pkg/distsql/request_builder.go @@ -29,7 +29,7 @@ import ( "github.com/pingcap/tidb/pkg/errctx" infoschema "github.com/pingcap/tidb/pkg/infoschema/context" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/distsql/request_builder_test.go b/pkg/distsql/request_builder_test.go index 05b5672fff475..7641f7269d18f 100644 --- a/pkg/distsql/request_builder_test.go +++ b/pkg/distsql/request_builder_test.go @@ -20,7 +20,7 @@ import ( "time" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + 
"github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/resourcegroup" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/disttask/importinto/BUILD.bazel b/pkg/disttask/importinto/BUILD.bazel index 70087e97adcaf..dfa03e048671b 100644 --- a/pkg/disttask/importinto/BUILD.bazel +++ b/pkg/disttask/importinto/BUILD.bazel @@ -44,9 +44,9 @@ go_library( "//pkg/lightning/mydump", "//pkg/lightning/verification", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser/ast", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/resourcemanager/pool/workerpool", "//pkg/resourcemanager/util", @@ -108,6 +108,7 @@ go_test( "//pkg/lightning/config", "//pkg/lightning/mydump", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/model", diff --git a/pkg/disttask/importinto/encode_and_sort_operator.go b/pkg/disttask/importinto/encode_and_sort_operator.go index 38b743392db43..de6f54b8cc2b0 100644 --- a/pkg/disttask/importinto/encode_and_sort_operator.go +++ b/pkg/disttask/importinto/encode_and_sort_operator.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/tidb/pkg/disttask/operator" "github.com/pingcap/tidb/pkg/executor/importer" "github.com/pingcap/tidb/pkg/lightning/backend/external" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/resourcemanager/pool/workerpool" "github.com/pingcap/tidb/pkg/resourcemanager/util" tidbutil "github.com/pingcap/tidb/pkg/util" diff --git a/pkg/disttask/importinto/encode_and_sort_operator_test.go b/pkg/disttask/importinto/encode_and_sort_operator_test.go index f5d41d161e0b2..7b63561d7530a 100644 --- a/pkg/disttask/importinto/encode_and_sort_operator_test.go +++ b/pkg/disttask/importinto/encode_and_sort_operator_test.go @@ -31,9 +31,9 @@ import ( "github.com/pingcap/tidb/pkg/disttask/operator" "github.com/pingcap/tidb/pkg/executor/importer" 
"github.com/pingcap/tidb/pkg/lightning/backend" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" utilmock "github.com/pingcap/tidb/pkg/util/mock" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" diff --git a/pkg/disttask/importinto/planner_test.go b/pkg/disttask/importinto/planner_test.go index e329ef9212240..9672e30d70421 100644 --- a/pkg/disttask/importinto/planner_test.go +++ b/pkg/disttask/importinto/planner_test.go @@ -27,7 +27,8 @@ import ( "github.com/pingcap/tidb/pkg/executor/importer" "github.com/pingcap/tidb/pkg/lightning/backend/external" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) @@ -53,7 +54,7 @@ func TestToPhysicalPlan(t *testing.T) { Plan: importer.Plan{ DBName: "db", TableInfo: &model.TableInfo{ - Name: model.NewCIStr("tb"), + Name: pmodel.NewCIStr("tb"), }, }, Stmt: `IMPORT INTO db.tb FROM 'gs://test-load/*.csv?endpoint=xxx'`, diff --git a/pkg/disttask/importinto/scheduler_testkit_test.go b/pkg/disttask/importinto/scheduler_testkit_test.go index dc7de3662f7b3..dff9de8d6957f 100644 --- a/pkg/disttask/importinto/scheduler_testkit_test.go +++ b/pkg/disttask/importinto/scheduler_testkit_test.go @@ -29,7 +29,8 @@ import ( "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/executor/importer" "github.com/pingcap/tidb/pkg/lightning/backend/external" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" "github.com/stretchr/testify/require" @@ -62,7 +63,7 @@ func TestSchedulerExtLocalSort(t *testing.T) { Plan: importer.Plan{ DBName: "test", TableInfo: 
&model.TableInfo{ - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), }, DisableTiKVImportMode: true, }, @@ -199,7 +200,7 @@ func TestSchedulerExtGlobalSort(t *testing.T) { Format: "csv", DBName: "test", TableInfo: &model.TableInfo{ - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), State: model.StatePublic, }, DisableTiKVImportMode: true, diff --git a/pkg/domain/BUILD.bazel b/pkg/domain/BUILD.bazel index a2a9ecc8950ac..e624d00fa158d 100644 --- a/pkg/domain/BUILD.bazel +++ b/pkg/domain/BUILD.bazel @@ -45,6 +45,7 @@ go_library( "//pkg/kv", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/metrics", "//pkg/owner", "//pkg/parser", @@ -143,6 +144,7 @@ go_test( "//pkg/infoschema", "//pkg/keyspace", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser/ast", "//pkg/parser/auth", diff --git a/pkg/domain/domain.go b/pkg/domain/domain.go index 73752d559960b..4007f163942d6 100644 --- a/pkg/domain/domain.go +++ b/pkg/domain/domain.go @@ -54,11 +54,11 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/owner" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" metrics2 "github.com/pingcap/tidb/pkg/planner/core/metrics" diff --git a/pkg/domain/historical_stats.go b/pkg/domain/historical_stats.go index e865cfc0f0048..22569fbc7d1d8 100644 --- a/pkg/domain/historical_stats.go +++ b/pkg/domain/historical_stats.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/failpoint" domain_metrics "github.com/pingcap/tidb/pkg/domain/metrics" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" 
"github.com/pingcap/tidb/pkg/statistics/handle" "github.com/pingcap/tidb/pkg/util/logutil" diff --git a/pkg/domain/infosync/BUILD.bazel b/pkg/domain/infosync/BUILD.bazel index 8cd43a9dd6567..6d70e0ff9d5a6 100644 --- a/pkg/domain/infosync/BUILD.bazel +++ b/pkg/domain/infosync/BUILD.bazel @@ -22,8 +22,8 @@ go_library( "//pkg/ddl/util", "//pkg/errno", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/resourcegroup", @@ -69,6 +69,7 @@ go_test( "//pkg/ddl/placement", "//pkg/ddl/util", "//pkg/keyspace", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/testkit/testsetup", "//pkg/util", diff --git a/pkg/domain/infosync/info.go b/pkg/domain/infosync/info.go index 06dd1703ad12a..217a168f27e28 100644 --- a/pkg/domain/infosync/info.go +++ b/pkg/domain/infosync/info.go @@ -39,8 +39,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/resourcegroup" diff --git a/pkg/domain/infosync/info_test.go b/pkg/domain/infosync/info_test.go index b0f4bca164e90..f8bf26dbb7c50 100644 --- a/pkg/domain/infosync/info_test.go +++ b/pkg/domain/infosync/info_test.go @@ -29,7 +29,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl/placement" "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/keyspace" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/testkit/testsetup" util2 "github.com/pingcap/tidb/pkg/util" "github.com/stretchr/testify/require" @@ -258,12 +259,12 @@ func TestTiFlashManager(t *testing.T) { ConfigureTiFlashPDForPartitions(true, &[]model.PartitionDefinition{ { ID: 2, - 
Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), LessThan: []string{}, }, { ID: 3, - Name: model.NewCIStr("p2"), + Name: pmodel.NewCIStr("p2"), LessThan: []string{}, }, }, 3, &[]string{}, 100) diff --git a/pkg/domain/ru_stats_test.go b/pkg/domain/ru_stats_test.go index 59273ba28b248..ce4b184aebc4d 100644 --- a/pkg/domain/ru_stats_test.go +++ b/pkg/domain/ru_stats_test.go @@ -22,7 +22,8 @@ import ( rmpb "github.com/pingcap/kvproto/pkg/resource_manager" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" pd "github.com/tikv/pd/client" @@ -66,11 +67,11 @@ func testWriteRUStatisticsTz(t *testing.T, tz *time.Location) { infoGroups := make(map[string]*model.ResourceGroupInfo, 2) infoGroups["default"] = &model.ResourceGroupInfo{ ID: 1, - Name: model.NewCIStr("default"), + Name: pmodel.NewCIStr("default"), } infoGroups["test"] = &model.ResourceGroupInfo{ ID: 2, - Name: model.NewCIStr("test"), + Name: pmodel.NewCIStr("test"), } testInfo := &testInfoschema{ groups: infoGroups, @@ -132,7 +133,7 @@ type testInfoschema struct { groups map[string]*model.ResourceGroupInfo } -func (is *testInfoschema) ResourceGroupByName(name model.CIStr) (*model.ResourceGroupInfo, bool) { +func (is *testInfoschema) ResourceGroupByName(name pmodel.CIStr) (*model.ResourceGroupInfo, bool) { g, ok := is.groups[name.L] return g, ok } diff --git a/pkg/domain/schema_validator.go b/pkg/domain/schema_validator.go index d33b548af6f0f..6cf2480bc0ab2 100644 --- a/pkg/domain/schema_validator.go +++ b/pkg/domain/schema_validator.go @@ -19,8 +19,8 @@ import ( "sync" "time" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" 
"github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/logutil" diff --git a/pkg/domain/test_helper.go b/pkg/domain/test_helper.go index e421746e1472e..7721ed55ddf35 100644 --- a/pkg/domain/test_helper.go +++ b/pkg/domain/test_helper.go @@ -19,7 +19,8 @@ import ( "testing" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) @@ -33,7 +34,7 @@ func (do *Domain) MockInfoCacheAndLoadInfoSchema(is infoschema.InfoSchema) { // MustGetTableInfo returns the table info. Only used in unit tests. func (do *Domain) MustGetTableInfo(t *testing.T, dbName, tableName string) *model.TableInfo { - tbl, err := do.InfoSchema().TableByName(context.Background(), model.NewCIStr(dbName), model.NewCIStr(tableName)) + tbl, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr(dbName), pmodel.NewCIStr(tableName)) require.Nil(t, err) return tbl.Meta() } diff --git a/pkg/executor/BUILD.bazel b/pkg/executor/BUILD.bazel index 537657b36ec49..abac324a9dbdf 100644 --- a/pkg/executor/BUILD.bazel +++ b/pkg/executor/BUILD.bazel @@ -137,6 +137,7 @@ go_library( "//pkg/lightning/mydump", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser", "//pkg/parser/ast", @@ -403,6 +404,7 @@ go_test( "//pkg/kv", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser", "//pkg/parser/ast", diff --git a/pkg/executor/adapter.go b/pkg/executor/adapter.go index d2122ce609b7e..ba4aa65634da4 100644 --- a/pkg/executor/adapter.go +++ b/pkg/executor/adapter.go @@ -40,10 +40,11 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/keyspace" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" 
- "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/planner" @@ -93,7 +94,7 @@ type processinfoSetter interface { // recordSet wraps an executor, implements sqlexec.RecordSet interface type recordSet struct { - fields []*ast.ResultField + fields []*resolve.ResultField executor exec.Executor // The `Fields` method may be called after `Close`, and the executor is cleared in the `Close` function. // Therefore, we need to store the schema in `recordSet` to avoid a null pointer exception when calling `executor.Schema()`. @@ -104,16 +105,16 @@ type recordSet struct { once sync.Once } -func (a *recordSet) Fields() []*ast.ResultField { +func (a *recordSet) Fields() []*resolve.ResultField { if len(a.fields) == 0 { a.fields = colNames2ResultFields(a.schema, a.stmt.OutputNames, a.stmt.Ctx.GetSessionVars().CurrentDB) } return a.fields } -func colNames2ResultFields(schema *expression.Schema, names []*types.FieldName, defaultDB string) []*ast.ResultField { - rfs := make([]*ast.ResultField, 0, schema.Len()) - defaultDBCIStr := model.NewCIStr(defaultDB) +func colNames2ResultFields(schema *expression.Schema, names []*types.FieldName, defaultDB string) []*resolve.ResultField { + rfs := make([]*resolve.ResultField, 0, schema.Len()) + defaultDBCIStr := pmodel.NewCIStr(defaultDB) for i := 0; i < schema.Len(); i++ { dbName := names[i].DBName if dbName.L == "" && names[i].TblName.L != "" { @@ -125,7 +126,7 @@ func colNames2ResultFields(schema *expression.Schema, names []*types.FieldName, origColName = names[i].ColName emptyOrgName = true } - rf := &ast.ResultField{ + rf := &resolve.ResultField{ Column: &model.ColumnInfo{Name: origColName, FieldType: *schema.Columns[i].RetType}, ColumnAsName: names[i].ColName, EmptyOrgName: emptyOrgName, @@ -870,12 +871,12 @@ func isNoResultPlan(p base.Plan) bool { type chunkRowRecordSet struct { 
rows []chunk.Row idx int - fields []*ast.ResultField + fields []*resolve.ResultField e exec.Executor execStmt *ExecStmt } -func (c *chunkRowRecordSet) Fields() []*ast.ResultField { +func (c *chunkRowRecordSet) Fields() []*resolve.ResultField { if c.fields == nil { c.fields = colNames2ResultFields(c.e.Schema(), c.execStmt.OutputNames, c.execStmt.Ctx.GetSessionVars().CurrentDB) } diff --git a/pkg/executor/admin.go b/pkg/executor/admin.go index 0fc1baa912261..926c17c26b790 100644 --- a/pkg/executor/admin.go +++ b/pkg/executor/admin.go @@ -25,8 +25,9 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/table" @@ -405,7 +406,7 @@ func (e *RecoverIndexExec) buildIndexedValues(row chunk.Row, idxVals []types.Dat } if e.cols == nil { - columns, _, err := expression.ColumnInfos2ColumnsAndNames(e.Ctx().GetExprCtx(), model.NewCIStr("mock"), e.table.Meta().Name, e.table.Meta().Columns, e.table.Meta()) + columns, _, err := expression.ColumnInfos2ColumnsAndNames(e.Ctx().GetExprCtx(), pmodel.NewCIStr("mock"), e.table.Meta().Name, e.table.Meta().Columns, e.table.Meta()) if err != nil { return nil, err } diff --git a/pkg/executor/analyze_col.go b/pkg/executor/analyze_col.go index 78a5190b8d21b..788a8bfb56a16 100644 --- a/pkg/executor/analyze_col.go +++ b/pkg/executor/analyze_col.go @@ -26,8 +26,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" 
"github.com/pingcap/tidb/pkg/planner/core" plannerutil "github.com/pingcap/tidb/pkg/planner/util" diff --git a/pkg/executor/analyze_col_v2.go b/pkg/executor/analyze_col_v2.go index 6eefe269fc865..dec8c15ce44a5 100644 --- a/pkg/executor/analyze_col_v2.go +++ b/pkg/executor/analyze_col_v2.go @@ -24,9 +24,9 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics" diff --git a/pkg/executor/analyze_idx.go b/pkg/executor/analyze_idx.go index 1c26be7782d4f..7dcb05cc33f16 100644 --- a/pkg/executor/analyze_idx.go +++ b/pkg/executor/analyze_idx.go @@ -24,8 +24,8 @@ import ( "github.com/pingcap/tidb/pkg/distsql" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics" diff --git a/pkg/executor/batch_checker.go b/pkg/executor/batch_checker.go index a3aed84315672..36d4b41eeb121 100644 --- a/pkg/executor/batch_checker.go +++ b/pkg/executor/batch_checker.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" diff --git a/pkg/executor/batch_point_get.go b/pkg/executor/batch_point_get.go index 88a2a442f158d..1a4a8d344a4fc 100644 --- a/pkg/executor/batch_point_get.go +++ 
b/pkg/executor/batch_point_get.go @@ -23,7 +23,8 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/sessionctx" @@ -55,7 +56,7 @@ type BatchPointGetExec struct { planPhysIDs []int64 // If != 0 then it is a single partition under Static Prune mode. singlePartID int64 - partitionNames []model.CIStr + partitionNames []pmodel.CIStr idxVals [][]types.Datum txn kv.Transaction lock bool @@ -107,7 +108,7 @@ func (e *BatchPointGetExec) Open(context.Context) error { lock := e.tblInfo.Lock if e.lock { batchGetter = driver.NewBufferBatchGetter(txn.GetMemBuffer(), &PessimisticLockCacheGetter{txnCtx: txnCtx}, e.snapshot) - } else if lock != nil && (lock.Tp == model.TableLockRead || lock.Tp == model.TableLockReadOnly) && e.Ctx().GetSessionVars().EnablePointGetCache { + } else if lock != nil && (lock.Tp == pmodel.TableLockRead || lock.Tp == pmodel.TableLockReadOnly) && e.Ctx().GetSessionVars().EnablePointGetCache { batchGetter = newCacheBatchGetter(e.Ctx(), e.tblInfo.ID, e.snapshot) } else { batchGetter = driver.NewBufferBatchGetter(txn.GetMemBuffer(), nil, e.snapshot) diff --git a/pkg/executor/benchmark_test.go b/pkg/executor/benchmark_test.go index dfe65f0954bb3..245369d3f05e2 100644 --- a/pkg/executor/benchmark_test.go +++ b/pkg/executor/benchmark_test.go @@ -35,8 +35,9 @@ import ( "github.com/pingcap/tidb/pkg/executor/sortexec" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" 
"github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -1938,7 +1939,7 @@ func BenchmarkPipelinedRowNumberWindowFunctionExecution(b *testing.B) { func BenchmarkCompleteInsertErr(b *testing.B) { b.ReportAllocs() col := &model.ColumnInfo{ - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), FieldType: *types.NewFieldType(mysql.TypeBlob), } err := types.ErrWarnDataOutOfRange @@ -1950,7 +1951,7 @@ func BenchmarkCompleteInsertErr(b *testing.B) { func BenchmarkCompleteLoadErr(b *testing.B) { b.ReportAllocs() col := &model.ColumnInfo{ - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), } err := types.ErrDataTooLong for n := 0; n < b.N; n++ { diff --git a/pkg/executor/brie.go b/pkg/executor/brie.go index dd019e30b67b3..c0ec6528f50b9 100644 --- a/pkg/executor/brie.go +++ b/pkg/executor/brie.go @@ -38,9 +38,10 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" @@ -785,7 +786,7 @@ func (gs *tidbGlueSession) CreateDatabase(_ context.Context, schema *model.DBInf } // CreateTable implements glue.Session -func (gs *tidbGlueSession) CreateTable(_ context.Context, dbName model.CIStr, table *model.TableInfo, cs ...ddl.CreateTableOption) error { +func (gs *tidbGlueSession) CreateTable(_ context.Context, dbName pmodel.CIStr, table *model.TableInfo, cs ...ddl.CreateTableOption) error { return BRIECreateTable(gs.se, dbName, table, "", cs...) 
} diff --git a/pkg/executor/brie_test.go b/pkg/executor/brie_test.go index 6a94568772bbd..7bfc014a2d215 100644 --- a/pkg/executor/brie_test.go +++ b/pkg/executor/brie_test.go @@ -28,10 +28,10 @@ import ( "github.com/pingcap/kvproto/pkg/encryptionpb" "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/resolve" diff --git a/pkg/executor/brie_utils.go b/pkg/executor/brie_utils.go index 7a9a981c640c0..78517c7ab90e9 100644 --- a/pkg/executor/brie_utils.go +++ b/pkg/executor/brie_utils.go @@ -24,7 +24,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "go.uber.org/zap" @@ -87,7 +88,7 @@ func showRestoredCreateTable(sctx sessionctx.Context, tbl *model.TableInfo, brCo // BRIECreateTable creates the table with OnExistIgnore option func BRIECreateTable( sctx sessionctx.Context, - dbName model.CIStr, + dbName pmodel.CIStr, table *model.TableInfo, brComment string, cs ...ddl.CreateTableOption, @@ -128,7 +129,7 @@ func BRIECreateTables( sctx.GetSessionVars().ForeignKeyChecks = originForeignKeyChecks }() for db, tablesInDB := range tables { - dbName := model.NewCIStr(db) + dbName := pmodel.NewCIStr(db) queryBuilder := strings.Builder{} cloneTables := make([]*model.TableInfo, 0, len(tablesInDB)) for _, table := range tablesInDB { @@ -158,7 +159,7 @@ func BRIECreateTables( // splitBatchCreateTable provide a way to 
split batch into small batch when batch size is large than 6 MB. // The raft entry has limit size of 6 MB, a batch of CreateTables may hit this limitation // TODO: shall query string be set for each split batch create, it looks does not matter if we set once for all. -func splitBatchCreateTable(sctx sessionctx.Context, schema model.CIStr, +func splitBatchCreateTable(sctx sessionctx.Context, schema pmodel.CIStr, infos []*model.TableInfo, cs ...ddl.CreateTableOption) error { var err error d := domain.GetDomain(sctx).DDLExecutor() diff --git a/pkg/executor/brie_utils_test.go b/pkg/executor/brie_utils_test.go index 761eaf0c1fabc..62ef6412d8e4d 100644 --- a/pkg/executor/brie_utils_test.go +++ b/pkg/executor/brie_utils_test.go @@ -25,9 +25,10 @@ import ( "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" @@ -48,18 +49,18 @@ func TestSplitBatchCreateTableWithTableId(t *testing.T) { infos1 := []*model.TableInfo{} infos1 = append(infos1, &model.TableInfo{ ID: 124, - Name: model.NewCIStr("table_id_resued1"), + Name: pmodel.NewCIStr("table_id_resued1"), }) infos1 = append(infos1, &model.TableInfo{ ID: 125, - Name: model.NewCIStr("table_id_resued2"), + Name: pmodel.NewCIStr("table_id_resued2"), }) sctx := tk.Session() // keep/reused table id verification sctx.SetValue(sessionctx.QueryString, "skip") - err := executor.SplitBatchCreateTableForTest(sctx, model.NewCIStr("test"), infos1, ddl.WithIDAllocated(true)) + err := executor.SplitBatchCreateTableForTest(sctx, pmodel.NewCIStr("test"), infos1, ddl.WithIDAllocated(true)) require.NoError(t, err) require.Equal(t, "skip", 
sctx.Value(sessionctx.QueryString)) @@ -84,11 +85,11 @@ func TestSplitBatchCreateTableWithTableId(t *testing.T) { infos2 := []*model.TableInfo{} infos2 = append(infos2, &model.TableInfo{ ID: 124, - Name: model.NewCIStr("table_id_new"), + Name: pmodel.NewCIStr("table_id_new"), }) tk.Session().SetValue(sessionctx.QueryString, "skip") - err = executor.SplitBatchCreateTableForTest(sctx, model.NewCIStr("test"), infos2) + err = executor.SplitBatchCreateTableForTest(sctx, pmodel.NewCIStr("test"), infos2) require.NoError(t, err) require.Equal(t, "skip", sctx.Value(sessionctx.QueryString)) @@ -104,7 +105,7 @@ func TestSplitBatchCreateTableWithTableId(t *testing.T) { infos3 := []*model.TableInfo{} originQueryString := sctx.Value(sessionctx.QueryString) - err = executor.SplitBatchCreateTableForTest(sctx, model.NewCIStr("test"), infos3, ddl.WithIDAllocated(true)) + err = executor.SplitBatchCreateTableForTest(sctx, pmodel.NewCIStr("test"), infos3, ddl.WithIDAllocated(true)) require.NoError(t, err) require.Equal(t, originQueryString, sctx.Value(sessionctx.QueryString)) } @@ -124,15 +125,15 @@ func TestSplitBatchCreateTable(t *testing.T) { infos := []*model.TableInfo{} infos = append(infos, &model.TableInfo{ ID: 1234, - Name: model.NewCIStr("tables_1"), + Name: pmodel.NewCIStr("tables_1"), }) infos = append(infos, &model.TableInfo{ ID: 1235, - Name: model.NewCIStr("tables_2"), + Name: pmodel.NewCIStr("tables_2"), }) infos = append(infos, &model.TableInfo{ ID: 1236, - Name: model.NewCIStr("tables_3"), + Name: pmodel.NewCIStr("tables_3"), }) sctx := tk.Session() @@ -140,7 +141,7 @@ func TestSplitBatchCreateTable(t *testing.T) { // keep/reused table id verification tk.Session().SetValue(sessionctx.QueryString, "skip") require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/RestoreBatchCreateTableEntryTooLarge", "return(1)")) - err := executor.SplitBatchCreateTableForTest(sctx, model.NewCIStr("test"), infos, ddl.WithIDAllocated(true)) + err := 
executor.SplitBatchCreateTableForTest(sctx, pmodel.NewCIStr("test"), infos, ddl.WithIDAllocated(true)) require.NoError(t, err) require.Equal(t, "skip", sctx.Value(sessionctx.QueryString)) @@ -193,20 +194,20 @@ func TestSplitBatchCreateTableFailWithEntryTooLarge(t *testing.T) { infos := []*model.TableInfo{} infos = append(infos, &model.TableInfo{ - Name: model.NewCIStr("tables_1"), + Name: pmodel.NewCIStr("tables_1"), }) infos = append(infos, &model.TableInfo{ - Name: model.NewCIStr("tables_2"), + Name: pmodel.NewCIStr("tables_2"), }) infos = append(infos, &model.TableInfo{ - Name: model.NewCIStr("tables_3"), + Name: pmodel.NewCIStr("tables_3"), }) sctx := tk.Session() tk.Session().SetValue(sessionctx.QueryString, "skip") require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/RestoreBatchCreateTableEntryTooLarge", "return(0)")) - err := executor.SplitBatchCreateTableForTest(sctx, model.NewCIStr("test"), infos) + err := executor.SplitBatchCreateTableForTest(sctx, pmodel.NewCIStr("test"), infos) require.Equal(t, "skip", sctx.Value(sessionctx.QueryString)) require.True(t, kv.ErrEntryTooLarge.Equal(err)) @@ -226,7 +227,7 @@ func TestBRIECreateDatabase(t *testing.T) { originQueryString := sctx.Value(sessionctx.QueryString) schema1 := &model.DBInfo{ ID: 1230, - Name: model.NewCIStr("db_1"), + Name: pmodel.NewCIStr("db_1"), Charset: "utf8mb4", Collate: "utf8mb4_bin", State: model.StatePublic, @@ -236,7 +237,7 @@ func TestBRIECreateDatabase(t *testing.T) { schema2 := &model.DBInfo{ ID: 1240, - Name: model.NewCIStr("db_2"), + Name: pmodel.NewCIStr("db_2"), Charset: "utf8mb4", Collate: "utf8mb4_bin", State: model.StatePublic, @@ -269,14 +270,14 @@ func TestBRIECreateTable(t *testing.T) { sctx := tk.Session() originQueryString := sctx.Value(sessionctx.QueryString) - dbName := model.NewCIStr("test") + dbName := pmodel.NewCIStr("test") tableInfo := mockTableInfo(t, sctx, "create table test.table_1 (a int primary key, b json, c varchar(20))") tableInfo.ID = 1230 err 
:= executor.BRIECreateTable(sctx, dbName, tableInfo, "/* from test */") require.NoError(t, err) tableInfo.ID = 1240 - tableInfo.Name = model.NewCIStr("table_2") + tableInfo.Name = pmodel.NewCIStr("table_2") err = executor.BRIECreateTable(sctx, dbName, tableInfo, "") require.NoError(t, err) require.Equal(t, originQueryString, sctx.Value(sessionctx.QueryString)) diff --git a/pkg/executor/builder.go b/pkg/executor/builder.go index 054f521185964..50fc453a513f1 100644 --- a/pkg/executor/builder.go +++ b/pkg/executor/builder.go @@ -56,8 +56,9 @@ import ( "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" @@ -869,12 +870,12 @@ func (b *executorBuilder) buildShow(v *plannercore.PhysicalShow) exec.Executor { BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), Tp: v.Tp, CountWarningsOrErrors: v.CountWarningsOrErrors, - DBName: model.NewCIStr(v.DBName), + DBName: pmodel.NewCIStr(v.DBName), Table: v.Table, Partition: v.Partition, Column: v.Column, IndexName: v.IndexName, - ResourceGroupName: model.NewCIStr(v.ResourceGroupName), + ResourceGroupName: pmodel.NewCIStr(v.ResourceGroupName), Flag: v.Flag, Roles: v.Roles, User: v.User, @@ -3066,7 +3067,7 @@ func (b *executorBuilder) buildAnalyze(v *plannercore.Analyze) exec.Executor { for _, task := range v.ColTasks { columns, _, err := expression.ColumnInfos2ColumnsAndNames( exprCtx, - model.NewCIStr(task.AnalyzeInfo.DBName), + pmodel.NewCIStr(task.AnalyzeInfo.DBName), task.TblInfo.Name, task.ColsInfo, task.TblInfo, @@ -5337,7 +5338,7 @@ func (builder *dataReaderBuilder) partitionPruning(tbl table.PartitionedTable, p func 
partitionPruning(ctx sessionctx.Context, tbl table.PartitionedTable, planPartInfo *plannercore.PhysPlanPartInfo) ([]table.PhysicalTable, error) { var pruningConds []expression.Expression - var partitionNames []model.CIStr + var partitionNames []pmodel.CIStr var columns []*expression.Column var columnNames types.NameSlice if planPartInfo != nil { diff --git a/pkg/executor/checksum.go b/pkg/executor/checksum.go index c4d349359a74d..f53c0b4fcbe4a 100644 --- a/pkg/executor/checksum.go +++ b/pkg/executor/checksum.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/tidb/pkg/distsql" "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/util/chunk" diff --git a/pkg/executor/compact_table.go b/pkg/executor/compact_table.go index ea9f7f43bf36b..1e4248f167493 100644 --- a/pkg/executor/compact_table.go +++ b/pkg/executor/compact_table.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/store/driver/backoff" "github.com/pingcap/tidb/pkg/util/chunk" diff --git a/pkg/executor/coprocessor.go b/pkg/executor/coprocessor.go index 218d16627bd0f..8abcda99eaf7f 100644 --- a/pkg/executor/coprocessor.go +++ b/pkg/executor/coprocessor.go @@ -24,8 +24,8 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/privilege" 
"github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/executor/ddl.go b/pkg/executor/ddl.go index e29a8660431d5..fff2d2a751382 100644 --- a/pkg/executor/ddl.go +++ b/pkg/executor/ddl.go @@ -26,8 +26,9 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/planner/core" @@ -75,7 +76,7 @@ func (e *DDLExec) toErr(err error) error { return err } -func (e *DDLExec) getLocalTemporaryTable(schema model.CIStr, table model.CIStr) (table.Table, bool) { +func (e *DDLExec) getLocalTemporaryTable(schema pmodel.CIStr, table pmodel.CIStr) (table.Table, bool) { tbl, err := e.Ctx().GetInfoSchema().(infoschema.InfoSchema).TableByName(context.Background(), schema, table) if infoschema.ErrTableNotExists.Equal(err) { return nil, false @@ -563,7 +564,7 @@ func (e *DDLExec) executeFlashbackTable(s *ast.FlashBackTableStmt) error { return err } if len(s.NewName) != 0 { - tblInfo.Name = model.NewCIStr(s.NewName) + tblInfo.Name = pmodel.NewCIStr(s.NewName) } // Check the table ID was not exists. is := domain.GetDomain(e.Ctx()).InfoSchema() @@ -598,7 +599,7 @@ func (e *DDLExec) executeFlashbackTable(s *ast.FlashBackTableStmt) error { func (e *DDLExec) executeFlashbackDatabase(s *ast.FlashBackDatabaseStmt) error { dbName := s.DBName if len(s.NewName) > 0 { - dbName = model.NewCIStr(s.NewName) + dbName = pmodel.NewCIStr(s.NewName) } // Check the Schema Name was not exists. 
is := domain.GetDomain(e.Ctx()).InfoSchema() @@ -619,7 +620,7 @@ func (e *DDLExec) executeFlashbackDatabase(s *ast.FlashBackDatabaseStmt) error { return err } -func (e *DDLExec) getRecoverDBByName(schemaName model.CIStr) (recoverSchemaInfo *ddl.RecoverSchemaInfo, err error) { +func (e *DDLExec) getRecoverDBByName(schemaName pmodel.CIStr) (recoverSchemaInfo *ddl.RecoverSchemaInfo, err error) { txn, err := e.Ctx().Txn(true) if err != nil { return nil, err diff --git a/pkg/executor/delete.go b/pkg/executor/delete.go index ede6be8faa853..23c1b77244279 100644 --- a/pkg/executor/delete.go +++ b/pkg/executor/delete.go @@ -20,7 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/executor/distsql.go b/pkg/executor/distsql.go index ec6dc3abd10b4..56f26628db7b6 100644 --- a/pkg/executor/distsql.go +++ b/pkg/executor/distsql.go @@ -35,8 +35,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" isctx "github.com/pingcap/tidb/pkg/infoschema/context" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" planctx "github.com/pingcap/tidb/pkg/planner/context" diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go index 33f625179e03b..686e898da5ca6 100644 --- a/pkg/executor/executor.go +++ b/pkg/executor/executor.go @@ -47,9 +47,9 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" 
"github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" planctx "github.com/pingcap/tidb/pkg/planner/context" diff --git a/pkg/executor/foreign_key.go b/pkg/executor/foreign_key.go index 5cfba96ff27b5..443b5598794fc 100644 --- a/pkg/executor/foreign_key.go +++ b/pkg/executor/foreign_key.go @@ -24,8 +24,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -135,7 +136,7 @@ func buildFKCheckExecs(sctx sessionctx.Context, tbl table.Table, fkChecks []*pla } func buildFKCheckExec(sctx sessionctx.Context, tbl table.Table, fkCheck *plannercore.FKCheck) (*FKCheckExec, error) { - var cols []model.CIStr + var cols []pmodel.CIStr if fkCheck.FK != nil { cols = fkCheck.FK.Cols } else if fkCheck.ReferredFK != nil { @@ -518,7 +519,7 @@ func (*fkValueHelper) hasNullValue(vals []types.Datum) bool { return false } -func getFKColumnsOffsets(tbInfo *model.TableInfo, cols []model.CIStr) ([]int, error) { +func getFKColumnsOffsets(tbInfo *model.TableInfo, cols []pmodel.CIStr) ([]int, error) { colsOffsets := make([]int, len(cols)) for i, col := range cols { offset := -1 @@ -683,7 +684,7 @@ func (fkc *FKCascadeExec) onUpdateRow(sc *stmtctx.StatementContext, oldRow, newR if err != nil || len(oldVals) == 0 { return err } - if model.ReferOptionType(fkc.fk.OnUpdate) == model.ReferOptionSetNull { + if pmodel.ReferOptionType(fkc.fk.OnUpdate) == pmodel.ReferOptionSetNull { fkc.fkValues = append(fkc.fkValues, oldVals) return nil } @@ -725,7 +726,7 @@ func (fkc *FKCascadeExec) 
buildFKCascadePlan(ctx context.Context) (base.Plan, er if len(fkc.fkValues) == 0 && len(fkc.fkUpdatedValuesMap) == 0 { return nil, nil } - var indexName model.CIStr + var indexName pmodel.CIStr if fkc.fkIdx != nil { indexName = fkc.fkIdx.Name } @@ -733,15 +734,15 @@ func (fkc *FKCascadeExec) buildFKCascadePlan(ctx context.Context) (base.Plan, er switch fkc.tp { case plannercore.FKCascadeOnDelete: fkValues := fkc.fetchOnDeleteOrUpdateFKValues() - switch model.ReferOptionType(fkc.fk.OnDelete) { - case model.ReferOptionCascade: + switch pmodel.ReferOptionType(fkc.fk.OnDelete) { + case pmodel.ReferOptionCascade: stmtNode = GenCascadeDeleteAST(fkc.referredFK.ChildSchema, fkc.childTable.Name, indexName, fkc.fkCols, fkValues) - case model.ReferOptionSetNull: + case pmodel.ReferOptionSetNull: stmtNode = GenCascadeSetNullAST(fkc.referredFK.ChildSchema, fkc.childTable.Name, indexName, fkc.fkCols, fkValues) } case plannercore.FKCascadeOnUpdate: - switch model.ReferOptionType(fkc.fk.OnUpdate) { - case model.ReferOptionCascade: + switch pmodel.ReferOptionType(fkc.fk.OnUpdate) { + case pmodel.ReferOptionCascade: couple := fkc.fetchUpdatedValuesCouple() if couple != nil && len(couple.NewValues) != 0 { if fkc.stats != nil { @@ -749,7 +750,7 @@ func (fkc *FKCascadeExec) buildFKCascadePlan(ctx context.Context) (base.Plan, er } stmtNode = GenCascadeUpdateAST(fkc.referredFK.ChildSchema, fkc.childTable.Name, indexName, fkc.fkCols, couple) } - case model.ReferOptionSetNull: + case pmodel.ReferOptionSetNull: fkValues := fkc.fetchOnDeleteOrUpdateFKValues() stmtNode = GenCascadeSetNullAST(fkc.referredFK.ChildSchema, fkc.childTable.Name, indexName, fkc.fkCols, fkValues) } @@ -802,7 +803,7 @@ func (fkc *FKCascadeExec) fetchUpdatedValuesCouple() *UpdatedValuesCouple { } // GenCascadeDeleteAST uses to generate cascade delete ast, export for test. 
-func GenCascadeDeleteAST(schema, table, idx model.CIStr, cols []*model.ColumnInfo, fkValues [][]types.Datum) *ast.DeleteStmt { +func GenCascadeDeleteAST(schema, table, idx pmodel.CIStr, cols []*model.ColumnInfo, fkValues [][]types.Datum) *ast.DeleteStmt { deleteStmt := &ast.DeleteStmt{ TableRefs: genTableRefsAST(schema, table, idx), Where: genWhereConditionAst(cols, fkValues), @@ -811,7 +812,7 @@ func GenCascadeDeleteAST(schema, table, idx model.CIStr, cols []*model.ColumnInf } // GenCascadeSetNullAST uses to generate foreign key `SET NULL` ast, export for test. -func GenCascadeSetNullAST(schema, table, idx model.CIStr, cols []*model.ColumnInfo, fkValues [][]types.Datum) *ast.UpdateStmt { +func GenCascadeSetNullAST(schema, table, idx pmodel.CIStr, cols []*model.ColumnInfo, fkValues [][]types.Datum) *ast.UpdateStmt { newValues := make([]types.Datum, len(cols)) for i := range cols { newValues[i] = types.NewDatum(nil) @@ -824,7 +825,7 @@ func GenCascadeSetNullAST(schema, table, idx model.CIStr, cols []*model.ColumnIn } // GenCascadeUpdateAST uses to generate cascade update ast, export for test. 
-func GenCascadeUpdateAST(schema, table, idx model.CIStr, cols []*model.ColumnInfo, couple *UpdatedValuesCouple) *ast.UpdateStmt { +func GenCascadeUpdateAST(schema, table, idx pmodel.CIStr, cols []*model.ColumnInfo, couple *UpdatedValuesCouple) *ast.UpdateStmt { list := make([]*ast.Assignment, 0, len(cols)) for i, col := range cols { v := &driver.ValueExpr{Datum: couple.NewValues[i]} @@ -843,11 +844,11 @@ func GenCascadeUpdateAST(schema, table, idx model.CIStr, cols []*model.ColumnInf return updateStmt } -func genTableRefsAST(schema, table, idx model.CIStr) *ast.TableRefsClause { +func genTableRefsAST(schema, table, idx pmodel.CIStr) *ast.TableRefsClause { tn := &ast.TableName{Schema: schema, Name: table} if idx.L != "" { tn.IndexHints = []*ast.IndexHint{{ - IndexNames: []model.CIStr{idx}, + IndexNames: []pmodel.CIStr{idx}, HintType: ast.HintUse, HintScope: ast.HintForScan, }} diff --git a/pkg/executor/grant.go b/pkg/executor/grant.go index cf32301eb793e..efe517b836685 100644 --- a/pkg/executor/grant.go +++ b/pkg/executor/grant.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/privilege" "github.com/pingcap/tidb/pkg/privilege/privileges" "github.com/pingcap/tidb/pkg/sessionctx" @@ -784,7 +785,7 @@ func getTargetSchemaAndTable(ctx context.Context, sctx sessionctx.Context, dbNam } // getRowsAndFields is used to extract rows from record sets. 
-func getRowsAndFields(sctx sessionctx.Context, rs sqlexec.RecordSet) ([]chunk.Row, []*ast.ResultField, error) { +func getRowsAndFields(sctx sessionctx.Context, rs sqlexec.RecordSet) ([]chunk.Row, []*resolve.ResultField, error) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) if rs == nil { return nil, nil, errors.Errorf("nil recordset") diff --git a/pkg/executor/hot_regions_history_table_test.go b/pkg/executor/hot_regions_history_table_test.go index 301d90ed87a0d..87966916f82a8 100644 --- a/pkg/executor/hot_regions_history_table_test.go +++ b/pkg/executor/hot_regions_history_table_test.go @@ -31,7 +31,7 @@ import ( "github.com/pingcap/fn" "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/store/helper" diff --git a/pkg/executor/importer/BUILD.bazel b/pkg/executor/importer/BUILD.bazel index 95572930a40f3..4fb84570ca2e3 100644 --- a/pkg/executor/importer/BUILD.bazel +++ b/pkg/executor/importer/BUILD.bazel @@ -37,11 +37,11 @@ go_library( "//pkg/lightning/mydump", "//pkg/lightning/verification", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/format", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/planner/context", @@ -124,6 +124,7 @@ go_test( "//pkg/lightning/mydump", "//pkg/lightning/verification", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser", "//pkg/parser/ast", diff --git a/pkg/executor/importer/import.go b/pkg/executor/importer/import.go index 984645015b415..0e26d17ee51ae 100644 --- a/pkg/executor/importer/import.go +++ b/pkg/executor/importer/import.go @@ -39,10 +39,10 @@ import ( "github.com/pingcap/tidb/pkg/lightning/config" litlog "github.com/pingcap/tidb/pkg/lightning/log" 
"github.com/pingcap/tidb/pkg/lightning/mydump" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" pformat "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" planctx "github.com/pingcap/tidb/pkg/planner/context" diff --git a/pkg/executor/importer/importer_testkit_test.go b/pkg/executor/importer/importer_testkit_test.go index 90f31e6ca94c5..4f516b9a33585 100644 --- a/pkg/executor/importer/importer_testkit_test.go +++ b/pkg/executor/importer/importer_testkit_test.go @@ -36,8 +36,9 @@ import ( "github.com/pingcap/tidb/pkg/lightning/mydump" verify "github.com/pingcap/tidb/pkg/lightning/verification" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/resolve" @@ -65,7 +66,7 @@ func TestVerifyChecksum(t *testing.T) { plan := &importer.Plan{ DBName: "db", TableInfo: &model.TableInfo{ - Name: model.NewCIStr("tb"), + Name: pmodel.NewCIStr("tb"), }, Checksum: config.OpLevelRequired, DistSQLScanConcurrency: 50, @@ -90,7 +91,7 @@ func TestVerifyChecksum(t *testing.T) { plan2 := &importer.Plan{ DBName: "db", TableInfo: &model.TableInfo{ - Name: model.NewCIStr("tb2"), + Name: pmodel.NewCIStr("tb2"), }, Checksum: config.OpLevelRequired, } @@ -207,9 +208,9 @@ func TestPostProcess(t *testing.T) { tk.MustExec("insert into db.tb values(1)") do, err := session.GetDomain(store) require.NoError(t, err) - dbInfo, ok := do.InfoSchema().SchemaByName(model.NewCIStr("db")) + dbInfo, ok := do.InfoSchema().SchemaByName(pmodel.NewCIStr("db")) require.True(t, ok) - table, err := 
do.InfoSchema().TableByName(context.Background(), model.NewCIStr("db"), model.NewCIStr("tb")) + table, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("db"), pmodel.NewCIStr("tb")) require.NoError(t, err) plan := &importer.Plan{ DBID: dbInfo.ID, @@ -230,7 +231,7 @@ func TestPostProcess(t *testing.T) { require.NoError(t, importer.PostProcess(ctx, tk.Session(), nil, plan, localChecksum, logger)) // rebase success tk.MustExec("create table db.tb2(id int auto_increment primary key)") - table, err = do.InfoSchema().TableByName(context.Background(), model.NewCIStr("db"), model.NewCIStr("tb2")) + table, err = do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("db"), pmodel.NewCIStr("tb2")) require.NoError(t, err) plan.TableInfo, plan.DesiredTableInfo = table.Meta(), table.Meta() integration.BeforeTestExternal(t) @@ -259,9 +260,9 @@ func getTableImporter(ctx context.Context, t *testing.T, store kv.Storage, table tk := testkit.NewTestKit(t, store) do, err := session.GetDomain(store) require.NoError(t, err) - dbInfo, ok := do.InfoSchema().SchemaByName(model.NewCIStr("test")) + dbInfo, ok := do.InfoSchema().SchemaByName(pmodel.NewCIStr("test")) require.True(t, ok) - table, err := do.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr(tableName)) + table, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr(tableName)) require.NoError(t, err) var selectPlan base.PhysicalPlan if path == "" { diff --git a/pkg/executor/importer/table_import_testkit_test.go b/pkg/executor/importer/table_import_testkit_test.go index 2c4f19c5cf5dc..d40e74437b200 100644 --- a/pkg/executor/importer/table_import_testkit_test.go +++ b/pkg/executor/importer/table_import_testkit_test.go @@ -24,8 +24,9 @@ import ( "github.com/pingcap/tidb/pkg/executor/importer" tidbkv "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/lightning/backend/local" + 
"github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/session" @@ -79,17 +80,17 @@ func TestImportFromSelectCleanup(t *testing.T) { tk.MustExec("create table t(a int)") do, err := session.GetDomain(store) require.NoError(t, err) - dbInfo, ok := do.InfoSchema().SchemaByName(model.NewCIStr("test")) + dbInfo, ok := do.InfoSchema().SchemaByName(pmodel.NewCIStr("test")) require.True(t, ok) - table, err := do.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) plan, err := importer.NewImportPlan(ctx, tk.Session(), plannercore.ImportInto{ Table: &resolve.TableNameW{ TableName: &ast.TableName{ - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), }, DBInfo: &model.DBInfo{ - Name: model.NewCIStr("test"), + Name: pmodel.NewCIStr("test"), ID: dbInfo.ID, }, }, diff --git a/pkg/executor/index_merge_reader.go b/pkg/executor/index_merge_reader.go index 84261164a5925..ce6629f46e47e 100644 --- a/pkg/executor/index_merge_reader.go +++ b/pkg/executor/index_merge_reader.go @@ -35,7 +35,7 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" diff --git a/pkg/executor/infoschema_reader.go b/pkg/executor/infoschema_reader.go index 1894c57b60467..bc0b266a3ac2c 100644 --- a/pkg/executor/infoschema_reader.go +++ b/pkg/executor/infoschema_reader.go 
@@ -42,9 +42,10 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -407,7 +408,7 @@ func (e *memtableRetriever) setDataForStatistics(ctx context.Context, sctx sessi } func (e *memtableRetriever) setDataForStatisticsInTable( - schema model.CIStr, + schema pmodel.CIStr, table *model.TableInfo, ex *plannercore.InfoSchemaStatisticsExtractor, ) { @@ -525,11 +526,11 @@ func (e *memtableRetriever) setDataFromReferConst(ctx context.Context, sctx sess continue } updateRule, deleteRule := "NO ACTION", "NO ACTION" - if model.ReferOptionType(fk.OnUpdate) != 0 { - updateRule = model.ReferOptionType(fk.OnUpdate).String() + if pmodel.ReferOptionType(fk.OnUpdate) != 0 { + updateRule = pmodel.ReferOptionType(fk.OnUpdate).String() } - if model.ReferOptionType(fk.OnDelete) != 0 { - deleteRule = model.ReferOptionType(fk.OnDelete).String() + if pmodel.ReferOptionType(fk.OnDelete) != 0 { + deleteRule = pmodel.ReferOptionType(fk.OnDelete).String() } record := types.MakeDatums( infoschema.CatalogVal, // CONSTRAINT_CATALOG @@ -565,7 +566,7 @@ func (e *memtableRetriever) setDataFromOneTable( sctx sessionctx.Context, loc *time.Location, checker privilege.Manager, - schema model.CIStr, + schema pmodel.CIStr, table *model.TableInfo, rows [][]types.Datum, useStatsCache bool, @@ -899,7 +900,7 @@ type hugeMemTableRetriever struct { retrieved bool initialized bool rows [][]types.Datum - dbs []model.CIStr + dbs []pmodel.CIStr curTables []*model.TableInfo dbsIdx int tblIdx int @@ -968,7 +969,7 @@ func (e *hugeMemTableRetriever) setDataForColumns(ctx 
context.Context, sctx sess func (e *hugeMemTableRetriever) setDataForColumnsWithOneTable( ctx context.Context, sctx sessionctx.Context, - schema model.CIStr, + schema pmodel.CIStr, table *model.TableInfo, checker privilege.Manager) bool { hasPrivs := false @@ -992,7 +993,7 @@ func (e *hugeMemTableRetriever) setDataForColumnsWithOneTable( func (e *hugeMemTableRetriever) dataForColumnsInTable( ctx context.Context, sctx sessionctx.Context, - schema model.CIStr, + schema pmodel.CIStr, tbl *model.TableInfo, priv mysql.PrivilegeType) { if tbl.IsView() { @@ -1238,9 +1239,9 @@ func (e *memtableRetriever) setDataFromPartitions(ctx context.Context, sctx sess } var partitionDesc string - if table.Partition.Type == model.PartitionTypeRange { + if table.Partition.Type == pmodel.PartitionTypeRange { partitionDesc = strings.Join(pi.LessThan, ",") - } else if table.Partition.Type == model.PartitionTypeList { + } else if table.Partition.Type == pmodel.PartitionTypeList { if len(pi.InValues) > 0 { buf := bytes.NewBuffer(nil) for i, vs := range pi.InValues { @@ -1263,11 +1264,11 @@ func (e *memtableRetriever) setDataFromPartitions(ctx context.Context, sctx sess partitionExpr := table.Partition.Expr if len(table.Partition.Columns) > 0 { switch table.Partition.Type { - case model.PartitionTypeRange: + case pmodel.PartitionTypeRange: partitionMethod = "RANGE COLUMNS" - case model.PartitionTypeList: + case pmodel.PartitionTypeList: partitionMethod = "LIST COLUMNS" - case model.PartitionTypeKey: + case pmodel.PartitionTypeKey: partitionMethod = "KEY" default: return errors.Errorf("Inconsistent partition type, have type %v, but with COLUMNS > 0 (%d)", table.Partition.Type, len(table.Partition.Columns)) @@ -1352,7 +1353,7 @@ func (e *memtableRetriever) setDataFromIndexes(ctx context.Context, sctx session func (*memtableRetriever) setDataFromIndex( sctx sessionctx.Context, - schema model.CIStr, + schema pmodel.CIStr, tb *model.TableInfo, rows [][]types.Datum) ([][]types.Datum, error) { 
checker := privilege.GetPrivilegeManager(sctx) @@ -1823,7 +1824,7 @@ func (e *memtableRetriever) setDataForMetricTables() { e.rows = rows } -func keyColumnUsageInTable(schema model.CIStr, table *model.TableInfo, ex *plannercore.InfoSchemaKeyColumnUsageExtractor) [][]types.Datum { +func keyColumnUsageInTable(schema pmodel.CIStr, table *model.TableInfo, ex *plannercore.InfoSchemaKeyColumnUsageExtractor) [][]types.Datum { var rows [][]types.Datum if table.PKIsHandle && ex.HasPrimaryKey() { for _, col := range table.Columns { @@ -2302,7 +2303,7 @@ func (e *tableStorageStatsRetriever) initialize(ctx context.Context, sctx sessio for _, DB := range databases { // The user didn't specified the table, extract all tables of this db to initialTable. if len(tables) == 0 { - tbs, err := is.SchemaTableInfos(ctx, model.NewCIStr(DB)) + tbs, err := is.SchemaTableInfos(ctx, pmodel.NewCIStr(DB)) if err != nil { return errors.Trace(err) } @@ -2315,7 +2316,7 @@ func (e *tableStorageStatsRetriever) initialize(ctx context.Context, sctx sessio } else { // The user specified the table, extract the specified tables of this db to initialTable. for tb := range tables { - if tb, err := is.TableByName(context.Background(), model.NewCIStr(DB), model.NewCIStr(tb)); err == nil { + if tb, err := is.TableByName(context.Background(), pmodel.NewCIStr(DB), pmodel.NewCIStr(tb)); err == nil { // For every db.table, check it's privileges. 
if checker(DB, tb.Meta().Name.L) { e.initialTables = append(e.initialTables, &initialTable{DB, tb.Meta()}) @@ -2488,7 +2489,7 @@ func getRemainDurationForAnalyzeStatusHelper( } var tid int64 is := sessiontxn.GetTxnManager(sctx).GetTxnInfoSchema() - tb, err := is.TableByName(ctx, model.NewCIStr(dbName), model.NewCIStr(tableName)) + tb, err := is.TableByName(ctx, pmodel.NewCIStr(dbName), pmodel.NewCIStr(tableName)) if err != nil { return nil, percentage, totalCnt, err } @@ -3665,7 +3666,7 @@ func (e *memtableRetriever) setDataFromResourceGroups() error { for _, group := range resourceGroups { //mode := "" burstable := burstdisableStr - priority := model.PriorityValueToName(uint64(group.Priority)) + priority := pmodel.PriorityValueToName(uint64(group.Priority)) fillrate := unlimitedFillRate // RU_PER_SEC = unlimited like the default group settings. isDefaultInReservedSetting := group.RUSettings.RU.Settings.FillRate == math.MaxInt32 @@ -3680,13 +3681,13 @@ func (e *memtableRetriever) setDataFromResourceGroups() error { } dur := time.Duration(setting.Rule.ExecElapsedTimeMs) * time.Millisecond fmt.Fprintf(limitBuilder, "EXEC_ELAPSED='%s'", dur.String()) - fmt.Fprintf(limitBuilder, ", ACTION=%s", model.RunawayActionType(setting.Action).String()) + fmt.Fprintf(limitBuilder, ", ACTION=%s", pmodel.RunawayActionType(setting.Action).String()) if setting.Watch != nil { if setting.Watch.LastingDurationMs > 0 { dur := time.Duration(setting.Watch.LastingDurationMs) * time.Millisecond - fmt.Fprintf(limitBuilder, ", WATCH=%s DURATION='%s'", model.RunawayWatchType(setting.Watch.Type).String(), dur.String()) + fmt.Fprintf(limitBuilder, ", WATCH=%s DURATION='%s'", pmodel.RunawayWatchType(setting.Watch.Type).String(), dur.String()) } else { - fmt.Fprintf(limitBuilder, ", WATCH=%s DURATION=UNLIMITED", model.RunawayWatchType(setting.Watch.Type).String()) + fmt.Fprintf(limitBuilder, ", WATCH=%s DURATION=UNLIMITED", pmodel.RunawayWatchType(setting.Watch.Type).String()) } } } @@ -3871,7 
+3872,7 @@ func decodeTableIDFromRule(rule *label.Rule) (tableID int64, err error) { func tableOrPartitionNotExist(ctx context.Context, dbName string, tableName string, partitionName string, is infoschema.InfoSchema, tableID int64) (tableNotExist bool) { if len(partitionName) == 0 { - curTable, _ := is.TableByName(ctx, model.NewCIStr(dbName), model.NewCIStr(tableName)) + curTable, _ := is.TableByName(ctx, pmodel.NewCIStr(dbName), pmodel.NewCIStr(tableName)) if curTable == nil { return true } diff --git a/pkg/executor/infoschema_reader_internal_test.go b/pkg/executor/infoschema_reader_internal_test.go index 8f4cbfa28215f..233101bb2d99f 100644 --- a/pkg/executor/infoschema_reader_internal_test.go +++ b/pkg/executor/infoschema_reader_internal_test.go @@ -19,7 +19,8 @@ import ( "testing" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/types" @@ -30,22 +31,22 @@ func TestSetDataFromCheckConstraints(t *testing.T) { tblInfos := []*model.TableInfo{ { ID: 1, - Name: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("t1"), }, { ID: 2, - Name: model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2"), Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), FieldType: *types.NewFieldType(mysql.TypeLonglong), State: model.StatePublic, }, }, Constraints: []*model.ConstraintInfo{ { - Name: model.NewCIStr("t2_c1"), - Table: model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2_c1"), + Table: pmodel.NewCIStr("t2"), ExprString: "id<10", State: model.StatePublic, }, @@ -53,18 +54,18 @@ func TestSetDataFromCheckConstraints(t *testing.T) { }, { ID: 3, - Name: model.NewCIStr("t3"), + Name: pmodel.NewCIStr("t3"), Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), 
FieldType: *types.NewFieldType(mysql.TypeLonglong), State: model.StatePublic, }, }, Constraints: []*model.ConstraintInfo{ { - Name: model.NewCIStr("t3_c1"), - Table: model.NewCIStr("t3"), + Name: pmodel.NewCIStr("t3_c1"), + Table: pmodel.NewCIStr("t3"), ExprString: "id<10", State: model.StateDeleteOnly, }, @@ -91,22 +92,22 @@ func TestSetDataFromTiDBCheckConstraints(t *testing.T) { tblInfos := []*model.TableInfo{ { ID: 1, - Name: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("t1"), }, { ID: 2, - Name: model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2"), Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), FieldType: *types.NewFieldType(mysql.TypeLonglong), State: model.StatePublic, }, }, Constraints: []*model.ConstraintInfo{ { - Name: model.NewCIStr("t2_c1"), - Table: model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2_c1"), + Table: pmodel.NewCIStr("t2"), ExprString: "id<10", State: model.StatePublic, }, @@ -114,18 +115,18 @@ func TestSetDataFromTiDBCheckConstraints(t *testing.T) { }, { ID: 3, - Name: model.NewCIStr("t3"), + Name: pmodel.NewCIStr("t3"), Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), FieldType: *types.NewFieldType(mysql.TypeLonglong), State: model.StatePublic, }, }, Constraints: []*model.ConstraintInfo{ { - Name: model.NewCIStr("t3_c1"), - Table: model.NewCIStr("t3"), + Name: pmodel.NewCIStr("t3_c1"), + Table: pmodel.NewCIStr("t3"), ExprString: "id<10", State: model.StateDeleteOnly, }, diff --git a/pkg/executor/insert.go b/pkg/executor/insert.go index a2b5174b075af..eb816bb6846af 100644 --- a/pkg/executor/insert.go +++ b/pkg/executor/insert.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" 
"github.com/pingcap/tidb/pkg/sessionctx/variable" diff --git a/pkg/executor/insert_common.go b/pkg/executor/insert_common.go index 27d6918a468a9..810fb014221dc 100644 --- a/pkg/executor/insert_common.go +++ b/pkg/executor/insert_common.go @@ -27,8 +27,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/executor/inspection_summary.go b/pkg/executor/inspection_summary.go index 9b02880df5852..4c7f8e548aeb6 100644 --- a/pkg/executor/inspection_summary.go +++ b/pkg/executor/inspection_summary.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" plannercore "github.com/pingcap/tidb/pkg/planner/core" plannerutil "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/executor/load_data.go b/pkg/executor/load_data.go index 298cef32299d1..e56cefa607993 100644 --- a/pkg/executor/load_data.go +++ b/pkg/executor/load_data.go @@ -30,8 +30,8 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/lightning/mydump" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" diff --git a/pkg/executor/lockstats/BUILD.bazel b/pkg/executor/lockstats/BUILD.bazel index 59888276e9c40..c0413217d7e18 100644 --- a/pkg/executor/lockstats/BUILD.bazel +++ 
b/pkg/executor/lockstats/BUILD.bazel @@ -29,6 +29,7 @@ go_test( flaky = True, deps = [ "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/model", "@com_github_stretchr_testify//require", diff --git a/pkg/executor/lockstats/lock_stats_executor_test.go b/pkg/executor/lockstats/lock_stats_executor_test.go index 9e9d35cd8030e..61ca33cb3dbc7 100644 --- a/pkg/executor/lockstats/lock_stats_executor_test.go +++ b/pkg/executor/lockstats/lock_stats_executor_test.go @@ -18,8 +18,9 @@ import ( "testing" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) @@ -29,11 +30,11 @@ func TestPopulatePartitionIDAndNames(t *testing.T) { }) table := &ast.TableName{ - Schema: model.NewCIStr("test"), - Name: model.NewCIStr("t1"), - PartitionNames: []model.CIStr{ - model.NewCIStr("p1"), - model.NewCIStr("p2"), + Schema: pmodel.NewCIStr("test"), + Name: pmodel.NewCIStr("t1"), + PartitionNames: []pmodel.CIStr{ + pmodel.NewCIStr("p1"), + pmodel.NewCIStr("p2"), }, } @@ -58,16 +59,16 @@ func TestPopulateTableAndPartitionIDs(t *testing.T) { tables := []*ast.TableName{ { - Schema: model.NewCIStr("test"), - Name: model.NewCIStr("t1"), - PartitionNames: []model.CIStr{ - model.NewCIStr("p1"), - model.NewCIStr("p2"), + Schema: pmodel.NewCIStr("test"), + Name: pmodel.NewCIStr("t1"), + PartitionNames: []pmodel.CIStr{ + pmodel.NewCIStr("p1"), + pmodel.NewCIStr("p2"), }, }, { - Schema: model.NewCIStr("test"), - Name: model.NewCIStr("t2"), + Schema: pmodel.NewCIStr("test"), + Name: pmodel.NewCIStr("t2"), }, } @@ -85,7 +86,7 @@ func TestPopulateTableAndPartitionIDs(t *testing.T) { func tInfo(id int, tableName string, partitionNames ...string) *model.TableInfo { tbl := &model.TableInfo{ ID: int64(id), - Name: model.NewCIStr(tableName), + Name: pmodel.NewCIStr(tableName), } if 
len(partitionNames) > 0 { tbl.Partition = &model.PartitionInfo{ @@ -94,7 +95,7 @@ func tInfo(id int, tableName string, partitionNames ...string) *model.TableInfo for i, partitionName := range partitionNames { tbl.Partition.Definitions = append(tbl.Partition.Definitions, model.PartitionDefinition{ ID: int64(id + 1 + i), - Name: model.NewCIStr(partitionName), + Name: pmodel.NewCIStr(partitionName), }) } } diff --git a/pkg/executor/mem_reader.go b/pkg/executor/mem_reader.go index 9b287f74ee694..83abf8b26de68 100644 --- a/pkg/executor/mem_reader.go +++ b/pkg/executor/mem_reader.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" transaction "github.com/pingcap/tidb/pkg/store/driver/txn" diff --git a/pkg/executor/memtable_reader.go b/pkg/executor/memtable_reader.go index 316439a638b99..5601e686a4c3b 100644 --- a/pkg/executor/memtable_reader.go +++ b/pkg/executor/memtable_reader.go @@ -35,7 +35,7 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" diff --git a/pkg/executor/metrics_reader.go b/pkg/executor/metrics_reader.go index 31a0073148584..fb53164c488a8 100644 --- a/pkg/executor/metrics_reader.go +++ b/pkg/executor/metrics_reader.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" 
"github.com/pingcap/tidb/pkg/parser/mysql" plannercore "github.com/pingcap/tidb/pkg/planner/core" plannerutil "github.com/pingcap/tidb/pkg/planner/util" diff --git a/pkg/executor/mpp_gather.go b/pkg/executor/mpp_gather.go index 23fcc16da63a4..c273b59c5ce6f 100644 --- a/pkg/executor/mpp_gather.go +++ b/pkg/executor/mpp_gather.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/mpp" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/executor/point_get.go b/pkg/executor/point_get.go index 46d8a9af0cb03..4849040ac79e9 100644 --- a/pkg/executor/point_get.go +++ b/pkg/executor/point_get.go @@ -27,7 +27,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/sessionctx" @@ -137,7 +138,7 @@ type PointGetExecutor struct { handle kv.Handle idxInfo *model.IndexInfo partitionDefIdx *int - partitionNames []model.CIStr + partitionNames []pmodel.CIStr idxKey kv.Key handleVal []byte idxVals []types.Datum @@ -176,7 +177,7 @@ func GetPhysID(tblInfo *model.TableInfo, idx *int) int64 { return tblInfo.ID } -func matchPartitionNames(pid int64, partitionNames []model.CIStr, pi *model.PartitionInfo) bool { +func matchPartitionNames(pid int64, partitionNames []pmodel.CIStr, pi *model.PartitionInfo) bool { if len(partitionNames) == 0 { return true } @@ -638,7 +639,7 @@ func (e *PointGetExecutor) get(ctx context.Context, key kv.Key) ([]byte, error) } lock := 
e.tblInfo.Lock - if lock != nil && (lock.Tp == model.TableLockRead || lock.Tp == model.TableLockReadOnly) { + if lock != nil && (lock.Tp == pmodel.TableLockRead || lock.Tp == pmodel.TableLockReadOnly) { if e.Ctx().GetSessionVars().EnablePointGetCache { cacheDB := e.Ctx().GetStore().GetMemCache() val, err = cacheDB.UnionGet(ctx, e.tblInfo.ID, e.snapshot, key) diff --git a/pkg/executor/prepared.go b/pkg/executor/prepared.go index 7f1a8bf24522c..618231ad8ac59 100644 --- a/pkg/executor/prepared.go +++ b/pkg/executor/prepared.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/pkg/parser/mysql" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/types" @@ -55,7 +56,7 @@ type PrepareExec struct { ID uint32 ParamCount int - Fields []*ast.ResultField + Fields []*resolve.ResultField Stmt any // If it's generated from executing "prepare stmt from '...'", the process is parse -> plan -> executor diff --git a/pkg/executor/prepared_test.go b/pkg/executor/prepared_test.go index 78a4b70d9ffa0..1d7d37dda5b30 100644 --- a/pkg/executor/prepared_test.go +++ b/pkg/executor/prepared_test.go @@ -22,8 +22,9 @@ import ( "sync/atomic" "testing" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -1039,7 +1040,7 @@ func TestPrepareStmtAfterIsolationReadChange(t *testing.T) { // create virtual tiflash replica. 
is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, diff --git a/pkg/executor/sample.go b/pkg/executor/sample.go index 813dd2d58d527..b55db0b68e09f 100644 --- a/pkg/executor/sample.go +++ b/pkg/executor/sample.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/executor/show.go b/pkg/executor/show.go index d1aa30bdc31d3..1291f0d54e214 100644 --- a/pkg/executor/show.go +++ b/pkg/executor/show.go @@ -42,12 +42,13 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/charset" parserformat "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/parser/tidb" @@ -94,12 +95,12 @@ type ShowExec struct { exec.BaseExecutor Tp ast.ShowStmtType // Databases/Tables/Columns/.... - DBName model.CIStr + DBName pmodel.CIStr Table *resolve.TableNameW // Used for showing columns. - Partition model.CIStr // Used for showing partition + Partition pmodel.CIStr // Used for showing partition Column *ast.ColumnName // Used for `desc table column`. 
- IndexName model.CIStr // Used for show table regions. - ResourceGroupName model.CIStr // Used for showing resource group + IndexName pmodel.CIStr // Used for show table regions. + ResourceGroupName pmodel.CIStr // Used for showing resource group Flag int // Some flag parsed from sql, such as FULL. Roles []*auth.RoleIdentity // Used for show grants. User *auth.UserIdentity // Used by show grants, show create user. @@ -308,7 +309,7 @@ func (v *visibleChecker) Enter(in ast.Node) (out ast.Node, skipChildren bool) { if schema == "" { schema = v.defaultDB } - if !v.is.TableExists(model.NewCIStr(schema), x.Name) { + if !v.is.TableExists(pmodel.NewCIStr(schema), x.Name) { return in, true } activeRoles := v.ctx.GetSessionVars().ActiveRoles @@ -497,7 +498,7 @@ func (*ShowExec) fetchShowOpenTables() error { // showInfo represents the result of `SHOW TABLES`. type showInfo struct { - Name model.CIStr + Name pmodel.CIStr // only used for show full tables TableType string } @@ -518,7 +519,7 @@ func (e *ShowExec) getTableType(tb *model.TableInfo) string { // fetchShowInfoByName fetches the show info for `SHOW TABLES like 'xxx'` func (e *ShowExec) fetchShowInfoByName(ctx context.Context, name string) ([]*showInfo, error) { - tb, err := e.is.TableByName(ctx, e.DBName, model.NewCIStr(name)) + tb, err := e.is.TableByName(ctx, e.DBName, pmodel.NewCIStr(name)) if err != nil { // do nothing if table not exists if infoschema.ErrTableNotExists.Equal(err) { @@ -1030,7 +1031,7 @@ func ConstructResultOfShowCreateTable(ctx sessionctx.Context, tableInfo *model.T return constructResultOfShowCreateTable(ctx, nil, tableInfo, allocators, buf) } -func constructResultOfShowCreateTable(ctx sessionctx.Context, dbName *model.CIStr, tableInfo *model.TableInfo, allocators autoid.Allocators, buf *bytes.Buffer) (err error) { +func constructResultOfShowCreateTable(ctx sessionctx.Context, dbName *pmodel.CIStr, tableInfo *model.TableInfo, allocators autoid.Allocators, buf *bytes.Buffer) (err error) { if 
tableInfo.IsView() { fetchShowCreateTable4View(ctx, tableInfo, buf) return nil @@ -1234,7 +1235,7 @@ func constructResultOfShowCreateTable(ctx sessionctx.Context, dbName *model.CISt if idxInfo.Comment != "" { fmt.Fprintf(buf, ` COMMENT '%s'`, format.OutputFormat(idxInfo.Comment)) } - if idxInfo.Tp == model.IndexTypeHypo { + if idxInfo.Tp == pmodel.IndexTypeHypo { fmt.Fprintf(buf, ` /* HYPO INDEX */`) } if idxInfo.Primary { @@ -1271,11 +1272,11 @@ func constructResultOfShowCreateTable(ctx sessionctx.Context, dbName *model.CISt refColNames = append(refColNames, stringutil.Escape(refCol.O, sqlMode)) } fmt.Fprintf(buf, "(%s)", strings.Join(refColNames, ",")) - if model.ReferOptionType(fk.OnDelete) != 0 { - fmt.Fprintf(buf, " ON DELETE %s", model.ReferOptionType(fk.OnDelete).String()) + if pmodel.ReferOptionType(fk.OnDelete) != 0 { + fmt.Fprintf(buf, " ON DELETE %s", pmodel.ReferOptionType(fk.OnDelete).String()) } - if model.ReferOptionType(fk.OnUpdate) != 0 { - fmt.Fprintf(buf, " ON UPDATE %s", model.ReferOptionType(fk.OnUpdate).String()) + if pmodel.ReferOptionType(fk.OnUpdate) != 0 { + fmt.Fprintf(buf, " ON UPDATE %s", pmodel.ReferOptionType(fk.OnUpdate).String()) } if fk.Version < model.FKVersion1 { buf.WriteString(" /* FOREIGN KEY INVALID */") @@ -1333,8 +1334,8 @@ func constructResultOfShowCreateTable(ctx sessionctx.Context, dbName *model.CISt } } - if tableInfo.AutoIdCache != 0 { - fmt.Fprintf(buf, " /*T![auto_id_cache] AUTO_ID_CACHE=%d */", tableInfo.AutoIdCache) + if tableInfo.AutoIDCache != 0 { + fmt.Fprintf(buf, " /*T![auto_id_cache] AUTO_ID_CACHE=%d */", tableInfo.AutoIDCache) } randomAllocator := allocators.Get(autoid.AutoRandomType) @@ -2423,7 +2424,7 @@ func (e *ShowExec) fetchShowImportJobs(ctx context.Context) error { // tryFillViewColumnType fill the columns type info of a view. // Because view's underlying table's column could change or recreate, so view's column type may change over time. 
// To avoid this situation we need to generate a logical plan and extract current column types from Schema. -func tryFillViewColumnType(ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema, dbName model.CIStr, tbl *model.TableInfo) error { +func tryFillViewColumnType(ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema, dbName pmodel.CIStr, tbl *model.TableInfo) error { if !tbl.IsView() { return nil } diff --git a/pkg/executor/show_placement.go b/pkg/executor/show_placement.go index c4b6b8ed73843..45fee49b34616 100644 --- a/pkg/executor/show_placement.go +++ b/pkg/executor/show_placement.go @@ -27,8 +27,9 @@ import ( "github.com/pingcap/tidb/pkg/ddl/placement" "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/privilege" "github.com/pingcap/tidb/pkg/table" @@ -281,7 +282,7 @@ func (e *ShowExec) fetchRangesPlacementPlocy(ctx context.Context) error { if err != nil { return err } - policy, ok := e.is.PolicyByName(model.NewCIStr(policyName)) + policy, ok := e.is.PolicyByName(pmodel.NewCIStr(policyName)) if !ok { return errors.Errorf("Policy with name '%s' not found", policyName) } @@ -302,7 +303,7 @@ func (e *ShowExec) fetchAllDBPlacements(ctx context.Context, scheduleState map[i activeRoles := e.Ctx().GetSessionVars().ActiveRoles dbs := e.is.AllSchemaNames() - slices.SortFunc(dbs, func(i, j model.CIStr) int { return cmp.Compare(i.O, j.O) }) + slices.SortFunc(dbs, func(i, j pmodel.CIStr) int { return cmp.Compare(i.O, j.O) }) for _, dbName := range dbs { if checker != nil && e.Ctx().GetSessionVars().User != nil && !checker.DBIsVisible(activeRoles, dbName.O) { @@ -358,7 +359,7 @@ func (e *ShowExec) fetchAllTablePlacements(ctx context.Context, 
scheduleState ma activeRoles := e.Ctx().GetSessionVars().ActiveRoles dbs := e.is.AllSchemaNames() - slices.SortFunc(dbs, func(i, j model.CIStr) int { return cmp.Compare(i.O, j.O) }) + slices.SortFunc(dbs, func(i, j pmodel.CIStr) int { return cmp.Compare(i.O, j.O) }) for _, dbName := range dbs { tableRowSets := make([]tableRowSet, 0) diff --git a/pkg/executor/show_stats.go b/pkg/executor/show_stats.go index cd5030f524bfb..38237baabc8c7 100644 --- a/pkg/executor/show_stats.go +++ b/pkg/executor/show_stats.go @@ -22,8 +22,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/domain" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/planner/cardinality" diff --git a/pkg/executor/slow_query.go b/pkg/executor/slow_query.go index 203d72d8f0e61..fc30786243de9 100644 --- a/pkg/executor/slow_query.go +++ b/pkg/executor/slow_query.go @@ -33,8 +33,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" diff --git a/pkg/executor/slow_query_sql_test.go b/pkg/executor/slow_query_sql_test.go index 7ba73ca9d05b5..6f9d0c92ffd3e 100644 --- a/pkg/executor/slow_query_sql_test.go +++ b/pkg/executor/slow_query_sql_test.go @@ -24,8 +24,9 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/executor" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/testkit" 
"github.com/pingcap/tidb/pkg/testkit/testdata" "github.com/pingcap/tidb/pkg/util/logutil" @@ -405,7 +406,7 @@ func TestWarningsInSlowQuery(t *testing.T) { tk.MustExec("drop table if exists t") tk.MustExec("create table t(a int, b int, c int, d int, e int, f int, g int, h set('11', '22', '33')," + "primary key (a), unique key c_d_e (c, d, e), unique key f (f), unique key f_g (f, g), key g (g))") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t", L: "t"}) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t", L: "t"}) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} diff --git a/pkg/executor/split.go b/pkg/executor/split.go index 888224ba5d506..f43e642eff37f 100644 --- a/pkg/executor/split.go +++ b/pkg/executor/split.go @@ -25,7 +25,8 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/sessionctx" @@ -46,7 +47,7 @@ type SplitIndexRegionExec struct { exec.BaseExecutor tableInfo *model.TableInfo - partitionNames []model.CIStr + partitionNames []pmodel.CIStr indexInfo *model.IndexInfo lower []types.Datum upper []types.Datum @@ -326,7 +327,7 @@ type SplitTableRegionExec struct { exec.BaseExecutor tableInfo *model.TableInfo - partitionNames []model.CIStr + partitionNames []pmodel.CIStr lower []types.Datum upper []types.Datum num int diff --git a/pkg/executor/split_test.go b/pkg/executor/split_test.go index cc15d6b321be6..0f029bc5d932f 100644 --- a/pkg/executor/split_test.go +++ b/pkg/executor/split_test.go @@ -26,7 +26,8 @@ import ( 
"github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" @@ -85,11 +86,11 @@ func TestGetStepValue(t *testing.T) { func TestSplitIndex(t *testing.T) { tbInfo := &model.TableInfo{ - Name: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("t1"), ID: rand.Int63(), Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("c0"), + Name: pmodel.NewCIStr("c0"), ID: 1, Offset: 1, DefaultValue: 0, @@ -101,14 +102,14 @@ func TestSplitIndex(t *testing.T) { idxCols := []*model.IndexColumn{{Name: tbInfo.Columns[0].Name, Offset: 0, Length: types.UnspecifiedLength}} idxInfo := &model.IndexInfo{ ID: 2, - Name: model.NewCIStr("idx1"), - Table: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("idx1"), + Table: pmodel.NewCIStr("t1"), Columns: idxCols, State: model.StatePublic, } firstIdxInfo0 := idxInfo.Clone() firstIdxInfo0.ID = 1 - firstIdxInfo0.Name = model.NewCIStr("idx") + firstIdxInfo0.Name = pmodel.NewCIStr("idx") tbInfo.Indices = []*model.IndexInfo{firstIdxInfo0, idxInfo} // Test for int index. 
@@ -287,11 +288,11 @@ func TestSplitIndex(t *testing.T) { func TestSplitTable(t *testing.T) { tbInfo := &model.TableInfo{ - Name: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("t1"), ID: rand.Int63(), Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("c0"), + Name: pmodel.NewCIStr("c0"), ID: 1, Offset: 1, DefaultValue: 0, @@ -364,11 +365,11 @@ func TestSplitTable(t *testing.T) { func TestStepShouldLargeThanMinStep(t *testing.T) { ctx := mock.NewContext() tbInfo := &model.TableInfo{ - Name: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("t1"), ID: rand.Int63(), Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("c0"), + Name: pmodel.NewCIStr("c0"), ID: 1, Offset: 1, DefaultValue: 0, @@ -391,7 +392,7 @@ func TestStepShouldLargeThanMinStep(t *testing.T) { func TestClusterIndexSplitTable(t *testing.T) { tbInfo := &model.TableInfo{ - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), ID: 1, IsCommonHandle: true, CommonHandleVersion: 1, @@ -408,21 +409,21 @@ func TestClusterIndexSplitTable(t *testing.T) { }, Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("c0"), + Name: pmodel.NewCIStr("c0"), ID: 1, Offset: 0, State: model.StatePublic, FieldType: *types.NewFieldType(mysql.TypeDouble), }, { - Name: model.NewCIStr("c1"), + Name: pmodel.NewCIStr("c1"), ID: 2, Offset: 1, State: model.StatePublic, FieldType: *types.NewFieldType(mysql.TypeLonglong), }, { - Name: model.NewCIStr("c2"), + Name: pmodel.NewCIStr("c2"), ID: 3, Offset: 2, State: model.StatePublic, diff --git a/pkg/executor/staticrecordset/BUILD.bazel b/pkg/executor/staticrecordset/BUILD.bazel index 24c6d4afd75c9..200f9bb044020 100644 --- a/pkg/executor/staticrecordset/BUILD.bazel +++ b/pkg/executor/staticrecordset/BUILD.bazel @@ -10,7 +10,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/executor/internal/exec", - "//pkg/parser/ast", + "//pkg/planner/core/resolve", "//pkg/session/cursor", "//pkg/util", "//pkg/util/chunk", diff --git 
a/pkg/executor/staticrecordset/cursorrecordset.go b/pkg/executor/staticrecordset/cursorrecordset.go index 98c87cb8343b7..fefe856a2a2c5 100644 --- a/pkg/executor/staticrecordset/cursorrecordset.go +++ b/pkg/executor/staticrecordset/cursorrecordset.go @@ -17,7 +17,7 @@ package staticrecordset import ( "context" - "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/session/cursor" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/sqlexec" @@ -32,7 +32,7 @@ type cursorRecordSet struct { recordSet sqlexec.RecordSet } -func (c *cursorRecordSet) Fields() []*ast.ResultField { +func (c *cursorRecordSet) Fields() []*resolve.ResultField { return c.recordSet.Fields() } diff --git a/pkg/executor/staticrecordset/recordset.go b/pkg/executor/staticrecordset/recordset.go index 78cef1a5b5f0c..e8f2e2b066edd 100644 --- a/pkg/executor/staticrecordset/recordset.go +++ b/pkg/executor/staticrecordset/recordset.go @@ -18,7 +18,7 @@ import ( "context" "github.com/pingcap/tidb/pkg/executor/internal/exec" - "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/logutil" @@ -29,14 +29,14 @@ import ( var _ sqlexec.RecordSet = &staticRecordSet{} type staticRecordSet struct { - fields []*ast.ResultField + fields []*resolve.ResultField executor exec.Executor sqlText string } // New creates a new staticRecordSet -func New(fields []*ast.ResultField, executor exec.Executor, sqlText string) sqlexec.RecordSet { +func New(fields []*resolve.ResultField, executor exec.Executor, sqlText string) sqlexec.RecordSet { return &staticRecordSet{ fields: fields, executor: executor, @@ -44,7 +44,7 @@ func New(fields []*ast.ResultField, executor exec.Executor, sqlText string) sqle } } -func (s *staticRecordSet) Fields() []*ast.ResultField { +func (s *staticRecordSet) Fields() 
[]*resolve.ResultField { return s.fields } diff --git a/pkg/executor/stmtsummary.go b/pkg/executor/stmtsummary.go index fe739c43e3e73..6bea11a1699b4 100644 --- a/pkg/executor/stmtsummary.go +++ b/pkg/executor/stmtsummary.go @@ -19,7 +19,7 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/executor/table_reader.go b/pkg/executor/table_reader.go index 60c83af717526..0c5e497b9de5a 100644 --- a/pkg/executor/table_reader.go +++ b/pkg/executor/table_reader.go @@ -36,7 +36,7 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" isctx "github.com/pingcap/tidb/pkg/infoschema/context" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" planctx "github.com/pingcap/tidb/pkg/planner/context" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" diff --git a/pkg/executor/table_readers_required_rows_test.go b/pkg/executor/table_readers_required_rows_test.go index 04ada4e91da20..989b69142e42c 100644 --- a/pkg/executor/table_readers_required_rows_test.go +++ b/pkg/executor/table_readers_required_rows_test.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" diff --git a/pkg/executor/test/admintest/BUILD.bazel b/pkg/executor/test/admintest/BUILD.bazel index 32e5567cedfcb..f7a58178265aa 100644 --- a/pkg/executor/test/admintest/BUILD.bazel +++ 
b/pkg/executor/test/admintest/BUILD.bazel @@ -17,6 +17,7 @@ go_test( "//pkg/executor", "//pkg/kv", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/session", "//pkg/sessionctx", diff --git a/pkg/executor/test/admintest/admin_test.go b/pkg/executor/test/admintest/admin_test.go index cee62ffd83f50..fa9eaf3e77281 100644 --- a/pkg/executor/test/admintest/admin_test.go +++ b/pkg/executor/test/admintest/admin_test.go @@ -30,7 +30,8 @@ import ( mysql "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -86,8 +87,8 @@ func TestAdminRecoverIndex(t *testing.T) { sctx.Store = store ctx := sctx.GetTableCtx() is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) @@ -176,8 +177,8 @@ func TestAdminRecoverIndex(t *testing.T) { sctx.Store = store ctx = sctx.GetTableCtx() is = domain.InfoSchema() - dbName = model.NewCIStr("test") - tblName = model.NewCIStr("admin_test") + dbName = pmodel.NewCIStr("test") + tblName = pmodel.NewCIStr("admin_test") tbl, err = is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) @@ -222,8 +223,8 @@ func TestAdminRecoverMVIndex(t *testing.T) { sctx.Store = store ctx := sctx.GetTableCtx() is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("t") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("t") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) tblInfo := tbl.Meta() @@ -267,8 
+268,8 @@ func TestAdminCleanupMVIndex(t *testing.T) { sctx.Store = store ctx := sctx.GetTableCtx() is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("t") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("t") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) tblInfo := tbl.Meta() @@ -302,8 +303,8 @@ func TestClusteredIndexAdminRecoverIndex(t *testing.T) { tk.MustExec("create database test_cluster_index_admin_recover;") tk.MustExec("use test_cluster_index_admin_recover;") tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - dbName := model.NewCIStr("test_cluster_index_admin_recover") - tblName := model.NewCIStr("t") + dbName := pmodel.NewCIStr("test_cluster_index_admin_recover") + tblName := pmodel.NewCIStr("t") // Test no corruption case. tk.MustExec("create table t (a varchar(255), b int, c char(10), primary key(a, c), index idx(b), index idx1(c));") @@ -366,8 +367,8 @@ func TestAdminRecoverPartitionTableIndex(t *testing.T) { ctx := mock.NewContext() ctx.Store = store is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) return tbl @@ -435,8 +436,8 @@ func TestAdminRecoverIndex1(t *testing.T) { sctx := mock.NewContext() sctx.Store = store ctx := sctx.GetTableCtx() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tk.MustExec("use test") tk.MustExec("drop table if exists admin_test") tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly @@ -505,8 +506,8 @@ func TestAdminCleanupIndex(t *testing.T) { sctx.Store = store ctx := sctx.GetTableCtx() is := domain.InfoSchema() - dbName := 
model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) @@ -574,8 +575,8 @@ func TestAdminCleanupIndexForPartitionTable(t *testing.T) { ctx := mock.NewContext() ctx.Store = store is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) return tbl @@ -665,8 +666,8 @@ func TestAdminCleanupIndexPKNotHandle(t *testing.T) { sctx.Store = store ctx := sctx.GetTableCtx() is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) @@ -716,8 +717,8 @@ func TestAdminCleanupIndexMore(t *testing.T) { sctx.Store = store ctx := sctx.GetTableCtx() is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) @@ -785,7 +786,7 @@ func TestClusteredAdminCleanupIndex(t *testing.T) { sctx := mock.NewContext() sctx.Store = store ctx := sctx.GetTableCtx() - tbl, err := domain.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("admin_test")) + tbl, err := domain.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("admin_test")) require.NoError(t, err) // cleanup clustered primary key takes no effect. 
@@ -863,8 +864,8 @@ func TestAdminCheckTableWithMultiValuedIndex(t *testing.T) { sctx.Store = store ctx := sctx.GetTableCtx() is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("t") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("t") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) tblInfo := tbl.Meta() @@ -918,8 +919,8 @@ func TestAdminCheckPartitionTableFailed(t *testing.T) { sctx.Store = store ctx := sctx.GetTableCtx() is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test_p") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test_p") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) tblInfo := tbl.Meta() @@ -1051,7 +1052,7 @@ func newInconsistencyKit(t *testing.T, tk *testkit.AsyncTestKit, opt *kitOpt) *i func (tk *inconsistencyTestKit) rebuild() { tk.MustExec(tk.ctx, "truncate table "+tblName) is := domain.GetDomain(testkit.TryRetrieveSession(tk.ctx)).InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr(dbName), model.NewCIStr(tblName)) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr(dbName), pmodel.NewCIStr(tblName)) require.NoError(tk.t, err) tk.uniqueIndex = tables.NewIndex(tbl.Meta().ID, tbl.Meta(), tbl.Meta().Indices[0]) tk.plainIndex = tables.NewIndex(tbl.Meta().ID, tbl.Meta(), tbl.Meta().Indices[1]) @@ -1303,8 +1304,8 @@ func TestAdminCheckWithSnapshot(t *testing.T) { sctx.Store = store ctx := sctx.GetTableCtx() is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_t_s") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_t_s") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) @@ -1362,8 +1363,8 @@ func TestAdminCheckTableFailed(t *testing.T) { sctx.Store = store ctx := sctx.GetTableCtx() is := 
domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) tblInfo := tbl.Meta() @@ -1527,8 +1528,8 @@ func TestAdminCheckTableErrorLocate(t *testing.T) { sctx.Store = store ctx := sctx.GetTableCtx() is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) tblInfo := tbl.Meta() @@ -1659,8 +1660,8 @@ func TestAdminCheckTableErrorLocateForClusterIndex(t *testing.T) { sctx.Store = store ctx := sctx.GetTableCtx() is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) tblInfo := tbl.Meta() @@ -1794,8 +1795,8 @@ func TestAdminCleanUpGlobalIndex(t *testing.T) { sctx := mock.NewContext() sctx.Store = store is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) tblInfo := tbl.Meta() @@ -1839,8 +1840,8 @@ func TestAdminRecoverGlobalIndex(t *testing.T) { sctx := mock.NewContext() sctx.Store = store is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) tblInfo := tbl.Meta() @@ -1889,8 +1890,8 @@ func 
TestAdminCheckGlobalIndex(t *testing.T) { sctx := mock.NewContext() sctx.Store = store is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) tblInfo := tbl.Meta() @@ -1986,8 +1987,8 @@ func TestAdminCheckGlobalIndexWithClusterIndex(t *testing.T) { sctx := mock.NewContext() sctx.Store = store is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") + dbName := pmodel.NewCIStr("test") + tblName := pmodel.NewCIStr("admin_test") tbl, err := is.TableByName(context.Background(), dbName, tblName) require.NoError(t, err) tblInfo := tbl.Meta() diff --git a/pkg/executor/test/executor/BUILD.bazel b/pkg/executor/test/executor/BUILD.bazel index 22e09810a3cae..09967dbe61a4b 100644 --- a/pkg/executor/test/executor/BUILD.bazel +++ b/pkg/executor/test/executor/BUILD.bazel @@ -21,6 +21,7 @@ go_test( "//pkg/kv", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/model", "//pkg/parser/mysql", diff --git a/pkg/executor/test/executor/executor_test.go b/pkg/executor/test/executor/executor_test.go index 7fd91f5840812..004c6bb683fa4 100644 --- a/pkg/executor/test/executor/executor_test.go +++ b/pkg/executor/test/executor/executor_test.go @@ -43,8 +43,9 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/planner" @@ -301,11 +302,11 @@ func TestCheckIndex(t *testing.T) { require.NoError(t, err) is := dom.InfoSchema() - db := 
model.NewCIStr("test_admin") + db := pmodel.NewCIStr("test_admin") dbInfo, ok := is.SchemaByName(db) require.True(t, ok) - tblName := model.NewCIStr("t") + tblName := pmodel.NewCIStr("t") tbl, err := is.TableByName(context.Background(), db, tblName) require.NoError(t, err) tbInfo := tbl.Meta() @@ -421,7 +422,7 @@ func TestTimestampDefaultValueTimeZone(t *testing.T) { // Test the column's version is greater than ColumnInfoVersion1. is := domain.GetDomain(tk.Session()).InfoSchema() require.NotNil(t, is) - tb, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tb, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tb.Cols()[1].Version = model.ColumnInfoVersion1 + 1 tk.MustExec("insert into t set a=3") @@ -682,7 +683,7 @@ func TestIssue19148(t *testing.T) { tk.MustExec("create table t(a decimal(16, 2));") tk.MustExec("select * from t where a > any_value(a);") is := domain.GetDomain(tk.Session()).InfoSchema() - tblInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) require.Zero(t, tblInfo.Meta().Columns[0].GetFlag()) } @@ -2571,7 +2572,7 @@ func TestAdmin(t *testing.T) { dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() require.NotNil(t, is) - tb, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("admin_test")) + tb, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("admin_test")) require.NoError(t, err) require.Len(t, tb.Indices(), 1) _, err = tb.Indices()[0].Create(mock.NewContext().GetTableCtx(), txn, types.MakeDatums(int64(10)), kv.IntHandle(1), nil) diff --git a/pkg/executor/test/fktest/BUILD.bazel b/pkg/executor/test/fktest/BUILD.bazel index 721e587b1e71e..c0900fe4f8ac7 100644 --- a/pkg/executor/test/fktest/BUILD.bazel 
+++ b/pkg/executor/test/fktest/BUILD.bazel @@ -14,6 +14,7 @@ go_test( "//pkg/executor", "//pkg/kv", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/auth", diff --git a/pkg/executor/test/fktest/foreign_key_test.go b/pkg/executor/test/fktest/foreign_key_test.go index 30a10b548bf31..d4893105c7192 100644 --- a/pkg/executor/test/fktest/foreign_key_test.go +++ b/pkg/executor/test/fktest/foreign_key_test.go @@ -26,11 +26,12 @@ import ( "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" @@ -1220,8 +1221,8 @@ func TestForeignKeyGenerateCascadeAST(t *testing.T) { {types.NewDatum(2), types.NewDatum("b")}, } cols := []*model.ColumnInfo{ - {ID: 1, Name: model.NewCIStr("a"), FieldType: *types.NewFieldType(mysql.TypeLonglong)}, - {ID: 2, Name: model.NewCIStr("name"), FieldType: *types.NewFieldType(mysql.TypeVarchar)}, + {ID: 1, Name: pmodel.NewCIStr("a"), FieldType: *types.NewFieldType(mysql.TypeLonglong)}, + {ID: 2, Name: pmodel.NewCIStr("name"), FieldType: *types.NewFieldType(mysql.TypeVarchar)}, } restoreFn := func(stmt ast.StmtNode) string { var sb strings.Builder @@ -1237,29 +1238,29 @@ func TestForeignKeyGenerateCascadeAST(t *testing.T) { require.Equal(t, restoreFn(expectedStmt), restoreFn(stmt)) } var stmt ast.StmtNode - stmt = executor.GenCascadeDeleteAST(model.NewCIStr("test"), model.NewCIStr("t2"), model.NewCIStr(""), cols, fkValues) + stmt = executor.GenCascadeDeleteAST(pmodel.NewCIStr("test"), pmodel.NewCIStr("t2"), pmodel.NewCIStr(""), cols, fkValues) checkStmtFn(stmt, 
"delete from test.t2 where (a,name) in ((1,'a'), (2,'b'))") - stmt = executor.GenCascadeDeleteAST(model.NewCIStr("test"), model.NewCIStr("t2"), model.NewCIStr("idx"), cols, fkValues) + stmt = executor.GenCascadeDeleteAST(pmodel.NewCIStr("test"), pmodel.NewCIStr("t2"), pmodel.NewCIStr("idx"), cols, fkValues) checkStmtFn(stmt, "delete from test.t2 use index(idx) where (a,name) in ((1,'a'), (2,'b'))") - stmt = executor.GenCascadeSetNullAST(model.NewCIStr("test"), model.NewCIStr("t2"), model.NewCIStr(""), cols, fkValues) + stmt = executor.GenCascadeSetNullAST(pmodel.NewCIStr("test"), pmodel.NewCIStr("t2"), pmodel.NewCIStr(""), cols, fkValues) checkStmtFn(stmt, "update test.t2 set a = null, name = null where (a,name) in ((1,'a'), (2,'b'))") - stmt = executor.GenCascadeSetNullAST(model.NewCIStr("test"), model.NewCIStr("t2"), model.NewCIStr("idx"), cols, fkValues) + stmt = executor.GenCascadeSetNullAST(pmodel.NewCIStr("test"), pmodel.NewCIStr("t2"), pmodel.NewCIStr("idx"), cols, fkValues) checkStmtFn(stmt, "update test.t2 use index(idx) set a = null, name = null where (a,name) in ((1,'a'), (2,'b'))") newValue1 := []types.Datum{types.NewDatum(10), types.NewDatum("aa")} couple := &executor.UpdatedValuesCouple{ NewValues: newValue1, OldValuesList: fkValues, } - stmt = executor.GenCascadeUpdateAST(model.NewCIStr("test"), model.NewCIStr("t2"), model.NewCIStr(""), cols, couple) + stmt = executor.GenCascadeUpdateAST(pmodel.NewCIStr("test"), pmodel.NewCIStr("t2"), pmodel.NewCIStr(""), cols, couple) checkStmtFn(stmt, "update test.t2 set a = 10, name = 'aa' where (a,name) in ((1,'a'), (2,'b'))") - stmt = executor.GenCascadeUpdateAST(model.NewCIStr("test"), model.NewCIStr("t2"), model.NewCIStr("idx"), cols, couple) + stmt = executor.GenCascadeUpdateAST(pmodel.NewCIStr("test"), pmodel.NewCIStr("t2"), pmodel.NewCIStr("idx"), cols, couple) checkStmtFn(stmt, "update test.t2 use index(idx) set a = 10, name = 'aa' where (a,name) in ((1,'a'), (2,'b'))") // Test for 1 fk column. 
fkValues = [][]types.Datum{{types.NewDatum(1)}, {types.NewDatum(2)}} - cols = []*model.ColumnInfo{{ID: 1, Name: model.NewCIStr("a"), FieldType: *types.NewFieldType(mysql.TypeLonglong)}} - stmt = executor.GenCascadeDeleteAST(model.NewCIStr("test"), model.NewCIStr("t2"), model.NewCIStr(""), cols, fkValues) + cols = []*model.ColumnInfo{{ID: 1, Name: pmodel.NewCIStr("a"), FieldType: *types.NewFieldType(mysql.TypeLonglong)}} + stmt = executor.GenCascadeDeleteAST(pmodel.NewCIStr("test"), pmodel.NewCIStr("t2"), pmodel.NewCIStr(""), cols, fkValues) checkStmtFn(stmt, "delete from test.t2 where a in (1,2)") - stmt = executor.GenCascadeDeleteAST(model.NewCIStr("test"), model.NewCIStr("t2"), model.NewCIStr("idx"), cols, fkValues) + stmt = executor.GenCascadeDeleteAST(pmodel.NewCIStr("test"), pmodel.NewCIStr("t2"), pmodel.NewCIStr("idx"), cols, fkValues) checkStmtFn(stmt, "delete from test.t2 use index(idx) where a in (1,2)") } diff --git a/pkg/executor/test/showtest/BUILD.bazel b/pkg/executor/test/showtest/BUILD.bazel index 7c4bea9975dad..82136b4033b6b 100644 --- a/pkg/executor/test/showtest/BUILD.bazel +++ b/pkg/executor/test/showtest/BUILD.bazel @@ -15,6 +15,7 @@ go_test( "//pkg/executor", "//pkg/infoschema", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser/auth", "//pkg/parser/model", "//pkg/parser/mysql", diff --git a/pkg/executor/test/showtest/show_test.go b/pkg/executor/test/showtest/show_test.go index 33aaf72484f9a..7d57dbcb33e24 100644 --- a/pkg/executor/test/showtest/show_test.go +++ b/pkg/executor/test/showtest/show_test.go @@ -25,8 +25,9 @@ import ( _ "github.com/pingcap/tidb/pkg/autoid_service" "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" parsertypes "github.com/pingcap/tidb/pkg/parser/types" 
"github.com/pingcap/tidb/pkg/privilege/privileges" @@ -288,7 +289,7 @@ func TestShowWarningsForExprPushdown(t *testing.T) { // create tiflash replica { is := dom.InfoSchema() - tblInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("show_warnings_expr_pushdown")) + tblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("show_warnings_expr_pushdown")) require.NoError(t, err) tblInfo.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -531,7 +532,7 @@ func TestShow2(t *testing.T) { tk.MustQuery("SHOW FULL TABLES in metrics_schema like 'uptime'").Check(testkit.Rows("uptime SYSTEM VIEW")) is := dom.InfoSchema() - tblInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) createTime := model.TSConvert2Time(tblInfo.Meta().UpdateTS).Format(time.DateTime) @@ -674,7 +675,7 @@ func TestUnprivilegedShow(t *testing.T) { tk.Session().Auth(&auth.UserIdentity{Username: "lowprivuser", Hostname: "192.168.0.1", AuthUsername: "lowprivuser", AuthHostname: "%"}, nil, []byte("012345678901234567890"), nil) is := dom.InfoSchema() - tblInfo, err := is.TableByName(context.Background(), model.NewCIStr("testshow"), model.NewCIStr("t1")) + tblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("testshow"), pmodel.NewCIStr("t1")) require.NoError(t, err) createTime := model.TSConvert2Time(tblInfo.Meta().UpdateTS).Format(time.DateTime) diff --git a/pkg/executor/union_scan.go b/pkg/executor/union_scan.go index c1d6a7cc0c9ef..5799ad478093f 100644 --- a/pkg/executor/union_scan.go +++ b/pkg/executor/union_scan.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + 
"github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" plannerutil "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" diff --git a/pkg/executor/write.go b/pkg/executor/write.go index 56538d29a7c35..a05c1eaac69b7 100644 --- a/pkg/executor/write.go +++ b/pkg/executor/write.go @@ -25,8 +25,8 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/expression/BUILD.bazel b/pkg/expression/BUILD.bazel index 160913b6abff4..b92787e83f494 100644 --- a/pkg/expression/BUILD.bazel +++ b/pkg/expression/BUILD.bazel @@ -80,6 +80,7 @@ go_library( "//pkg/extension", "//pkg/infoschema/context", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/auth", @@ -206,6 +207,7 @@ go_test( "//pkg/expression/context", "//pkg/expression/contextstatic", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/auth", diff --git a/pkg/expression/column.go b/pkg/expression/column.go index 690e1c9a41932..68f3eb900d416 100644 --- a/pkg/expression/column.go +++ b/pkg/expression/column.go @@ -23,9 +23,9 @@ import ( "github.com/pingcap/errors" exprctx "github.com/pingcap/tidb/pkg/expression/context" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/cascades/base" "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/expression/column_test.go b/pkg/expression/column_test.go index df80e6053cdd9..78dcd8c061443 100644 --- 
a/pkg/expression/column_test.go +++ b/pkg/expression/column_test.go @@ -18,8 +18,9 @@ import ( "fmt" "testing" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/cascades/base" "github.com/pingcap/tidb/pkg/types" @@ -134,9 +135,9 @@ func TestColInfo2Col(t *testing.T) { func TestIndexInfo2Cols(t *testing.T) { col0 := &Column{UniqueID: 0, ID: 0, RetType: types.NewFieldType(mysql.TypeLonglong)} col1 := &Column{UniqueID: 1, ID: 1, RetType: types.NewFieldType(mysql.TypeLonglong)} - colInfo0 := &model.ColumnInfo{ID: 0, Name: model.NewCIStr("0")} - colInfo1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("1")} - indexCol0, indexCol1 := &model.IndexColumn{Name: model.NewCIStr("0")}, &model.IndexColumn{Name: model.NewCIStr("1")} + colInfo0 := &model.ColumnInfo{ID: 0, Name: pmodel.NewCIStr("0")} + colInfo1 := &model.ColumnInfo{ID: 1, Name: pmodel.NewCIStr("1")} + indexCol0, indexCol1 := &model.IndexColumn{Name: pmodel.NewCIStr("0")}, &model.IndexColumn{Name: pmodel.NewCIStr("1")} indexInfo := &model.IndexInfo{Columns: []*model.IndexColumn{indexCol0, indexCol1}} cols := []*Column{col0} diff --git a/pkg/expression/contextopt/BUILD.bazel b/pkg/expression/contextopt/BUILD.bazel index cf4ca98fa02e5..ac7a6457a01c1 100644 --- a/pkg/expression/contextopt/BUILD.bazel +++ b/pkg/expression/contextopt/BUILD.bazel @@ -19,8 +19,8 @@ go_library( "//pkg/expression/context", "//pkg/infoschema/context", "//pkg/kv", - "//pkg/parser/ast", "//pkg/parser/auth", + "//pkg/planner/core/resolve", "//pkg/sessionctx/variable", "//pkg/util/chunk", "//pkg/util/intest", diff --git a/pkg/expression/contextopt/sqlexec.go b/pkg/expression/contextopt/sqlexec.go index 6e623dd8b2d7b..3f3fc0eb4268d 100644 --- a/pkg/expression/contextopt/sqlexec.go +++ b/pkg/expression/contextopt/sqlexec.go @@ -18,7 +18,7 
@@ import ( "context" exprctx "github.com/pingcap/tidb/pkg/expression/context" - "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/sqlexec" ) @@ -34,7 +34,7 @@ type SQLExecutor interface { opts []sqlexec.OptionFuncAlias, sql string, args ...any, - ) ([]chunk.Row, []*ast.ResultField, error) + ) ([]chunk.Row, []*resolve.ResultField, error) } // SQLExecutorPropProvider provides the SQLExecutor diff --git a/pkg/expression/explain.go b/pkg/expression/explain.go index cf43d6291c9fb..6ff136115abd1 100644 --- a/pkg/expression/explain.go +++ b/pkg/expression/explain.go @@ -21,8 +21,8 @@ import ( "strings" "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/intest" @@ -42,7 +42,7 @@ func (expr *ScalarFunction) explainInfo(ctx EvalContext, normalized bool) string // convert `in(_tidb_tid, -1)` to `in(_tidb_tid, dual)` whether normalized equals to true or false. 
if expr.FuncName.L == ast.In { args := expr.GetArgs() - if len(args) == 2 && strings.HasSuffix(args[0].ExplainNormalizedInfo(), model.ExtraPhysTblIdName.L) && args[1].(*Constant).Value.GetInt64() == -1 { + if len(args) == 2 && strings.HasSuffix(args[0].ExplainNormalizedInfo(), model.ExtraPhysTblIDName.L) && args[1].(*Constant).Value.GetInt64() == -1 { buffer.WriteString(args[0].ExplainNormalizedInfo() + ", dual)") return buffer.String() } diff --git a/pkg/expression/expression.go b/pkg/expression/expression.go index 56a5bc0122073..07f83c1f06609 100644 --- a/pkg/expression/expression.go +++ b/pkg/expression/expression.go @@ -21,8 +21,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/errctx" exprctx "github.com/pingcap/tidb/pkg/expression/context" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/opcode" "github.com/pingcap/tidb/pkg/parser/terror" @@ -55,7 +56,7 @@ type BuildOptions struct { // InputNames is the input names for expression to build InputNames types.NameSlice // SourceTableDB is the database that the source table located - SourceTableDB model.CIStr + SourceTableDB pmodel.CIStr // SourceTable is used to provide some extra column info. SourceTable *model.TableInfo // AllowCastArray specifies whether to allow casting to an array type. @@ -71,7 +72,7 @@ type BuildOption func(*BuildOptions) // When this option is specified, it will use the table meta to resolve column names. 
func WithTableInfo(db string, tblInfo *model.TableInfo) BuildOption { return func(options *BuildOptions) { - options.SourceTableDB = model.NewCIStr(db) + options.SourceTableDB = pmodel.NewCIStr(db) options.SourceTable = tblInfo } } @@ -838,7 +839,7 @@ func FlattenCNFConditions(CNFCondition *ScalarFunction) []Expression { type Assignment struct { Col *Column // ColName indicates its original column name in table schema. It's used for outputting helping message when executing meets some errors. - ColName model.CIStr + ColName pmodel.CIStr Expr Expression // LazyErr is used in statement like `INSERT INTO t1 (a) VALUES (1) ON DUPLICATE KEY UPDATE a= (SELECT b FROM source);`, ErrSubqueryMoreThan1Row // should be evaluated after the duplicate situation is detected in the executing procedure. @@ -1006,7 +1007,7 @@ func evaluateExprWithNullInNullRejectCheck(ctx BuildContext, schema *Schema, exp } // TableInfo2SchemaAndNames converts the TableInfo to the schema and name slice. -func TableInfo2SchemaAndNames(ctx BuildContext, dbName model.CIStr, tbl *model.TableInfo) (*Schema, []*types.FieldName, error) { +func TableInfo2SchemaAndNames(ctx BuildContext, dbName pmodel.CIStr, tbl *model.TableInfo) (*Schema, []*types.FieldName, error) { cols, names, err := ColumnInfos2ColumnsAndNames(ctx, dbName, tbl.Name, tbl.Cols(), tbl) if err != nil { return nil, nil, err @@ -1053,7 +1054,7 @@ func TableInfo2SchemaAndNames(ctx BuildContext, dbName model.CIStr, tbl *model.T } // ColumnInfos2ColumnsAndNames converts the ColumnInfo to the *Column and NameSlice. 
-func ColumnInfos2ColumnsAndNames(ctx BuildContext, dbName, tblName model.CIStr, colInfos []*model.ColumnInfo, tblInfo *model.TableInfo) ([]*Column, types.NameSlice, error) { +func ColumnInfos2ColumnsAndNames(ctx BuildContext, dbName, tblName pmodel.CIStr, colInfos []*model.ColumnInfo, tblInfo *model.TableInfo) ([]*Column, types.NameSlice, error) { columns := make([]*Column, 0, len(colInfos)) names := make([]*types.FieldName, 0, len(colInfos)) for i, col := range colInfos { @@ -1116,7 +1117,7 @@ func NewValuesFunc(ctx BuildContext, offset int, retTp *types.FieldType) *Scalar bt, err := fc.getFunction(ctx, nil) terror.Log(err) return &ScalarFunction{ - FuncName: model.NewCIStr(ast.Values), + FuncName: pmodel.NewCIStr(ast.Values), RetType: retTp, Function: bt, } @@ -1157,12 +1158,12 @@ func wrapWithIsTrue(ctx BuildContext, keepNull bool, arg Expression, wrapForInt return nil, err } sf := &ScalarFunction{ - FuncName: model.NewCIStr(ast.IsTruthWithoutNull), + FuncName: pmodel.NewCIStr(ast.IsTruthWithoutNull), Function: f, RetType: f.getRetTp(), } if keepNull { - sf.FuncName = model.NewCIStr(ast.IsTruthWithNull) + sf.FuncName = pmodel.NewCIStr(ast.IsTruthWithNull) } return FoldConstant(ctx, sf), nil } diff --git a/pkg/expression/expression_test.go b/pkg/expression/expression_test.go index ebac4c1b32256..0f09e7ca2b298 100644 --- a/pkg/expression/expression_test.go +++ b/pkg/expression/expression_test.go @@ -19,8 +19,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression/contextstatic" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" @@ -220,7 +221,7 @@ func (builder *testTableBuilder) add(name string, tp byte, flag uint) *testTable func (builder *testTableBuilder) build() *model.TableInfo { ti := 
&model.TableInfo{ ID: 1, - Name: model.NewCIStr(builder.tableName), + Name: pmodel.NewCIStr(builder.tableName), State: model.StatePublic, } for i, colName := range builder.columnNames { @@ -235,7 +236,7 @@ func (builder *testTableBuilder) build() *model.TableInfo { fieldType.SetFlag(builder.flags[i]) ti.Columns = append(ti.Columns, &model.ColumnInfo{ ID: int64(i + 1), - Name: model.NewCIStr(colName), + Name: pmodel.NewCIStr(colName), Offset: i, FieldType: *fieldType, State: model.StatePublic, diff --git a/pkg/expression/integration_test/BUILD.bazel b/pkg/expression/integration_test/BUILD.bazel index 8006de9b71aca..42f4b40dd89df 100644 --- a/pkg/expression/integration_test/BUILD.bazel +++ b/pkg/expression/integration_test/BUILD.bazel @@ -15,6 +15,7 @@ go_test( "//pkg/errno", "//pkg/expression", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/auth", "//pkg/parser/model", "//pkg/parser/mysql", diff --git a/pkg/expression/integration_test/integration_test.go b/pkg/expression/integration_test/integration_test.go index c70449a4c014a..3e81cdf8bed58 100644 --- a/pkg/expression/integration_test/integration_test.go +++ b/pkg/expression/integration_test/integration_test.go @@ -36,8 +36,9 @@ import ( "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" @@ -985,7 +986,7 @@ func TestTiDBDecodeKeyFunc(t *testing.T) { tk.MustExec("create table t (a varchar(255), b int, c datetime, primary key (a, b, c));") dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := 
is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) getTime := func(year, month, day int, timeType byte) types.Time { ret := types.NewTime(types.FromDate(year, month, day, 0, 0, 0, 0), timeType, types.DefaultFsp) @@ -1016,7 +1017,7 @@ func TestTiDBDecodeKeyFunc(t *testing.T) { tk.MustExec("create table t (a varchar(255), b int, c datetime, index idx(a, b, c));") dom = domain.GetDomain(tk.Session()) is = dom.InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) buildIndexKeyFromData := func(tableID, indexID int64, data []types.Datum) string { k, err := codec.EncodeKey(tk.Session().GetSessionVars().StmtCtx.TimeZone(), nil, data...) @@ -1055,7 +1056,7 @@ func TestTiDBDecodeKeyFunc(t *testing.T) { tk.MustExec("create table t (a int primary key nonclustered, b int, key bk (b));") dom = domain.GetDomain(tk.Session()) is = dom.InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) buildTableRowKey := func(tableID, rowID int64) string { return hex.EncodeToString( @@ -1074,7 +1075,7 @@ func TestTiDBDecodeKeyFunc(t *testing.T) { tk.MustExec("create table t (a int primary key clustered, b int, key bk (b));") dom = domain.GetDomain(tk.Session()) is = dom.InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) hexKey = buildTableRowKey(tbl.Meta().ID, rowID) sql = fmt.Sprintf("select tidb_decode_key( '%s' )", hexKey) @@ -1086,7 +1087,7 @@ func TestTiDBDecodeKeyFunc(t *testing.T) { tk.MustExec("create table t (a 
int primary key clustered, b int, key bk (b)) PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (1), PARTITION p1 VALUES LESS THAN (2));") dom = domain.GetDomain(tk.Session()) is = dom.InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) require.NotNil(t, tbl.Meta().Partition) hexKey = buildTableRowKey(tbl.Meta().Partition.Definitions[0].ID, rowID) @@ -1148,7 +1149,7 @@ func TestShardIndexOnTiFlash(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -1184,7 +1185,7 @@ func TestExprPushdownBlacklist(t *testing.T) { // Create virtual tiflash replica info. 
dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, diff --git a/pkg/expression/schema.go b/pkg/expression/schema.go index 3d915c8434742..c811af42b0d71 100644 --- a/pkg/expression/schema.go +++ b/pkg/expression/schema.go @@ -18,7 +18,7 @@ import ( "strings" "unsafe" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/util/size" ) diff --git a/pkg/expression/simple_rewriter.go b/pkg/expression/simple_rewriter.go index b83ef99485865..f99d6cb3bf0e8 100644 --- a/pkg/expression/simple_rewriter.go +++ b/pkg/expression/simple_rewriter.go @@ -18,9 +18,9 @@ import ( "context" "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/intest" diff --git a/pkg/infoschema/BUILD.bazel b/pkg/infoschema/BUILD.bazel index 91898cf13180a..ede4fc100c553 100644 --- a/pkg/infoschema/BUILD.bazel +++ b/pkg/infoschema/BUILD.bazel @@ -32,6 +32,7 @@ go_library( "//pkg/kv", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser/auth", "//pkg/parser/charset", @@ -99,6 +100,7 @@ go_test( "//pkg/kv", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/sessionctx/variable", diff --git a/pkg/infoschema/builder.go b/pkg/infoschema/builder.go index ca1f0ccebb102..302f8c016f48c 100644 --- a/pkg/infoschema/builder.go +++ b/pkg/infoschema/builder.go @@ -29,8 +29,9 @@ import ( "github.com/pingcap/tidb/pkg/kv" 
"github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" @@ -444,14 +445,14 @@ func (b *Builder) applyTableUpdate(m *meta.Meta, diff *model.SchemaDiff) ([]int6 func getKeptAllocators(diff *model.SchemaDiff, oldAllocs autoid.Allocators) autoid.Allocators { var autoIDChanged, autoRandomChanged bool switch diff.Type { - case model.ActionRebaseAutoID, model.ActionModifyTableAutoIdCache: + case model.ActionRebaseAutoID, model.ActionModifyTableAutoIDCache: autoIDChanged = true case model.ActionRebaseAutoRandomBase: autoRandomChanged = true case model.ActionMultiSchemaChange: for _, t := range diff.SubActionTypes { switch t { - case model.ActionRebaseAutoID, model.ActionModifyTableAutoIdCache: + case model.ActionRebaseAutoID, model.ActionModifyTableAutoIDCache: autoIDChanged = true case model.ActionRebaseAutoRandomBase: autoRandomChanged = true @@ -620,8 +621,8 @@ func (b *Builder) buildAllocsForCreateTable(tp model.ActionType, dbInfo *model.D if len(allocs.Allocs) != 0 { tblVer := autoid.AllocOptionTableInfoVersion(tblInfo.Version) switch tp { - case model.ActionRebaseAutoID, model.ActionModifyTableAutoIdCache: - idCacheOpt := autoid.CustomAutoIncCacheOption(tblInfo.AutoIdCache) + case model.ActionRebaseAutoID, model.ActionModifyTableAutoIDCache: + idCacheOpt := autoid.CustomAutoIncCacheOption(tblInfo.AutoIDCache) // If the allocator type might be AutoIncrementType, create both AutoIncrementType // and RowIDAllocType allocator for it. Because auto id and row id could share the same allocator. // Allocate auto id may route to allocate row id, if row id allocator is nil, the program panic! 
@@ -956,7 +957,7 @@ func (b *Builder) createSchemaTablesForDB(di *model.DBInfo, tableFromMeta tableF item := tableItem{ dbName: di.Name, dbID: di.ID, - tableName: model.NewCIStr(name), + tableName: pmodel.NewCIStr(name), tableID: id, schemaVersion: schemaVersion, } diff --git a/pkg/infoschema/builder_misc.go b/pkg/infoschema/builder_misc.go index f3c10ffc57a3b..f4adc1d4bc3a1 100644 --- a/pkg/infoschema/builder_misc.go +++ b/pkg/infoschema/builder_misc.go @@ -19,7 +19,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) func applyCreatePolicy(b *Builder, m *meta.Meta, diff *model.SchemaDiff) error { diff --git a/pkg/infoschema/builder_test.go b/pkg/infoschema/builder_test.go index cd9d34c45bcfd..55dd5cc51f036 100644 --- a/pkg/infoschema/builder_test.go +++ b/pkg/infoschema/builder_test.go @@ -19,7 +19,7 @@ import ( "testing" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/stretchr/testify/require" ) @@ -57,7 +57,7 @@ func TestGetKeptAllocators(t *testing.T) { expected: []autoid.AllocatorType{autoid.AutoRandomType}, }, { - diff: &model.SchemaDiff{Type: model.ActionModifyTableAutoIdCache}, + diff: &model.SchemaDiff{Type: model.ActionModifyTableAutoIDCache}, expected: []autoid.AllocatorType{autoid.AutoRandomType}, }, { @@ -71,7 +71,7 @@ func TestGetKeptAllocators(t *testing.T) { }, { diff: &model.SchemaDiff{Type: model.ActionMultiSchemaChange, - SubActionTypes: []model.ActionType{model.ActionModifyTableAutoIdCache}}, + SubActionTypes: []model.ActionType{model.ActionModifyTableAutoIDCache}}, expected: []autoid.AllocatorType{autoid.AutoRandomType}, }, { diff --git a/pkg/infoschema/bundle_builder.go b/pkg/infoschema/bundle_builder.go index a54ffc119357d..512d8c9431860 100644 --- a/pkg/infoschema/bundle_builder.go +++ b/pkg/infoschema/bundle_builder.go @@ -19,7 +19,7 
@@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/ddl/placement" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/util/logutil" "go.uber.org/zap" ) diff --git a/pkg/infoschema/context/BUILD.bazel b/pkg/infoschema/context/BUILD.bazel index 8e7c9dcb10509..32f386dd6deea 100644 --- a/pkg/infoschema/context/BUILD.bazel +++ b/pkg/infoschema/context/BUILD.bazel @@ -7,6 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/ddl/placement", + "//pkg/meta/model", "//pkg/parser/model", ], ) diff --git a/pkg/infoschema/context/infoschema.go b/pkg/infoschema/context/infoschema.go index e808da697c9de..d3da8ae3aeb26 100644 --- a/pkg/infoschema/context/infoschema.go +++ b/pkg/infoschema/context/infoschema.go @@ -18,7 +18,8 @@ import ( stdctx "context" "github.com/pingcap/tidb/pkg/ddl/placement" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" ) // MetaOnlyInfoSchema is a workaround. @@ -26,29 +27,29 @@ import ( // But MetaOnlyInfoSchema is widely used for scenes that require meta only, so we give a convenience for that. 
type MetaOnlyInfoSchema interface { SchemaMetaVersion() int64 - SchemaByName(schema model.CIStr) (*model.DBInfo, bool) - SchemaExists(schema model.CIStr) bool - TableInfoByName(schema, table model.CIStr) (*model.TableInfo, error) + SchemaByName(schema pmodel.CIStr) (*model.DBInfo, bool) + SchemaExists(schema pmodel.CIStr) bool + TableInfoByName(schema, table pmodel.CIStr) (*model.TableInfo, error) TableInfoByID(id int64) (*model.TableInfo, bool) FindTableInfoByPartitionID(partitionID int64) (*model.TableInfo, *model.DBInfo, *model.PartitionDefinition) - TableExists(schema, table model.CIStr) bool + TableExists(schema, table pmodel.CIStr) bool SchemaByID(id int64) (*model.DBInfo, bool) SchemaAndTable - AllSchemaNames() []model.CIStr - SchemaSimpleTableInfos(ctx stdctx.Context, schema model.CIStr) ([]*model.TableNameInfo, error) + AllSchemaNames() []pmodel.CIStr + SchemaSimpleTableInfos(ctx stdctx.Context, schema pmodel.CIStr) ([]*model.TableNameInfo, error) Misc } // SchemaAndTable is define for iterating all the schemas and tables in the infoschema. type SchemaAndTable interface { AllSchemas() []*model.DBInfo - SchemaTableInfos(ctx stdctx.Context, schema model.CIStr) ([]*model.TableInfo, error) + SchemaTableInfos(ctx stdctx.Context, schema pmodel.CIStr) ([]*model.TableInfo, error) } // Misc contains the methods that are not closely related to InfoSchema. type Misc interface { - PolicyByName(name model.CIStr) (*model.PolicyInfo, bool) - ResourceGroupByName(name model.CIStr) (*model.ResourceGroupInfo, bool) + PolicyByName(name pmodel.CIStr) (*model.PolicyInfo, bool) + ResourceGroupByName(name pmodel.CIStr) (*model.ResourceGroupInfo, bool) // PlacementBundleByPhysicalTableID is used to get a rule bundle. 
PlacementBundleByPhysicalTableID(id int64) (*placement.Bundle, bool) // AllPlacementBundles is used to get all placement bundles @@ -76,7 +77,7 @@ func (d DBInfoAsInfoSchema) AllSchemas() []*model.DBInfo { } // SchemaTableInfos implement infoschema.SchemaAndTable interface. -func (d DBInfoAsInfoSchema) SchemaTableInfos(ctx stdctx.Context, schema model.CIStr) ([]*model.TableInfo, error) { +func (d DBInfoAsInfoSchema) SchemaTableInfos(ctx stdctx.Context, schema pmodel.CIStr) ([]*model.TableInfo, error) { for _, db := range d { if db.Name == schema { return db.Deprecated.Tables, nil diff --git a/pkg/infoschema/infoschema.go b/pkg/infoschema/infoschema.go index 9051f0236608b..2f7b611c05f13 100644 --- a/pkg/infoschema/infoschema.go +++ b/pkg/infoschema/infoschema.go @@ -26,7 +26,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl/placement" "github.com/pingcap/tidb/pkg/infoschema/context" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/table" @@ -100,7 +101,7 @@ type SchemaAndTableName struct { // MockInfoSchema only serves for test. func MockInfoSchema(tbList []*model.TableInfo) InfoSchema { result := newInfoSchema() - dbInfo := &model.DBInfo{ID: 1, Name: model.NewCIStr("test")} + dbInfo := &model.DBInfo{ID: 1, Name: pmodel.NewCIStr("test")} dbInfo.Deprecated.Tables = tbList tableNames := &schemaTables{ dbInfo: dbInfo, @@ -129,19 +130,19 @@ func MockInfoSchema(tbList []*model.TableInfo) InfoSchema { { // Use a very big ID to avoid conflict with normal tables. 
ID: 9999, - Name: model.NewCIStr("stats_meta"), + Name: pmodel.NewCIStr("stats_meta"), Columns: []*model.ColumnInfo{ { State: model.StatePublic, Offset: 0, - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), ID: 1, }, }, State: model.StatePublic, }, } - mysqlDBInfo := &model.DBInfo{ID: 2, Name: model.NewCIStr("mysql")} + mysqlDBInfo := &model.DBInfo{ID: 2, Name: pmodel.NewCIStr("mysql")} mysqlDBInfo.Deprecated.Tables = tables tableNames = &schemaTables{ dbInfo: mysqlDBInfo, @@ -166,7 +167,7 @@ func MockInfoSchema(tbList []*model.TableInfo) InfoSchema { // MockInfoSchemaWithSchemaVer only serves for test. func MockInfoSchemaWithSchemaVer(tbList []*model.TableInfo, schemaVer int64) InfoSchema { result := newInfoSchema() - dbInfo := &model.DBInfo{ID: 1, Name: model.NewCIStr("test")} + dbInfo := &model.DBInfo{ID: 1, Name: pmodel.NewCIStr("test")} dbInfo.Deprecated.Tables = tbList tableNames := &schemaTables{ dbInfo: dbInfo, @@ -209,7 +210,7 @@ func newInfoSchema() *infoSchema { } } -func (is *infoSchema) SchemaByName(schema model.CIStr) (val *model.DBInfo, ok bool) { +func (is *infoSchema) SchemaByName(schema pmodel.CIStr) (val *model.DBInfo, ok bool) { return is.schemaByName(schema.L) } @@ -221,12 +222,12 @@ func (is *infoSchema) schemaByName(name string) (val *model.DBInfo, ok bool) { return tableNames.dbInfo, true } -func (is *infoSchema) SchemaExists(schema model.CIStr) bool { +func (is *infoSchema) SchemaExists(schema pmodel.CIStr) bool { _, ok := is.schemaMap[schema.L] return ok } -func (is *infoSchema) TableByName(ctx stdctx.Context, schema, table model.CIStr) (t table.Table, err error) { +func (is *infoSchema) TableByName(ctx stdctx.Context, schema, table pmodel.CIStr) (t table.Table, err error) { if tbNames, ok := is.schemaMap[schema.L]; ok { if t, ok = tbNames.tables[table.L]; ok { return @@ -236,13 +237,13 @@ func (is *infoSchema) TableByName(ctx stdctx.Context, schema, table model.CIStr) } // TableInfoByName implements InfoSchema.TableInfoByName 
-func (is *infoSchema) TableInfoByName(schema, table model.CIStr) (*model.TableInfo, error) { +func (is *infoSchema) TableInfoByName(schema, table pmodel.CIStr) (*model.TableInfo, error) { tbl, err := is.TableByName(stdctx.Background(), schema, table) return getTableInfo(tbl), err } // TableIsView indicates whether the schema.table is a view. -func TableIsView(is InfoSchema, schema, table model.CIStr) bool { +func TableIsView(is InfoSchema, schema, table pmodel.CIStr) bool { tbl, err := is.TableByName(stdctx.Background(), schema, table) if err == nil { return tbl.Meta().IsView() @@ -251,7 +252,7 @@ func TableIsView(is InfoSchema, schema, table model.CIStr) bool { } // TableIsSequence indicates whether the schema.table is a sequence. -func TableIsSequence(is InfoSchema, schema, table model.CIStr) bool { +func TableIsSequence(is InfoSchema, schema, table pmodel.CIStr) bool { tbl, err := is.TableByName(stdctx.Background(), schema, table) if err == nil { return tbl.Meta().IsSequence() @@ -259,7 +260,7 @@ func TableIsSequence(is InfoSchema, schema, table model.CIStr) bool { return false } -func (is *infoSchema) TableExists(schema, table model.CIStr) bool { +func (is *infoSchema) TableExists(schema, table pmodel.CIStr) bool { if tbNames, ok := is.schemaMap[schema.L]; ok { if _, ok = tbNames.tables[table.L]; ok { return true @@ -322,7 +323,7 @@ func (is *infoSchema) FindTableInfoByPartitionID( } // SchemaTableInfos implements MetaOnlyInfoSchema. -func (is *infoSchema) SchemaTableInfos(ctx stdctx.Context, schema model.CIStr) ([]*model.TableInfo, error) { +func (is *infoSchema) SchemaTableInfos(ctx stdctx.Context, schema pmodel.CIStr) ([]*model.TableInfo, error) { schemaTables, ok := is.schemaMap[schema.L] if !ok { return nil, nil @@ -335,7 +336,7 @@ func (is *infoSchema) SchemaTableInfos(ctx stdctx.Context, schema model.CIStr) ( } // SchemaSimpleTableInfos implements MetaOnlyInfoSchema. 
-func (is *infoSchema) SchemaSimpleTableInfos(ctx stdctx.Context, schema model.CIStr) ([]*model.TableNameInfo, error) { +func (is *infoSchema) SchemaSimpleTableInfos(ctx stdctx.Context, schema pmodel.CIStr) ([]*model.TableNameInfo, error) { schemaTables, ok := is.schemaMap[schema.L] if !ok { return nil, nil @@ -351,7 +352,7 @@ func (is *infoSchema) SchemaSimpleTableInfos(ctx stdctx.Context, schema model.CI } type tableInfoResult struct { - DBName model.CIStr + DBName pmodel.CIStr TableInfos []*model.TableInfo } @@ -388,8 +389,8 @@ func (is *infoSchema) AllSchemas() (schemas []*model.DBInfo) { return } -func (is *infoSchema) AllSchemaNames() (schemas []model.CIStr) { - rs := make([]model.CIStr, 0, len(is.schemaMap)) +func (is *infoSchema) AllSchemaNames() (schemas []pmodel.CIStr) { + rs := make([]pmodel.CIStr, 0, len(is.schemaMap)) for _, v := range is.schemaMap { rs = append(rs, v.dbInfo.Name) } @@ -437,7 +438,7 @@ func (is *infoSchemaMisc) SchemaMetaVersion() int64 { } // GetSequenceByName gets the sequence by name. 
-func GetSequenceByName(is InfoSchema, schema, sequence model.CIStr) (util.SequenceTable, error) { +func GetSequenceByName(is InfoSchema, schema, sequence pmodel.CIStr) (util.SequenceTable, error) { tbl, err := is.TableByName(stdctx.Background(), schema, sequence) if err != nil { return nil, err @@ -475,7 +476,7 @@ func init() { } infoSchemaDB.Deprecated.Tables = infoSchemaTables RegisterVirtualTable(infoSchemaDB, createInfoSchemaTable) - util.GetSequenceByName = func(is context.MetaOnlyInfoSchema, schema, sequence model.CIStr) (util.SequenceTable, error) { + util.GetSequenceByName = func(is context.MetaOnlyInfoSchema, schema, sequence pmodel.CIStr) (util.SequenceTable, error) { return GetSequenceByName(is.(InfoSchema), schema, sequence) } mock.MockInfoschema = func(tbList []*model.TableInfo) context.MetaOnlyInfoSchema { @@ -494,7 +495,7 @@ func HasAutoIncrementColumn(tbInfo *model.TableInfo) (bool, string) { } // PolicyByName is used to find the policy. -func (is *infoSchemaMisc) PolicyByName(name model.CIStr) (*model.PolicyInfo, bool) { +func (is *infoSchemaMisc) PolicyByName(name pmodel.CIStr) (*model.PolicyInfo, bool) { is.policyMutex.RLock() defer is.policyMutex.RUnlock() t, r := is.policyMap[name.L] @@ -502,7 +503,7 @@ func (is *infoSchemaMisc) PolicyByName(name model.CIStr) (*model.PolicyInfo, boo } // ResourceGroupByName is used to find the resource group. 
-func (is *infoSchemaMisc) ResourceGroupByName(name model.CIStr) (*model.ResourceGroupInfo, bool) { +func (is *infoSchemaMisc) ResourceGroupByName(name pmodel.CIStr) (*model.ResourceGroupInfo, bool) { is.resourceGroupMutex.RLock() defer is.resourceGroupMutex.RUnlock() t, r := is.resourceGroupMap[name.L] @@ -592,7 +593,7 @@ func (is *infoSchemaMisc) deletePolicy(name string) { delete(is.policyMap, name) } -func (is *infoSchemaMisc) addReferredForeignKeys(schema model.CIStr, tbInfo *model.TableInfo) { +func (is *infoSchemaMisc) addReferredForeignKeys(schema pmodel.CIStr, tbInfo *model.TableInfo) { for _, fk := range tbInfo.ForeignKeys { if fk.Version < model.FKVersion1 { continue @@ -632,7 +633,7 @@ func (is *infoSchemaMisc) addReferredForeignKeys(schema model.CIStr, tbInfo *mod } } -func (is *infoSchemaMisc) deleteReferredForeignKeys(schema model.CIStr, tbInfo *model.TableInfo) { +func (is *infoSchemaMisc) deleteReferredForeignKeys(schema pmodel.CIStr, tbInfo *model.TableInfo) { for _, fk := range tbInfo.ForeignKeys { if fk.Version < model.FKVersion1 { continue @@ -677,7 +678,7 @@ func NewSessionTables() *SessionTables { } // TableByName get table by name -func (is *SessionTables) TableByName(ctx stdctx.Context, schema, table model.CIStr) (table.Table, bool) { +func (is *SessionTables) TableByName(ctx stdctx.Context, schema, table pmodel.CIStr) (table.Table, bool) { if tbNames, ok := is.schemaMap[schema.L]; ok { if t, ok := tbNames.tables[table.L]; ok { return t, true @@ -687,7 +688,7 @@ func (is *SessionTables) TableByName(ctx stdctx.Context, schema, table model.CIS } // TableExists check if table with the name exists -func (is *SessionTables) TableExists(schema, table model.CIStr) (ok bool) { +func (is *SessionTables) TableExists(schema, table pmodel.CIStr) (ok bool) { _, ok = is.TableByName(stdctx.Background(), schema, table) return } @@ -718,7 +719,7 @@ func (is *SessionTables) AddTable(db *model.DBInfo, tbl table.Table) error { } // RemoveTable remove a table 
-func (is *SessionTables) RemoveTable(schema, table model.CIStr) (exist bool) { +func (is *SessionTables) RemoveTable(schema, table pmodel.CIStr) (exist bool) { tbls := is.schemaTables(schema) if tbls == nil { return false @@ -763,7 +764,7 @@ func (is *SessionTables) ensureSchema(db *model.DBInfo) *schemaTables { return tbls } -func (is *SessionTables) schemaTables(schema model.CIStr) *schemaTables { +func (is *SessionTables) schemaTables(schema pmodel.CIStr) *schemaTables { if is.schemaMap == nil { return nil } @@ -786,7 +787,7 @@ type SessionExtendedInfoSchema struct { } // TableByName implements InfoSchema.TableByName -func (ts *SessionExtendedInfoSchema) TableByName(ctx stdctx.Context, schema, table model.CIStr) (table.Table, error) { +func (ts *SessionExtendedInfoSchema) TableByName(ctx stdctx.Context, schema, table pmodel.CIStr) (table.Table, error) { if ts.LocalTemporaryTables != nil { if tbl, ok := ts.LocalTemporaryTables.TableByName(ctx, schema, table); ok { return tbl, nil @@ -803,7 +804,7 @@ func (ts *SessionExtendedInfoSchema) TableByName(ctx stdctx.Context, schema, tab } // TableInfoByName implements InfoSchema.TableInfoByName -func (ts *SessionExtendedInfoSchema) TableInfoByName(schema, table model.CIStr) (*model.TableInfo, error) { +func (ts *SessionExtendedInfoSchema) TableInfoByName(schema, table pmodel.CIStr) (*model.TableInfo, error) { tbl, err := ts.TableByName(stdctx.Background(), schema, table) return getTableInfo(tbl), err } diff --git a/pkg/infoschema/infoschema_nokit_test.go b/pkg/infoschema/infoschema_nokit_test.go index add60b0eb6c3e..35aefe7961680 100644 --- a/pkg/infoschema/infoschema_nokit_test.go +++ b/pkg/infoschema/infoschema_nokit_test.go @@ -17,7 +17,8 @@ package infoschema import ( "testing" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) @@ -31,11 +32,11 @@ func (is *infoschemaV2) HasCache(tableID 
int64, schemaVersion int64) bool { func TestInfoSchemaAddDel(t *testing.T) { is := newInfoSchema() is.addSchema(&schemaTables{ - dbInfo: &model.DBInfo{ID: 1, Name: model.NewCIStr("test")}, + dbInfo: &model.DBInfo{ID: 1, Name: pmodel.NewCIStr("test")}, }) require.Contains(t, is.schemaMap, "test") require.Contains(t, is.schemaID2Name, int64(1)) - is.delSchema(&model.DBInfo{ID: 1, Name: model.NewCIStr("test")}) + is.delSchema(&model.DBInfo{ID: 1, Name: pmodel.NewCIStr("test")}) require.Empty(t, is.schemaMap) require.Empty(t, is.schemaID2Name) } diff --git a/pkg/infoschema/infoschema_test.go b/pkg/infoschema/infoschema_test.go index 226fd33949244..d79af4fc72e1e 100644 --- a/pkg/infoschema/infoschema_test.go +++ b/pkg/infoschema/infoschema_test.go @@ -30,7 +30,8 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" @@ -48,11 +49,11 @@ func TestBasic(t *testing.T) { require.NoError(t, err) }() - dbName := model.NewCIStr("Test") - tbName := model.NewCIStr("T") - colName := model.NewCIStr("A") - idxName := model.NewCIStr("idx") - noexist := model.NewCIStr("noexist") + dbName := pmodel.NewCIStr("Test") + tbName := pmodel.NewCIStr("T") + colName := pmodel.NewCIStr("A") + idxName := pmodel.NewCIStr("idx") + noexist := pmodel.NewCIStr("noexist") colID, err := internal.GenGlobalID(re.Store()) require.NoError(t, err) @@ -220,7 +221,7 @@ func TestBasic(t *testing.T) { require.Len(t, tblInfos, 0) // Make sure partitions table exists - tb, err = is.TableByName(context.Background(), model.NewCIStr("information_schema"), model.NewCIStr("partitions")) + tb, err = is.TableByName(context.Background(), pmodel.NewCIStr("information_schema"), 
pmodel.NewCIStr("partitions")) require.NoError(t, err) require.NotNil(t, tb) @@ -247,7 +248,7 @@ func TestBasic(t *testing.T) { func TestMockInfoSchema(t *testing.T) { tblID := int64(1234) - tblName := model.NewCIStr("tbl_m") + tblName := pmodel.NewCIStr("tbl_m") tableInfo := &model.TableInfo{ ID: tblID, Name: tblName, @@ -256,7 +257,7 @@ func TestMockInfoSchema(t *testing.T) { colInfo := &model.ColumnInfo{ State: model.StatePublic, Offset: 0, - Name: model.NewCIStr("h"), + Name: pmodel.NewCIStr("h"), FieldType: *types.NewFieldType(mysql.TypeLong), ID: 1, } @@ -333,7 +334,7 @@ func TestInfoTables(t *testing.T) { "RESOURCE_GROUPS", } for _, tbl := range infoTables { - tb, err1 := is.TableByName(context.Background(), util.InformationSchemaName, model.NewCIStr(tbl)) + tb, err1 := is.TableByName(context.Background(), util.InformationSchemaName, pmodel.NewCIStr(tbl)) require.Nil(t, err1) require.NotNil(t, tb) } @@ -348,7 +349,7 @@ func TestBuildSchemaWithGlobalTemporaryTable(t *testing.T) { dbInfo := &model.DBInfo{ ID: 1, - Name: model.NewCIStr("test"), + Name: pmodel.NewCIStr("test"), State: model.StatePublic, } dbInfo.Deprecated.Tables = []*model.TableInfo{} @@ -359,7 +360,7 @@ func TestBuildSchemaWithGlobalTemporaryTable(t *testing.T) { require.NoError(t, err) is := builder.Build(math.MaxUint64) require.False(t, is.HasTemporaryTable()) - db, ok := is.SchemaByName(model.NewCIStr("test")) + db, ok := is.SchemaByName(pmodel.NewCIStr("test")) require.True(t, ok) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) err = kv.RunInNewTxn(ctx, re.Store(), true, func(ctx context.Context, txn kv.Transaction) error { @@ -454,7 +455,7 @@ func TestBuildSchemaWithGlobalTemporaryTable(t *testing.T) { // full load data = infoschema.NewData() - newDB, ok := newIS.SchemaByName(model.NewCIStr("test")) + newDB, ok := newIS.SchemaByName(pmodel.NewCIStr("test")) tblInfos, err := newIS.SchemaTableInfos(context.Background(), newDB.Name) require.NoError(t, err) 
newDB.Deprecated.Tables = tblInfos @@ -534,13 +535,13 @@ func TestBuildBundle(t *testing.T) { }() is := domain.GetDomain(tk.Session()).InfoSchema() - db, ok := is.SchemaByName(model.NewCIStr("test")) + db, ok := is.SchemaByName(pmodel.NewCIStr("test")) require.True(t, ok) - tbl1, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl1, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) - tbl2, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tbl2, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) var p1 model.PartitionDefinition @@ -608,7 +609,7 @@ func TestWithRefillOption(t *testing.T) { tk.MustExec("create table t1 (id int)") tk.MustQuery("select * from t1").Check(testkit.Rows()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) tblInfo := tbl.Meta() ok, v2 := infoschema.IsV2(is) @@ -631,7 +632,7 @@ func TestWithRefillOption(t *testing.T) { for i, testCase := range testCases { // Mock t1 schema cache been evicted. 
- v2.EvictTable(model.NewCIStr("test"), model.NewCIStr("t1")) + v2.EvictTable(pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) // Test the API switch testCase.OP { @@ -639,7 +640,7 @@ func TestWithRefillOption(t *testing.T) { _, found := is.TableByID(testCase.ctx, tblInfo.ID) require.True(t, found) case "TableByName": - _, err := is.TableByName(testCase.ctx, model.NewCIStr("test"), model.NewCIStr("t1")) + _, err := is.TableByName(testCase.ctx, pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) } @@ -661,7 +662,7 @@ func TestLocalTemporaryTables(t *testing.T) { require.NoError(t, err) return &model.DBInfo{ ID: schemaID, - Name: model.NewCIStr(schemaName), + Name: pmodel.NewCIStr(schemaName), State: model.StatePublic, } } @@ -672,7 +673,7 @@ func TestLocalTemporaryTables(t *testing.T) { colInfo := &model.ColumnInfo{ ID: colID, - Name: model.NewCIStr("col1"), + Name: pmodel.NewCIStr("col1"), Offset: 0, FieldType: *types.NewFieldType(mysql.TypeLonglong), State: model.StatePublic, @@ -683,7 +684,7 @@ func TestLocalTemporaryTables(t *testing.T) { tblInfo := &model.TableInfo{ ID: tbID, - Name: model.NewCIStr(tbName), + Name: pmodel.NewCIStr(tbName), Columns: []*model.ColumnInfo{colInfo}, Indices: []*model.IndexInfo{}, State: model.StatePublic, @@ -698,7 +699,7 @@ func TestLocalTemporaryTables(t *testing.T) { } assertTableByName := func(sc *infoschema.SessionTables, schemaName, tableName string, schema *model.DBInfo, tb table.Table) { - got, ok := sc.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr(tableName)) + got, ok := sc.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr(tableName)) if tb == nil { require.Nil(t, schema) require.False(t, ok) @@ -711,7 +712,7 @@ func TestLocalTemporaryTables(t *testing.T) { } assertTableExists := func(sc *infoschema.SessionTables, schemaName, tableName string, exists bool) { - got := sc.TableExists(model.NewCIStr(schemaName), model.NewCIStr(tableName)) + got := 
sc.TableExists(pmodel.NewCIStr(schemaName), pmodel.NewCIStr(tableName)) require.Equal(t, exists, got) } @@ -816,11 +817,11 @@ func TestLocalTemporaryTables(t *testing.T) { assertTableByName(sc, db1.Name.L, tb11.Meta().Name.L, db1, tb11) // delete some tables - require.True(t, sc.RemoveTable(model.NewCIStr("db1"), model.NewCIStr("tb1"))) - require.True(t, sc.RemoveTable(model.NewCIStr("Db2"), model.NewCIStr("tB2"))) + require.True(t, sc.RemoveTable(pmodel.NewCIStr("db1"), pmodel.NewCIStr("tb1"))) + require.True(t, sc.RemoveTable(pmodel.NewCIStr("Db2"), pmodel.NewCIStr("tB2"))) tb22.Meta().DBID = 0 // SchemaByTable will get incorrect result if not reset here. - require.False(t, sc.RemoveTable(model.NewCIStr("db1"), model.NewCIStr("tbx"))) - require.False(t, sc.RemoveTable(model.NewCIStr("dbx"), model.NewCIStr("tbx"))) + require.False(t, sc.RemoveTable(pmodel.NewCIStr("db1"), pmodel.NewCIStr("tbx"))) + require.False(t, sc.RemoveTable(pmodel.NewCIStr("dbx"), pmodel.NewCIStr("tbx"))) // test non exist tables by name for _, c := range []struct{ dbName, tbName string }{ @@ -973,14 +974,14 @@ func TestInfoSchemaCreateTableLike(t *testing.T) { tk.MustExec("create table t1 like information_schema.variables_info;") tk.MustExec("alter table t1 add column c varchar(32);") is := domain.GetDomain(tk.Session()).InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) tblInfo := tbl.Meta() require.Equal(t, tblInfo.Columns[8].Name.O, "c") require.Equal(t, tblInfo.Columns[8].ID, int64(9)) tk.MustExec("alter table t1 add index idx(c);") is = domain.GetDomain(tk.Session()).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) tblInfo = 
tbl.Meta() require.Equal(t, tblInfo.Indices[0].Name.O, "idx") @@ -990,14 +991,14 @@ func TestInfoSchemaCreateTableLike(t *testing.T) { tk.MustExec("create table t2 like metrics_schema.up;") tk.MustExec("alter table t2 add column c varchar(32);") is = domain.GetDomain(tk.Session()).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) tblInfo = tbl.Meta() require.Equal(t, tblInfo.Columns[4].Name.O, "c") require.Equal(t, tblInfo.Columns[4].ID, int64(5)) tk.MustExec("alter table t2 add index idx(c);") is = domain.GetDomain(tk.Session()).InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) tblInfo = tbl.Meta() require.Equal(t, tblInfo.Indices[0].Name.O, "idx") @@ -1166,7 +1167,7 @@ func (tc *infoschemaTestContext) runModifyTable(tblName string, tp model.ActionT } func (tc *infoschemaTestContext) runAddColumn(tblName string) { - tbl, err := tc.is.TableByName(context.Background(), tc.dbInfo.Name, model.NewCIStr(tblName)) + tbl, err := tc.is.TableByName(context.Background(), tc.dbInfo.Name, pmodel.NewCIStr(tblName)) require.NoError(tc.t, err) tc.addColumn(tbl.Meta()) @@ -1178,7 +1179,7 @@ func (tc *infoschemaTestContext) runAddColumn(tblName string) { } func (tc *infoschemaTestContext) addColumn(tblInfo *model.TableInfo) { - colName := model.NewCIStr("b") + colName := pmodel.NewCIStr("b") colID, err := internal.GenGlobalID(tc.re.Store()) require.NoError(tc.t, err) colInfo := &model.ColumnInfo{ @@ -1199,7 +1200,7 @@ func (tc *infoschemaTestContext) addColumn(tblInfo *model.TableInfo) { } func (tc *infoschemaTestContext) runModifyColumn(tblName string) { - tbl, err := tc.is.TableByName(context.Background(), tc.dbInfo.Name, 
model.NewCIStr(tblName)) + tbl, err := tc.is.TableByName(context.Background(), tc.dbInfo.Name, pmodel.NewCIStr(tblName)) require.NoError(tc.t, err) tc.modifyColumn(tbl.Meta()) @@ -1300,7 +1301,7 @@ func TestApplyDiff(t *testing.T) { tc.runModifySchemaCharsetAndCollate("utf8mb4", "utf8mb4_general_ci") tc.runModifySchemaCharsetAndCollate("utf8", "utf8_unicode_ci") tc.runModifySchemaDefaultPlacement(&model.PolicyRefInfo{ - Name: model.NewCIStr("test"), + Name: pmodel.NewCIStr("test"), }) tc.runCreateTables([]string{"test1", "test2"}) } diff --git a/pkg/infoschema/infoschema_v2.go b/pkg/infoschema/infoschema_v2.go index f61b7eaa90383..62667679f980b 100644 --- a/pkg/infoschema/infoschema_v2.go +++ b/pkg/infoschema/infoschema_v2.go @@ -29,8 +29,9 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/util" @@ -42,9 +43,9 @@ import ( // tableItem is the btree item sorted by name or by id. type tableItem struct { - dbName model.CIStr + dbName pmodel.CIStr dbID int64 - tableName model.CIStr + tableName pmodel.CIStr tableID int64 schemaVersion int64 tomb bool @@ -59,7 +60,7 @@ type schemaItem struct { type schemaIDName struct { schemaVersion int64 id int64 - name model.CIStr + name pmodel.CIStr tomb bool } @@ -123,7 +124,7 @@ type Data struct { } type tableInfoItem struct { - dbName model.CIStr + dbName pmodel.CIStr tableID int64 schemaVersion int64 tableInfo *model.TableInfo @@ -647,8 +648,8 @@ func (is *infoschemaV2) TableByID(ctx context.Context, id int64) (val table.Tabl // TableItem is exported from tableItem. 
type TableItem struct { - DBName model.CIStr - TableName model.CIStr + DBName pmodel.CIStr + TableName pmodel.CIStr } // IterateAllTableItems is used for special performance optimization. @@ -681,7 +682,7 @@ func IsSpecialDB(dbName string) bool { } // EvictTable is exported for testing only. -func (is *infoschemaV2) EvictTable(schema, tbl model.CIStr) { +func (is *infoschemaV2) EvictTable(schema, tbl pmodel.CIStr) { eq := func(a, b *tableItem) bool { return a.dbName == b.dbName && a.tableName == b.tableName } itm, ok := search(is.byName, is.infoSchema.schemaMetaVersion, tableItem{dbName: schema, tableName: tbl, schemaVersion: math.MaxInt64}, eq) if !ok { @@ -715,7 +716,7 @@ func (h *tableByNameHelper) onItem(item tableItem) bool { // TableByName implements the InfoSchema interface. // When schema cache miss, it will fetch the TableInfo from TikV and refill cache. -func (is *infoschemaV2) TableByName(ctx context.Context, schema, tbl model.CIStr) (t table.Table, err error) { +func (is *infoschemaV2) TableByName(ctx context.Context, schema, tbl pmodel.CIStr) (t table.Table, err error) { if IsSpecialDB(schema.L) { if raw, ok := is.specials.Load(schema.L); ok { tbNames := raw.(*schemaTables) @@ -765,7 +766,7 @@ func (is *infoschemaV2) TableByName(ctx context.Context, schema, tbl model.CIStr } // TableInfoByName implements InfoSchema.TableInfoByName -func (is *infoschemaV2) TableInfoByName(schema, table model.CIStr) (*model.TableInfo, error) { +func (is *infoschemaV2) TableInfoByName(schema, table pmodel.CIStr) (*model.TableInfo, error) { tbl, err := is.TableByName(context.Background(), schema, table) return getTableInfo(tbl), err } @@ -777,7 +778,7 @@ func (is *infoschemaV2) TableInfoByID(id int64) (*model.TableInfo, bool) { } // SchemaTableInfos implements MetaOnlyInfoSchema. 
-func (is *infoschemaV2) SchemaTableInfos(ctx context.Context, schema model.CIStr) ([]*model.TableInfo, error) { +func (is *infoschemaV2) SchemaTableInfos(ctx context.Context, schema pmodel.CIStr) ([]*model.TableInfo, error) { if IsSpecialDB(schema.L) { raw, ok := is.Data.specials.Load(schema.L) if ok { @@ -823,7 +824,7 @@ retry: } // SchemaSimpleTableInfos implements MetaOnlyInfoSchema. -func (is *infoschemaV2) SchemaSimpleTableInfos(ctx context.Context, schema model.CIStr) ([]*model.TableNameInfo, error) { +func (is *infoschemaV2) SchemaSimpleTableInfos(ctx context.Context, schema pmodel.CIStr) ([]*model.TableNameInfo, error) { if IsSpecialDB(schema.L) { raw, ok := is.Data.specials.Load(schema.L) if ok { @@ -880,7 +881,7 @@ func (is *infoschemaV2) FindTableInfoByPartitionID( return getTableInfo(tbl), db, partDef } -func (is *infoschemaV2) SchemaByName(schema model.CIStr) (val *model.DBInfo, ok bool) { +func (is *infoschemaV2) SchemaByName(schema pmodel.CIStr) (val *model.DBInfo, ok bool) { if IsSpecialDB(schema.L) { raw, ok := is.Data.specials.Load(schema.L) if !ok { @@ -945,15 +946,15 @@ func (is *infoschemaV2) AllSchemas() (schemas []*model.DBInfo) { return } -func (is *infoschemaV2) AllSchemaNames() []model.CIStr { - rs := make([]model.CIStr, 0, is.Data.schemaMap.Len()) +func (is *infoschemaV2) AllSchemaNames() []pmodel.CIStr { + rs := make([]pmodel.CIStr, 0, is.Data.schemaMap.Len()) is.allSchemas(func(di *model.DBInfo) { rs = append(rs, di.Name) }) return rs } -func (is *infoschemaV2) SchemaExists(schema model.CIStr) bool { +func (is *infoschemaV2) SchemaExists(schema pmodel.CIStr) bool { _, ok := is.SchemaByName(schema) return ok } @@ -1007,7 +1008,7 @@ func (is *infoschemaV2) FindTableByPartitionID(partitionID int64) (table.Table, return tbl, dbInfo, def } -func (is *infoschemaV2) TableExists(schema, table model.CIStr) bool { +func (is *infoschemaV2) TableExists(schema, table pmodel.CIStr) bool { _, err := is.TableByName(context.Background(), schema, table) 
return err == nil } @@ -1029,7 +1030,7 @@ func (is *infoschemaV2) SchemaByID(id int64) (*model.DBInfo, bool) { return st.dbInfo, true } var ok bool - var name model.CIStr + var name pmodel.CIStr is.Data.schemaID2Name.Descend(schemaIDName{ id: id, schemaVersion: math.MaxInt64, diff --git a/pkg/infoschema/infoschema_v2_test.go b/pkg/infoschema/infoschema_v2_test.go index 50c5d71525ba7..8ba6faeff1b70 100644 --- a/pkg/infoschema/infoschema_v2_test.go +++ b/pkg/infoschema/infoschema_v2_test.go @@ -23,7 +23,8 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/stretchr/testify/require" ) @@ -35,8 +36,8 @@ func TestV2Basic(t *testing.T) { }() is := NewInfoSchemaV2(r, nil, NewData()) - schemaName := model.NewCIStr("testDB") - tableName := model.NewCIStr("test") + schemaName := pmodel.NewCIStr("testDB") + tableName := pmodel.NewCIStr("test") dbInfo := internal.MockDBInfo(t, r.Store(), schemaName.O) is.Data.addDB(1, dbInfo) @@ -70,7 +71,7 @@ func TestV2Basic(t *testing.T) { require.NoError(t, err) require.Same(t, gotTblInfo, getTableInfo.Meta()) - gotTblInfo, err = is.TableInfoByName(schemaName, model.NewCIStr("notexist")) + gotTblInfo, err = is.TableInfoByName(schemaName, pmodel.NewCIStr("notexist")) require.Error(t, err) require.Nil(t, gotTblInfo) @@ -111,11 +112,11 @@ func TestV2Basic(t *testing.T) { require.Equal(t, 1, len(tblInfos)) require.Equal(t, tables[0], tblInfos[0]) - tables, err = is.SchemaTableInfos(context.Background(), model.NewCIStr("notexist")) + tables, err = is.SchemaTableInfos(context.Background(), pmodel.NewCIStr("notexist")) require.NoError(t, err) require.Equal(t, 0, len(tables)) - tblInfos, err = is.SchemaTableInfos(context.Background(), model.NewCIStr("notexist")) + tblInfos, err = 
is.SchemaTableInfos(context.Background(), pmodel.NewCIStr("notexist")) require.NoError(t, err) require.Equal(t, 0, len(tblInfos)) @@ -250,8 +251,8 @@ func TestBundles(t *testing.T) { r.Store().Close() }() - schemaName := model.NewCIStr("testDB") - tableName := model.NewCIStr("test") + schemaName := pmodel.NewCIStr("testDB") + tableName := pmodel.NewCIStr("test") builder := NewBuilder(r, nil, NewData(), variable.SchemaCacheSize.Load() > 0) err := builder.InitWithDBInfos(nil, nil, nil, 1) require.NoError(t, err) @@ -371,8 +372,8 @@ func TestReferredFKInfo(t *testing.T) { r.Store().Close() }() - schemaName := model.NewCIStr("testDB") - tableName := model.NewCIStr("testTable") + schemaName := pmodel.NewCIStr("testDB") + tableName := pmodel.NewCIStr("testTable") builder := NewBuilder(r, nil, NewData(), variable.SchemaCacheSize.Load() > 0) err := builder.InitWithDBInfos(nil, nil, nil, 1) require.NoError(t, err) @@ -392,9 +393,9 @@ func TestReferredFKInfo(t *testing.T) { tblInfo := internal.MockTableInfo(t, r.Store(), tableName.O) tblInfo.ForeignKeys = []*model.FKInfo{{ ID: 1, - Name: model.NewCIStr("fk_1"), - RefSchema: model.NewCIStr("t1"), - RefTable: model.NewCIStr("parent"), + Name: pmodel.NewCIStr("fk_1"), + RefSchema: pmodel.NewCIStr("t1"), + RefTable: pmodel.NewCIStr("parent"), Version: 1, }} internal.AddTable(t, r.Store(), dbInfo, tblInfo) @@ -411,9 +412,9 @@ func TestReferredFKInfo(t *testing.T) { // check ReferredFKInfo after add foreign key tblInfo.ForeignKeys = append(tblInfo.ForeignKeys, &model.FKInfo{ ID: 2, - Name: model.NewCIStr("fk_2"), - RefSchema: model.NewCIStr("t1"), - RefTable: model.NewCIStr("parent"), + Name: pmodel.NewCIStr("fk_2"), + RefSchema: pmodel.NewCIStr("t1"), + RefTable: pmodel.NewCIStr("parent"), Version: 1, }) internal.UpdateTable(t, r.Store(), dbInfo, tblInfo) @@ -477,8 +478,8 @@ func TestSpecialAttributeCorrectnessInSchemaChange(t *testing.T) { r.Store().Close() }() - schemaName := model.NewCIStr("testDB") - tableName := 
model.NewCIStr("testTable") + schemaName := pmodel.NewCIStr("testDB") + tableName := pmodel.NewCIStr("testTable") builder := NewBuilder(r, nil, NewData(), variable.SchemaCacheSize.Load() > 0) err := builder.InitWithDBInfos(nil, nil, nil, 1) require.NoError(t, err) @@ -512,12 +513,12 @@ func TestSpecialAttributeCorrectnessInSchemaChange(t *testing.T) { // tests partition info correctness in schema change tblInfo.Partition = &model.PartitionInfo{ Expr: "aa+1", - Columns: []model.CIStr{ - model.NewCIStr("aa"), + Columns: []pmodel.CIStr{ + pmodel.NewCIStr("aa"), }, Definitions: []model.PartitionDefinition{ - {ID: 1, Name: model.NewCIStr("p1")}, - {ID: 2, Name: model.NewCIStr("p2")}, + {ID: 1, Name: pmodel.NewCIStr("p1")}, + {ID: 2, Name: pmodel.NewCIStr("p2")}, }, Enable: true, DDLState: model.StatePublic, @@ -533,7 +534,7 @@ func TestSpecialAttributeCorrectnessInSchemaChange(t *testing.T) { // test placement policy correctness in schema change tblInfo.PlacementPolicyRef = &model.PolicyRefInfo{ ID: 1, - Name: model.NewCIStr("p3"), + Name: pmodel.NewCIStr("p3"), } tblInfo1 = updateTableSpecialAttribute(t, dbInfo, tblInfo, builder, r, model.ActionAlterTablePlacement, 5, PlacementPolicyAttribute, true) require.Equal(t, tblInfo.PlacementPolicyRef, tblInfo1.PlacementPolicyRef) @@ -553,7 +554,7 @@ func TestSpecialAttributeCorrectnessInSchemaChange(t *testing.T) { // test table lock correctness in schema change tblInfo.Lock = &model.TableLockInfo{ - Tp: model.TableLockRead, + Tp: pmodel.TableLockRead, State: model.TableLockStatePublic, TS: 1, } @@ -565,11 +566,11 @@ func TestSpecialAttributeCorrectnessInSchemaChange(t *testing.T) { // test foreign key correctness in schema change tblInfo.ForeignKeys = []*model.FKInfo{{ ID: 1, - Name: model.NewCIStr("fk_1"), - RefSchema: model.NewCIStr("t"), - RefTable: model.NewCIStr("t"), - RefCols: []model.CIStr{model.NewCIStr("a")}, - Cols: []model.CIStr{model.NewCIStr("t_a")}, + Name: pmodel.NewCIStr("fk_1"), + RefSchema: 
pmodel.NewCIStr("t"), + RefTable: pmodel.NewCIStr("t"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("a")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("t_a")}, State: model.StateWriteOnly, }} tblInfo1 = updateTableSpecialAttribute(t, dbInfo, tblInfo, builder, r, model.ActionAddForeignKey, 11, ForeignKeysAttribute, true) @@ -584,8 +585,8 @@ func TestDataStructFieldsCorrectnessInSchemaChange(t *testing.T) { r.Store().Close() }() - schemaName := model.NewCIStr("testDB") - tableName := model.NewCIStr("testTable") + schemaName := pmodel.NewCIStr("testDB") + tableName := pmodel.NewCIStr("testTable") builder := NewBuilder(r, nil, NewData(), variable.SchemaCacheSize.Load() > 0) err := builder.InitWithDBInfos(nil, nil, nil, 1) require.NoError(t, err) @@ -628,8 +629,8 @@ func TestDataStructFieldsCorrectnessInSchemaChange(t *testing.T) { require.Equal(t, v2.Data.pid2tid.Len(), 0) tblInfo.Partition = &model.PartitionInfo{ Definitions: []model.PartitionDefinition{ - {ID: 1, Name: model.NewCIStr("p1")}, - {ID: 2, Name: model.NewCIStr("p2")}, + {ID: 1, Name: pmodel.NewCIStr("p1")}, + {ID: 2, Name: pmodel.NewCIStr("p2")}, }, Enable: true, DDLState: model.StatePublic, diff --git a/pkg/infoschema/interface.go b/pkg/infoschema/interface.go index 71796c0329923..e6922e5e1ac53 100644 --- a/pkg/infoschema/interface.go +++ b/pkg/infoschema/interface.go @@ -18,7 +18,8 @@ import ( stdctx "context" "github.com/pingcap/tidb/pkg/infoschema/context" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/table" ) @@ -27,7 +28,7 @@ import ( // InfoSchema is read-only, and the returned value is a copy. 
type InfoSchema interface { context.MetaOnlyInfoSchema - TableByName(ctx stdctx.Context, schema, table model.CIStr) (table.Table, error) + TableByName(ctx stdctx.Context, schema, table pmodel.CIStr) (table.Table, error) TableByID(ctx stdctx.Context, id int64) (table.Table, bool) FindTableByPartitionID(partitionID int64) (table.Table, *model.DBInfo, *model.PartitionDefinition) ListTablesWithSpecialAttribute(filter specialAttributeFilter) []tableInfoResult diff --git a/pkg/infoschema/internal/BUILD.bazel b/pkg/infoschema/internal/BUILD.bazel index b1bc2989c0704..978699d88c52d 100644 --- a/pkg/infoschema/internal/BUILD.bazel +++ b/pkg/infoschema/internal/BUILD.bazel @@ -12,6 +12,7 @@ go_library( "//pkg/kv", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/store/mockstore", diff --git a/pkg/infoschema/internal/testkit.go b/pkg/infoschema/internal/testkit.go index c4c60a064546e..7c6ec968d45b5 100644 --- a/pkg/infoschema/internal/testkit.go +++ b/pkg/infoschema/internal/testkit.go @@ -23,7 +23,8 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/table" @@ -152,7 +153,7 @@ func MockDBInfo(t testing.TB, store kv.Storage, DBName string) *model.DBInfo { require.NoError(t, err) dbInfo := &model.DBInfo{ ID: id, - Name: model.NewCIStr(DBName), + Name: pmodel.NewCIStr(DBName), State: model.StatePublic, } dbInfo.Deprecated.Tables = []*model.TableInfo{} @@ -165,7 +166,7 @@ func MockTableInfo(t testing.TB, store kv.Storage, tblName string) *model.TableI require.NoError(t, err) colInfo := &model.ColumnInfo{ ID: colID, - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), Offset: 0, FieldType: 
*types.NewFieldType(mysql.TypeLonglong), State: model.StatePublic, @@ -176,7 +177,7 @@ func MockTableInfo(t testing.TB, store kv.Storage, tblName string) *model.TableI return &model.TableInfo{ ID: tblID, - Name: model.NewCIStr(tblName), + Name: pmodel.NewCIStr(tblName), Columns: []*model.ColumnInfo{colInfo}, State: model.StatePublic, } @@ -195,7 +196,7 @@ func MockResourceGroupInfo(t *testing.T, store kv.Storage, groupName string) *mo require.NoError(t, err) return &model.ResourceGroupInfo{ ID: id, - Name: model.NewCIStr(groupName), + Name: pmodel.NewCIStr(groupName), } } @@ -205,7 +206,7 @@ func MockPolicyInfo(t *testing.T, store kv.Storage, policyName string) *model.Po require.NoError(t, err) return &model.PolicyInfo{ ID: id, - Name: model.NewCIStr(policyName), + Name: pmodel.NewCIStr(policyName), } } @@ -215,7 +216,7 @@ func MockPolicyRefInfo(t *testing.T, store kv.Storage, policyName string) *model require.NoError(t, err) return &model.PolicyRefInfo{ ID: id, - Name: model.NewCIStr(policyName), + Name: pmodel.NewCIStr(policyName), } } diff --git a/pkg/infoschema/metrics_schema.go b/pkg/infoschema/metrics_schema.go index 3efd2724f4b3e..b6d5a304eb7f8 100644 --- a/pkg/infoschema/metrics_schema.go +++ b/pkg/infoschema/metrics_schema.go @@ -24,7 +24,8 @@ import ( "github.com/ngaut/pools" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/util" @@ -55,7 +56,7 @@ func init() { } dbInfo := &model.DBInfo{ ID: dbID, - Name: model.NewCIStr(util.MetricSchemaName.O), + Name: pmodel.NewCIStr(util.MetricSchemaName.O), Charset: mysql.DefaultCharset, Collate: mysql.DefaultCollationName, } diff --git a/pkg/infoschema/perfschema/BUILD.bazel b/pkg/infoschema/perfschema/BUILD.bazel index 0c3b81094b9e8..e002f5168387d 
100644 --- a/pkg/infoschema/perfschema/BUILD.bazel +++ b/pkg/infoschema/perfschema/BUILD.bazel @@ -15,9 +15,9 @@ go_library( "//pkg/infoschema", "//pkg/kv", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/sessionctx", diff --git a/pkg/infoschema/perfschema/init.go b/pkg/infoschema/perfschema/init.go index f10651e84ef47..d79593b129f36 100644 --- a/pkg/infoschema/perfschema/init.go +++ b/pkg/infoschema/perfschema/init.go @@ -22,9 +22,9 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/util" ) diff --git a/pkg/infoschema/perfschema/tables.go b/pkg/infoschema/perfschema/tables.go index d242bd7560a71..f43ca58c96bd5 100644 --- a/pkg/infoschema/perfschema/tables.go +++ b/pkg/infoschema/perfschema/tables.go @@ -30,7 +30,7 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/table" diff --git a/pkg/infoschema/tables.go b/pkg/infoschema/tables.go index 8d7ce2cfef688..acff3937252aa 100644 --- a/pkg/infoschema/tables.go +++ b/pkg/infoschema/tables.go @@ -40,9 +40,10 @@ import ( "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel 
"github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/privilege" @@ -375,7 +376,7 @@ func buildColumnInfo(col columnInfo) *model.ColumnInfo { fieldType.SetFlag(col.flag) fieldType.SetElems(col.enumElems) return &model.ColumnInfo{ - Name: model.NewCIStr(col.name), + Name: pmodel.NewCIStr(col.name), FieldType: fieldType, State: model.StatePublic, DefaultValue: col.deflt, @@ -387,7 +388,7 @@ func buildTableMeta(tableName string, cs []columnInfo) *model.TableInfo { cols := make([]*model.ColumnInfo, 0, len(cs)) primaryIndices := make([]*model.IndexInfo, 0, 1) tblInfo := &model.TableInfo{ - Name: model.NewCIStr(tableName), + Name: pmodel.NewCIStr(tableName), State: model.StatePublic, Charset: mysql.DefaultCharset, Collate: mysql.DefaultCollationName, @@ -402,12 +403,12 @@ func buildTableMeta(tableName string, cs []columnInfo) *model.TableInfo { tblInfo.IsCommonHandle = true tblInfo.CommonHandleVersion = 1 index := &model.IndexInfo{ - Name: model.NewCIStr("primary"), + Name: pmodel.NewCIStr("primary"), State: model.StatePublic, Primary: true, Unique: true, Columns: []*model.IndexColumn{ - {Name: model.NewCIStr(c.name), Offset: offset, Length: types.UnspecifiedLength}}, + {Name: pmodel.NewCIStr(c.name), Offset: offset, Length: types.UnspecifiedLength}}, } primaryIndices = append(primaryIndices, index) tblInfo.Indices = primaryIndices @@ -1714,7 +1715,7 @@ var tableTiDBIndexUsage = []columnInfo{ // // The returned nil indicates that sharding information is not suitable for the table(for example, when the table is a View). // This function is exported for unit test. 
-func GetShardingInfo(dbInfo model.CIStr, tableInfo *model.TableInfo) any { +func GetShardingInfo(dbInfo pmodel.CIStr, tableInfo *model.TableInfo) any { if tableInfo == nil || tableInfo.IsView() || util.IsMemOrSysDB(dbInfo.L) { return nil } diff --git a/pkg/infoschema/test/clustertablestest/BUILD.bazel b/pkg/infoschema/test/clustertablestest/BUILD.bazel index 05d801917705c..d61f60d5edf02 100644 --- a/pkg/infoschema/test/clustertablestest/BUILD.bazel +++ b/pkg/infoschema/test/clustertablestest/BUILD.bazel @@ -20,6 +20,7 @@ go_test( "//pkg/kv", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/auth", "//pkg/parser/model", diff --git a/pkg/infoschema/test/clustertablestest/tables_test.go b/pkg/infoschema/test/clustertablestest/tables_test.go index 6c6bb35f589ae..c135d3bb1217b 100644 --- a/pkg/infoschema/test/clustertablestest/tables_test.go +++ b/pkg/infoschema/test/clustertablestest/tables_test.go @@ -33,9 +33,10 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" @@ -340,7 +341,7 @@ func TestTableRowIDShardingInfo(t *testing.T) { testFunc := func(dbName string, expectInfo any) { tableInfo := model.TableInfo{} - info := infoschema.GetShardingInfo(model.NewCIStr(dbName), &tableInfo) + info := infoschema.GetShardingInfo(pmodel.NewCIStr(dbName), &tableInfo) require.Equal(t, expectInfo, info) } @@ -607,11 +608,11 @@ func TestReloadDropDatabase(t *testing.T) { tk.MustExec("create table t2 (a int)") tk.MustExec("create table t3 (a int)") is := domain.GetDomain(tk.Session()).InfoSchema() - t2, err := 
is.TableByName(context.Background(), model.NewCIStr("test_dbs"), model.NewCIStr("t2")) + t2, err := is.TableByName(context.Background(), pmodel.NewCIStr("test_dbs"), pmodel.NewCIStr("t2")) require.NoError(t, err) tk.MustExec("drop database test_dbs") is = domain.GetDomain(tk.Session()).InfoSchema() - _, err = is.TableByName(context.Background(), model.NewCIStr("test_dbs"), model.NewCIStr("t2")) + _, err = is.TableByName(context.Background(), pmodel.NewCIStr("test_dbs"), pmodel.NewCIStr("t2")) require.True(t, terror.ErrorEqual(infoschema.ErrTableNotExists, err)) _, ok := is.TableByID(context.Background(), t2.Meta().ID) require.False(t, ok) @@ -629,11 +630,11 @@ func TestSystemSchemaID(t *testing.T) { func checkSystemSchemaTableID(t *testing.T, dom *domain.Domain, dbName string, dbID, start, end int64, uniqueIDMap map[int64]string) { is := dom.InfoSchema() require.NotNil(t, is) - db, ok := is.SchemaByName(model.NewCIStr(dbName)) + db, ok := is.SchemaByName(pmodel.NewCIStr(dbName)) require.True(t, ok) require.Equal(t, dbID, db.ID) // Test for information_schema table id. 
- tables, err := is.SchemaTableInfos(context.Background(), model.NewCIStr(dbName)) + tables, err := is.SchemaTableInfos(context.Background(), pmodel.NewCIStr(dbName)) require.NoError(t, err) require.Greater(t, len(tables), 0) for _, tbl := range tables { @@ -666,7 +667,7 @@ func TestSelectHiddenColumn(t *testing.T) { tk.MustExec("USE test_hidden;") tk.MustExec("CREATE TABLE hidden (a int , b int, c int);") tk.MustQuery("select count(*) from INFORMATION_SCHEMA.COLUMNS where table_name = 'hidden'").Check(testkit.Rows("3")) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test_hidden"), model.NewCIStr("hidden")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test_hidden"), pmodel.NewCIStr("hidden")) require.NoError(t, err) tbInfo := tb.Meta() colInfo := tbInfo.Columns diff --git a/pkg/kv/BUILD.bazel b/pkg/kv/BUILD.bazel index d82e1da2996fb..c7955b8c5358a 100644 --- a/pkg/kv/BUILD.bazel +++ b/pkg/kv/BUILD.bazel @@ -24,7 +24,7 @@ go_library( deps = [ "//pkg/config", "//pkg/errno", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/resourcegroup", @@ -80,7 +80,7 @@ go_test( flaky = True, shard_count = 23, deps = [ - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/sessionctx/stmtctx", diff --git a/pkg/kv/interface_mock_test.go b/pkg/kv/interface_mock_test.go index 54345af455c29..d0241b9fd732d 100644 --- a/pkg/kv/interface_mock_test.go +++ b/pkg/kv/interface_mock_test.go @@ -19,7 +19,7 @@ import ( deadlockpb "github.com/pingcap/kvproto/pkg/deadlock" "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/tikv" ) diff --git a/pkg/kv/kv.go b/pkg/kv/kv.go index d2df9824862c7..184d8b1eac8a0 100644 --- a/pkg/kv/kv.go +++ b/pkg/kv/kv.go @@ -26,7 +26,7 @@ import ( 
"github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/pkg/config" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/resourcegroup" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/tiflash" diff --git a/pkg/lightning/backend/BUILD.bazel b/pkg/lightning/backend/BUILD.bazel index bbf614f84da06..5570b50981824 100644 --- a/pkg/lightning/backend/BUILD.bazel +++ b/pkg/lightning/backend/BUILD.bazel @@ -12,7 +12,7 @@ go_library( "//pkg/lightning/log", "//pkg/lightning/metric", "//pkg/lightning/mydump", - "//pkg/parser/model", + "//pkg/meta/model", "@com_github_google_uuid//:uuid", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", diff --git a/pkg/lightning/backend/backend.go b/pkg/lightning/backend/backend.go index 4ed8498a5b508..a9bbd6f33c54f 100644 --- a/pkg/lightning/backend/backend.go +++ b/pkg/lightning/backend/backend.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/log" "github.com/pingcap/tidb/pkg/lightning/metric" "github.com/pingcap/tidb/pkg/lightning/mydump" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "go.uber.org/zap" ) diff --git a/pkg/lightning/backend/kv/BUILD.bazel b/pkg/lightning/backend/kv/BUILD.bazel index 19103035e7a3f..b407e962252c9 100644 --- a/pkg/lightning/backend/kv/BUILD.bazel +++ b/pkg/lightning/backend/kv/BUILD.bazel @@ -26,7 +26,7 @@ go_library( "//pkg/lightning/metric", "//pkg/lightning/verification", "//pkg/meta/autoid", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", "//pkg/planner/context", "//pkg/sessionctx", @@ -71,6 +71,7 @@ go_test( "//pkg/lightning/log", "//pkg/lightning/verification", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/model", diff --git a/pkg/lightning/backend/kv/base.go b/pkg/lightning/backend/kv/base.go index 
0c67492c323d0..b2f6148066acc 100644 --- a/pkg/lightning/backend/kv/base.go +++ b/pkg/lightning/backend/kv/base.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/common" "github.com/pingcap/tidb/pkg/lightning/log" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/lightning/backend/kv/base_test.go b/pkg/lightning/backend/kv/base_test.go index b373085dfdec1..ee3bae9d0eb57 100644 --- a/pkg/lightning/backend/kv/base_test.go +++ b/pkg/lightning/backend/kv/base_test.go @@ -22,7 +22,8 @@ import ( "github.com/pingcap/tidb/pkg/lightning/backend/encode" "github.com/pingcap/tidb/pkg/lightning/log" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" @@ -36,7 +37,7 @@ func TestLogKVConvertFailed(t *testing.T) { err := log.InitLogger(logCfg, "info") require.NoError(t, err) - modelName := model.NewCIStr("c1") + modelName := pmodel.NewCIStr("c1") modelState := model.StatePublic modelFieldType := *types.NewFieldType(mysql.TypeTiny) c1 := &model.ColumnInfo{ID: 1, Name: modelName, State: modelState, Offset: 0, FieldType: modelFieldType} diff --git a/pkg/lightning/backend/kv/kv2sql.go b/pkg/lightning/backend/kv/kv2sql.go index 14b8538944478..502eea5cc2748 100644 --- a/pkg/lightning/backend/kv/kv2sql.go +++ b/pkg/lightning/backend/kv/kv2sql.go @@ -20,7 +20,7 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/lightning/backend/encode" "github.com/pingcap/tidb/pkg/lightning/log" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/table" 
"github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/lightning/backend/kv/kv2sql_test.go b/pkg/lightning/backend/kv/kv2sql_test.go index 27939dc65f739..9c5ca613d4c15 100644 --- a/pkg/lightning/backend/kv/kv2sql_test.go +++ b/pkg/lightning/backend/kv/kv2sql_test.go @@ -21,9 +21,9 @@ import ( "github.com/pingcap/tidb/pkg/lightning/backend/encode" "github.com/pingcap/tidb/pkg/lightning/backend/kv" "github.com/pingcap/tidb/pkg/lightning/log" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table/tables" diff --git a/pkg/lightning/backend/kv/session.go b/pkg/lightning/backend/kv/session.go index 06da45ac79174..a4bce1a34f757 100644 --- a/pkg/lightning/backend/kv/session.go +++ b/pkg/lightning/backend/kv/session.go @@ -35,7 +35,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/common" "github.com/pingcap/tidb/pkg/lightning/log" "github.com/pingcap/tidb/pkg/lightning/manual" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" planctx "github.com/pingcap/tidb/pkg/planner/context" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" diff --git a/pkg/lightning/backend/kv/sql2kv.go b/pkg/lightning/backend/kv/sql2kv.go index 1321266e1174e..e303d682398b7 100644 --- a/pkg/lightning/backend/kv/sql2kv.go +++ b/pkg/lightning/backend/kv/sql2kv.go @@ -30,7 +30,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/metric" "github.com/pingcap/tidb/pkg/lightning/verification" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" //nolint: goimports "github.com/pingcap/tidb/pkg/table" 
"github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/lightning/backend/kv/sql2kv_test.go b/pkg/lightning/backend/kv/sql2kv_test.go index 737eb86befade..dcece5617704b 100644 --- a/pkg/lightning/backend/kv/sql2kv_test.go +++ b/pkg/lightning/backend/kv/sql2kv_test.go @@ -28,9 +28,10 @@ import ( "github.com/pingcap/tidb/pkg/lightning/log" "github.com/pingcap/tidb/pkg/lightning/verification" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" _ "github.com/pingcap/tidb/pkg/planner/core" // to setup expression.EvalAstExpr. Otherwise we cannot parse the default value "github.com/pingcap/tidb/pkg/table" @@ -76,7 +77,7 @@ func (mockTable) AddRecord(ctx table.MutateContext, txn kv.Transaction, r []type } func TestEncode(t *testing.T) { - c1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeTiny)} + c1 := &model.ColumnInfo{ID: 1, Name: pmodel.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeTiny)} cols := []*model.ColumnInfo{c1} tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: model.StatePublic} tbl, err := tables.TableFromMeta(lkv.NewPanickingAllocators(tblInfo.SepAutoInc(), 0), tblInfo) @@ -159,7 +160,7 @@ func TestEncode(t *testing.T) { } func TestDecode(t *testing.T) { - c1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeTiny)} + c1 := &model.ColumnInfo{ID: 1, Name: pmodel.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeTiny)} cols := []*model.ColumnInfo{c1} tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: 
model.StatePublic} tbl, err := tables.TableFromMeta(lkv.NewPanickingAllocators(tblInfo.SepAutoInc(), 0), tblInfo) @@ -200,7 +201,7 @@ func TestDecodeIndex(t *testing.T) { Indices: []*model.IndexInfo{ { ID: 2, - Name: model.NewCIStr("test"), + Name: pmodel.NewCIStr("test"), Columns: []*model.IndexColumn{ {Offset: 0}, {Offset: 1}, @@ -210,8 +211,8 @@ func TestDecodeIndex(t *testing.T) { }, }, Columns: []*model.ColumnInfo{ - {ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeInt24)}, - {ID: 2, Name: model.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeString)}, + {ID: 1, Name: pmodel.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeInt24)}, + {ID: 2, Name: pmodel.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeString)}, }, State: model.StatePublic, PKIsHandle: false, @@ -258,7 +259,7 @@ func TestDecodeIndex(t *testing.T) { func TestEncodeRowFormatV2(t *testing.T) { // Test encoding in row format v2, as described in . 
- c1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeTiny)} + c1 := &model.ColumnInfo{ID: 1, Name: pmodel.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeTiny)} cols := []*model.ColumnInfo{c1} tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: model.StatePublic} tbl, err := tables.TableFromMeta(lkv.NewPanickingAllocators(tblInfo.SepAutoInc(), 0), tblInfo) @@ -303,7 +304,7 @@ func TestEncodeTimestamp(t *testing.T) { ty.AddFlag(mysql.NotNullFlag) c1 := &model.ColumnInfo{ ID: 1, - Name: model.NewCIStr("c1"), + Name: pmodel.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: ty, diff --git a/pkg/lightning/backend/local/BUILD.bazel b/pkg/lightning/backend/local/BUILD.bazel index 71c064fc2f845..ce561f9ef22c1 100644 --- a/pkg/lightning/backend/local/BUILD.bazel +++ b/pkg/lightning/backend/local/BUILD.bazel @@ -47,8 +47,8 @@ go_library( "//pkg/lightning/mydump", "//pkg/lightning/tikv", "//pkg/lightning/verification", + "//pkg/meta/model", "//pkg/metrics", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/sessionctx/variable", @@ -142,6 +142,7 @@ go_test( "//pkg/lightning/config", "//pkg/lightning/log", "//pkg/lightning/mydump", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/model", diff --git a/pkg/lightning/backend/local/checksum_test.go b/pkg/lightning/backend/local/checksum_test.go index 5bd6ba5260389..fb24a7e630f56 100644 --- a/pkg/lightning/backend/local/checksum_test.go +++ b/pkg/lightning/backend/local/checksum_test.go @@ -29,7 +29,8 @@ import ( tmysql "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" . 
"github.com/pingcap/tidb/pkg/lightning/checkpoints" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" pmysql "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" @@ -171,11 +172,11 @@ func TestDoChecksumWithTikv(t *testing.T) { tableInfo := &model.TableInfo{ ID: 999, - Name: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("t1"), Columns: []*model.ColumnInfo{ { ID: 1, - Name: model.NewCIStr("c1"), + Name: pmodel.NewCIStr("c1"), FieldType: *fieldType, }, }, diff --git a/pkg/lightning/backend/local/duplicate.go b/pkg/lightning/backend/local/duplicate.go index 1de2af5597547..297c2b7163dd3 100644 --- a/pkg/lightning/backend/local/duplicate.go +++ b/pkg/lightning/backend/local/duplicate.go @@ -42,7 +42,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/config" "github.com/pingcap/tidb/pkg/lightning/errormanager" "github.com/pingcap/tidb/pkg/lightning/log" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/table" diff --git a/pkg/lightning/backend/local/duplicate_test.go b/pkg/lightning/backend/local/duplicate_test.go index fc17ec9293308..87511239cbb31 100644 --- a/pkg/lightning/backend/local/duplicate_test.go +++ b/pkg/lightning/backend/local/duplicate_test.go @@ -25,9 +25,9 @@ import ( "github.com/pingcap/tidb/pkg/lightning/backend/local" "github.com/pingcap/tidb/pkg/lightning/common" "github.com/pingcap/tidb/pkg/lightning/log" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" diff --git a/pkg/lightning/backend/local/engine.go 
b/pkg/lightning/backend/local/engine.go index 9374b2ade74ff..371465ef55c19 100644 --- a/pkg/lightning/backend/local/engine.go +++ b/pkg/lightning/backend/local/engine.go @@ -45,7 +45,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/checkpoints" "github.com/pingcap/tidb/pkg/lightning/common" "github.com/pingcap/tidb/pkg/lightning/log" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/util/hack" "github.com/tikv/client-go/v2/tikv" "go.uber.org/atomic" diff --git a/pkg/lightning/backend/local/local.go b/pkg/lightning/backend/local/local.go index c38a01936e504..4d8e3d8fbe2ea 100644 --- a/pkg/lightning/backend/local/local.go +++ b/pkg/lightning/backend/local/local.go @@ -50,8 +50,8 @@ import ( "github.com/pingcap/tidb/pkg/lightning/log" "github.com/pingcap/tidb/pkg/lightning/metric" "github.com/pingcap/tidb/pkg/lightning/tikv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/codec" diff --git a/pkg/lightning/backend/tidb/BUILD.bazel b/pkg/lightning/backend/tidb/BUILD.bazel index d65342fd2a725..3c84f18714a0a 100644 --- a/pkg/lightning/backend/tidb/BUILD.bazel +++ b/pkg/lightning/backend/tidb/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "//pkg/lightning/errormanager", "//pkg/lightning/log", "//pkg/lightning/verification", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/table", @@ -48,6 +49,7 @@ go_test( "//pkg/lightning/errormanager", "//pkg/lightning/log", "//pkg/lightning/verification", + "//pkg/meta/model", "//pkg/parser/charset", "//pkg/parser/model", "//pkg/parser/mysql", diff --git a/pkg/lightning/backend/tidb/tidb.go b/pkg/lightning/backend/tidb/tidb.go index 2c70cc1df82b9..a19e15b9a1a5e 100644 --- a/pkg/lightning/backend/tidb/tidb.go +++ b/pkg/lightning/backend/tidb/tidb.go @@ -37,7 +37,8 
@@ import ( "github.com/pingcap/tidb/pkg/lightning/errormanager" "github.com/pingcap/tidb/pkg/lightning/log" "github.com/pingcap/tidb/pkg/lightning/verification" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/types" @@ -152,7 +153,7 @@ func (b *targetInfoGetter) FetchRemoteDBModels(ctx context.Context) ([]*model.DB return e } dbInfo := &model.DBInfo{ - Name: model.NewCIStr(dbName), + Name: pmodel.NewCIStr(dbName), } results = append(results, dbInfo) } @@ -204,7 +205,7 @@ func (b *targetInfoGetter) FetchRemoteTableModels(ctx context.Context, schemaNam } if tableName != curTableName { curTable = &model.TableInfo{ - Name: model.NewCIStr(tableName), + Name: pmodel.NewCIStr(tableName), State: model.StatePublic, PKIsHandle: true, } @@ -225,7 +226,7 @@ func (b *targetInfoGetter) FetchRemoteTableModels(ctx context.Context, schemaNam ft := types.FieldType{} ft.SetFlag(flag) curTable.Columns = append(curTable.Columns, &model.ColumnInfo{ - Name: model.NewCIStr(columnName), + Name: pmodel.NewCIStr(columnName), Offset: curColOffset, State: model.StatePublic, FieldType: ft, diff --git a/pkg/lightning/backend/tidb/tidb_test.go b/pkg/lightning/backend/tidb/tidb_test.go index 91a3ecacb5fef..d32d3dfe45213 100644 --- a/pkg/lightning/backend/tidb/tidb_test.go +++ b/pkg/lightning/backend/tidb/tidb_test.go @@ -34,8 +34,9 @@ import ( "github.com/pingcap/tidb/pkg/lightning/errormanager" "github.com/pingcap/tidb/pkg/lightning/log" "github.com/pingcap/tidb/pkg/lightning/verification" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" @@ -63,7 
+64,7 @@ func createMysqlSuite(t *testing.T) *mysqlSuite { } cols := make([]*model.ColumnInfo, 0, len(tys)) for i, ty := range tys { - col := &model.ColumnInfo{ID: int64(i + 1), Name: model.NewCIStr(fmt.Sprintf("c%d", i)), State: model.StatePublic, Offset: i, FieldType: *types.NewFieldType(ty)} + col := &model.ColumnInfo{ID: int64(i + 1), Name: pmodel.NewCIStr(fmt.Sprintf("c%d", i)), State: model.StatePublic, Offset: i, FieldType: *types.NewFieldType(ty)} cols = append(cols, col) } tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: model.StatePublic} @@ -287,10 +288,10 @@ func testStrictMode(t *testing.T) { defer s.TearDownTest(t) ft := *types.NewFieldType(mysql.TypeVarchar) ft.SetCharset(charset.CharsetUTF8MB4) - col0 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("s0"), State: model.StatePublic, Offset: 0, FieldType: ft} + col0 := &model.ColumnInfo{ID: 1, Name: pmodel.NewCIStr("s0"), State: model.StatePublic, Offset: 0, FieldType: ft} ft = *types.NewFieldType(mysql.TypeString) ft.SetCharset(charset.CharsetASCII) - col1 := &model.ColumnInfo{ID: 2, Name: model.NewCIStr("s1"), State: model.StatePublic, Offset: 1, FieldType: ft} + col1 := &model.ColumnInfo{ID: 2, Name: pmodel.NewCIStr("s1"), State: model.StatePublic, Offset: 1, FieldType: ft} tblInfo := &model.TableInfo{ID: 1, Columns: []*model.ColumnInfo{col0, col1}, PKIsHandle: false, State: model.StatePublic} tbl, err := tables.TableFromMeta(kv.NewPanickingAllocators(tblInfo.SepAutoInc(), 0), tblInfo) require.NoError(t, err) @@ -351,12 +352,12 @@ func TestFetchRemoteTableModels_3_x(t *testing.T) { ft.SetFlag(mysql.AutoIncrementFlag) require.Equal(t, []*model.TableInfo{ { - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), State: model.StatePublic, PKIsHandle: true, Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), Offset: 0, State: model.StatePublic, FieldType: ft, @@ -388,12 +389,12 @@ func TestFetchRemoteTableModels_4_0(t *testing.T) { 
ft.SetFlag(mysql.AutoIncrementFlag | mysql.UnsignedFlag) require.Equal(t, []*model.TableInfo{ { - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), State: model.StatePublic, PKIsHandle: true, Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), Offset: 0, State: model.StatePublic, FieldType: ft, @@ -425,12 +426,12 @@ func TestFetchRemoteTableModels_4_x_auto_increment(t *testing.T) { ft.SetFlag(mysql.AutoIncrementFlag) require.Equal(t, []*model.TableInfo{ { - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), State: model.StatePublic, PKIsHandle: true, Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), Offset: 0, State: model.StatePublic, FieldType: ft, @@ -462,13 +463,13 @@ func TestFetchRemoteTableModels_4_x_auto_random(t *testing.T) { ft.SetFlag(mysql.PriKeyFlag) require.Equal(t, []*model.TableInfo{ { - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), State: model.StatePublic, PKIsHandle: true, AutoRandomBits: 1, Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), Offset: 0, State: model.StatePublic, FieldType: ft, @@ -507,18 +508,18 @@ func TestFetchRemoteTableModelsDropTableHalfway(t *testing.T) { ft.SetFlag(mysql.AutoIncrementFlag) require.Equal(t, []*model.TableInfo{ { - Name: model.NewCIStr("tbl01"), + Name: pmodel.NewCIStr("tbl01"), State: model.StatePublic, PKIsHandle: true, Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), Offset: 0, State: model.StatePublic, FieldType: ft, }, { - Name: model.NewCIStr("val"), + Name: pmodel.NewCIStr("val"), Offset: 1, State: model.StatePublic, }, diff --git a/pkg/lightning/checkpoints/BUILD.bazel b/pkg/lightning/checkpoints/BUILD.bazel index d7c7882ee2042..72adef289c3a8 100644 --- a/pkg/lightning/checkpoints/BUILD.bazel +++ b/pkg/lightning/checkpoints/BUILD.bazel @@ -17,7 +17,7 @@ go_library( "//pkg/lightning/log", 
"//pkg/lightning/mydump", "//pkg/lightning/verification", - "//pkg/parser/model", + "//pkg/meta/model", "@com_github_joho_sqltocsv//:sqltocsv", "@com_github_pingcap_errors//:errors", "@org_uber_go_zap//:zap", @@ -43,6 +43,7 @@ go_test( "//pkg/lightning/config", "//pkg/lightning/mydump", "//pkg/lightning/verification", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/testkit/testsetup", "@com_github_data_dog_go_sqlmock//:go-sqlmock", diff --git a/pkg/lightning/checkpoints/checkpoints.go b/pkg/lightning/checkpoints/checkpoints.go index 81497cc4dc5c0..1904ddbe9ef15 100644 --- a/pkg/lightning/checkpoints/checkpoints.go +++ b/pkg/lightning/checkpoints/checkpoints.go @@ -38,7 +38,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/log" "github.com/pingcap/tidb/pkg/lightning/mydump" verify "github.com/pingcap/tidb/pkg/lightning/verification" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "go.uber.org/zap" ) diff --git a/pkg/lightning/checkpoints/checkpoints_file_test.go b/pkg/lightning/checkpoints/checkpoints_file_test.go index 7213dc9aa3e09..e938e060cad29 100644 --- a/pkg/lightning/checkpoints/checkpoints_file_test.go +++ b/pkg/lightning/checkpoints/checkpoints_file_test.go @@ -25,7 +25,8 @@ import ( "github.com/pingcap/tidb/pkg/lightning/config" "github.com/pingcap/tidb/pkg/lightning/mydump" "github.com/pingcap/tidb/pkg/lightning/verification" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) @@ -64,7 +65,7 @@ func newFileCheckpointsDB(t *testing.T, addIndexBySQL bool) *checkpoints.FileChe "t3": { Name: "t3", Desired: &model.TableInfo{ - Name: model.NewCIStr("t3"), + Name: pmodel.NewCIStr("t3"), }, }, }, @@ -218,7 +219,7 @@ func TestGet(t *testing.T) { }, }, TableInfo: &model.TableInfo{ - Name: model.NewCIStr("t3"), + Name: pmodel.NewCIStr("t3"), }, } diff --git 
a/pkg/lightning/checkpoints/checkpoints_sql_test.go b/pkg/lightning/checkpoints/checkpoints_sql_test.go index e3e7081e768b0..b4776ea8b9c05 100644 --- a/pkg/lightning/checkpoints/checkpoints_sql_test.go +++ b/pkg/lightning/checkpoints/checkpoints_sql_test.go @@ -28,7 +28,8 @@ import ( "github.com/pingcap/tidb/pkg/lightning/checkpoints" "github.com/pingcap/tidb/pkg/lightning/mydump" "github.com/pingcap/tidb/pkg/lightning/verification" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) @@ -101,7 +102,7 @@ func TestNormalOperations(t *testing.T) { Name: "t2", ID: 2, Desired: &model.TableInfo{ - Name: model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2"), }, }, }, @@ -190,15 +191,15 @@ func TestNormalOperationsWithAddIndexBySQL(t *testing.T) { // 2. initialize with checkpoint data. t1Info, err := json.Marshal(&model.TableInfo{ - Name: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("t1"), }) require.NoError(t, err) t2Info, err := json.Marshal(&model.TableInfo{ - Name: model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2"), }) require.NoError(t, err) t3Info, err := json.Marshal(&model.TableInfo{ - Name: model.NewCIStr("t3"), + Name: pmodel.NewCIStr("t3"), }) require.NoError(t, err) @@ -232,14 +233,14 @@ func TestNormalOperationsWithAddIndexBySQL(t *testing.T) { Name: "t1", ID: 1, Desired: &model.TableInfo{ - Name: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("t1"), }, }, "t2": { Name: "t2", ID: 2, Desired: &model.TableInfo{ - Name: model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2"), }, }, }, @@ -251,7 +252,7 @@ func TestNormalOperationsWithAddIndexBySQL(t *testing.T) { Name: "t3", ID: 3, Desired: &model.TableInfo{ - Name: model.NewCIStr("t3"), + Name: pmodel.NewCIStr("t3"), }, }, }, @@ -423,7 +424,7 @@ func TestNormalOperationsWithAddIndexBySQL(t *testing.T) { AllocBase: 132861, TableID: int64(2), TableInfo: &model.TableInfo{ - Name: 
model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2"), }, Engines: map[int32]*checkpoints.EngineCheckpoint{ -1: {Status: checkpoints.CheckpointStatusLoaded}, diff --git a/pkg/lightning/checkpoints/tidb.go b/pkg/lightning/checkpoints/tidb.go index ad342c70c82b0..7818345c47c40 100644 --- a/pkg/lightning/checkpoints/tidb.go +++ b/pkg/lightning/checkpoints/tidb.go @@ -15,7 +15,7 @@ package checkpoints import ( - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) // TidbDBInfo is the database info in TiDB. diff --git a/pkg/lightning/common/BUILD.bazel b/pkg/lightning/common/BUILD.bazel index 9d1a023b5a589..d6e94095e228c 100644 --- a/pkg/lightning/common/BUILD.bazel +++ b/pkg/lightning/common/BUILD.bazel @@ -29,7 +29,7 @@ go_library( "//pkg/errno", "//pkg/lightning/log", "//pkg/meta/autoid", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", "//pkg/sessionctx/variable", "//pkg/store/driver/error", @@ -121,9 +121,9 @@ go_test( "//pkg/lightning/log", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", - "//pkg/parser/model", "//pkg/store/driver/error", "//pkg/store/mockstore", "//pkg/testkit/testsetup", diff --git a/pkg/lightning/common/common.go b/pkg/lightning/common/common.go index bc5e1a9994d49..b735368e0f40d 100644 --- a/pkg/lightning/common/common.go +++ b/pkg/lightning/common/common.go @@ -19,7 +19,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) const ( diff --git a/pkg/lightning/common/common_test.go b/pkg/lightning/common/common_test.go index fdedbebfcabcc..dbbb429a3fe04 100644 --- a/pkg/lightning/common/common_test.go +++ b/pkg/lightning/common/common_test.go @@ -24,9 +24,9 @@ import ( "github.com/pingcap/tidb/pkg/lightning/common" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + 
"github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/store/mockstore" tmock "github.com/pingcap/tidb/pkg/util/mock" "github.com/stretchr/testify/require" diff --git a/pkg/lightning/common/util.go b/pkg/lightning/common/util.go index ac3ba7d1efaa5..1dfcfc9f5170f 100644 --- a/pkg/lightning/common/util.go +++ b/pkg/lightning/common/util.go @@ -36,7 +36,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/lightning/log" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" tmysql "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table/tables" diff --git a/pkg/lightning/errormanager/BUILD.bazel b/pkg/lightning/errormanager/BUILD.bazel index e8c1d13fc1841..e6d342044572c 100644 --- a/pkg/lightning/errormanager/BUILD.bazel +++ b/pkg/lightning/errormanager/BUILD.bazel @@ -47,6 +47,7 @@ go_test( "//pkg/lightning/backend/kv", "//pkg/lightning/config", "//pkg/lightning/log", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/model", diff --git a/pkg/lightning/errormanager/errormanager_test.go b/pkg/lightning/errormanager/errormanager_test.go index ed353b3629c55..dfc41f3eb290b 100644 --- a/pkg/lightning/errormanager/errormanager_test.go +++ b/pkg/lightning/errormanager/errormanager_test.go @@ -27,7 +27,8 @@ import ( tidbkv "github.com/pingcap/tidb/pkg/lightning/backend/kv" "github.com/pingcap/tidb/pkg/lightning/config" "github.com/pingcap/tidb/pkg/lightning/log" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/types" @@ -162,7 +163,7 @@ func (c mockConn) 
QueryContext(_ context.Context, query string, args []driver.Na func TestReplaceConflictOneKey(t *testing.T) { column1 := &model.ColumnInfo{ ID: 1, - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), Offset: 0, DefaultValue: 0, FieldType: *types.NewFieldType(mysql.TypeLong), @@ -173,7 +174,7 @@ func TestReplaceConflictOneKey(t *testing.T) { column2 := &model.ColumnInfo{ ID: 2, - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), Offset: 1, DefaultValue: 0, FieldType: *types.NewFieldType(mysql.TypeLong), @@ -183,7 +184,7 @@ func TestReplaceConflictOneKey(t *testing.T) { column3 := &model.ColumnInfo{ ID: 3, - Name: model.NewCIStr("c"), + Name: pmodel.NewCIStr("c"), Offset: 2, DefaultValue: 0, FieldType: *types.NewFieldType(mysql.TypeBlob), @@ -193,11 +194,11 @@ func TestReplaceConflictOneKey(t *testing.T) { index := &model.IndexInfo{ ID: 1, - Name: model.NewCIStr("key_b"), - Table: model.NewCIStr(""), + Name: pmodel.NewCIStr("key_b"), + Table: pmodel.NewCIStr(""), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), Offset: 1, Length: -1, }}, @@ -208,7 +209,7 @@ func TestReplaceConflictOneKey(t *testing.T) { table := &model.TableInfo{ ID: 104, - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), Charset: "utf8mb4", Collate: "utf8mb4_bin", Columns: []*model.ColumnInfo{column1, column2, column3}, @@ -350,7 +351,7 @@ func TestReplaceConflictOneKey(t *testing.T) { func TestReplaceConflictOneUniqueKey(t *testing.T) { column1 := &model.ColumnInfo{ ID: 1, - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), Offset: 0, DefaultValue: 0, FieldType: *types.NewFieldType(mysql.TypeLong), @@ -361,7 +362,7 @@ func TestReplaceConflictOneUniqueKey(t *testing.T) { column2 := &model.ColumnInfo{ ID: 2, - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), Offset: 1, DefaultValue: 0, FieldType: *types.NewFieldType(mysql.TypeLong), @@ -372,7 +373,7 @@ func TestReplaceConflictOneUniqueKey(t *testing.T) { column3 := 
&model.ColumnInfo{ ID: 3, - Name: model.NewCIStr("c"), + Name: pmodel.NewCIStr("c"), Offset: 2, DefaultValue: 0, FieldType: *types.NewFieldType(mysql.TypeBlob), @@ -382,11 +383,11 @@ func TestReplaceConflictOneUniqueKey(t *testing.T) { index := &model.IndexInfo{ ID: 1, - Name: model.NewCIStr("uni_b"), - Table: model.NewCIStr(""), + Name: pmodel.NewCIStr("uni_b"), + Table: pmodel.NewCIStr(""), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), Offset: 1, Length: -1, }}, @@ -397,7 +398,7 @@ func TestReplaceConflictOneUniqueKey(t *testing.T) { table := &model.TableInfo{ ID: 104, - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), Charset: "utf8mb4", Collate: "utf8mb4_bin", Columns: []*model.ColumnInfo{column1, column2, column3}, diff --git a/pkg/lightning/errormanager/resolveconflict_test.go b/pkg/lightning/errormanager/resolveconflict_test.go index a76c7418aa54c..2f504fe0de2d0 100644 --- a/pkg/lightning/errormanager/resolveconflict_test.go +++ b/pkg/lightning/errormanager/resolveconflict_test.go @@ -28,9 +28,9 @@ import ( "github.com/pingcap/tidb/pkg/lightning/config" "github.com/pingcap/tidb/pkg/lightning/errormanager" "github.com/pingcap/tidb/pkg/lightning/log" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table/tables" diff --git a/pkg/lightning/tikv/BUILD.bazel b/pkg/lightning/tikv/BUILD.bazel index 89a51b2f89cac..1d36be8bd7072 100644 --- a/pkg/lightning/tikv/BUILD.bazel +++ b/pkg/lightning/tikv/BUILD.bazel @@ -12,7 +12,7 @@ go_library( "//pkg/lightning/common", "//pkg/lightning/config", "//pkg/lightning/log", - "//pkg/parser/model", + "//pkg/meta/model", "@com_github_coreos_go_semver//semver", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_kvproto//pkg/debugpb", 
diff --git a/pkg/lightning/tikv/tikv.go b/pkg/lightning/tikv/tikv.go index 0d31ef7d68ff3..c22a08cc2a51e 100644 --- a/pkg/lightning/tikv/tikv.go +++ b/pkg/lightning/tikv/tikv.go @@ -33,7 +33,7 @@ import ( "github.com/pingcap/tidb/pkg/lightning/common" "github.com/pingcap/tidb/pkg/lightning/config" "github.com/pingcap/tidb/pkg/lightning/log" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/tikv/client-go/v2/util" pdhttp "github.com/tikv/pd/client/http" "go.uber.org/zap" diff --git a/pkg/lock/context/BUILD.bazel b/pkg/lock/context/BUILD.bazel index 12d37593a6e6b..a38d9a52696b6 100644 --- a/pkg/lock/context/BUILD.bazel +++ b/pkg/lock/context/BUILD.bazel @@ -5,5 +5,8 @@ go_library( srcs = ["lockcontext.go"], importpath = "github.com/pingcap/tidb/pkg/lock/context", visibility = ["//visibility:public"], - deps = ["//pkg/parser/model"], + deps = [ + "//pkg/meta/model", + "//pkg/parser/model", + ], ) diff --git a/pkg/lock/context/lockcontext.go b/pkg/lock/context/lockcontext.go index 34e3ef5a7a7a1..7500a8178f170 100644 --- a/pkg/lock/context/lockcontext.go +++ b/pkg/lock/context/lockcontext.go @@ -14,12 +14,15 @@ package context -import "github.com/pingcap/tidb/pkg/parser/model" +import ( + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" +) // TableLockReadContext is the interface to get table lock information. type TableLockReadContext interface { // CheckTableLocked checks the table lock. - CheckTableLocked(tblID int64) (bool, model.TableLockType) + CheckTableLocked(tblID int64) (bool, pmodel.TableLockType) // GetAllTableLocks gets all table locks table id and db id hold by the session. GetAllTableLocks() []model.TableLockTpInfo // HasLockedTables uses to check whether this session locked any tables. 
diff --git a/pkg/meta/BUILD.bazel b/pkg/meta/BUILD.bazel index ffd98f9583563..4f616b0d1017b 100644 --- a/pkg/meta/BUILD.bazel +++ b/pkg/meta/BUILD.bazel @@ -11,6 +11,7 @@ go_library( deps = [ "//pkg/errno", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser/model", "//pkg/parser/mysql", @@ -39,6 +40,7 @@ go_test( "//pkg/ddl", "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/model", diff --git a/pkg/meta/autoid/BUILD.bazel b/pkg/meta/autoid/BUILD.bazel index 7a13113d39d81..0ccb2c2ff2c8d 100644 --- a/pkg/meta/autoid/BUILD.bazel +++ b/pkg/meta/autoid/BUILD.bazel @@ -15,8 +15,8 @@ go_library( "//pkg/errno", "//pkg/kv", "//pkg/meta", + "//pkg/meta/model", "//pkg/metrics", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/types", "//pkg/util/dbterror", @@ -52,6 +52,7 @@ go_test( ":autoid", "//pkg/kv", "//pkg/meta", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", diff --git a/pkg/meta/autoid/autoid.go b/pkg/meta/autoid/autoid.go index 237981497fcf9..cf3c6cbb0d76e 100644 --- a/pkg/meta/autoid/autoid.go +++ b/pkg/meta/autoid/autoid.go @@ -28,8 +28,8 @@ import ( "github.com/pingcap/kvproto/pkg/autoid" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/dbterror" @@ -654,7 +654,7 @@ func NewSequenceAllocator(store kv.Storage, dbID, tbID int64, info *model.Sequen func NewAllocatorsFromTblInfo(r Requirement, schemaID int64, tblInfo *model.TableInfo) Allocators { var allocs []Allocator dbID := tblInfo.GetAutoIDSchemaID(schemaID) - idCacheOpt := CustomAutoIncCacheOption(tblInfo.AutoIdCache) + idCacheOpt := CustomAutoIncCacheOption(tblInfo.AutoIDCache) tblVer := AllocOptionTableInfoVersion(tblInfo.Version) hasRowID := 
!tblInfo.PKIsHandle && !tblInfo.IsCommonHandle diff --git a/pkg/meta/autoid/autoid_test.go b/pkg/meta/autoid/autoid_test.go index a531c6ec1a5b4..d0a43c73b1e8d 100644 --- a/pkg/meta/autoid/autoid_test.go +++ b/pkg/meta/autoid/autoid_test.go @@ -29,7 +29,8 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/util" "github.com/stretchr/testify/require" @@ -63,17 +64,17 @@ func TestSignedAutoid(t *testing.T) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) - err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) + err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: pmodel.NewCIStr("a")}) require.NoError(t, err) - err = m.CreateTableOrView(1, &model.TableInfo{ID: 1, Name: model.NewCIStr("t")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 1, Name: pmodel.NewCIStr("t")}) require.NoError(t, err) - err = m.CreateTableOrView(1, &model.TableInfo{ID: 2, Name: model.NewCIStr("t1")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 2, Name: pmodel.NewCIStr("t1")}) require.NoError(t, err) - err = m.CreateTableOrView(1, &model.TableInfo{ID: 3, Name: model.NewCIStr("t1")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 3, Name: pmodel.NewCIStr("t1")}) require.NoError(t, err) - err = m.CreateTableOrView(1, &model.TableInfo{ID: 4, Name: model.NewCIStr("t2")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 4, Name: pmodel.NewCIStr("t2")}) require.NoError(t, err) - err = m.CreateTableOrView(1, &model.TableInfo{ID: 5, Name: model.NewCIStr("t3")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 5, Name: pmodel.NewCIStr("t3")}) 
require.NoError(t, err) return nil }) @@ -268,17 +269,17 @@ func TestUnsignedAutoid(t *testing.T) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) - err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) + err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: pmodel.NewCIStr("a")}) require.NoError(t, err) - err = m.CreateTableOrView(1, &model.TableInfo{ID: 1, Name: model.NewCIStr("t")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 1, Name: pmodel.NewCIStr("t")}) require.NoError(t, err) - err = m.CreateTableOrView(1, &model.TableInfo{ID: 2, Name: model.NewCIStr("t1")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 2, Name: pmodel.NewCIStr("t1")}) require.NoError(t, err) - err = m.CreateTableOrView(1, &model.TableInfo{ID: 3, Name: model.NewCIStr("t1")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 3, Name: pmodel.NewCIStr("t1")}) require.NoError(t, err) - err = m.CreateTableOrView(1, &model.TableInfo{ID: 4, Name: model.NewCIStr("t2")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 4, Name: pmodel.NewCIStr("t2")}) require.NoError(t, err) - err = m.CreateTableOrView(1, &model.TableInfo{ID: 5, Name: model.NewCIStr("t3")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 5, Name: pmodel.NewCIStr("t3")}) require.NoError(t, err) return nil }) @@ -432,9 +433,9 @@ func TestConcurrentAlloc(t *testing.T) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) - err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) + err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: pmodel.NewCIStr("a")}) require.NoError(t, err) - err = m.CreateTableOrView(dbID, &model.TableInfo{ID: tblID, Name: model.NewCIStr("t")}) + err = m.CreateTableOrView(dbID, 
&model.TableInfo{ID: tblID, Name: pmodel.NewCIStr("t")}) require.NoError(t, err) return nil }) @@ -518,9 +519,9 @@ func TestRollbackAlloc(t *testing.T) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) - err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) + err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: pmodel.NewCIStr("a")}) require.NoError(t, err) - err = m.CreateTableOrView(dbID, &model.TableInfo{ID: tblID, Name: model.NewCIStr("t")}) + err = m.CreateTableOrView(dbID, &model.TableInfo{ID: tblID, Name: pmodel.NewCIStr("t")}) require.NoError(t, err) return nil }) @@ -568,11 +569,11 @@ func TestAllocComputationIssue(t *testing.T) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) - err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) + err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: pmodel.NewCIStr("a")}) require.NoError(t, err) - err = m.CreateTableOrView(1, &model.TableInfo{ID: 1, Name: model.NewCIStr("t")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 1, Name: pmodel.NewCIStr("t")}) require.NoError(t, err) - err = m.CreateTableOrView(1, &model.TableInfo{ID: 2, Name: model.NewCIStr("t1")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 2, Name: pmodel.NewCIStr("t1")}) require.NoError(t, err) return nil }) @@ -619,9 +620,9 @@ func TestIssue40584(t *testing.T) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) - err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) + err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: pmodel.NewCIStr("a")}) require.NoError(t, err) - 
err = m.CreateTableOrView(1, &model.TableInfo{ID: 1, Name: model.NewCIStr("t")}) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 1, Name: pmodel.NewCIStr("t")}) require.NoError(t, err) return nil }) diff --git a/pkg/meta/autoid/bench_test.go b/pkg/meta/autoid/bench_test.go index f2df4e67633c2..c1f293ec40e7e 100644 --- a/pkg/meta/autoid/bench_test.go +++ b/pkg/meta/autoid/bench_test.go @@ -23,7 +23,8 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/store/mockstore" ) @@ -44,11 +45,11 @@ func BenchmarkAllocator_Alloc(b *testing.B) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) - err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) + err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: pmodel.NewCIStr("a")}) if err != nil { return err } - err = m.CreateTableOrView(dbID, &model.TableInfo{ID: tblID, Name: model.NewCIStr("t")}) + err = m.CreateTableOrView(dbID, &model.TableInfo{ID: tblID, Name: pmodel.NewCIStr("t")}) if err != nil { return err } @@ -84,7 +85,7 @@ func BenchmarkAllocator_SequenceAlloc(b *testing.B) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) - err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) + err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: pmodel.NewCIStr("a")}) if err != nil { return err } @@ -99,7 +100,7 @@ func BenchmarkAllocator_SequenceAlloc(b *testing.B) { } seqTable := &model.TableInfo{ ID: 1, - Name: model.NewCIStr("seq"), + Name: pmodel.NewCIStr("seq"), Sequence: 
seq, } sequenceBase = seq.Start - 1 diff --git a/pkg/meta/autoid/memid.go b/pkg/meta/autoid/memid.go index 5ba66689b8bf3..f56e9f0e25524 100644 --- a/pkg/meta/autoid/memid.go +++ b/pkg/meta/autoid/memid.go @@ -18,7 +18,7 @@ import ( "context" "math" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) // NewAllocatorFromTempTblInfo creates an in-memory allocator from a temporary table info. diff --git a/pkg/meta/autoid/memid_test.go b/pkg/meta/autoid/memid_test.go index f0b05fdec8183..c5a5fc738a9d1 100644 --- a/pkg/meta/autoid/memid_test.go +++ b/pkg/meta/autoid/memid_test.go @@ -20,7 +20,7 @@ import ( "testing" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/store/mockstore" diff --git a/pkg/meta/autoid/seq_autoid_test.go b/pkg/meta/autoid/seq_autoid_test.go index ae0d4508a1e75..35e616f69d6dc 100644 --- a/pkg/meta/autoid/seq_autoid_test.go +++ b/pkg/meta/autoid/seq_autoid_test.go @@ -24,7 +24,8 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/util" "github.com/stretchr/testify/require" @@ -43,7 +44,7 @@ func TestSequenceAutoid(t *testing.T) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) - err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) + err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: pmodel.NewCIStr("a")}) require.NoError(t, err) seq = &model.SequenceInfo{ Start: 1, @@ -56,7 +57,7 
@@ func TestSequenceAutoid(t *testing.T) { } seqTable := &model.TableInfo{ ID: 1, - Name: model.NewCIStr("seq"), + Name: pmodel.NewCIStr("seq"), Sequence: seq, } sequenceBase = seq.Start - 1 @@ -168,7 +169,7 @@ func TestConcurrentAllocSequence(t *testing.T) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) - err1 := m.CreateDatabase(&model.DBInfo{ID: 2, Name: model.NewCIStr("a")}) + err1 := m.CreateDatabase(&model.DBInfo{ID: 2, Name: pmodel.NewCIStr("a")}) require.NoError(t, err1) seq = &model.SequenceInfo{ Start: 100, @@ -181,7 +182,7 @@ func TestConcurrentAllocSequence(t *testing.T) { } seqTable := &model.TableInfo{ ID: 2, - Name: model.NewCIStr("seq"), + Name: pmodel.NewCIStr("seq"), Sequence: seq, } if seq.Increment >= 0 { diff --git a/pkg/meta/meta.go b/pkg/meta/meta.go index 8286975b9abc6..dce8a82353e7f 100644 --- a/pkg/meta/meta.go +++ b/pkg/meta/meta.go @@ -31,8 +31,9 @@ import ( rmpb "github.com/pingcap/kvproto/pkg/resource_manager" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/resourcegroup" "github.com/pingcap/tidb/pkg/structure" @@ -99,10 +100,10 @@ var ( ResourceGroupSettings: &model.ResourceGroupSettings{ RURate: math.MaxInt32, BurstLimit: -1, - Priority: model.MediumPriorityValue, + Priority: pmodel.MediumPriorityValue, }, ID: defaultGroupID, - Name: model.NewCIStr(resourcegroup.DefaultResourceGroupName), + Name: pmodel.NewCIStr(resourcegroup.DefaultResourceGroupName), State: model.StatePublic, } ) @@ -765,7 +766,7 @@ func (m *Meta) CreateMySQLDatabaseIfNotExists() (int64, error) { } db := model.DBInfo{ ID: id, - Name: 
model.NewCIStr(mysql.SystemDB), + Name: pmodel.NewCIStr(mysql.SystemDB), Charset: mysql.UTF8MB4Charset, Collate: mysql.UTF8MB4DefaultCollation, State: model.StatePublic, @@ -1228,7 +1229,7 @@ func FastUnmarshalTableNameInfo(data []byte) (*model.TableNameInfo, error) { return &model.TableNameInfo{ ID: id, - Name: model.NewCIStr(name), + Name: pmodel.NewCIStr(name), }, nil } diff --git a/pkg/meta/meta_autoid.go b/pkg/meta/meta_autoid.go index cef5903c758b8..21cc1cd45e421 100644 --- a/pkg/meta/meta_autoid.go +++ b/pkg/meta/meta_autoid.go @@ -18,7 +18,7 @@ import ( "strconv" "github.com/pingcap/errors" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) var _ AutoIDAccessor = &autoIDAccessor{} diff --git a/pkg/meta/meta_test.go b/pkg/meta/meta_test.go index dc5415d236fad..69fa8609a6eb7 100644 --- a/pkg/meta/meta_test.go +++ b/pkg/meta/meta_test.go @@ -29,9 +29,10 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" _ "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/store/mockstore" @@ -59,7 +60,7 @@ func TestPlacementPolicy(t *testing.T) { // test the meta storage of placemnt policy. policy := &model.PolicyInfo{ ID: 1, - Name: model.NewCIStr("aa"), + Name: pmodel.NewCIStr("aa"), PlacementSettings: &model.PlacementSettings{ PrimaryRegion: "my primary", Regions: "my regions", @@ -84,7 +85,7 @@ func TestPlacementPolicy(t *testing.T) { require.Equal(t, policy, val) // mock updating the placement policy. 
- policy.Name = model.NewCIStr("bb") + policy.Name = pmodel.NewCIStr("bb") policy.LearnerConstraints = "+zone=nanjing" err = m.UpdatePolicy(policy) require.NoError(t, err) @@ -139,7 +140,7 @@ func TestResourceGroup(t *testing.T) { rg := &model.ResourceGroupInfo{ ID: groupID, - Name: model.NewCIStr("aa"), + Name: pmodel.NewCIStr("aa"), ResourceGroupSettings: &model.ResourceGroupSettings{ RURate: 100, }, @@ -210,7 +211,7 @@ func TestMeta(t *testing.T) { dbInfo := &model.DBInfo{ ID: 1, - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), } err = m.CreateDatabase(dbInfo) require.NoError(t, err) @@ -223,7 +224,7 @@ func TestMeta(t *testing.T) { require.NoError(t, err) require.Equal(t, dbInfo, v) - dbInfo.Name = model.NewCIStr("aa") + dbInfo.Name = pmodel.NewCIStr("aa") err = m.UpdateDatabase(dbInfo) require.NoError(t, err) @@ -237,7 +238,7 @@ func TestMeta(t *testing.T) { tbInfo := &model.TableInfo{ ID: 1, - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), DBID: dbInfo.ID, } err = m.CreateTableOrView(1, tbInfo) @@ -255,7 +256,7 @@ func TestMeta(t *testing.T) { require.NotNil(t, err) require.True(t, meta.ErrTableExists.Equal(err)) - tbInfo.Name = model.NewCIStr("tt") + tbInfo.Name = pmodel.NewCIStr("tt") err = m.UpdateTable(1, tbInfo) require.NoError(t, err) @@ -275,7 +276,7 @@ func TestMeta(t *testing.T) { tbInfo2 := &model.TableInfo{ ID: 2, - Name: model.NewCIStr("bb"), + Name: pmodel.NewCIStr("bb"), DBID: dbInfo.ID, } err = m.CreateTableOrView(1, tbInfo2) @@ -341,7 +342,7 @@ func TestMeta(t *testing.T) { tid := int64(100) tbInfo100 := &model.TableInfo{ ID: tid, - Name: model.NewCIStr("t_rename"), + Name: pmodel.NewCIStr("t_rename"), } // Create table. err = m.CreateTableOrView(1, tbInfo100) @@ -368,7 +369,7 @@ func TestMeta(t *testing.T) { // Test case for CreateTableAndSetAutoID. 
tbInfo3 := &model.TableInfo{ ID: 3, - Name: model.NewCIStr("tbl3"), + Name: pmodel.NewCIStr("tbl3"), } err = m.CreateTableAndSetAutoID(1, tbInfo3, meta.AutoIDGroup{RowID: 123, IncrementID: 0}) require.NoError(t, err) @@ -730,7 +731,7 @@ func TestIsTableInfoMustLoadSubStringsOrder(t *testing.T) { func TestTableNameExtract(t *testing.T) { var tbl model.TableInfo - tbl.Name = model.NewCIStr(`a`) + tbl.Name = pmodel.NewCIStr(`a`) b, err := json.Marshal(tbl) require.NoError(t, err) @@ -739,28 +740,28 @@ func TestTableNameExtract(t *testing.T) { require.Len(t, nameLMatch, 2) require.Equal(t, "a", nameLMatch[1]) - tbl.Name = model.NewCIStr(`"a"`) + tbl.Name = pmodel.NewCIStr(`"a"`) b, err = json.Marshal(tbl) require.NoError(t, err) nameLMatch = nameLRegex.FindStringSubmatch(string(b)) require.Len(t, nameLMatch, 2) require.Equal(t, `"a"`, meta.Unescape(nameLMatch[1])) - tbl.Name = model.NewCIStr(`""a"`) + tbl.Name = pmodel.NewCIStr(`""a"`) b, err = json.Marshal(tbl) require.NoError(t, err) nameLMatch = nameLRegex.FindStringSubmatch(string(b)) require.Len(t, nameLMatch, 2) require.Equal(t, `""a"`, meta.Unescape(nameLMatch[1])) - tbl.Name = model.NewCIStr(`"\"a"`) + tbl.Name = pmodel.NewCIStr(`"\"a"`) b, err = json.Marshal(tbl) require.NoError(t, err) nameLMatch = nameLRegex.FindStringSubmatch(string(b)) require.Len(t, nameLMatch, 2) require.Equal(t, `"\"a"`, meta.Unescape(nameLMatch[1])) - tbl.Name = model.NewCIStr(`"\"啊"`) + tbl.Name = pmodel.NewCIStr(`"\"啊"`) b, err = json.Marshal(tbl) require.NoError(t, err) nameLMatch = nameLRegex.FindStringSubmatch(string(b)) @@ -870,28 +871,28 @@ func TestInfoSchemaV2SpecialAttributeCorrectnessAfterBootstrap(t *testing.T) { // create database dbInfo := &model.DBInfo{ ID: 10001, - Name: model.NewCIStr("sc"), + Name: pmodel.NewCIStr("sc"), State: model.StatePublic, } // create table with special attributes tblInfo := &model.TableInfo{ ID: 10002, - Name: model.NewCIStr("cs"), + Name: pmodel.NewCIStr("cs"), State: model.StatePublic, 
Partition: &model.PartitionInfo{ Definitions: []model.PartitionDefinition{ - {ID: 11, Name: model.NewCIStr("p1")}, - {ID: 22, Name: model.NewCIStr("p2")}, + {ID: 11, Name: pmodel.NewCIStr("p1")}, + {ID: 22, Name: pmodel.NewCIStr("p2")}, }, Enable: true, }, ForeignKeys: []*model.FKInfo{{ ID: 1, - Name: model.NewCIStr("fk"), - RefTable: model.NewCIStr("t"), - RefCols: []model.CIStr{model.NewCIStr("a")}, - Cols: []model.CIStr{model.NewCIStr("t_a")}, + Name: pmodel.NewCIStr("fk"), + RefTable: pmodel.NewCIStr("t"), + RefCols: []pmodel.CIStr{pmodel.NewCIStr("a")}, + Cols: []pmodel.CIStr{pmodel.NewCIStr("t_a")}, }}, TiFlashReplica: &model.TiFlashReplicaInfo{ Count: 0, @@ -899,13 +900,13 @@ func TestInfoSchemaV2SpecialAttributeCorrectnessAfterBootstrap(t *testing.T) { Available: true, }, Lock: &model.TableLockInfo{ - Tp: model.TableLockRead, + Tp: pmodel.TableLockRead, State: model.TableLockStatePreLock, TS: 0, }, PlacementPolicyRef: &model.PolicyRefInfo{ ID: 1, - Name: model.NewCIStr("r1"), + Name: pmodel.NewCIStr("r1"), }, TTLInfo: &model.TTLInfo{ IntervalExprStr: "1", @@ -966,7 +967,7 @@ func TestInfoSchemaV2DataFieldsCorrectnessAfterBootstrap(t *testing.T) { // create database dbInfo := &model.DBInfo{ ID: 10001, - Name: model.NewCIStr("sc"), + Name: pmodel.NewCIStr("sc"), Charset: "utf8", Collate: "utf8_general_ci", State: model.StatePublic, @@ -975,13 +976,13 @@ func TestInfoSchemaV2DataFieldsCorrectnessAfterBootstrap(t *testing.T) { // create table with partition info tblInfo := &model.TableInfo{ ID: 10002, - Name: model.NewCIStr("cs"), + Name: pmodel.NewCIStr("cs"), Charset: "latin1", Collate: "latin1_bin", State: model.StatePublic, Partition: &model.PartitionInfo{ Definitions: []model.PartitionDefinition{ - {ID: 1, Name: model.NewCIStr("p1")}, + {ID: 1, Name: pmodel.NewCIStr("p1")}, }, Enable: true, }, @@ -1009,7 +1010,7 @@ func TestInfoSchemaV2DataFieldsCorrectnessAfterBootstrap(t *testing.T) { require.Equal(t, tbl.Meta().ID, tblInfo.ID) //byName, traverse byName 
and load from store, - tbl, err = is.TableByName(context.Background(), model.NewCIStr("sc"), model.NewCIStr("cs")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("sc"), pmodel.NewCIStr("cs")) require.NoError(t, err) require.Equal(t, tbl.Meta().ID, tblInfo.ID) @@ -1019,7 +1020,7 @@ func TestInfoSchemaV2DataFieldsCorrectnessAfterBootstrap(t *testing.T) { require.Equal(t, tbl.Meta().ID, tblInfo.ID) //schemaMap, traverse schemaMap find dbInfo - db, ok := is.SchemaByName(model.NewCIStr("sc")) + db, ok := is.SchemaByName(pmodel.NewCIStr("sc")) require.True(t, ok) require.Equal(t, db.ID, dbInfo.ID) @@ -1048,12 +1049,12 @@ func TestInfoSchemaMiscFieldsCorrectnessAfterBootstrap(t *testing.T) { dbInfo := &model.DBInfo{ ID: 10001, - Name: model.NewCIStr("sc"), + Name: pmodel.NewCIStr("sc"), State: model.StatePublic, } policy := &model.PolicyInfo{ ID: 2, - Name: model.NewCIStr("policy_1"), + Name: pmodel.NewCIStr("policy_1"), PlacementSettings: &model.PlacementSettings{ PrimaryRegion: "r1", Regions: "r1,r2", @@ -1061,17 +1062,17 @@ func TestInfoSchemaMiscFieldsCorrectnessAfterBootstrap(t *testing.T) { } group := &model.ResourceGroupInfo{ ID: 3, - Name: model.NewCIStr("groupName_1"), + Name: pmodel.NewCIStr("groupName_1"), } tblInfo := &model.TableInfo{ ID: 10002, - Name: model.NewCIStr("cs"), + Name: pmodel.NewCIStr("cs"), State: model.StatePublic, ForeignKeys: []*model.FKInfo{{ ID: 1, - Name: model.NewCIStr("fk_1"), - RefSchema: model.NewCIStr("t1"), - RefTable: model.NewCIStr("parent"), + Name: pmodel.NewCIStr("fk_1"), + RefSchema: pmodel.NewCIStr("t1"), + RefTable: pmodel.NewCIStr("parent"), Version: 1, }}, PlacementPolicyRef: &model.PolicyRefInfo{ @@ -1081,7 +1082,7 @@ func TestInfoSchemaMiscFieldsCorrectnessAfterBootstrap(t *testing.T) { } tblInfo1 := &model.TableInfo{ ID: 10003, - Name: model.NewCIStr("cs"), + Name: pmodel.NewCIStr("cs"), State: model.StatePublic, TempTableType: model.TempTableLocal, } diff --git a/pkg/meta/model/BUILD.bazel 
b/pkg/meta/model/BUILD.bazel new file mode 100644 index 0000000000000..cd9ebad5665fb --- /dev/null +++ b/pkg/meta/model/BUILD.bazel @@ -0,0 +1,53 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "model", + srcs = [ + "bdr.go", + "column.go", + "db.go", + "flags.go", + "index.go", + "job.go", + "placement.go", + "reorg.go", + "resource_group.go", + "table.go", + ], + importpath = "github.com/pingcap/tidb/pkg/meta/model", + visibility = ["//visibility:public"], + deps = [ + "//pkg/parser/auth", + "//pkg/parser/charset", + "//pkg/parser/duration", + "//pkg/parser/model", + "//pkg/parser/mysql", + "//pkg/parser/terror", + "//pkg/parser/types", + "@com_github_pingcap_errors//:errors", + ], +) + +go_test( + name = "model_test", + timeout = "short", + srcs = [ + "bdr_test.go", + "column_test.go", + "index_test.go", + "job_test.go", + "placement_test.go", + "table_test.go", + ], + embed = [":model"], + flaky = True, + shard_count = 22, + deps = [ + "//pkg/parser/charset", + "//pkg/parser/model", + "//pkg/parser/mysql", + "//pkg/parser/terror", + "//pkg/parser/types", + "@com_github_stretchr_testify//require", + ], +) diff --git a/pkg/meta/model/bdr.go b/pkg/meta/model/bdr.go new file mode 100644 index 0000000000000..ada92039b9168 --- /dev/null +++ b/pkg/meta/model/bdr.go @@ -0,0 +1,130 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "time" +) + +// DDLBDRType is the type for DDL when BDR enable. +type DDLBDRType string + +const ( + // UnsafeDDL means the DDL can't be executed by user when cluster is Primary/Secondary. + UnsafeDDL DDLBDRType = "unsafe DDL" + // SafeDDL means the DDL can be executed by user when cluster is Primary. + SafeDDL DDLBDRType = "safe DDL" + // UnmanagementDDL means the DDL can't be synced by CDC. + UnmanagementDDL DDLBDRType = "unmanagement DDL" + // UnknownDDL means the DDL is unknown. + UnknownDDL DDLBDRType = "unknown DDL" +) + +// ActionBDRMap is the map of DDL ActionType to DDLBDRType. +var ActionBDRMap = map[ActionType]DDLBDRType{} + +// BDRActionMap is the map of DDLBDRType to ActionType (reversed from ActionBDRMap). +var BDRActionMap = map[DDLBDRType][]ActionType{ + SafeDDL: { + ActionCreateSchema, + ActionCreateTable, + ActionAddColumn, // add a new column to table if it’s nullable or with default value. + ActionAddIndex, //add non-unique index + ActionDropIndex, + ActionModifyColumn, // add or update comments for column, change default values of one particular column + ActionSetDefaultValue, + ActionModifyTableComment, + ActionRenameIndex, + ActionAddTablePartition, + ActionDropPrimaryKey, + ActionAlterIndexVisibility, + ActionCreateTables, + ActionAlterTTLInfo, + ActionAlterTTLRemove, + ActionCreateView, + ActionDropView, + }, + UnsafeDDL: { + ActionDropSchema, + ActionDropTable, + ActionDropColumn, + ActionAddForeignKey, + ActionDropForeignKey, + ActionTruncateTable, + ActionRebaseAutoID, + ActionRenameTable, + ActionShardRowID, + ActionDropTablePartition, + ActionModifyTableCharsetAndCollate, + ActionTruncateTablePartition, + ActionRecoverTable, + ActionModifySchemaCharsetAndCollate, + ActionLockTable, + ActionUnlockTable, + ActionRepairTable, + ActionSetTiFlashReplica, + ActionUpdateTiFlashReplicaStatus, + ActionAddPrimaryKey, + ActionCreateSequence, + ActionAlterSequence, + ActionDropSequence, + 
ActionModifyTableAutoIDCache, + ActionRebaseAutoRandomBase, + ActionExchangeTablePartition, + ActionAddCheckConstraint, + ActionDropCheckConstraint, + ActionAlterCheckConstraint, + ActionRenameTables, + ActionAlterTableAttributes, + ActionAlterTablePartitionAttributes, + ActionAlterTablePartitionPlacement, + ActionModifySchemaDefaultPlacement, + ActionAlterTablePlacement, + ActionAlterCacheTable, + ActionAlterTableStatsOptions, + ActionAlterNoCacheTable, + ActionMultiSchemaChange, + ActionFlashbackCluster, + ActionRecoverSchema, + ActionReorganizePartition, + ActionAlterTablePartitioning, + ActionRemovePartitioning, + }, + UnmanagementDDL: { + ActionCreatePlacementPolicy, + ActionAlterPlacementPolicy, + ActionDropPlacementPolicy, + ActionCreateResourceGroup, + ActionAlterResourceGroup, + ActionDropResourceGroup, + }, + UnknownDDL: { + _DEPRECATEDActionAlterTableAlterPartition, + }, +} + +// TSConvert2Time converts timestamp to time. +func TSConvert2Time(ts uint64) time.Time { + t := int64(ts >> 18) // 18 is for the logical time. + return time.UnixMilli(t) +} + +func init() { + for bdrType, v := range BDRActionMap { + for _, action := range v { + ActionBDRMap[action] = bdrType + } + } +} diff --git a/pkg/meta/model/bdr_test.go b/pkg/meta/model/bdr_test.go new file mode 100644 index 0000000000000..c247424fa1277 --- /dev/null +++ b/pkg/meta/model/bdr_test.go @@ -0,0 +1,35 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestActionBDRMap(t *testing.T) { + require.Equal(t, len(ActionMap), len(ActionBDRMap)) + + totalActions := 0 + for bdrType, actions := range BDRActionMap { + for _, action := range actions { + require.Equal(t, bdrType, ActionBDRMap[action], "action %s", action) + } + totalActions += len(actions) + } + + require.Equal(t, totalActions, len(ActionBDRMap)) +} diff --git a/pkg/meta/model/column.go b/pkg/meta/model/column.go new file mode 100644 index 0000000000000..58bd2f0ad70e9 --- /dev/null +++ b/pkg/meta/model/column.go @@ -0,0 +1,312 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "strings" + "unsafe" + + "github.com/pingcap/tidb/pkg/parser/charset" + "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/parser/types" +) + +const ( + // ColumnInfoVersion0 means the column info version is 0. + ColumnInfoVersion0 = uint64(0) + // ColumnInfoVersion1 means the column info version is 1. + ColumnInfoVersion1 = uint64(1) + // ColumnInfoVersion2 means the column info version is 2. + // This is for v2.1.7 to Compatible with older versions charset problem. + // Old version such as v2.0.8 treat utf8 as utf8mb4, because there is no UTF8 check in v2.0.8. 
+ // After version V2.1.2 (PR#8738) , TiDB add UTF8 check, then the user upgrade from v2.0.8 insert some UTF8MB4 characters will got error. + // This is not compatibility for user. Then we try to fix this in PR #9820, and increase the version number. + ColumnInfoVersion2 = uint64(2) + + // CurrLatestColumnInfoVersion means the latest column info in the current TiDB. + CurrLatestColumnInfoVersion = ColumnInfoVersion2 +) + +// ChangeStateInfo is used for recording the information of schema changing. +type ChangeStateInfo struct { + // DependencyColumnOffset is the changing column offset that the current column depends on when executing modify/change column. + DependencyColumnOffset int `json:"relative_col_offset"` +} + +// ColumnInfo provides meta data describing of a table column. +type ColumnInfo struct { + ID int64 `json:"id"` + Name model.CIStr `json:"name"` + Offset int `json:"offset"` + OriginDefaultValue any `json:"origin_default"` + OriginDefaultValueBit []byte `json:"origin_default_bit"` + DefaultValue any `json:"default"` + DefaultValueBit []byte `json:"default_bit"` + // DefaultIsExpr is indicates the default value string is expr. + DefaultIsExpr bool `json:"default_is_expr"` + GeneratedExprString string `json:"generated_expr_string"` + GeneratedStored bool `json:"generated_stored"` + Dependences map[string]struct{} `json:"dependences"` + FieldType types.FieldType `json:"type"` + State SchemaState `json:"state"` + Comment string `json:"comment"` + // A hidden column is used internally(expression index) and are not accessible by users. + Hidden bool `json:"hidden"` + *ChangeStateInfo `json:"change_state_info"` + // Version means the version of the column info. + // Version = 0: For OriginDefaultValue and DefaultValue of timestamp column will stores the default time in system time zone. + // That is a bug if multiple TiDB servers in different system time zone. 
+ // Version = 1: For OriginDefaultValue and DefaultValue of timestamp column will stores the default time in UTC time zone. + // This will fix bug in version 0. For compatibility with version 0, we add version field in column info struct. + Version uint64 `json:"version"` +} + +// IsVirtualGenerated checks the column if it is virtual. +func (c *ColumnInfo) IsVirtualGenerated() bool { + return c.IsGenerated() && !c.GeneratedStored +} + +// Clone clones ColumnInfo. +func (c *ColumnInfo) Clone() *ColumnInfo { + if c == nil { + return nil + } + nc := *c + return &nc +} + +// GetType returns the type of ColumnInfo. +func (c *ColumnInfo) GetType() byte { + return c.FieldType.GetType() +} + +// GetFlag returns the flag of ColumnInfo. +func (c *ColumnInfo) GetFlag() uint { + return c.FieldType.GetFlag() +} + +// GetFlen returns the flen of ColumnInfo. +func (c *ColumnInfo) GetFlen() int { + return c.FieldType.GetFlen() +} + +// GetDecimal returns the decimal of ColumnInfo. +func (c *ColumnInfo) GetDecimal() int { + return c.FieldType.GetDecimal() +} + +// GetCharset returns the charset of ColumnInfo. +func (c *ColumnInfo) GetCharset() string { + return c.FieldType.GetCharset() +} + +// GetCollate returns the collation of ColumnInfo. +func (c *ColumnInfo) GetCollate() string { + return c.FieldType.GetCollate() +} + +// GetElems returns the elems of ColumnInfo. +func (c *ColumnInfo) GetElems() []string { + return c.FieldType.GetElems() +} + +// SetType set the type of ColumnInfo. +func (c *ColumnInfo) SetType(tp byte) { + c.FieldType.SetType(tp) +} + +// SetFlag set the flag of ColumnInfo. +func (c *ColumnInfo) SetFlag(flag uint) { + c.FieldType.SetFlag(flag) +} + +// AddFlag adds the flag of ColumnInfo. +func (c *ColumnInfo) AddFlag(flag uint) { + c.FieldType.AddFlag(flag) +} + +// AndFlag adds a flag to the column. +func (c *ColumnInfo) AndFlag(flag uint) { + c.FieldType.AndFlag(flag) +} + +// ToggleFlag flips the flag according to the value. 
+func (c *ColumnInfo) ToggleFlag(flag uint) { + c.FieldType.ToggleFlag(flag) +} + +// DelFlag removes the flag from the column's flag. +func (c *ColumnInfo) DelFlag(flag uint) { + c.FieldType.DelFlag(flag) +} + +// SetFlen sets the flen of ColumnInfo. +func (c *ColumnInfo) SetFlen(flen int) { + c.FieldType.SetFlen(flen) +} + +// SetDecimal sets the decimal of ColumnInfo. +func (c *ColumnInfo) SetDecimal(decimal int) { + c.FieldType.SetDecimal(decimal) +} + +// SetCharset sets charset of the ColumnInfo +func (c *ColumnInfo) SetCharset(charset string) { + c.FieldType.SetCharset(charset) +} + +// SetCollate sets the collation of the column. +func (c *ColumnInfo) SetCollate(collate string) { + c.FieldType.SetCollate(collate) +} + +// SetElems set the elements of enum column. +func (c *ColumnInfo) SetElems(elems []string) { + c.FieldType.SetElems(elems) +} + +// IsGenerated returns true if the column is generated column. +func (c *ColumnInfo) IsGenerated() bool { + return len(c.GeneratedExprString) != 0 +} + +// SetOriginDefaultValue sets the origin default value. +// For mysql.TypeBit type, the default value storage format must be a string. +// Other value such as int must convert to string format first. +// The mysql.TypeBit type supports the null default value. +func (c *ColumnInfo) SetOriginDefaultValue(value any) error { + c.OriginDefaultValue = value + if c.GetType() == mysql.TypeBit { + if value == nil { + return nil + } + if v, ok := value.(string); ok { + c.OriginDefaultValueBit = []byte(v) + return nil + } + return types.ErrInvalidDefault.GenWithStackByArgs(c.Name) + } + return nil +} + +// GetOriginDefaultValue gets the origin default value. 
+func (c *ColumnInfo) GetOriginDefaultValue() any { + if c.GetType() == mysql.TypeBit && c.OriginDefaultValueBit != nil { + // If the column type is BIT, both `OriginDefaultValue` and `DefaultValue` of ColumnInfo are corrupted, + // because the content before json.Marshal is INCONSISTENT with the content after json.Unmarshal. + return string(c.OriginDefaultValueBit) + } + return c.OriginDefaultValue +} + +// SetDefaultValue sets the default value. +func (c *ColumnInfo) SetDefaultValue(value any) error { + c.DefaultValue = value + if c.GetType() == mysql.TypeBit { + // For mysql.TypeBit type, the default value storage format must be a string. + // Other value such as int must convert to string format first. + // The mysql.TypeBit type supports the null default value. + if value == nil { + return nil + } + if v, ok := value.(string); ok { + c.DefaultValueBit = []byte(v) + return nil + } + return types.ErrInvalidDefault.GenWithStackByArgs(c.Name) + } + return nil +} + +// GetDefaultValue gets the default value of the column. +// Default value use to stored in DefaultValue field, but now, +// bit type default value will store in DefaultValueBit for fix bit default value decode/encode bug. +func (c *ColumnInfo) GetDefaultValue() any { + if c.GetType() == mysql.TypeBit && c.DefaultValueBit != nil { + return string(c.DefaultValueBit) + } + return c.DefaultValue +} + +// GetTypeDesc gets the description for column type. +func (c *ColumnInfo) GetTypeDesc() string { + desc := c.FieldType.CompactStr() + if mysql.HasUnsignedFlag(c.GetFlag()) && c.GetType() != mysql.TypeBit && c.GetType() != mysql.TypeYear { + desc += " unsigned" + } + if mysql.HasZerofillFlag(c.GetFlag()) && c.GetType() != mysql.TypeYear { + desc += " zerofill" + } + return desc +} + +// EmptyColumnInfoSize is the memory usage of ColumnInfoSize +const EmptyColumnInfoSize = int64(unsafe.Sizeof(ColumnInfo{})) + +// FindColumnInfo finds ColumnInfo in cols by name. 
+func FindColumnInfo(cols []*ColumnInfo, name string) *ColumnInfo { + name = strings.ToLower(name) + for _, col := range cols { + if col.Name.L == name { + return col + } + } + return nil +} + +// FindColumnInfoByID finds ColumnInfo in cols by id. +func FindColumnInfoByID(cols []*ColumnInfo, id int64) *ColumnInfo { + for _, col := range cols { + if col.ID == id { + return col + } + } + return nil +} + +// NewExtraHandleColInfo mocks a column info for extra handle column. +func NewExtraHandleColInfo() *ColumnInfo { + colInfo := &ColumnInfo{ + ID: ExtraHandleID, + Name: ExtraHandleName, + } + + colInfo.SetFlag(mysql.PriKeyFlag | mysql.NotNullFlag) + colInfo.SetType(mysql.TypeLonglong) + + flen, decimal := mysql.GetDefaultFieldLengthAndDecimal(mysql.TypeLonglong) + colInfo.SetFlen(flen) + colInfo.SetDecimal(decimal) + + colInfo.SetCharset(charset.CharsetBin) + colInfo.SetCollate(charset.CollationBin) + return colInfo +} + +// NewExtraPhysTblIDColInfo mocks a column info for extra partition id column. +func NewExtraPhysTblIDColInfo() *ColumnInfo { + colInfo := &ColumnInfo{ + ID: ExtraPhysTblID, + Name: ExtraPhysTblIDName, + } + colInfo.SetType(mysql.TypeLonglong) + flen, decimal := mysql.GetDefaultFieldLengthAndDecimal(mysql.TypeLonglong) + colInfo.SetFlen(flen) + colInfo.SetDecimal(decimal) + colInfo.SetCharset(charset.CharsetBin) + colInfo.SetCollate(charset.CollationBin) + return colInfo +} diff --git a/pkg/meta/model/column_test.go b/pkg/meta/model/column_test.go new file mode 100644 index 0000000000000..66752f386f0d4 --- /dev/null +++ b/pkg/meta/model/column_test.go @@ -0,0 +1,103 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/parser/types" + "github.com/stretchr/testify/require" +) + +func TestDefaultValue(t *testing.T) { + srcCol := &ColumnInfo{ + ID: 1, + } + randPlainStr := "random_plain_string" + + oldPlainCol := srcCol.Clone() + oldPlainCol.Name = model.NewCIStr("oldPlainCol") + oldPlainCol.FieldType = *types.NewFieldType(mysql.TypeLong) + oldPlainCol.DefaultValue = randPlainStr + oldPlainCol.OriginDefaultValue = randPlainStr + + newPlainCol := srcCol.Clone() + newPlainCol.Name = model.NewCIStr("newPlainCol") + newPlainCol.FieldType = *types.NewFieldType(mysql.TypeLong) + err := newPlainCol.SetDefaultValue(1) + require.NoError(t, err) + require.Equal(t, 1, newPlainCol.GetDefaultValue()) + err = newPlainCol.SetDefaultValue(randPlainStr) + require.NoError(t, err) + require.Equal(t, randPlainStr, newPlainCol.GetDefaultValue()) + + randBitStr := string([]byte{25, 185}) + + oldBitCol := srcCol.Clone() + oldBitCol.Name = model.NewCIStr("oldBitCol") + oldBitCol.FieldType = *types.NewFieldType(mysql.TypeBit) + oldBitCol.DefaultValue = randBitStr + oldBitCol.OriginDefaultValue = randBitStr + + newBitCol := srcCol.Clone() + newBitCol.Name = model.NewCIStr("newBitCol") + newBitCol.FieldType = *types.NewFieldType(mysql.TypeBit) + err = newBitCol.SetDefaultValue(1) + // Only string type is allowed in BIT column. 
+ require.Error(t, err) + require.Contains(t, err.Error(), "Invalid default value") + require.Equal(t, 1, newBitCol.GetDefaultValue()) + err = newBitCol.SetDefaultValue(randBitStr) + require.NoError(t, err) + require.Equal(t, randBitStr, newBitCol.GetDefaultValue()) + + nullBitCol := srcCol.Clone() + nullBitCol.Name = model.NewCIStr("nullBitCol") + nullBitCol.FieldType = *types.NewFieldType(mysql.TypeBit) + err = nullBitCol.SetOriginDefaultValue(nil) + require.NoError(t, err) + require.Nil(t, nullBitCol.GetOriginDefaultValue()) + + testCases := []struct { + col *ColumnInfo + isConsistent bool + }{ + {oldPlainCol, true}, + {oldBitCol, false}, + {newPlainCol, true}, + {newBitCol, true}, + {nullBitCol, true}, + } + for _, tc := range testCases { + col, isConsistent := tc.col, tc.isConsistent + comment := fmt.Sprintf("%s assertion failed", col.Name.O) + bytes, err := json.Marshal(col) + require.NoError(t, err, comment) + var newCol ColumnInfo + err = json.Unmarshal(bytes, &newCol) + require.NoError(t, err, comment) + if isConsistent { + require.Equal(t, col.GetDefaultValue(), newCol.GetDefaultValue(), comment) + require.Equal(t, col.GetOriginDefaultValue(), newCol.GetOriginDefaultValue(), comment) + } else { + require.NotEqual(t, col.GetDefaultValue(), newCol.GetDefaultValue(), comment) + require.NotEqual(t, col.GetOriginDefaultValue(), newCol.GetOriginDefaultValue(), comment) + } + } +} diff --git a/pkg/meta/model/db.go b/pkg/meta/model/db.go new file mode 100644 index 0000000000000..ece71f17c815b --- /dev/null +++ b/pkg/meta/model/db.go @@ -0,0 +1,58 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "strings" + + "github.com/pingcap/tidb/pkg/parser/model" +) + +// DBInfo provides meta data describing a DB. +type DBInfo struct { + ID int64 `json:"id"` // Database ID + Name model.CIStr `json:"db_name"` // DB name. + Charset string `json:"charset"` + Collate string `json:"collate"` + Deprecated struct { // Tables is not set in infoschema v2, use infoschema SchemaTableInfos() instead. + Tables []*TableInfo `json:"-"` // Tables in the DB. + } + State SchemaState `json:"state"` + PlacementPolicyRef *PolicyRefInfo `json:"policy_ref_info"` + TableName2ID map[string]int64 `json:"-"` +} + +// Clone clones DBInfo. +func (db *DBInfo) Clone() *DBInfo { + newInfo := *db + newInfo.Deprecated.Tables = make([]*TableInfo, len(db.Deprecated.Tables)) + for i := range db.Deprecated.Tables { + newInfo.Deprecated.Tables[i] = db.Deprecated.Tables[i].Clone() + } + return &newInfo +} + +// Copy shallow copies DBInfo. +func (db *DBInfo) Copy() *DBInfo { + newInfo := *db + newInfo.Deprecated.Tables = make([]*TableInfo, len(db.Deprecated.Tables)) + copy(newInfo.Deprecated.Tables, db.Deprecated.Tables) + return &newInfo +} + +// LessDBInfo is used for sorting DBInfo by DBInfo.Name. 
+func LessDBInfo(a *DBInfo, b *DBInfo) int { + return strings.Compare(a.Name.L, b.Name.L) +} diff --git a/pkg/parser/model/flags.go b/pkg/meta/model/flags.go similarity index 97% rename from pkg/parser/model/flags.go rename to pkg/meta/model/flags.go index 18bfce3628587..233a6a554b683 100644 --- a/pkg/parser/model/flags.go +++ b/pkg/meta/model/flags.go @@ -8,6 +8,7 @@ // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. diff --git a/pkg/meta/model/index.go b/pkg/meta/model/index.go new file mode 100644 index 0000000000000..d33c82483251b --- /dev/null +++ b/pkg/meta/model/index.go @@ -0,0 +1,147 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/parser/types" +) + +// IndexInfo provides meta data describing a DB index. +// It corresponds to the statement `CREATE INDEX Name ON Table (Column);` +// See https://dev.mysql.com/doc/refman/5.7/en/create-index.html +type IndexInfo struct { + ID int64 `json:"id"` + Name model.CIStr `json:"idx_name"` // Index name. + Table model.CIStr `json:"tbl_name"` // Table name. 
+ Columns []*IndexColumn `json:"idx_cols"` // Index columns. + State SchemaState `json:"state"` + BackfillState BackfillState `json:"backfill_state"` + Comment string `json:"comment"` // Comment + Tp model.IndexType `json:"index_type"` // Index type: Btree, Hash or Rtree + Unique bool `json:"is_unique"` // Whether the index is unique. + Primary bool `json:"is_primary"` // Whether the index is primary key. + Invisible bool `json:"is_invisible"` // Whether the index is invisible. + Global bool `json:"is_global"` // Whether the index is global. + MVIndex bool `json:"mv_index"` // Whether the index is multivalued index. +} + +// Clone clones IndexInfo. +func (index *IndexInfo) Clone() *IndexInfo { + if index == nil { + return nil + } + ni := *index + ni.Columns = make([]*IndexColumn, len(index.Columns)) + for i := range index.Columns { + ni.Columns[i] = index.Columns[i].Clone() + } + return &ni +} + +// HasPrefixIndex returns whether any columns of this index uses prefix length. +func (index *IndexInfo) HasPrefixIndex() bool { + for _, ic := range index.Columns { + if ic.Length != types.UnspecifiedLength { + return true + } + } + return false +} + +// HasColumnInIndexColumns checks whether the index contains the column with the specified ID. +func (index *IndexInfo) HasColumnInIndexColumns(tblInfo *TableInfo, colID int64) bool { + for _, ic := range index.Columns { + if tblInfo.Columns[ic.Offset].ID == colID { + return true + } + } + return false +} + +// FindColumnByName finds the index column with the specified name. +func (index *IndexInfo) FindColumnByName(nameL string) *IndexColumn { + _, ret := FindIndexColumnByName(index.Columns, nameL) + return ret +} + +// IsPublic checks if the index state is public +func (index *IndexInfo) IsPublic() bool { + return index.State == StatePublic +} + +// FindIndexByColumns find IndexInfo in indices which is cover the specified columns. 
+func FindIndexByColumns(tbInfo *TableInfo, indices []*IndexInfo, cols ...model.CIStr) *IndexInfo { + for _, index := range indices { + if IsIndexPrefixCovered(tbInfo, index, cols...) { + return index + } + } + return nil +} + +// IsIndexPrefixCovered checks the index's columns beginning with the cols. +func IsIndexPrefixCovered(tbInfo *TableInfo, index *IndexInfo, cols ...model.CIStr) bool { + if len(index.Columns) < len(cols) { + return false + } + for i := range cols { + if cols[i].L != index.Columns[i].Name.L || + index.Columns[i].Offset >= len(tbInfo.Columns) { + return false + } + colInfo := tbInfo.Columns[index.Columns[i].Offset] + if index.Columns[i].Length != types.UnspecifiedLength && index.Columns[i].Length < colInfo.GetFlen() { + return false + } + } + return true +} + +// FindIndexInfoByID finds IndexInfo in indices by id. +func FindIndexInfoByID(indices []*IndexInfo, id int64) *IndexInfo { + for _, idx := range indices { + if idx.ID == id { + return idx + } + } + return nil +} + +// IndexColumn provides index column info. +type IndexColumn struct { + Name model.CIStr `json:"name"` // Index name + Offset int `json:"offset"` // Index offset + // Length of prefix when using column prefix + // for indexing; + // UnspecifedLength if not using prefix indexing + Length int `json:"length"` +} + +// Clone clones IndexColumn. +func (i *IndexColumn) Clone() *IndexColumn { + ni := *i + return &ni +} + +// FindIndexColumnByName finds IndexColumn by name. When IndexColumn is not found, returns (-1, nil). +func FindIndexColumnByName(indexCols []*IndexColumn, nameL string) (int, *IndexColumn) { + for i, ic := range indexCols { + if ic.Name.L == nameL { + return i, ic + } + } + return -1, nil +} diff --git a/pkg/meta/model/index_test.go b/pkg/meta/model/index_test.go new file mode 100644 index 0000000000000..1c65da0ea7e36 --- /dev/null +++ b/pkg/meta/model/index_test.go @@ -0,0 +1,71 @@ +// Copyright 2024 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "testing" + + "github.com/pingcap/tidb/pkg/parser/model" + "github.com/stretchr/testify/require" +) + +func newColumnForTest(id int64, offset int) *ColumnInfo { + return &ColumnInfo{ + ID: id, + Name: model.NewCIStr(fmt.Sprintf("c_%d", id)), + Offset: offset, + } +} + +func newIndexForTest(id int64, cols ...*ColumnInfo) *IndexInfo { + idxCols := make([]*IndexColumn, 0, len(cols)) + for _, c := range cols { + idxCols = append(idxCols, &IndexColumn{Offset: c.Offset, Name: c.Name}) + } + return &IndexInfo{ + ID: id, + Name: model.NewCIStr(fmt.Sprintf("i_%d", id)), + Columns: idxCols, + } +} + +func TestIsIndexPrefixCovered(t *testing.T) { + c0 := newColumnForTest(0, 0) + c1 := newColumnForTest(1, 1) + c2 := newColumnForTest(2, 2) + c3 := newColumnForTest(3, 3) + c4 := newColumnForTest(4, 4) + + i0 := newIndexForTest(0, c0, c1, c2) + i1 := newIndexForTest(1, c4, c2) + + tbl := &TableInfo{ + ID: 1, + Name: model.NewCIStr("t"), + Columns: []*ColumnInfo{c0, c1, c2, c3, c4}, + Indices: []*IndexInfo{i0, i1}, + } + require.Equal(t, true, IsIndexPrefixCovered(tbl, i0, model.NewCIStr("c_0"))) + require.Equal(t, true, IsIndexPrefixCovered(tbl, i0, model.NewCIStr("c_0"), model.NewCIStr("c_1"), model.NewCIStr("c_2"))) + require.Equal(t, false, IsIndexPrefixCovered(tbl, i0, model.NewCIStr("c_1"))) + require.Equal(t, false, IsIndexPrefixCovered(tbl, i0, model.NewCIStr("c_2"))) + 
require.Equal(t, false, IsIndexPrefixCovered(tbl, i0, model.NewCIStr("c_1"), model.NewCIStr("c_2"))) + require.Equal(t, false, IsIndexPrefixCovered(tbl, i0, model.NewCIStr("c_0"), model.NewCIStr("c_2"))) + + require.Equal(t, true, IsIndexPrefixCovered(tbl, i1, model.NewCIStr("c_4"))) + require.Equal(t, true, IsIndexPrefixCovered(tbl, i1, model.NewCIStr("c_4"), model.NewCIStr("c_2"))) + require.Equal(t, false, IsIndexPrefixCovered(tbl, i0, model.NewCIStr("c_2"))) +} diff --git a/pkg/parser/model/ddl.go b/pkg/meta/model/job.go similarity index 90% rename from pkg/parser/model/ddl.go rename to pkg/meta/model/job.go index 4a57fda2e5a91..718ca6e6d9857 100644 --- a/pkg/parser/model/ddl.go +++ b/pkg/meta/model/job.go @@ -1,4 +1,4 @@ -// Copyright 2015 PingCAP, Inc. +// Copyright 2024 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -8,6 +8,7 @@ // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. @@ -20,6 +21,7 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" ) @@ -68,7 +70,7 @@ const ( ActionDropSequence ActionType = 36 ActionAddColumns ActionType = 37 // Deprecated, we use ActionMultiSchemaChange instead. ActionDropColumns ActionType = 38 // Deprecated, we use ActionMultiSchemaChange instead. 
- ActionModifyTableAutoIdCache ActionType = 39 //nolint:revive + ActionModifyTableAutoIDCache ActionType = 39 ActionRebaseAutoRandomBase ActionType = 40 ActionAlterIndexVisibility ActionType = 41 ActionExchangeTablePartition ActionType = 42 @@ -78,7 +80,7 @@ const ( // `ActionAlterTableAlterPartition` is removed and will never be used. // Just left a tombstone here for compatibility. - __DEPRECATED_ActionAlterTableAlterPartition ActionType = 46 //nolint:revive + _DEPRECATEDActionAlterTableAlterPartition ActionType = 46 ActionRenameTables ActionType = 47 ActionDropIndexes ActionType = 48 // Deprecated, we use ActionMultiSchemaChange instead. @@ -148,7 +150,7 @@ var ActionMap = map[ActionType]string{ ActionCreateSequence: "create sequence", ActionAlterSequence: "alter sequence", ActionDropSequence: "drop sequence", - ActionModifyTableAutoIdCache: "modify auto id cache", + ActionModifyTableAutoIDCache: "modify auto id cache", ActionRebaseAutoRandomBase: "rebase auto_random ID", ActionAlterIndexVisibility: "alter index visibility", ActionExchangeTablePartition: "exchange partition", @@ -180,104 +182,7 @@ var ActionMap = map[ActionType]string{ // `ActionAlterTableAlterPartition` is removed and will never be used. // Just left a tombstone here for compatibility. - __DEPRECATED_ActionAlterTableAlterPartition: "alter partition", -} - -// DDLBDRType is the type for DDL when BDR enable. -type DDLBDRType string - -const ( - // UnsafeDDL means the DDL can't be executed by user when cluster is Primary/Secondary. - UnsafeDDL DDLBDRType = "unsafe DDL" - // SafeDDL means the DDL can be executed by user when cluster is Primary. - SafeDDL DDLBDRType = "safe DDL" - // UnmanagementDDL means the DDL can't be synced by CDC. - UnmanagementDDL DDLBDRType = "unmanagement DDL" - // UnknownDDL means the DDL is unknown. - UnknownDDL DDLBDRType = "unknown DDL" -) - -// ActionBDRMap is the map of DDL ActionType to DDLBDRType. 
-var ActionBDRMap = map[ActionType]DDLBDRType{} - -// BDRActionMap is the map of DDLBDRType to ActionType (reversed from ActionBDRMap). -var BDRActionMap = map[DDLBDRType][]ActionType{ - SafeDDL: { - ActionCreateSchema, - ActionCreateTable, - ActionAddColumn, // add a new column to table if it’s nullable or with default value. - ActionAddIndex, //add non-unique index - ActionDropIndex, - ActionModifyColumn, // add or update comments for column, change default values of one particular column - ActionSetDefaultValue, - ActionModifyTableComment, - ActionRenameIndex, - ActionAddTablePartition, - ActionDropPrimaryKey, - ActionAlterIndexVisibility, - ActionCreateTables, - ActionAlterTTLInfo, - ActionAlterTTLRemove, - ActionCreateView, - ActionDropView, - }, - UnsafeDDL: { - ActionDropSchema, - ActionDropTable, - ActionDropColumn, - ActionAddForeignKey, - ActionDropForeignKey, - ActionTruncateTable, - ActionRebaseAutoID, - ActionRenameTable, - ActionShardRowID, - ActionDropTablePartition, - ActionModifyTableCharsetAndCollate, - ActionTruncateTablePartition, - ActionRecoverTable, - ActionModifySchemaCharsetAndCollate, - ActionLockTable, - ActionUnlockTable, - ActionRepairTable, - ActionSetTiFlashReplica, - ActionUpdateTiFlashReplicaStatus, - ActionAddPrimaryKey, - ActionCreateSequence, - ActionAlterSequence, - ActionDropSequence, - ActionModifyTableAutoIdCache, - ActionRebaseAutoRandomBase, - ActionExchangeTablePartition, - ActionAddCheckConstraint, - ActionDropCheckConstraint, - ActionAlterCheckConstraint, - ActionRenameTables, - ActionAlterTableAttributes, - ActionAlterTablePartitionAttributes, - ActionAlterTablePartitionPlacement, - ActionModifySchemaDefaultPlacement, - ActionAlterTablePlacement, - ActionAlterCacheTable, - ActionAlterTableStatsOptions, - ActionAlterNoCacheTable, - ActionMultiSchemaChange, - ActionFlashbackCluster, - ActionRecoverSchema, - ActionReorganizePartition, - ActionAlterTablePartitioning, - ActionRemovePartitioning, - }, - UnmanagementDDL: { - 
ActionCreatePlacementPolicy, - ActionAlterPlacementPolicy, - ActionDropPlacementPolicy, - ActionCreateResourceGroup, - ActionAlterResourceGroup, - ActionDropResourceGroup, - }, - UnknownDDL: { - __DEPRECATED_ActionAlterTableAlterPartition, - }, + _DEPRECATEDActionAlterTableAlterPartition: "alter partition", } // String return current ddl action in string @@ -288,204 +193,54 @@ func (action ActionType) String() string { return "none" } -// HistoryInfo is used for binlog. -type HistoryInfo struct { - SchemaVersion int64 - DBInfo *DBInfo - TableInfo *TableInfo - FinishedTS uint64 - - // MultipleTableInfos is like TableInfo but only for operations updating multiple tables. - MultipleTableInfos []*TableInfo -} - -// AddDBInfo adds schema version and schema information that are used for binlog. -// dbInfo is added in the following operations: create database, drop database. -func (h *HistoryInfo) AddDBInfo(schemaVer int64, dbInfo *DBInfo) { - h.SchemaVersion = schemaVer - h.DBInfo = dbInfo -} - -// AddTableInfo adds schema version and table information that are used for binlog. -// tblInfo is added except for the following operations: create database, drop database. -func (h *HistoryInfo) AddTableInfo(schemaVer int64, tblInfo *TableInfo) { - h.SchemaVersion = schemaVer - h.TableInfo = tblInfo -} - -// SetTableInfos is like AddTableInfo, but will add multiple table infos to the binlog. -func (h *HistoryInfo) SetTableInfos(schemaVer int64, tblInfos []*TableInfo) { - h.SchemaVersion = schemaVer - h.MultipleTableInfos = make([]*TableInfo, len(tblInfos)) - copy(h.MultipleTableInfos, tblInfos) -} +// SchemaState is the state for schema elements. +type SchemaState byte -// Clean cleans history information. -func (h *HistoryInfo) Clean() { - h.SchemaVersion = 0 - h.DBInfo = nil - h.TableInfo = nil - h.MultipleTableInfos = nil -} - -// TimeZoneLocation represents a single time zone. 
-type TimeZoneLocation struct { - Name string `json:"name"` - Offset int `json:"offset"` // seconds east of UTC - location *time.Location -} - -// GetLocation gets the timezone location. -func (tz *TimeZoneLocation) GetLocation() (*time.Location, error) { - if tz.location != nil { - return tz.location, nil - } - - var err error - if tz.Offset == 0 { - tz.location, err = time.LoadLocation(tz.Name) - } else { - tz.location = time.FixedZone(tz.Name, tz.Offset) - } - return tz.location, err -} - -// MultiSchemaInfo keeps some information for multi schema change. -type MultiSchemaInfo struct { - SubJobs []*SubJob `json:"sub_jobs"` - Revertible bool `json:"revertible"` - Seq int32 `json:"seq"` - - // SkipVersion is used to control whether generating a new schema version for a sub-job. - SkipVersion bool `json:"-"` - - AddColumns []CIStr `json:"-"` - DropColumns []CIStr `json:"-"` - ModifyColumns []CIStr `json:"-"` - AddIndexes []CIStr `json:"-"` - DropIndexes []CIStr `json:"-"` - AlterIndexes []CIStr `json:"-"` - - AddForeignKeys []AddForeignKeyInfo `json:"-"` - - RelativeColumns []CIStr `json:"-"` - PositionColumns []CIStr `json:"-"` -} - -// AddForeignKeyInfo contains foreign key information. -type AddForeignKeyInfo struct { - Name CIStr - Cols []CIStr -} - -// NewMultiSchemaInfo new a MultiSchemaInfo. -func NewMultiSchemaInfo() *MultiSchemaInfo { - return &MultiSchemaInfo{ - SubJobs: nil, - Revertible: true, - } -} - -// SubJob is a representation of one DDL schema change. A Job may contain zero -// (when multi-schema change is not applicable) or more SubJobs. 
-type SubJob struct { - Type ActionType `json:"type"` - Args []interface{} `json:"-"` - RawArgs json.RawMessage `json:"raw_args"` - SchemaState SchemaState `json:"schema_state"` - SnapshotVer uint64 `json:"snapshot_ver"` - RealStartTS uint64 `json:"real_start_ts"` - Revertible bool `json:"revertible"` - State JobState `json:"state"` - RowCount int64 `json:"row_count"` - Warning *terror.Error `json:"warning"` - CtxVars []interface{} `json:"-"` - SchemaVer int64 `json:"schema_version"` - ReorgTp ReorgType `json:"reorg_tp"` - UseCloud bool `json:"use_cloud"` -} +const ( + // StateNone means this schema element is absent and can't be used. + StateNone SchemaState = iota + // StateDeleteOnly means we can only delete items for this schema element. + StateDeleteOnly + // StateWriteOnly means we can use any write operation on this schema element, + // but outer can't read the changed data. + StateWriteOnly + // StateWriteReorganization means we are re-organizing whole data after write only state. + StateWriteReorganization + // StateDeleteReorganization means we are re-organizing whole data after delete only state. + StateDeleteReorganization + // StatePublic means this schema element is ok for all write and read operations. + StatePublic + // StateReplicaOnly means we're waiting tiflash replica to be finished. + StateReplicaOnly + // StateGlobalTxnOnly means we can only use global txn for operator on this schema element + StateGlobalTxnOnly + /* + * Please add the new state at the end to keep the values consistent across versions. + */ +) -// IsNormal returns true if the sub-job is normally running. -func (sub *SubJob) IsNormal() bool { - switch sub.State { - case JobStateCancelling, JobStateCancelled, - JobStateRollingback, JobStateRollbackDone: - return false +// String implements fmt.Stringer interface. 
+func (s SchemaState) String() string { + switch s { + case StateDeleteOnly: + return "delete only" + case StateWriteOnly: + return "write only" + case StateWriteReorganization: + return "write reorganization" + case StateDeleteReorganization: + return "delete reorganization" + case StatePublic: + return "public" + case StateReplicaOnly: + return "replica only" + case StateGlobalTxnOnly: + return "global txn only" default: - return true - } -} - -// IsFinished returns true if the job is done. -func (sub *SubJob) IsFinished() bool { - return sub.State == JobStateDone || - sub.State == JobStateRollbackDone || - sub.State == JobStateCancelled -} - -// ToProxyJob converts a sub-job to a proxy job. -func (sub *SubJob) ToProxyJob(parentJob *Job, seq int) Job { - return Job{ - ID: parentJob.ID, - Type: sub.Type, - SchemaID: parentJob.SchemaID, - TableID: parentJob.TableID, - SchemaName: parentJob.SchemaName, - State: sub.State, - Warning: sub.Warning, - Error: nil, - ErrorCount: 0, - RowCount: sub.RowCount, - Mu: sync.Mutex{}, - CtxVars: sub.CtxVars, - Args: sub.Args, - RawArgs: sub.RawArgs, - SchemaState: sub.SchemaState, - SnapshotVer: sub.SnapshotVer, - RealStartTS: sub.RealStartTS, - StartTS: parentJob.StartTS, - DependencyID: parentJob.DependencyID, - Query: parentJob.Query, - BinlogInfo: parentJob.BinlogInfo, - Version: parentJob.Version, - ReorgMeta: parentJob.ReorgMeta, - MultiSchemaInfo: &MultiSchemaInfo{Revertible: sub.Revertible, Seq: int32(seq)}, - Priority: parentJob.Priority, - SeqNum: parentJob.SeqNum, - Charset: parentJob.Charset, - Collate: parentJob.Collate, - AdminOperator: parentJob.AdminOperator, - TraceInfo: parentJob.TraceInfo, + return "none" } } -// FromProxyJob converts a proxy job to a sub-job. 
-func (sub *SubJob) FromProxyJob(proxyJob *Job, ver int64) { - sub.Revertible = proxyJob.MultiSchemaInfo.Revertible - sub.SchemaState = proxyJob.SchemaState - sub.SnapshotVer = proxyJob.SnapshotVer - sub.RealStartTS = proxyJob.RealStartTS - sub.Args = proxyJob.Args - sub.State = proxyJob.State - sub.Warning = proxyJob.Warning - sub.RowCount = proxyJob.RowCount - sub.SchemaVer = ver - sub.ReorgTp = proxyJob.ReorgMeta.ReorgTp - sub.UseCloud = proxyJob.ReorgMeta.UseCloudStorage -} - -// JobMeta is meta info of Job. -type JobMeta struct { - SchemaID int64 `json:"schema_id"` - TableID int64 `json:"table_id"` - // Type is the DDL job's type. - Type ActionType `json:"job_type"` - // Query is the DDL job's SQL string. - Query string `json:"query"` - // Priority is only used to set the operation priority of adding indices. - Priority int `json:"priority"` -} - // Job is for a DDL operation. type Job struct { ID int64 `json:"id"` @@ -509,14 +264,14 @@ type Job struct { // CtxVars are variables attached to the job. It is for internal usage. // E.g. passing arguments between functions by one single *Job pointer. // for ExchangeTablePartition, RenameTables, RenameTable, it's [slice-of-db-id, slice-of-table-id] - CtxVars []interface{} `json:"-"` + CtxVars []any `json:"-"` // Note: it might change when state changes, such as when rollback on AddColumn. - // - CreateTable, it's [model.TableInfo, foreignKeyCheck] + // - CreateTable, it's [TableInfo, foreignKeyCheck] // - AddIndex or AddPrimaryKey: [unique, .... // - TruncateTable: [new-table-id, foreignKeyCheck, ... // - RenameTable: [old-db-id, new-table-name, old-db-name] // - ExchangeTablePartition: [partition-id, pt-db-id, pt-id, partition-name, with-validation] - Args []interface{} `json:"-"` + Args []any `json:"-"` // RawArgs : We must use json raw message to delay parsing special args. 
RawArgs json.RawMessage `json:"raw_args"` SchemaState SchemaState `json:"schema_state"` @@ -590,44 +345,6 @@ type Job struct { SQLMode mysql.SQLMode `json:"sql_mode"` } -// InvolvingSchemaInfo returns the schema info involved in the job. The value -// should be stored in lower case. Only one type of the three member types -// (Database&Table, Policy, ResourceGroup) should only be set in a -// InvolvingSchemaInfo. -type InvolvingSchemaInfo struct { - Database string `json:"database,omitempty"` - Table string `json:"table,omitempty"` - Policy string `json:"policy,omitempty"` - ResourceGroup string `json:"resource_group,omitempty"` - Mode InvolvingSchemaInfoMode `json:"mode,omitempty"` -} - -// InvolvingSchemaInfoMode is used by InvolvingSchemaInfo.Mode. -type InvolvingSchemaInfoMode int - -// ExclusiveInvolving and SharedInvolving are considered like the exclusive lock -// and shared lock when calculate DDL job dependencies. And we also implement the -// fair lock semantic which means if we have job A/B/C arrive in order, and job B -// (exclusive request object 0) is waiting for the running job A (shared request -// object 0), and job C (shared request object 0) arrives, job C should also be -// blocked until job B is finished although job A & C has no dependency. -const ( - // ExclusiveInvolving is the default value to keep compatibility with old - // versions. - ExclusiveInvolving InvolvingSchemaInfoMode = iota - SharedInvolving -) - -const ( - // InvolvingAll means all schemas/tables are affected. It's used in - // InvolvingSchemaInfo.Database/Tables fields. When both the Database and Tables - // are InvolvingAll it also means all placement policies and resource groups are - // affected. Currently the only case is FLASHBACK CLUSTER. - InvolvingAll = "*" - // InvolvingNone means no schema/table is affected. - InvolvingNone = "" -) - // FinishTableJob is called when a job is finished. // It updates the job's state information and adds tblInfo to the binlog. 
func (job *Job) FinishTableJob(jobState JobState, schemaState SchemaState, ver int64, tblInfo *TableInfo) { @@ -674,24 +391,18 @@ func (job *Job) Clone() *Job { return nil } if len(job.Args) > 0 { - clone.Args = make([]interface{}, len(job.Args)) + clone.Args = make([]any, len(job.Args)) copy(clone.Args, job.Args) } if job.MultiSchemaInfo != nil { for i, sub := range job.MultiSchemaInfo.SubJobs { - clone.MultiSchemaInfo.SubJobs[i].Args = make([]interface{}, len(sub.Args)) + clone.MultiSchemaInfo.SubJobs[i].Args = make([]any, len(sub.Args)) copy(clone.MultiSchemaInfo.SubJobs[i].Args, sub.Args) } } return &clone } -// TSConvert2Time converts timestamp to time. -func TSConvert2Time(ts uint64) time.Time { - t := int64(ts >> 18) // 18 is for the logical time. - return time.UnixMilli(t) -} - // SetRowCount sets the number of rows. Make sure it can pass `make race`. func (job *Job) SetRowCount(count int64) { job.Mu.Lock() @@ -764,7 +475,7 @@ func (job *Job) Decode(b []byte) error { // DecodeArgs decodes serialized job arguments from job.RawArgs into the given // variables, and also save the result in job.Args. 
-func (job *Job) DecodeArgs(args ...interface{}) error { +func (job *Job) DecodeArgs(args ...any) error { var rawArgs []json.RawMessage if err := json.Unmarshal(job.RawArgs, &rawArgs); err != nil { return errors.Trace(err) @@ -1049,7 +760,7 @@ func (job *Job) IsRollbackable() bool { ActionTruncateTable, ActionAddForeignKey, ActionRenameTable, ActionRenameTables, ActionModifyTableCharsetAndCollate, ActionModifySchemaCharsetAndCollate, ActionRepairTable, - ActionModifyTableAutoIdCache, ActionModifySchemaDefaultPlacement, ActionDropCheckConstraint: + ActionModifyTableAutoIDCache, ActionModifySchemaDefaultPlacement, ActionDropCheckConstraint: return job.SchemaState == StateNone case ActionMultiSchemaChange: return job.MultiSchemaInfo.Revertible @@ -1077,6 +788,180 @@ func (job *Job) GetInvolvingSchemaInfo() []InvolvingSchemaInfo { } } +// SubJob is a representation of one DDL schema change. A Job may contain zero +// (when multi-schema change is not applicable) or more SubJobs. +type SubJob struct { + Type ActionType `json:"type"` + Args []any `json:"-"` + RawArgs json.RawMessage `json:"raw_args"` + SchemaState SchemaState `json:"schema_state"` + SnapshotVer uint64 `json:"snapshot_ver"` + RealStartTS uint64 `json:"real_start_ts"` + Revertible bool `json:"revertible"` + State JobState `json:"state"` + RowCount int64 `json:"row_count"` + Warning *terror.Error `json:"warning"` + CtxVars []any `json:"-"` + SchemaVer int64 `json:"schema_version"` + ReorgTp ReorgType `json:"reorg_tp"` + UseCloud bool `json:"use_cloud"` +} + +// IsNormal returns true if the sub-job is normally running. +func (sub *SubJob) IsNormal() bool { + switch sub.State { + case JobStateCancelling, JobStateCancelled, + JobStateRollingback, JobStateRollbackDone: + return false + default: + return true + } +} + +// IsFinished returns true if the job is done. 
+func (sub *SubJob) IsFinished() bool { + return sub.State == JobStateDone || + sub.State == JobStateRollbackDone || + sub.State == JobStateCancelled +} + +// ToProxyJob converts a sub-job to a proxy job. +func (sub *SubJob) ToProxyJob(parentJob *Job, seq int) Job { + return Job{ + ID: parentJob.ID, + Type: sub.Type, + SchemaID: parentJob.SchemaID, + TableID: parentJob.TableID, + SchemaName: parentJob.SchemaName, + State: sub.State, + Warning: sub.Warning, + Error: nil, + ErrorCount: 0, + RowCount: sub.RowCount, + Mu: sync.Mutex{}, + CtxVars: sub.CtxVars, + Args: sub.Args, + RawArgs: sub.RawArgs, + SchemaState: sub.SchemaState, + SnapshotVer: sub.SnapshotVer, + RealStartTS: sub.RealStartTS, + StartTS: parentJob.StartTS, + DependencyID: parentJob.DependencyID, + Query: parentJob.Query, + BinlogInfo: parentJob.BinlogInfo, + Version: parentJob.Version, + ReorgMeta: parentJob.ReorgMeta, + MultiSchemaInfo: &MultiSchemaInfo{Revertible: sub.Revertible, Seq: int32(seq)}, + Priority: parentJob.Priority, + SeqNum: parentJob.SeqNum, + Charset: parentJob.Charset, + Collate: parentJob.Collate, + AdminOperator: parentJob.AdminOperator, + TraceInfo: parentJob.TraceInfo, + } +} + +// FromProxyJob converts a proxy job to a sub-job. +func (sub *SubJob) FromProxyJob(proxyJob *Job, ver int64) { + sub.Revertible = proxyJob.MultiSchemaInfo.Revertible + sub.SchemaState = proxyJob.SchemaState + sub.SnapshotVer = proxyJob.SnapshotVer + sub.RealStartTS = proxyJob.RealStartTS + sub.Args = proxyJob.Args + sub.State = proxyJob.State + sub.Warning = proxyJob.Warning + sub.RowCount = proxyJob.RowCount + sub.SchemaVer = ver + sub.ReorgTp = proxyJob.ReorgMeta.ReorgTp + sub.UseCloud = proxyJob.ReorgMeta.UseCloudStorage +} + +// MultiSchemaInfo keeps some information for multi schema change. 
+type MultiSchemaInfo struct { + SubJobs []*SubJob `json:"sub_jobs"` + Revertible bool `json:"revertible"` + Seq int32 `json:"seq"` + + // SkipVersion is used to control whether generating a new schema version for a sub-job. + SkipVersion bool `json:"-"` + + AddColumns []model.CIStr `json:"-"` + DropColumns []model.CIStr `json:"-"` + ModifyColumns []model.CIStr `json:"-"` + AddIndexes []model.CIStr `json:"-"` + DropIndexes []model.CIStr `json:"-"` + AlterIndexes []model.CIStr `json:"-"` + + AddForeignKeys []AddForeignKeyInfo `json:"-"` + + RelativeColumns []model.CIStr `json:"-"` + PositionColumns []model.CIStr `json:"-"` +} + +// AddForeignKeyInfo contains foreign key information. +type AddForeignKeyInfo struct { + Name model.CIStr + Cols []model.CIStr +} + +// NewMultiSchemaInfo new a MultiSchemaInfo. +func NewMultiSchemaInfo() *MultiSchemaInfo { + return &MultiSchemaInfo{ + SubJobs: nil, + Revertible: true, + } +} + +// JobMeta is meta info of Job. +type JobMeta struct { + SchemaID int64 `json:"schema_id"` + TableID int64 `json:"table_id"` + // Type is the DDL job's type. + Type ActionType `json:"job_type"` + // Query is the DDL job's SQL string. + Query string `json:"query"` + // Priority is only used to set the operation priority of adding indices. + Priority int `json:"priority"` +} + +// InvolvingSchemaInfo returns the schema info involved in the job. The value +// should be stored in lower case. Only one type of the three member types +// (Database&Table, Policy, ResourceGroup) should only be set in a +// InvolvingSchemaInfo. +type InvolvingSchemaInfo struct { + Database string `json:"database,omitempty"` + Table string `json:"table,omitempty"` + Policy string `json:"policy,omitempty"` + ResourceGroup string `json:"resource_group,omitempty"` + Mode InvolvingSchemaInfoMode `json:"mode,omitempty"` +} + +// InvolvingSchemaInfoMode is used by InvolvingSchemaInfo.Mode. 
+type InvolvingSchemaInfoMode int + +// ExclusiveInvolving and SharedInvolving are considered like the exclusive lock +// and shared lock when calculating DDL job dependencies. And we also implement the +// fair lock semantic which means if we have job A/B/C arrive in order, and job B +// (exclusive request object 0) is waiting for the running job A (shared request +// object 0), and job C (shared request object 0) arrives, job C should also be +// blocked until job B is finished although job A & C have no dependency. +const ( + // ExclusiveInvolving is the default value to keep compatibility with old + // versions. + ExclusiveInvolving InvolvingSchemaInfoMode = iota + SharedInvolving +) + +const ( + // InvolvingAll means all schemas/tables are affected. It's used in + // InvolvingSchemaInfo.Database/Tables fields. When both the Database and Tables + // are InvolvingAll it also means all placement policies and resource groups are + // affected. Currently the only case is FLASHBACK CLUSTER. + InvolvingAll = "*" + // InvolvingNone means no schema/table is affected. + InvolvingNone = "" +) + // JobState is for job state. type JobState int32 @@ -1180,6 +1065,7 @@ const ( AdminCommandBySystem ) +// String implements fmt.Stringer interface. func (a *AdminCommandOperator) String() string { switch *a { case AdminCommandByEndUser: @@ -1226,10 +1112,72 @@ type AffectedOption struct { OldSchemaID int64 `json:"old_schema_id"` } -func init() { - for bdrType, v := range BDRActionMap { - for _, action := range v { - ActionBDRMap[action] = bdrType - } +// HistoryInfo is used for binlog. +type HistoryInfo struct { + SchemaVersion int64 + DBInfo *DBInfo + TableInfo *TableInfo + FinishedTS uint64 + + // MultipleTableInfos is like TableInfo but only for operations updating multiple tables. + MultipleTableInfos []*TableInfo +} + +// AddDBInfo adds schema version and schema information that are used for binlog. 
+// dbInfo is added in the following operations: create database, drop database. +func (h *HistoryInfo) AddDBInfo(schemaVer int64, dbInfo *DBInfo) { + h.SchemaVersion = schemaVer + h.DBInfo = dbInfo +} + +// AddTableInfo adds schema version and table information that are used for binlog. +// tblInfo is added except for the following operations: create database, drop database. +func (h *HistoryInfo) AddTableInfo(schemaVer int64, tblInfo *TableInfo) { + h.SchemaVersion = schemaVer + h.TableInfo = tblInfo +} + +// SetTableInfos is like AddTableInfo, but will add multiple table infos to the binlog. +func (h *HistoryInfo) SetTableInfos(schemaVer int64, tblInfos []*TableInfo) { + h.SchemaVersion = schemaVer + h.MultipleTableInfos = make([]*TableInfo, len(tblInfos)) + copy(h.MultipleTableInfos, tblInfos) +} + +// Clean cleans history information. +func (h *HistoryInfo) Clean() { + h.SchemaVersion = 0 + h.DBInfo = nil + h.TableInfo = nil + h.MultipleTableInfos = nil +} + +// TimeZoneLocation represents a single time zone. +type TimeZoneLocation struct { + Name string `json:"name"` + Offset int `json:"offset"` // seconds east of UTC + location *time.Location +} + +// GetLocation gets the timezone location. +func (tz *TimeZoneLocation) GetLocation() (*time.Location, error) { + if tz.location != nil { + return tz.location, nil + } + + var err error + if tz.Offset == 0 { + tz.location, err = time.LoadLocation(tz.Name) + } else { + tz.location = time.FixedZone(tz.Name, tz.Offset) } + return tz.location, err +} + +// TraceInfo is the information for trace. +type TraceInfo struct { + // ConnectionID is the id of the connection + ConnectionID uint64 `json:"connection_id"` + // SessionAlias is the alias of session + SessionAlias string `json:"session_alias"` } diff --git a/pkg/meta/model/job_test.go b/pkg/meta/model/job_test.go new file mode 100644 index 0000000000000..cde61c8a3e70d --- /dev/null +++ b/pkg/meta/model/job_test.go @@ -0,0 +1,505 @@ +// Copyright 2024 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "testing" + "time" + "unsafe" + + "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/parser/terror" + "github.com/stretchr/testify/require" +) + +func TestJobStartTime(t *testing.T) { + job := &Job{ + ID: 123, + BinlogInfo: &HistoryInfo{}, + } + require.Equal(t, TSConvert2Time(job.StartTS), time.Unix(0, 0)) + require.Equal(t, fmt.Sprintf("ID:123, Type:none, State:none, SchemaState:none, SchemaID:0, TableID:0, RowCount:0, ArgLen:0, start time: %s, Err:, ErrCount:0, SnapshotVersion:0, LocalMode: false", time.Unix(0, 0)), job.String()) +} + +func TestState(t *testing.T) { + jobTbl := []JobState{ + JobStateRunning, + JobStateDone, + JobStateCancelled, + JobStateRollingback, + JobStateRollbackDone, + JobStateSynced, + } + + for _, state := range jobTbl { + require.Greater(t, len(state.String()), 0) + } +} + +func TestJobCodec(t *testing.T) { + type A struct { + Name string + } + tzName, tzOffset := time.Now().In(time.UTC).Zone() + job := &Job{ + ID: 1, + TableID: 2, + SchemaID: 1, + BinlogInfo: &HistoryInfo{}, + Args: []any{model.NewCIStr("a"), A{Name: "abc"}}, + ReorgMeta: &DDLReorgMeta{ + Location: &TimeZoneLocation{Name: tzName, Offset: tzOffset}, + }, + } + job.BinlogInfo.AddDBInfo(123, &DBInfo{ID: 1, Name: model.NewCIStr("test_history_db")}) + job.BinlogInfo.AddTableInfo(123, &TableInfo{ID: 1, Name: 
model.NewCIStr("test_history_tbl")}) + + // Test IsDependentOn. + // job: table ID is 2 + // job1: table ID is 2 + var err error + job1 := &Job{ + ID: 2, + TableID: 2, + SchemaID: 1, + Type: ActionRenameTable, + BinlogInfo: &HistoryInfo{}, + Args: []any{int64(3), model.NewCIStr("new_table_name")}, + } + job1.RawArgs, err = json.Marshal(job1.Args) + require.NoError(t, err) + isDependent, err := job.IsDependentOn(job1) + require.NoError(t, err) + require.True(t, isDependent) + // job1: rename table, old schema ID is 3 + // job2: create schema, schema ID is 3 + job2 := &Job{ + ID: 3, + TableID: 3, + SchemaID: 3, + Type: ActionCreateSchema, + BinlogInfo: &HistoryInfo{}, + } + isDependent, err = job2.IsDependentOn(job1) + require.NoError(t, err) + require.True(t, isDependent) + + // Test IsDependentOn for exchange partition with table. + // test ActionCreateSchema and ActionExchangeTablePartition is dependent. + job3 := &Job{ + ID: 4, + TableID: 4, + SchemaID: 4, + Type: ActionExchangeTablePartition, + BinlogInfo: &HistoryInfo{}, + Args: []any{int64(6), int64(3), int64(5), "pt", true}, + } + job3.RawArgs, err = json.Marshal(job3.Args) + require.NoError(t, err) + isDependent, err = job3.IsDependentOn(job2) + require.NoError(t, err) + require.True(t, isDependent) + + // test random and ActionExchangeTablePartition is dependent because TableID is same. + job4 := &Job{ + ID: 5, + TableID: 5, + SchemaID: 3, + Type: ActionExchangeTablePartition, + BinlogInfo: &HistoryInfo{}, + Args: []any{6, 4, 2, "pt", true}, + } + job4.RawArgs, err = json.Marshal(job4.Args) + require.NoError(t, err) + isDependent, err = job4.IsDependentOn(job) + require.NoError(t, err) + require.True(t, isDependent) + + // test ActionExchangeTablePartition and ActionExchangeTablePartition is dependent. 
+ job5 := &Job{ + ID: 6, + TableID: 6, + SchemaID: 6, + Type: ActionExchangeTablePartition, + BinlogInfo: &HistoryInfo{}, + Args: []any{2, 6, 5, "pt", true}, + } + job5.RawArgs, err = json.Marshal(job5.Args) + require.NoError(t, err) + isDependent, err = job5.IsDependentOn(job4) + require.NoError(t, err) + require.True(t, isDependent) + + job6 := &Job{ + ID: 7, + TableID: 7, + SchemaID: 7, + Type: ActionExchangeTablePartition, + BinlogInfo: &HistoryInfo{}, + Args: []any{6, 4, 2, "pt", true}, + } + job6.RawArgs, err = json.Marshal(job6.Args) + require.NoError(t, err) + isDependent, err = job6.IsDependentOn(job5) + require.NoError(t, err) + require.True(t, isDependent) + + job7 := &Job{ + ID: 8, + TableID: 8, + SchemaID: 8, + Type: ActionExchangeTablePartition, + BinlogInfo: &HistoryInfo{}, + Args: []any{8, 4, 6, "pt", true}, + } + job7.RawArgs, err = json.Marshal(job7.Args) + require.NoError(t, err) + isDependent, err = job7.IsDependentOn(job6) + require.NoError(t, err) + require.True(t, isDependent) + + job8 := &Job{ + ID: 9, + TableID: 9, + SchemaID: 9, + Type: ActionExchangeTablePartition, + BinlogInfo: &HistoryInfo{}, + Args: []any{8, 9, 9, "pt", true}, + } + job8.RawArgs, err = json.Marshal(job8.Args) + require.NoError(t, err) + isDependent, err = job8.IsDependentOn(job7) + require.NoError(t, err) + require.True(t, isDependent) + + job9 := &Job{ + ID: 10, + TableID: 10, + SchemaID: 10, + Type: ActionExchangeTablePartition, + BinlogInfo: &HistoryInfo{}, + Args: []any{10, 10, 8, "pt", true}, + } + job9.RawArgs, err = json.Marshal(job9.Args) + require.NoError(t, err) + isDependent, err = job9.IsDependentOn(job8) + require.NoError(t, err) + require.True(t, isDependent) + + // test ActionDropSchema and ActionExchangeTablePartition is dependent. 
+ job10 := &Job{ + ID: 11, + TableID: 11, + SchemaID: 11, + Type: ActionDropSchema, + BinlogInfo: &HistoryInfo{}, + } + job10.RawArgs, err = json.Marshal(job10.Args) + require.NoError(t, err) + + job11 := &Job{ + ID: 12, + TableID: 12, + SchemaID: 11, + Type: ActionExchangeTablePartition, + BinlogInfo: &HistoryInfo{}, + Args: []any{10, 10, 8, "pt", true}, + } + job11.RawArgs, err = json.Marshal(job11.Args) + require.NoError(t, err) + isDependent, err = job11.IsDependentOn(job10) + require.NoError(t, err) + require.True(t, isDependent) + + // test ActionDropTable and ActionExchangeTablePartition is dependent. + job12 := &Job{ + ID: 13, + TableID: 13, + SchemaID: 11, + Type: ActionDropTable, + BinlogInfo: &HistoryInfo{}, + } + job12.RawArgs, err = json.Marshal(job12.Args) + require.NoError(t, err) + isDependent, err = job11.IsDependentOn(job12) + require.NoError(t, err) + require.False(t, isDependent) + + job13 := &Job{ + ID: 14, + TableID: 12, + SchemaID: 14, + Type: ActionDropTable, + BinlogInfo: &HistoryInfo{}, + } + job13.RawArgs, err = json.Marshal(job13.Args) + require.NoError(t, err) + isDependent, err = job11.IsDependentOn(job13) + require.NoError(t, err) + require.True(t, isDependent) + + // test ActionDropTable and ActionExchangeTablePartition is dependent. + job14 := &Job{ + ID: 15, + TableID: 15, + SchemaID: 15, + Type: ActionExchangeTablePartition, + BinlogInfo: &HistoryInfo{}, + Args: []any{16, 17, 12, "pt", true}, + } + job14.RawArgs, err = json.Marshal(job14.Args) + require.NoError(t, err) + isDependent, err = job13.IsDependentOn(job14) + require.NoError(t, err) + require.True(t, isDependent) + + // test ActionFlashbackCluster with other ddl jobs are dependent. 
+ job15 := &Job{ + ID: 16, + Type: ActionFlashbackCluster, + BinlogInfo: &HistoryInfo{}, + Args: []any{0, map[string]any{}, "ON", true}, + } + job15.RawArgs, err = json.Marshal(job15.Args) + require.NoError(t, err) + isDependent, err = job.IsDependentOn(job15) + require.NoError(t, err) + require.True(t, isDependent) + + require.Equal(t, false, job.IsCancelled()) + b, err := job.Encode(false) + require.NoError(t, err) + newJob := &Job{} + err = newJob.Decode(b) + require.NoError(t, err) + require.Equal(t, job.BinlogInfo, newJob.BinlogInfo) + name := model.CIStr{} + a := A{} + err = newJob.DecodeArgs(&name, &a) + require.NoError(t, err) + require.Equal(t, model.NewCIStr(""), name) + require.Equal(t, A{Name: ""}, a) + require.Greater(t, len(newJob.String()), 0) + require.Equal(t, newJob.ReorgMeta.Location.Name, tzName) + require.Equal(t, newJob.ReorgMeta.Location.Offset, tzOffset) + + job.BinlogInfo.Clean() + b1, err := job.Encode(true) + require.NoError(t, err) + newJob = &Job{} + err = newJob.Decode(b1) + require.NoError(t, err) + require.Equal(t, &HistoryInfo{}, newJob.BinlogInfo) + name = model.CIStr{} + a = A{} + err = newJob.DecodeArgs(&name, &a) + require.NoError(t, err) + require.Equal(t, model.NewCIStr("a"), name) + require.Equal(t, A{Name: "abc"}, a) + require.Greater(t, len(newJob.String()), 0) + + b2, err := job.Encode(true) + require.NoError(t, err) + newJob = &Job{} + err = newJob.Decode(b2) + require.NoError(t, err) + name = model.CIStr{} + // Don't decode to a here. 
+ err = newJob.DecodeArgs(&name) + require.NoError(t, err) + require.Equal(t, model.NewCIStr("a"), name) + require.Greater(t, len(newJob.String()), 0) + + job.State = JobStateDone + require.True(t, job.IsDone()) + require.True(t, job.IsFinished()) + require.False(t, job.IsRunning()) + require.False(t, job.IsSynced()) + require.False(t, job.IsRollbackDone()) + job.SetRowCount(3) + require.Equal(t, int64(3), job.GetRowCount()) +} + +func TestLocation(t *testing.T) { + // test offset = 0 + loc := &TimeZoneLocation{} + nLoc, err := loc.GetLocation() + require.NoError(t, err) + require.Equal(t, nLoc.String(), "UTC") + // test loc.location != nil + loc.Name = "Asia/Shanghai" + nLoc, err = loc.GetLocation() + require.NoError(t, err) + require.Equal(t, nLoc.String(), "UTC") + // timezone +05:00 + loc1 := &TimeZoneLocation{Name: "UTC", Offset: 18000} + loc1Byte, err := json.Marshal(loc1) + require.NoError(t, err) + loc2 := &TimeZoneLocation{} + err = json.Unmarshal(loc1Byte, loc2) + require.NoError(t, err) + require.Equal(t, loc2.Offset, loc1.Offset) + require.Equal(t, loc2.Name, loc1.Name) + nLoc, err = loc2.GetLocation() + require.NoError(t, err) + require.Equal(t, nLoc.String(), "UTC") + location := time.FixedZone("UTC", loc1.Offset) + require.Equal(t, nLoc, location) +} + +func TestJobClone(t *testing.T) { + job := &Job{ + ID: 100, + Type: ActionCreateTable, + SchemaID: 101, + TableID: 102, + SchemaName: "test", + TableName: "t", + State: JobStateDone, + MultiSchemaInfo: nil, + } + clone := job.Clone() + require.Equal(t, job.ID, clone.ID) + require.Equal(t, job.Type, clone.Type) + require.Equal(t, job.SchemaID, clone.SchemaID) + require.Equal(t, job.TableID, clone.TableID) + require.Equal(t, job.SchemaName, clone.SchemaName) + require.Equal(t, job.TableName, clone.TableName) + require.Equal(t, job.State, clone.State) + require.Equal(t, job.MultiSchemaInfo, clone.MultiSchemaInfo) +} + +func TestJobSize(t *testing.T) { + msg := `Please make sure that the following methods 
work as expected: +- SubJob.FromProxyJob() +- SubJob.ToProxyJob() +` + job := Job{} + require.Equal(t, 400, int(unsafe.Sizeof(job)), msg) +} + +func TestBackfillMetaCodec(t *testing.T) { + jm := &JobMeta{ + SchemaID: 1, + TableID: 2, + Query: "alter table t add index idx(a)", + Priority: 1, + } + bm := &BackfillMeta{ + EndInclude: true, + Error: terror.ErrResultUndetermined, + JobMeta: jm, + } + bmBytes, err := bm.Encode() + require.NoError(t, err) + bmRet := &BackfillMeta{} + bmRet.Decode(bmBytes) + require.Equal(t, bm, bmRet) +} + +func TestMayNeedReorg(t *testing.T) { + //TODO(bb7133): add more test cases for different ActionType. + reorgJobTypes := []ActionType{ + ActionReorganizePartition, + ActionRemovePartitioning, + ActionAlterTablePartitioning, + ActionAddIndex, + ActionAddPrimaryKey, + } + generalJobTypes := []ActionType{ + ActionCreateTable, + ActionDropTable, + } + job := &Job{ + ID: 100, + Type: ActionCreateTable, + SchemaID: 101, + TableID: 102, + SchemaName: "test", + TableName: "t", + State: JobStateDone, + MultiSchemaInfo: nil, + } + for _, jobType := range reorgJobTypes { + job.Type = jobType + require.True(t, job.MayNeedReorg()) + } + for _, jobType := range generalJobTypes { + job.Type = jobType + require.False(t, job.MayNeedReorg()) + } +} + +func TestInFinalState(t *testing.T) { + for s, v := range map[JobState]bool{ + JobStateSynced: true, + JobStateCancelled: true, + JobStatePaused: true, + JobStateCancelling: false, + JobStateRollbackDone: false, + } { + require.Equal(t, v, (&Job{State: s}).InFinalState()) + } +} + +func TestSchemaState(t *testing.T) { + schemaTbl := []SchemaState{ + StateDeleteOnly, + StateWriteOnly, + StateWriteReorganization, + StateDeleteReorganization, + StatePublic, + StateGlobalTxnOnly, + } + + for _, state := range schemaTbl { + require.Greater(t, len(state.String()), 0) + } +} + +func TestString(t *testing.T) { + acts := []struct { + act ActionType + result string + }{ + {ActionNone, "none"}, + 
{ActionAddForeignKey, "add foreign key"}, + {ActionDropForeignKey, "drop foreign key"}, + {ActionTruncateTable, "truncate table"}, + {ActionModifyColumn, "modify column"}, + {ActionRenameTable, "rename table"}, + {ActionRenameTables, "rename tables"}, + {ActionSetDefaultValue, "set default value"}, + {ActionCreateSchema, "create schema"}, + {ActionDropSchema, "drop schema"}, + {ActionCreateTable, "create table"}, + {ActionDropTable, "drop table"}, + {ActionAddIndex, "add index"}, + {ActionDropIndex, "drop index"}, + {ActionAddColumn, "add column"}, + {ActionDropColumn, "drop column"}, + {ActionModifySchemaCharsetAndCollate, "modify schema charset and collate"}, + {ActionAlterTablePlacement, "alter table placement"}, + {ActionAlterTablePartitionPlacement, "alter table partition placement"}, + {ActionAlterNoCacheTable, "alter table nocache"}, + } + + for _, v := range acts { + str := v.act.String() + require.Equal(t, v.result, str) + } +} diff --git a/pkg/meta/model/placement.go b/pkg/meta/model/placement.go new file mode 100644 index 0000000000000..5ef1ee42667ae --- /dev/null +++ b/pkg/meta/model/placement.go @@ -0,0 +1,143 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strings" + "time" + + "github.com/pingcap/tidb/pkg/parser/model" +) + +// PolicyRefInfo is the struct to refer the placement policy. 
+type PolicyRefInfo struct { + ID int64 `json:"id"` + Name model.CIStr `json:"name"` +} + +// PlacementSettings is the settings of the placement +type PlacementSettings struct { + PrimaryRegion string `json:"primary_region"` + Regions string `json:"regions"` + Learners uint64 `json:"learners"` + Followers uint64 `json:"followers"` + Voters uint64 `json:"voters"` + Schedule string `json:"schedule"` + Constraints string `json:"constraints"` + LeaderConstraints string `json:"leader_constraints"` + LearnerConstraints string `json:"learner_constraints"` + FollowerConstraints string `json:"follower_constraints"` + VoterConstraints string `json:"voter_constraints"` + SurvivalPreferences string `json:"survival_preferences"` +} + +// String implements fmt.Stringer interface. +func (p *PlacementSettings) String() string { + sb := new(strings.Builder) + if len(p.PrimaryRegion) > 0 { + writeSettingStringToBuilder(sb, "PRIMARY_REGION", p.PrimaryRegion) + } + + if len(p.Regions) > 0 { + writeSettingStringToBuilder(sb, "REGIONS", p.Regions) + } + + if len(p.Schedule) > 0 { + writeSettingStringToBuilder(sb, "SCHEDULE", p.Schedule) + } + + if len(p.Constraints) > 0 { + writeSettingStringToBuilder(sb, "CONSTRAINTS", p.Constraints) + } + + if len(p.LeaderConstraints) > 0 { + writeSettingStringToBuilder(sb, "LEADER_CONSTRAINTS", p.LeaderConstraints) + } + + if p.Voters > 0 { + writeSettingIntegerToBuilder(sb, "VOTERS", p.Voters) + } + + if len(p.VoterConstraints) > 0 { + writeSettingStringToBuilder(sb, "VOTER_CONSTRAINTS", p.VoterConstraints) + } + + if p.Followers > 0 { + writeSettingIntegerToBuilder(sb, "FOLLOWERS", p.Followers) + } + + if len(p.FollowerConstraints) > 0 { + writeSettingStringToBuilder(sb, "FOLLOWER_CONSTRAINTS", p.FollowerConstraints) + } + + if p.Learners > 0 { + writeSettingIntegerToBuilder(sb, "LEARNERS", p.Learners) + } + + if len(p.LearnerConstraints) > 0 { + writeSettingStringToBuilder(sb, "LEARNER_CONSTRAINTS", p.LearnerConstraints) + } + + if 
len(p.SurvivalPreferences) > 0 { + writeSettingStringToBuilder(sb, "SURVIVAL_PREFERENCES", p.SurvivalPreferences) + } + + return sb.String() +} + +// Clone clones the placement settings. +func (p *PlacementSettings) Clone() *PlacementSettings { + cloned := *p + return &cloned +} + +func writeSettingStringToBuilder(sb *strings.Builder, item string, value string, separatorFns ...func()) { + writeSettingItemToBuilder(sb, fmt.Sprintf("%s=\"%s\"", item, strings.ReplaceAll(value, "\"", "\\\"")), separatorFns...) +} +func writeSettingIntegerToBuilder(sb *strings.Builder, item string, value uint64, separatorFns ...func()) { + writeSettingItemToBuilder(sb, fmt.Sprintf("%s=%d", item, value), separatorFns...) +} + +func writeSettingDurationToBuilder(sb *strings.Builder, item string, dur time.Duration, separatorFns ...func()) { + writeSettingStringToBuilder(sb, item, dur.String(), separatorFns...) +} + +func writeSettingItemToBuilder(sb *strings.Builder, item string, separatorFns ...func()) { + if sb.Len() != 0 { + for _, fn := range separatorFns { + fn() + } + if len(separatorFns) == 0 { + sb.WriteString(" ") + } + } + sb.WriteString(item) +} + +// PolicyInfo is the struct to store the placement policy. +type PolicyInfo struct { + *PlacementSettings + ID int64 `json:"id"` + Name model.CIStr `json:"name"` + State SchemaState `json:"state"` +} + +// Clone clones PolicyInfo. +func (p *PolicyInfo) Clone() *PolicyInfo { + cloned := *p + cloned.PlacementSettings = p.PlacementSettings.Clone() + return &cloned +} diff --git a/pkg/meta/model/placement_test.go b/pkg/meta/model/placement_test.go new file mode 100644 index 0000000000000..fe5908a2f9efc --- /dev/null +++ b/pkg/meta/model/placement_test.go @@ -0,0 +1,87 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "testing" + + "github.com/pingcap/tidb/pkg/parser/model" + "github.com/stretchr/testify/require" +) + +func TestPlacementSettingsString(t *testing.T) { + settings := &PlacementSettings{ + PrimaryRegion: "us-east-1", + Regions: "us-east-1,us-east-2", + Schedule: "EVEN", + } + require.Equal(t, "PRIMARY_REGION=\"us-east-1\" REGIONS=\"us-east-1,us-east-2\" SCHEDULE=\"EVEN\"", settings.String()) + + settings = &PlacementSettings{ + LeaderConstraints: "[+region=bj]", + } + require.Equal(t, "LEADER_CONSTRAINTS=\"[+region=bj]\"", settings.String()) + + settings = &PlacementSettings{ + Voters: 1, + VoterConstraints: "[+region=us-east-1]", + Followers: 2, + FollowerConstraints: "[+disk=ssd]", + Learners: 3, + LearnerConstraints: "[+region=us-east-2]", + } + require.Equal(t, "VOTERS=1 VOTER_CONSTRAINTS=\"[+region=us-east-1]\" FOLLOWERS=2 FOLLOWER_CONSTRAINTS=\"[+disk=ssd]\" LEARNERS=3 LEARNER_CONSTRAINTS=\"[+region=us-east-2]\"", settings.String()) + + settings = &PlacementSettings{ + Voters: 3, + Followers: 2, + Learners: 1, + Constraints: "{\"+us-east-1\":1,+us-east-2:1}", + } + require.Equal(t, "CONSTRAINTS=\"{\\\"+us-east-1\\\":1,+us-east-2:1}\" VOTERS=3 FOLLOWERS=2 LEARNERS=1", settings.String()) +} + +func TestPlacementSettingsClone(t *testing.T) { + settings := &PlacementSettings{} + clonedSettings := settings.Clone() + clonedSettings.PrimaryRegion = "r1" + clonedSettings.Regions = "r1,r2" + clonedSettings.Followers = 1 + clonedSettings.Voters = 2 + clonedSettings.Followers = 3 + clonedSettings.Constraints = "[+zone=z1]" 
+ clonedSettings.LearnerConstraints = "[+region=r1]" + clonedSettings.FollowerConstraints = "[+disk=ssd]" + clonedSettings.LeaderConstraints = "[+region=r2]" + clonedSettings.VoterConstraints = "[+zone=z2]" + clonedSettings.Schedule = "even" + require.Equal(t, PlacementSettings{}, *settings) +} + +func TestPlacementPolicyClone(t *testing.T) { + policy := &PolicyInfo{ + PlacementSettings: &PlacementSettings{}, + } + clonedPolicy := policy.Clone() + clonedPolicy.ID = 100 + clonedPolicy.Name = model.NewCIStr("p2") + clonedPolicy.State = StateDeleteOnly + clonedPolicy.PlacementSettings.Followers = 10 + + require.Equal(t, int64(0), policy.ID) + require.Equal(t, model.NewCIStr(""), policy.Name) + require.Equal(t, StateNone, policy.State) + require.Equal(t, PlacementSettings{}, *(policy.PlacementSettings)) +} diff --git a/pkg/parser/model/reorg.go b/pkg/meta/model/reorg.go similarity index 98% rename from pkg/parser/model/reorg.go rename to pkg/meta/model/reorg.go index bcdfa9b358efc..01be1d69fcbc1 100644 --- a/pkg/parser/model/reorg.go +++ b/pkg/meta/model/reorg.go @@ -8,6 +8,7 @@ // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. @@ -21,6 +22,42 @@ import ( "github.com/pingcap/tidb/pkg/parser/terror" ) +// BackfillState is the state used by the backfill-merge process. +type BackfillState byte + +const ( + // BackfillStateInapplicable means the backfill-merge process is not used. + BackfillStateInapplicable BackfillState = iota + // BackfillStateRunning is the state that the backfill process is running. + // In this state, the index's write and delete operations are redirected to a temporary index. 
+ BackfillStateRunning + // BackfillStateReadyToMerge is the state that the temporary index's records are ready to be merged back + // to the origin index. + // In this state, the index's write and delete operations are copied to a temporary index. + // This state is used to make sure that all the TiDB instances are aware of the copy + // during the merge(BackfillStateMerging). + BackfillStateReadyToMerge + // BackfillStateMerging is the state that the temp index is merging back to the origin index. + // In this state, the index's write and delete operations are copied to a temporary index. + BackfillStateMerging +) + +// String implements fmt.Stringer interface. +func (s BackfillState) String() string { + switch s { + case BackfillStateRunning: + return "backfill state running" + case BackfillStateReadyToMerge: + return "backfill state ready to merge" + case BackfillStateMerging: + return "backfill state merging" + case BackfillStateInapplicable: + return "backfill state inapplicable" + default: + return "backfill state unknown" + } +} + // DDLReorgMeta is meta info of DDL reorganization. type DDLReorgMeta struct { SQLMode mysql.SQLMode `json:"sql_mode"` @@ -104,42 +141,6 @@ func (tp ReorgType) String() string { return "" } -// BackfillState is the state used by the backfill-merge process. -type BackfillState byte - -const ( - // BackfillStateInapplicable means the backfill-merge process is not used. - BackfillStateInapplicable BackfillState = iota - // BackfillStateRunning is the state that the backfill process is running. - // In this state, the index's write and delete operations are redirected to a temporary index. - BackfillStateRunning - // BackfillStateReadyToMerge is the state that the temporary index's records are ready to be merged back - // to the origin index. - // In this state, the index's write and delete operations are copied to a temporary index. 
- // This state is used to make sure that all the TiDB instances are aware of the copy - // during the merge(BackfillStateMerging). - BackfillStateReadyToMerge - // BackfillStateMerging is the state that the temp index is merging back to the origin index. - // In this state, the index's write and delete operations are copied to a temporary index. - BackfillStateMerging -) - -// String implements fmt.Stringer interface. -func (s BackfillState) String() string { - switch s { - case BackfillStateRunning: - return "backfill state running" - case BackfillStateReadyToMerge: - return "backfill state ready to merge" - case BackfillStateMerging: - return "backfill state merging" - case BackfillStateInapplicable: - return "backfill state inapplicable" - default: - return "backfill state unknown" - } -} - // BackfillMeta is meta info of the backfill job. type BackfillMeta struct { IsUnique bool `json:"is_unique"` diff --git a/pkg/meta/model/resource_group.go b/pkg/meta/model/resource_group.go new file mode 100644 index 0000000000000..d26379d288159 --- /dev/null +++ b/pkg/meta/model/resource_group.go @@ -0,0 +1,132 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "fmt" + "strings" + "time" + + "github.com/pingcap/tidb/pkg/parser/model" +) + +// ResourceGroupRunawaySettings is the runaway settings of the resource group +type ResourceGroupRunawaySettings struct { + ExecElapsedTimeMs uint64 `json:"exec_elapsed_time_ms"` + Action model.RunawayActionType `json:"action"` + WatchType model.RunawayWatchType `json:"watch_type"` + WatchDurationMs int64 `json:"watch_duration_ms"` +} + +// ResourceGroupBackgroundSettings is the background settings of the resource group. +type ResourceGroupBackgroundSettings struct { + JobTypes []string `json:"job_types"` +} + +// ResourceGroupSettings is the settings of the resource group +type ResourceGroupSettings struct { + RURate uint64 `json:"ru_per_sec"` + Priority uint64 `json:"priority"` + CPULimiter string `json:"cpu_limit"` + IOReadBandwidth string `json:"io_read_bandwidth"` + IOWriteBandwidth string `json:"io_write_bandwidth"` + BurstLimit int64 `json:"burst_limit"` + Runaway *ResourceGroupRunawaySettings `json:"runaway"` + Background *ResourceGroupBackgroundSettings `json:"background"` +} + +// NewResourceGroupSettings creates a new ResourceGroupSettings. +func NewResourceGroupSettings() *ResourceGroupSettings { + return &ResourceGroupSettings{ + RURate: 0, + Priority: model.MediumPriorityValue, + CPULimiter: "", + IOReadBandwidth: "", + IOWriteBandwidth: "", + BurstLimit: 0, + } +} + +// String implements the fmt.Stringer interface. 
+func (p *ResourceGroupSettings) String() string { + sb := new(strings.Builder) + separatorFn := func() { + sb.WriteString(", ") + } + if p.RURate != 0 { + writeSettingIntegerToBuilder(sb, "RU_PER_SEC", p.RURate, separatorFn) + } + writeSettingItemToBuilder(sb, "PRIORITY="+model.PriorityValueToName(p.Priority), separatorFn) + if len(p.CPULimiter) > 0 { + writeSettingStringToBuilder(sb, "CPU", p.CPULimiter, separatorFn) + } + if len(p.IOReadBandwidth) > 0 { + writeSettingStringToBuilder(sb, "IO_READ_BANDWIDTH", p.IOReadBandwidth, separatorFn) + } + if len(p.IOWriteBandwidth) > 0 { + writeSettingStringToBuilder(sb, "IO_WRITE_BANDWIDTH", p.IOWriteBandwidth, separatorFn) + } + // A negative burst limit means bursting is allowed without limit. + if p.BurstLimit < 0 { + writeSettingItemToBuilder(sb, "BURSTABLE", separatorFn) + } + if p.Runaway != nil { + writeSettingDurationToBuilder(sb, "QUERY_LIMIT=(EXEC_ELAPSED", time.Duration(p.Runaway.ExecElapsedTimeMs)*time.Millisecond, separatorFn) + writeSettingItemToBuilder(sb, "ACTION="+p.Runaway.Action.String()) + if p.Runaway.WatchType != model.WatchNone { + writeSettingItemToBuilder(sb, "WATCH="+p.Runaway.WatchType.String()) + if p.Runaway.WatchDurationMs > 0 { + writeSettingDurationToBuilder(sb, "DURATION", time.Duration(p.Runaway.WatchDurationMs)*time.Millisecond) + } else { + writeSettingItemToBuilder(sb, "DURATION=UNLIMITED") + } + } + sb.WriteString(")") + } + if p.Background != nil { + fmt.Fprintf(sb, ", BACKGROUND=(TASK_TYPES='%s')", strings.Join(p.Background.JobTypes, ",")) + } + + return sb.String() +} + +// Adjust adjusts the resource group settings. +func (p *ResourceGroupSettings) Adjust() { + // Currently we only support the ru_per_sec syntax, so BurstLimit (capacity) is always the same as ru_per_sec, except when burstable. + if p.BurstLimit >= 0 { + p.BurstLimit = int64(p.RURate) + } +} + +// Clone clones the resource group settings. 
+func (p *ResourceGroupSettings) Clone() *ResourceGroupSettings { + cloned := *p + return &cloned +} + +// ResourceGroupInfo is the struct to store the resource group. +type ResourceGroupInfo struct { + *ResourceGroupSettings + ID int64 `json:"id"` + Name model.CIStr `json:"name"` + State SchemaState `json:"state"` +} + +// Clone clones the ResourceGroupInfo. +func (p *ResourceGroupInfo) Clone() *ResourceGroupInfo { + cloned := *p + cloned.ResourceGroupSettings = p.ResourceGroupSettings.Clone() + return &cloned +} diff --git a/pkg/meta/model/table.go b/pkg/meta/model/table.go new file mode 100644 index 0000000000000..2274ec91c89b1 --- /dev/null +++ b/pkg/meta/model/table.go @@ -0,0 +1,1132 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "time" + "unsafe" + + "github.com/pingcap/tidb/pkg/parser/auth" + "github.com/pingcap/tidb/pkg/parser/duration" + "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/parser/mysql" +) + +// ExtraHandleID is the column ID of column which we need to append to schema to occupy the handle's position +// for use of execution phase. +const ExtraHandleID = -1 + +// ExtraPhysTblID is the column ID of column that should be filled in with the physical table id. +// Primarily used for table partition dynamic prune mode, to return which partition (physical table id) the row came from. 
+// If used with a global index, the partition ID decoded from the key value will be filled in.
+const ExtraPhysTblID = -3
+
+// Deprecated: Use ExtraPhysTblID instead.
+// const ExtraPidColID = -2
+
+// ExtraRowChecksumID is the column ID of column which holds the row checksum info.
+const ExtraRowChecksumID = -4
+
+const (
+	// TableInfoVersion0 means the table info version is 0.
+	// Upgrade from v2.1.1 or v2.1.2 to v2.1.3 and later, and then execute a "change/modify column" statement
+	// that does not specify a charset value for column. Then the following error may be reported:
+	// ERROR 1105 (HY000): unsupported modify charset from utf8mb4 to utf8.
+	// To eliminate this error, we will not modify the charset of this column
+	// when executing a change/modify column statement that does not specify a charset value for column.
+	// This behavior is not compatible with MySQL.
+	TableInfoVersion0 = uint16(0)
+	// TableInfoVersion1 means the table info version is 1.
+	// When we execute a change/modify column statement that does not specify a charset value for column,
+	// we set the charset of this column to the charset of table. This behavior is compatible with MySQL.
+	TableInfoVersion1 = uint16(1)
+	// TableInfoVersion2 means the table info version is 2.
+	// This is for v2.1.7 to be compatible with the charset problem in older versions.
+	// Old versions such as v2.0.8 treat utf8 as utf8mb4, because there is no UTF8 check in v2.0.8.
+	// After version v2.1.2 (PR#8738), TiDB added a UTF8 check, so users who upgraded from v2.0.8 and inserted UTF8MB4 characters would get an error.
+	// This broke compatibility for users. We then tried to fix this in PR #9820, and increased the version number.
+	TableInfoVersion2 = uint16(2)
+	// TableInfoVersion3 means the table info version is 3.
+	// This version aims to deal with upper-cased charset name in TableInfo stored by versions prior to TiDB v2.1.9:
+	// TiDB always supposes all charsets / collations to be lower-cased and tries to convert them if they're not.
+	// However, the conversion is missed in some scenarios before v2.1.9, so for all those tables prior to TableInfoVersion3, their
+	// charsets / collations will be converted to lower-case while loading from the storage.
+	TableInfoVersion3 = uint16(3)
+	// TableInfoVersion4 is not used.
+	TableInfoVersion4 = uint16(4)
+	// TableInfoVersion5 indicates that the auto_increment allocator in TiDB has been separated from
+	// _tidb_rowid allocator when AUTO_ID_CACHE is 1. This version is introduced to preserve the compatibility of old tables:
+	// the tables with version <= TableInfoVersion4 still use a single allocator for auto_increment and _tidb_rowid.
+	// Also see https://github.com/pingcap/tidb/issues/982.
+	TableInfoVersion5 = uint16(5)
+
+	// CurrLatestTableInfoVersion means the latest table info in the current TiDB.
+	CurrLatestTableInfoVersion = TableInfoVersion5
+)
+
+// ExtraHandleName is the name of ExtraHandle Column.
+var ExtraHandleName = model.NewCIStr("_tidb_rowid")
+
+// ExtraPhysTblIDName is the name of ExtraPhysTblID Column.
+var ExtraPhysTblIDName = model.NewCIStr("_tidb_tid")
+
+// Deprecated: Use ExtraPhysTblIDName instead.
+// var ExtraPartitionIdName = NewCIStr("_tidb_pid") //nolint:revive
+
+// TableInfo provides meta data describing a DB table.
+type TableInfo struct {
+	ID int64 `json:"id"`
+	Name model.CIStr `json:"name"`
+	Charset string `json:"charset"`
+	Collate string `json:"collate"`
+	// Columns are listed in the order in which they appear in the schema.
+	Columns []*ColumnInfo `json:"cols"`
+	Indices []*IndexInfo `json:"index_info"`
+	Constraints []*ConstraintInfo `json:"constraint_info"`
+	ForeignKeys []*FKInfo `json:"fk_info"`
+	State SchemaState `json:"state"`
+	// PKIsHandle is true when primary key is a single integer column.
+	PKIsHandle bool `json:"pk_is_handle"`
+	// IsCommonHandle is true when clustered index feature is
+	// enabled and the primary key is not a single integer column.
+	IsCommonHandle bool `json:"is_common_handle"`
+	// CommonHandleVersion is the version of the clustered index.
+	// 0 for the clustered index created == 5.0.0 RC.
+	// 1 for the clustered index created > 5.0.0 RC.
+	CommonHandleVersion uint16 `json:"common_handle_version"`
+
+	Comment string `json:"comment"`
+	AutoIncID int64 `json:"auto_inc_id"`
+
+	// Only used by BR when:
+	// 1. SepAutoInc() is true
+	// 2. The table is nonclustered and has auto_increment column.
+	// In that case, both auto_increment_id and tidb_rowid need to be backed up & recovered.
+	// See also https://github.com/pingcap/tidb/issues/46093
+	//
+	// It should have been named TiDBRowID, but for historical reasons, we do not use a separate meta key for _tidb_rowid and auto_increment_id,
+	// and field `AutoIncID` is used to serve both _tidb_rowid and auto_increment_id.
+	// If we introduced a TiDBRowID here, it could cause further misunderstanding:
+	// in most cases, AutoIncID is _tidb_rowid and TiDBRowID is null
+	// but in some cases, AutoIncID is auto_increment_id and TiDBRowID is _tidb_rowid
+	// So let's just use another name AutoIncIDExtra to avoid misconception.
+ AutoIncIDExtra int64 `json:"auto_inc_id_extra,omitempty"` + + AutoIDCache int64 `json:"auto_id_cache"` + AutoRandID int64 `json:"auto_rand_id"` + MaxColumnID int64 `json:"max_col_id"` + MaxIndexID int64 `json:"max_idx_id"` + MaxForeignKeyID int64 `json:"max_fk_id"` + MaxConstraintID int64 `json:"max_cst_id"` + // UpdateTS is used to record the timestamp of updating the table's schema information. + // These changing schema operations don't include 'truncate table' and 'rename table'. + UpdateTS uint64 `json:"update_timestamp"` + // OldSchemaID : + // Because auto increment ID has schemaID as prefix, + // We need to save original schemaID to keep autoID unchanged + // while renaming a table from one database to another. + // Only set if table has been renamed across schemas + // Old name 'old_schema_id' is kept for backwards compatibility + AutoIDSchemaID int64 `json:"old_schema_id,omitempty"` + + // ShardRowIDBits specify if the implicit row ID is sharded. + ShardRowIDBits uint64 + // MaxShardRowIDBits uses to record the max ShardRowIDBits be used so far. + MaxShardRowIDBits uint64 `json:"max_shard_row_id_bits"` + // AutoRandomBits is used to set the bit number to shard automatically when PKIsHandle. + AutoRandomBits uint64 `json:"auto_random_bits"` + // AutoRandomRangeBits represents the bit number of the int primary key that will be used by TiDB. + AutoRandomRangeBits uint64 `json:"auto_random_range_bits"` + // PreSplitRegions specify the pre-split region when create table. + // The pre-split region num is 2^(PreSplitRegions-1). + // And the PreSplitRegions should less than or equal to ShardRowIDBits. + PreSplitRegions uint64 `json:"pre_split_regions"` + + Partition *PartitionInfo `json:"partition"` + + Compression string `json:"compression"` + + View *ViewInfo `json:"view"` + + Sequence *SequenceInfo `json:"sequence"` + + // Lock represent the table lock info. + Lock *TableLockInfo `json:"Lock"` + + // Version means the version of the table info. 
+ Version uint16 `json:"version"` + + // TiFlashReplica means the TiFlash replica info. + TiFlashReplica *TiFlashReplicaInfo `json:"tiflash_replica"` + + // IsColumnar means the table is column-oriented. + // It's true when the engine of the table is TiFlash only. + IsColumnar bool `json:"is_columnar"` + + TempTableType `json:"temp_table_type"` + TableCacheStatusType `json:"cache_table_status"` + PlacementPolicyRef *PolicyRefInfo `json:"policy_ref_info"` + + // StatsOptions is used when do analyze/auto-analyze for each table + StatsOptions *StatsOptions `json:"stats_options"` + + ExchangePartitionInfo *ExchangePartitionInfo `json:"exchange_partition_info"` + + TTLInfo *TTLInfo `json:"ttl_info"` + + // Revision is per table schema's version, it will be increased when the schema changed. + Revision uint64 `json:"revision"` + + DBID int64 `json:"-"` +} + +// SepAutoInc decides whether _rowid and auto_increment id use separate allocator. +func (t *TableInfo) SepAutoInc() bool { + return t.Version >= TableInfoVersion5 && t.AutoIDCache == 1 +} + +// GetPartitionInfo returns the partition information. +func (t *TableInfo) GetPartitionInfo() *PartitionInfo { + if t.Partition != nil && t.Partition.Enable { + return t.Partition + } + return nil +} + +// GetUpdateTime gets the table's updating time. +func (t *TableInfo) GetUpdateTime() time.Time { + return TSConvert2Time(t.UpdateTS) +} + +// GetAutoIDSchemaID returns the schema ID that was used to create an allocator. +func (t *TableInfo) GetAutoIDSchemaID(dbID int64) int64 { + if t.AutoIDSchemaID != 0 { + return t.AutoIDSchemaID + } + return dbID +} + +// Clone clones TableInfo. 
+func (t *TableInfo) Clone() *TableInfo { + nt := *t + nt.Columns = make([]*ColumnInfo, len(t.Columns)) + nt.Indices = make([]*IndexInfo, len(t.Indices)) + nt.ForeignKeys = make([]*FKInfo, len(t.ForeignKeys)) + + for i := range t.Columns { + nt.Columns[i] = t.Columns[i].Clone() + } + + for i := range t.Indices { + nt.Indices[i] = t.Indices[i].Clone() + } + + for i := range t.ForeignKeys { + nt.ForeignKeys[i] = t.ForeignKeys[i].Clone() + } + + if t.Partition != nil { + nt.Partition = t.Partition.Clone() + } + if t.TTLInfo != nil { + nt.TTLInfo = t.TTLInfo.Clone() + } + + return &nt +} + +// GetPkName will return the pk name if pk exists. +func (t *TableInfo) GetPkName() model.CIStr { + for _, colInfo := range t.Columns { + if mysql.HasPriKeyFlag(colInfo.GetFlag()) { + return colInfo.Name + } + } + return model.CIStr{} +} + +// GetPkColInfo gets the ColumnInfo of pk if exists. +// Make sure PkIsHandle checked before call this method. +func (t *TableInfo) GetPkColInfo() *ColumnInfo { + for _, colInfo := range t.Columns { + if mysql.HasPriKeyFlag(colInfo.GetFlag()) { + return colInfo + } + } + return nil +} + +// GetAutoIncrementColInfo gets the ColumnInfo of auto_increment column if exists. +func (t *TableInfo) GetAutoIncrementColInfo() *ColumnInfo { + for _, colInfo := range t.Columns { + if mysql.HasAutoIncrementFlag(colInfo.GetFlag()) { + return colInfo + } + } + return nil +} + +// IsAutoIncColUnsigned checks whether the auto increment column is unsigned. +func (t *TableInfo) IsAutoIncColUnsigned() bool { + col := t.GetAutoIncrementColInfo() + if col == nil { + return false + } + return mysql.HasUnsignedFlag(col.GetFlag()) +} + +// ContainsAutoRandomBits indicates whether a table contains auto_random column. +func (t *TableInfo) ContainsAutoRandomBits() bool { + return t.AutoRandomBits != 0 +} + +// IsAutoRandomBitColUnsigned indicates whether the auto_random column is unsigned. Make sure the table contains auto_random before calling this method. 
+func (t *TableInfo) IsAutoRandomBitColUnsigned() bool { + if !t.PKIsHandle || t.AutoRandomBits == 0 { + return false + } + return mysql.HasUnsignedFlag(t.GetPkColInfo().GetFlag()) +} + +// Cols returns the columns of the table in public state. +func (t *TableInfo) Cols() []*ColumnInfo { + publicColumns := make([]*ColumnInfo, len(t.Columns)) + maxOffset := -1 + for _, col := range t.Columns { + if col.State != StatePublic { + continue + } + publicColumns[col.Offset] = col + if maxOffset < col.Offset { + maxOffset = col.Offset + } + } + return publicColumns[0 : maxOffset+1] +} + +// FindIndexByName finds index by name. +func (t *TableInfo) FindIndexByName(idxName string) *IndexInfo { + for _, idx := range t.Indices { + if idx.Name.L == idxName { + return idx + } + } + return nil +} + +// FindPublicColumnByName finds the public column by name. +func (t *TableInfo) FindPublicColumnByName(colNameL string) *ColumnInfo { + for _, col := range t.Cols() { + if col.Name.L == colNameL { + return col + } + } + return nil +} + +// IsLocked checks whether the table was locked. +func (t *TableInfo) IsLocked() bool { + return t.Lock != nil && len(t.Lock.Sessions) > 0 +} + +// MoveColumnInfo moves a column to another offset. 
It maintains the offsets of all affected columns and index columns.
+func (t *TableInfo) MoveColumnInfo(from, to int) {
+	if from == to {
+		return
+	}
+	updatedOffsets := make(map[int]int)
+	src := t.Columns[from]
+	if from < to {
+		for i := from; i < to; i++ {
+			t.Columns[i] = t.Columns[i+1]
+			t.Columns[i].Offset = i
+			updatedOffsets[i+1] = i
+		}
+	} else if from > to {
+		for i := from; i > to; i-- {
+			t.Columns[i] = t.Columns[i-1]
+			t.Columns[i].Offset = i
+			updatedOffsets[i-1] = i
+		}
+	}
+	t.Columns[to] = src
+	t.Columns[to].Offset = to
+	updatedOffsets[from] = to
+	for _, idx := range t.Indices {
+		for _, idxCol := range idx.Columns {
+			newOffset, ok := updatedOffsets[idxCol.Offset]
+			if ok {
+				idxCol.Offset = newOffset
+			}
+		}
+	}
+}
+
+// ClearPlacement clears all table and partitions' placement settings
+func (t *TableInfo) ClearPlacement() {
+	t.PlacementPolicyRef = nil
+	if t.Partition != nil {
+		for i := range t.Partition.Definitions {
+			def := &t.Partition.Definitions[i]
+			def.PlacementPolicyRef = nil
+		}
+	}
+}
+
+// GetPrimaryKey extracts the primary key in a table and returns `IndexInfo`
+// The returned primary key could be explicit or implicit.
+// If there is no explicit primary key in table,
+// the first UNIQUE INDEX on NOT NULL columns will be the implicit primary key.
+// For more information about implicit primary key, see +// https://dev.mysql.com/doc/refman/8.0/en/invisible-indexes.html +func (t *TableInfo) GetPrimaryKey() *IndexInfo { + var implicitPK *IndexInfo + + for _, key := range t.Indices { + if key.Primary { + // table has explicit primary key + return key + } + // The case index without any columns should never happen, but still do a check here + if len(key.Columns) == 0 { + continue + } + // find the first unique key with NOT NULL columns + if implicitPK == nil && key.Unique { + // ensure all columns in unique key have NOT NULL flag + allColNotNull := true + skip := false + for _, idxCol := range key.Columns { + col := FindColumnInfo(t.Cols(), idxCol.Name.L) + // This index has a column in DeleteOnly state, + // or it is expression index (it defined on a hidden column), + // it can not be implicit PK, go to next index iterator + if col == nil || col.Hidden { + skip = true + break + } + if !mysql.HasNotNullFlag(col.GetFlag()) { + allColNotNull = false + break + } + } + if skip { + continue + } + if allColNotNull { + implicitPK = key + } + } + } + return implicitPK +} + +// ColumnIsInIndex checks whether c is included in any indices of t. +func (t *TableInfo) ColumnIsInIndex(c *ColumnInfo) bool { + for _, index := range t.Indices { + for _, column := range index.Columns { + if column.Name.L == c.Name.L { + return true + } + } + } + return false +} + +// HasClusteredIndex checks whether the table has a clustered index. +func (t *TableInfo) HasClusteredIndex() bool { + return t.PKIsHandle || t.IsCommonHandle +} + +// IsView checks if TableInfo is a view. +func (t *TableInfo) IsView() bool { + return t.View != nil +} + +// IsSequence checks if TableInfo is a sequence. +func (t *TableInfo) IsSequence() bool { + return t.Sequence != nil +} + +// IsBaseTable checks to see the table is neither a view or a sequence. 
+func (t *TableInfo) IsBaseTable() bool { + return t.Sequence == nil && t.View == nil +} + +// FindConstraintInfoByName finds constraintInfo by name. +func (t *TableInfo) FindConstraintInfoByName(constrName string) *ConstraintInfo { + lowConstrName := strings.ToLower(constrName) + for _, chk := range t.Constraints { + if chk.Name.L == lowConstrName { + return chk + } + } + return nil +} + +// FindIndexNameByID finds index name by id. +func (t *TableInfo) FindIndexNameByID(id int64) string { + indexInfo := FindIndexInfoByID(t.Indices, id) + if indexInfo != nil { + return indexInfo.Name.L + } + return "" +} + +// FindColumnNameByID finds column name by id. +func (t *TableInfo) FindColumnNameByID(id int64) string { + colInfo := FindColumnInfoByID(t.Columns, id) + if colInfo != nil { + return colInfo.Name.L + } + return "" +} + +// GetColumnByID finds the column by ID. +func (t *TableInfo) GetColumnByID(id int64) *ColumnInfo { + for _, col := range t.Columns { + if col.State != StatePublic { + continue + } + if col.ID == id { + return col + } + } + return nil +} + +// FindFKInfoByName finds FKInfo in fks by lowercase name. +func FindFKInfoByName(fks []*FKInfo, name string) *FKInfo { + for _, fk := range fks { + if fk.Name.L == name { + return fk + } + } + return nil +} + +// TableNameInfo provides meta data describing a table name info. +type TableNameInfo struct { + ID int64 `json:"id"` + Name model.CIStr `json:"name"` +} + +// TableCacheStatusType is the type of the table cache status +type TableCacheStatusType int + +// TableCacheStatusType values. +const ( + TableCacheStatusDisable TableCacheStatusType = iota + TableCacheStatusEnable + TableCacheStatusSwitching +) + +// String implements fmt.Stringer interface. 
+func (t TableCacheStatusType) String() string {
+	switch t {
+	case TableCacheStatusDisable:
+		return "disable"
+	case TableCacheStatusEnable:
+		return "enable"
+	case TableCacheStatusSwitching:
+		return "switching"
+	default:
+		return ""
+	}
+}
+
+// TempTableType is the type of the temp table
+type TempTableType byte
+
+// TempTableType values.
+const (
+	TempTableNone TempTableType = iota
+	TempTableGlobal
+	TempTableLocal
+)
+
+// String implements fmt.Stringer interface.
+func (t TempTableType) String() string {
+	switch t {
+	case TempTableGlobal:
+		return "global"
+	case TempTableLocal:
+		return "local"
+	default:
+		return ""
+	}
+}
+
+// TableLockInfo provides meta data describing a table lock.
+type TableLockInfo struct {
+	Tp model.TableLockType
+	// Use array because there may be multiple sessions holding the same read lock.
+	Sessions []SessionInfo
+	State TableLockState
+	// TS is used to record the timestamp when this table lock was locked.
+	TS uint64
+}
+
+// SessionInfo contains the session ID and the server ID.
+type SessionInfo struct {
+	ServerID string
+	SessionID uint64
+}
+
+// String implements fmt.Stringer interface.
+func (s SessionInfo) String() string {
+	return "server: " + s.ServerID + "_session: " + strconv.FormatUint(s.SessionID, 10)
+}
+
+// TableLockTpInfo is composed of schema ID, table ID and table lock type.
+type TableLockTpInfo struct {
+	SchemaID int64
+	TableID int64
+	Tp model.TableLockType
+}
+
+// TableLockState is the state for table lock.
+type TableLockState byte
+
+const (
+	// TableLockStateNone means this table lock is absent.
+	TableLockStateNone TableLockState = iota
+	// TableLockStatePreLock means this table lock is pre-lock state. Other sessions that don't hold this lock shouldn't do the corresponding operation according to the lock type.
+	TableLockStatePreLock
+	// TableLockStatePublic means this table lock is public state.
+	TableLockStatePublic
+)
+
+// String implements fmt.Stringer interface.
+func (t TableLockState) String() string { + switch t { + case TableLockStatePreLock: + return "pre-lock" + case TableLockStatePublic: + return "public" + default: + return "none" + } +} + +// TiFlashReplicaInfo means the flash replica info. +type TiFlashReplicaInfo struct { + Count uint64 + LocationLabels []string + Available bool + AvailablePartitionIDs []int64 +} + +// IsPartitionAvailable checks whether the partition table replica was available. +func (tr *TiFlashReplicaInfo) IsPartitionAvailable(pid int64) bool { + for _, id := range tr.AvailablePartitionIDs { + if id == pid { + return true + } + } + return false +} + +// ViewInfo provides meta data describing a DB view. +type ViewInfo struct { + Algorithm model.ViewAlgorithm `json:"view_algorithm"` + Definer *auth.UserIdentity `json:"view_definer"` + Security model.ViewSecurity `json:"view_security"` + SelectStmt string `json:"view_select"` + CheckOption model.ViewCheckOption `json:"view_checkoption"` + Cols []model.CIStr `json:"view_cols"` +} + +// Some constants for sequence. +const ( + DefaultSequenceCacheBool = true + DefaultSequenceCycleBool = false + DefaultSequenceOrderBool = false + DefaultSequenceCacheValue = int64(1000) + DefaultSequenceIncrementValue = int64(1) + DefaultPositiveSequenceStartValue = int64(1) + DefaultNegativeSequenceStartValue = int64(-1) + DefaultPositiveSequenceMinValue = int64(1) + DefaultPositiveSequenceMaxValue = int64(9223372036854775806) + DefaultNegativeSequenceMaxValue = int64(-1) + DefaultNegativeSequenceMinValue = int64(-9223372036854775807) +) + +// SequenceInfo provide meta data describing a DB sequence. 
+type SequenceInfo struct {
+	Start int64 `json:"sequence_start"`
+	Cache bool `json:"sequence_cache"`
+	Cycle bool `json:"sequence_cycle"`
+	MinValue int64 `json:"sequence_min_value"`
+	MaxValue int64 `json:"sequence_max_value"`
+	Increment int64 `json:"sequence_increment"`
+	CacheValue int64 `json:"sequence_cache_value"`
+	Comment string `json:"sequence_comment"`
+}
+
+// ExchangePartitionInfo provides exchange partition info.
+type ExchangePartitionInfo struct {
+	// It is nt tableID when table which has the info is a partition table, else pt tableID.
+	ExchangePartitionTableID int64 `json:"exchange_partition_id"`
+	ExchangePartitionDefID int64 `json:"exchange_partition_def_id"`
+	// Deprecated, not used
+	XXXExchangePartitionFlag bool `json:"exchange_partition_flag"`
+}
+
+// UpdateIndexInfo is to carry the entries in the list of indexes in UPDATE INDEXES
+// during ALTER TABLE t PARTITION BY ... UPDATE INDEXES (idx_a GLOBAL, idx_b LOCAL...)
+type UpdateIndexInfo struct {
+	IndexName string `json:"index_name"`
+	Global bool `json:"global"`
+}
+
+// PartitionInfo provides table partition info.
+type PartitionInfo struct {
+	Type model.PartitionType `json:"type"`
+	Expr string `json:"expr"`
+	Columns []model.CIStr `json:"columns"`
+
+	// User may already create table with partition but table partition is not
+	// yet supported back then. When Enable is true, write/read need use tid
+	// rather than pid.
+	Enable bool `json:"enable"`
+
+	// IsEmptyColumns is for syntax like `partition by key()`.
+	// When IsEmptyColumns is true, it will not display column name in `show create table` stmt.
+	IsEmptyColumns bool `json:"is_empty_columns"`
+
+	Definitions []PartitionDefinition `json:"definitions"`
+	// AddingDefinitions is filled when adding partitions that is in the mid state.
+	AddingDefinitions []PartitionDefinition `json:"adding_definitions"`
+	// DroppingDefinitions is filled when dropping/truncating partitions that is in the mid state.
+ DroppingDefinitions []PartitionDefinition `json:"dropping_definitions"` + // NewPartitionIDs is filled when truncating partitions that is in the mid state. + NewPartitionIDs []int64 + + States []PartitionState `json:"states"` + Num uint64 `json:"num"` + // Only used during ReorganizePartition so far + DDLState SchemaState `json:"ddl_state"` + // Set during ALTER TABLE ... if the table id needs to change + // like if there is a global index or going between non-partitioned + // and partitioned table, to make the data dropping / range delete + // optimized. + NewTableID int64 `json:"new_table_id"` + // Set during ALTER TABLE ... PARTITION BY ... + // First as the new partition scheme, then in StateDeleteReorg as the old + DDLType model.PartitionType `json:"ddl_type"` + DDLExpr string `json:"ddl_expr"` + DDLColumns []model.CIStr `json:"ddl_columns"` + // For ActionAlterTablePartitioning, UPDATE INDEXES + DDLUpdateIndexes []UpdateIndexInfo `json:"ddl_update_indexes"` +} + +// Clone clones itself. +func (pi *PartitionInfo) Clone() *PartitionInfo { + newPi := *pi + newPi.Columns = make([]model.CIStr, len(pi.Columns)) + copy(newPi.Columns, pi.Columns) + + newPi.Definitions = make([]PartitionDefinition, len(pi.Definitions)) + for i := range pi.Definitions { + newPi.Definitions[i] = pi.Definitions[i].Clone() + } + + newPi.AddingDefinitions = make([]PartitionDefinition, len(pi.AddingDefinitions)) + for i := range pi.AddingDefinitions { + newPi.AddingDefinitions[i] = pi.AddingDefinitions[i].Clone() + } + + newPi.DroppingDefinitions = make([]PartitionDefinition, len(pi.DroppingDefinitions)) + for i := range pi.DroppingDefinitions { + newPi.DroppingDefinitions[i] = pi.DroppingDefinitions[i].Clone() + } + + return &newPi +} + +// GetNameByID gets the partition name by ID. +// TODO: Remove the need for this function! +func (pi *PartitionInfo) GetNameByID(id int64) string { + definitions := pi.Definitions + // do not convert this loop to `for _, def := range definitions`. 
+ // see https://github.com/pingcap/parser/pull/1072 for the benchmark. + for i := range definitions { + if id == definitions[i].ID { + return definitions[i].Name.O + } + } + return "" +} + +// GetStateByID gets the partition state by ID. +func (pi *PartitionInfo) GetStateByID(id int64) SchemaState { + for _, pstate := range pi.States { + if pstate.ID == id { + return pstate.State + } + } + return StatePublic +} + +// SetStateByID sets the state of the partition by ID. +func (pi *PartitionInfo) SetStateByID(id int64, state SchemaState) { + newState := PartitionState{ID: id, State: state} + for i, pstate := range pi.States { + if pstate.ID == id { + pi.States[i] = newState + return + } + } + if pi.States == nil { + pi.States = make([]PartitionState, 0, 1) + } + pi.States = append(pi.States, newState) +} + +// GCPartitionStates cleans up the partition state. +func (pi *PartitionInfo) GCPartitionStates() { + if len(pi.States) < 1 { + return + } + newStates := make([]PartitionState, 0, len(pi.Definitions)) + for _, state := range pi.States { + found := false + for _, def := range pi.Definitions { + if def.ID == state.ID { + found = true + break + } + } + if found { + newStates = append(newStates, state) + } + } + pi.States = newStates +} + +// HasTruncatingPartitionID checks whether the pid is truncating. +func (pi *PartitionInfo) HasTruncatingPartitionID(pid int64) bool { + for i := range pi.NewPartitionIDs { + if pi.NewPartitionIDs[i] == pid { + return true + } + } + return false +} + +// ClearReorgIntermediateInfo remove intermediate information used during reorganize partition. +func (pi *PartitionInfo) ClearReorgIntermediateInfo() { + pi.DDLType = model.PartitionTypeNone + pi.DDLExpr = "" + pi.DDLColumns = nil + pi.NewTableID = 0 +} + +// FindPartitionDefinitionByName finds PartitionDefinition by name. 
+func (pi *PartitionInfo) FindPartitionDefinitionByName(partitionDefinitionName string) int { + lowConstrName := strings.ToLower(partitionDefinitionName) + definitions := pi.Definitions + for i := range definitions { + if definitions[i].Name.L == lowConstrName { + return i + } + } + return -1 +} + +// GetPartitionIDByName gets the partition ID by name. +func (pi *PartitionInfo) GetPartitionIDByName(partitionDefinitionName string) int64 { + lowConstrName := strings.ToLower(partitionDefinitionName) + for _, definition := range pi.Definitions { + if definition.Name.L == lowConstrName { + return definition.ID + } + } + return -1 +} + +// PartitionState is the state of the partition. +type PartitionState struct { + ID int64 `json:"id"` + State SchemaState `json:"state"` +} + +// PartitionDefinition defines a single partition. +type PartitionDefinition struct { + ID int64 `json:"id"` + Name model.CIStr `json:"name"` + LessThan []string `json:"less_than"` + InValues [][]string `json:"in_values"` + PlacementPolicyRef *PolicyRefInfo `json:"policy_ref_info"` + Comment string `json:"comment,omitempty"` +} + +// Clone clones ConstraintInfo. 
+func (ci *PartitionDefinition) Clone() PartitionDefinition { + nci := *ci + nci.LessThan = make([]string, len(ci.LessThan)) + copy(nci.LessThan, ci.LessThan) + return nci +} + +const emptyPartitionDefinitionSize = int64(unsafe.Sizeof(PartitionState{})) + +// MemoryUsage return the memory usage of PartitionDefinition +func (ci *PartitionDefinition) MemoryUsage() (sum int64) { + if ci == nil { + return + } + + sum = emptyPartitionDefinitionSize + ci.Name.MemoryUsage() + if ci.PlacementPolicyRef != nil { + sum += int64(unsafe.Sizeof(ci.PlacementPolicyRef.ID)) + ci.PlacementPolicyRef.Name.MemoryUsage() + } + + for _, str := range ci.LessThan { + sum += int64(len(str)) + } + for _, strs := range ci.InValues { + for _, str := range strs { + sum += int64(len(str)) + } + } + return +} + +// ConstraintInfo provides meta data describing check-expression constraint. +type ConstraintInfo struct { + ID int64 `json:"id"` + Name model.CIStr `json:"constraint_name"` + Table model.CIStr `json:"tbl_name"` // Table name. + ConstraintCols []model.CIStr `json:"constraint_cols"` // Depended column names. + Enforced bool `json:"enforced"` + InColumn bool `json:"in_column"` // Indicate whether the constraint is column type check. + ExprString string `json:"expr_string"` + State SchemaState `json:"state"` +} + +// Clone clones ConstraintInfo. +func (ci *ConstraintInfo) Clone() *ConstraintInfo { + nci := *ci + + nci.ConstraintCols = make([]model.CIStr, len(ci.ConstraintCols)) + copy(nci.ConstraintCols, ci.ConstraintCols) + return &nci +} + +// FKInfo provides meta data describing a foreign key constraint. 
+type FKInfo struct { + ID int64 `json:"id"` + Name model.CIStr `json:"fk_name"` + RefSchema model.CIStr `json:"ref_schema"` + RefTable model.CIStr `json:"ref_table"` + RefCols []model.CIStr `json:"ref_cols"` + Cols []model.CIStr `json:"cols"` + OnDelete int `json:"on_delete"` + OnUpdate int `json:"on_update"` + State SchemaState `json:"state"` + Version int `json:"version"` +} + +// String returns the string representation of FKInfo. +func (fk *FKInfo) String(db, tb string) string { + buf := bytes.Buffer{} + buf.WriteString("`" + db + "`.`") + buf.WriteString(tb + "`, CONSTRAINT `") + buf.WriteString(fk.Name.O + "` FOREIGN KEY (") + for i, col := range fk.Cols { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString("`" + col.O + "`") + } + buf.WriteString(") REFERENCES `") + if fk.RefSchema.L != db { + buf.WriteString(fk.RefSchema.L) + buf.WriteString("`.`") + } + buf.WriteString(fk.RefTable.L) + buf.WriteString("` (") + for i, col := range fk.RefCols { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString("`" + col.O + "`") + } + buf.WriteString(")") + if onDelete := model.ReferOptionType(fk.OnDelete); onDelete != model.ReferOptionNoOption { + buf.WriteString(" ON DELETE ") + buf.WriteString(onDelete.String()) + } + if onUpdate := model.ReferOptionType(fk.OnUpdate); onUpdate != model.ReferOptionNoOption { + buf.WriteString(" ON UPDATE ") + buf.WriteString(onUpdate.String()) + } + return buf.String() +} + +// Clone clones FKInfo. +func (fk *FKInfo) Clone() *FKInfo { + nfk := *fk + + nfk.RefCols = make([]model.CIStr, len(fk.RefCols)) + nfk.Cols = make([]model.CIStr, len(fk.Cols)) + copy(nfk.RefCols, fk.RefCols) + copy(nfk.Cols, fk.Cols) + + return &nfk +} + +const ( + // FKVersion0 indicate the FKInfo version is 0. + // In FKVersion0, TiDB only supported syntax of foreign key, but the foreign key constraint doesn't take effect. + FKVersion0 = 0 + // FKVersion1 indicate the FKInfo version is 1. + // In FKVersion1, TiDB supports the foreign key constraint. 
+ FKVersion1 = 1 +) + +// ReferredFKInfo provides the cited foreign key in the child table. +type ReferredFKInfo struct { + Cols []model.CIStr `json:"cols"` + ChildSchema model.CIStr `json:"child_schema"` + ChildTable model.CIStr `json:"child_table"` + ChildFKName model.CIStr `json:"child_fk_name"` +} + +// TableItemID is composed by table ID and column/index ID +type TableItemID struct { + TableID int64 + ID int64 + IsIndex bool + IsSyncLoadFailed bool +} + +// Key is used to generate unique key for TableItemID to use in the syncload +func (t TableItemID) Key() string { + return fmt.Sprintf("%d#%d#%t", t.ID, t.TableID, t.IsIndex) +} + +// StatsLoadItem represents the load unit for statistics's memory loading. +type StatsLoadItem struct { + TableItemID + FullLoad bool +} + +// Key is used to generate unique key for TableItemID to use in the syncload +func (s StatsLoadItem) Key() string { + return fmt.Sprintf("%s#%t", s.TableItemID.Key(), s.FullLoad) +} + +// StatsOptions is the struct to store the stats options. +type StatsOptions struct { + *StatsWindowSettings + AutoRecalc bool `json:"auto_recalc"` + ColumnChoice model.ColumnChoice `json:"column_choice"` + ColumnList []model.CIStr `json:"column_list"` + SampleNum uint64 `json:"sample_num"` + SampleRate float64 `json:"sample_rate"` + Buckets uint64 `json:"buckets"` + TopN uint64 `json:"topn"` + Concurrency uint `json:"concurrency"` +} + +// NewStatsOptions creates a new StatsOptions. +func NewStatsOptions() *StatsOptions { + return &StatsOptions{ + AutoRecalc: true, + ColumnChoice: model.DefaultChoice, + ColumnList: []model.CIStr{}, + SampleNum: uint64(0), + SampleRate: 0.0, + Buckets: uint64(0), + TopN: uint64(0), + Concurrency: uint(0), + } +} + +// StatsWindowSettings is the settings of the stats window. 
+type StatsWindowSettings struct { + WindowStart time.Time `json:"window_start"` + WindowEnd time.Time `json:"window_end"` + RepeatType WindowRepeatType `json:"repeat_type"` + RepeatInterval uint `json:"repeat_interval"` +} + +// WindowRepeatType is the type of the window repeat. +type WindowRepeatType byte + +// WindowRepeatType values. +const ( + Never WindowRepeatType = iota + Day + Week + Month +) + +// String implements fmt.Stringer interface. +func (s WindowRepeatType) String() string { + switch s { + case Never: + return "Never" + case Day: + return "Day" + case Week: + return "Week" + case Month: + return "Month" + default: + return "" + } +} + +// DefaultJobInterval sets the default interval between TTL jobs +const DefaultJobInterval = time.Hour + +// TTLInfo records the TTL config +type TTLInfo struct { + ColumnName model.CIStr `json:"column"` + IntervalExprStr string `json:"interval_expr"` + // `IntervalTimeUnit` is actually ast.TimeUnitType. Use `int` to avoid cycle dependency + IntervalTimeUnit int `json:"interval_time_unit"` + Enable bool `json:"enable"` + // JobInterval is the interval between two TTL scan jobs. + // It's suggested to get a duration with `(*TTLInfo).GetJobInterval` + JobInterval string `json:"job_interval"` +} + +// Clone clones TTLInfo +func (t *TTLInfo) Clone() *TTLInfo { + cloned := *t + return &cloned +} + +// GetJobInterval parses the job interval and return +// if the job interval is an empty string, the "1h" will be returned, to keep compatible with 6.5 (in which +// TTL_JOB_INTERVAL attribute doesn't exist) +// Didn't set TTL_JOB_INTERVAL during upgrade and bootstrap because setting default value here is much simpler +// and could avoid bugs blocking users from upgrading or bootstrapping the cluster. 
+func (t *TTLInfo) GetJobInterval() (time.Duration, error) { + if len(t.JobInterval) == 0 { + return DefaultJobInterval, nil + } + + return duration.ParseDuration(t.JobInterval) +} diff --git a/pkg/meta/model/table_test.go b/pkg/meta/model/table_test.go new file mode 100644 index 0000000000000..f07e4585b79a5 --- /dev/null +++ b/pkg/meta/model/table_test.go @@ -0,0 +1,237 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "testing" + "time" + + "github.com/pingcap/tidb/pkg/parser/charset" + "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/parser/types" + "github.com/stretchr/testify/require" +) + +func checkOffsets(t *testing.T, tbl *TableInfo, ids ...int) { + require.Equal(t, len(ids), len(tbl.Columns)) + for i := 0; i < len(ids); i++ { + expected := fmt.Sprintf("c_%d", ids[i]) + require.Equal(t, expected, tbl.Columns[i].Name.L) + require.Equal(t, i, tbl.Columns[i].Offset) + } + for _, col := range tbl.Columns { + for _, idx := range tbl.Indices { + for _, idxCol := range idx.Columns { + if col.Name.L != idxCol.Name.L { + continue + } + // Columns with the same name should have a same offset. 
+ require.Equal(t, col.Offset, idxCol.Offset) + } + } + } +} + +func TestMoveColumnInfo(t *testing.T) { + c0 := newColumnForTest(0, 0) + c1 := newColumnForTest(1, 1) + c2 := newColumnForTest(2, 2) + c3 := newColumnForTest(3, 3) + c4 := newColumnForTest(4, 4) + + i0 := newIndexForTest(0, c0, c1, c2, c3, c4) + i1 := newIndexForTest(1, c4, c2) + i2 := newIndexForTest(2, c0, c4) + i3 := newIndexForTest(3, c1, c2, c3) + i4 := newIndexForTest(4, c3, c2, c1) + + tbl := &TableInfo{ + ID: 1, + Name: model.NewCIStr("t"), + Columns: []*ColumnInfo{c0, c1, c2, c3, c4}, + Indices: []*IndexInfo{i0, i1, i2, i3, i4}, + } + + // Original offsets: [0, 1, 2, 3, 4] + tbl.MoveColumnInfo(4, 0) + checkOffsets(t, tbl, 4, 0, 1, 2, 3) + tbl.MoveColumnInfo(2, 3) + checkOffsets(t, tbl, 4, 0, 2, 1, 3) + tbl.MoveColumnInfo(3, 2) + checkOffsets(t, tbl, 4, 0, 1, 2, 3) + tbl.MoveColumnInfo(0, 4) + checkOffsets(t, tbl, 0, 1, 2, 3, 4) + tbl.MoveColumnInfo(2, 2) + checkOffsets(t, tbl, 0, 1, 2, 3, 4) + tbl.MoveColumnInfo(0, 0) + checkOffsets(t, tbl, 0, 1, 2, 3, 4) + tbl.MoveColumnInfo(1, 4) + checkOffsets(t, tbl, 0, 2, 3, 4, 1) + tbl.MoveColumnInfo(3, 0) + checkOffsets(t, tbl, 4, 0, 2, 3, 1) +} + +func TestModelBasic(t *testing.T) { + column := &ColumnInfo{ + ID: 1, + Name: model.NewCIStr("c"), + Offset: 0, + DefaultValue: 0, + FieldType: *types.NewFieldType(0), + Hidden: true, + } + column.AddFlag(mysql.PriKeyFlag) + + index := &IndexInfo{ + Name: model.NewCIStr("key"), + Table: model.NewCIStr("t"), + Columns: []*IndexColumn{ + { + Name: model.NewCIStr("c"), + Offset: 0, + Length: 10, + }}, + Unique: true, + Primary: true, + } + + fk := &FKInfo{ + RefCols: []model.CIStr{model.NewCIStr("a")}, + Cols: []model.CIStr{model.NewCIStr("a")}, + } + + seq := &SequenceInfo{ + Increment: 1, + MinValue: 1, + MaxValue: 100, + } + + table := &TableInfo{ + ID: 1, + Name: model.NewCIStr("t"), + Charset: "utf8", + Collate: "utf8_bin", + Columns: []*ColumnInfo{column}, + Indices: []*IndexInfo{index}, + ForeignKeys: 
[]*FKInfo{fk}, + PKIsHandle: true, + } + + table2 := &TableInfo{ + ID: 2, + Name: model.NewCIStr("s"), + Sequence: seq, + } + + dbInfo := &DBInfo{ + ID: 1, + Name: model.NewCIStr("test"), + Charset: "utf8", + Collate: "utf8_bin", + } + dbInfo.Deprecated.Tables = []*TableInfo{table} + + n := dbInfo.Clone() + require.Equal(t, dbInfo, n) + + pkName := table.GetPkName() + require.Equal(t, model.NewCIStr("c"), pkName) + newColumn := table.GetPkColInfo() + require.Equal(t, true, newColumn.Hidden) + require.Equal(t, column, newColumn) + inIdx := table.ColumnIsInIndex(column) + require.Equal(t, true, inIdx) + tp := model.IndexTypeBtree + require.Equal(t, "BTREE", tp.String()) + tp = model.IndexTypeHash + require.Equal(t, "HASH", tp.String()) + tp = 1e5 + require.Equal(t, "", tp.String()) + has := index.HasPrefixIndex() + require.Equal(t, true, has) + require.Equal(t, TSConvert2Time(table.UpdateTS), table.GetUpdateTime()) + require.True(t, table2.IsSequence()) + require.False(t, table2.IsBaseTable()) + + // Corner cases + column.ToggleFlag(mysql.PriKeyFlag) + pkName = table.GetPkName() + require.Equal(t, model.NewCIStr(""), pkName) + newColumn = table.GetPkColInfo() + require.Nil(t, newColumn) + anCol := &ColumnInfo{ + Name: model.NewCIStr("d"), + } + exIdx := table.ColumnIsInIndex(anCol) + require.Equal(t, false, exIdx) + anIndex := &IndexInfo{ + Columns: []*IndexColumn{}, + } + no := anIndex.HasPrefixIndex() + require.Equal(t, false, no) + + extraPK := NewExtraHandleColInfo() + require.Equal(t, mysql.NotNullFlag|mysql.PriKeyFlag, extraPK.GetFlag()) + require.Equal(t, charset.CharsetBin, extraPK.GetCharset()) + require.Equal(t, charset.CollationBin, extraPK.GetCollate()) +} + +func TestTTLInfoClone(t *testing.T) { + ttlInfo := &TTLInfo{ + ColumnName: model.NewCIStr("test"), + IntervalExprStr: "test_expr", + IntervalTimeUnit: 5, + Enable: true, + } + + clonedTTLInfo := ttlInfo.Clone() + clonedTTLInfo.ColumnName = model.NewCIStr("test_2") + clonedTTLInfo.IntervalExprStr = 
"test_expr_2" + clonedTTLInfo.IntervalTimeUnit = 9 + clonedTTLInfo.Enable = false + + require.Equal(t, "test", ttlInfo.ColumnName.O) + require.Equal(t, "test_expr", ttlInfo.IntervalExprStr) + require.Equal(t, 5, ttlInfo.IntervalTimeUnit) + require.Equal(t, true, ttlInfo.Enable) +} + +func TestTTLJobInterval(t *testing.T) { + ttlInfo := &TTLInfo{} + + interval, err := ttlInfo.GetJobInterval() + require.NoError(t, err) + require.Equal(t, time.Hour, interval) + + ttlInfo = &TTLInfo{JobInterval: "200h"} + interval, err = ttlInfo.GetJobInterval() + require.NoError(t, err) + require.Equal(t, time.Hour*200, interval) +} + +func TestClearReorgIntermediateInfo(t *testing.T) { + ptInfo := &PartitionInfo{} + ptInfo.DDLType = model.PartitionTypeHash + ptInfo.DDLExpr = "Test DDL Expr" + ptInfo.NewTableID = 1111 + + ptInfo.ClearReorgIntermediateInfo() + require.Equal(t, model.PartitionTypeNone, ptInfo.DDLType) + require.Equal(t, "", ptInfo.DDLExpr) + require.Equal(t, true, ptInfo.DDLColumns == nil) + require.Equal(t, int64(0), ptInfo.NewTableID) +} diff --git a/pkg/parser/ast/BUILD.bazel b/pkg/parser/ast/BUILD.bazel index 249c3000de383..e07c360c2cc16 100644 --- a/pkg/parser/ast/BUILD.bazel +++ b/pkg/parser/ast/BUILD.bazel @@ -56,10 +56,8 @@ go_test( "//pkg/parser/auth", "//pkg/parser/charset", "//pkg/parser/format", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/test_driver", - "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/parser/ast/ast.go b/pkg/parser/ast/ast.go index 80868b1fbc3e1..a4aa11737b8d8 100644 --- a/pkg/parser/ast/ast.go +++ b/pkg/parser/ast/ast.go @@ -20,7 +20,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/charset" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/types" ) @@ -120,26 +119,6 @@ type DMLNode interface { dmlStatement() } -// ResultField represents a result field which can be a column from a table, -// or an 
expression in select field. It is a generated property during -// binding process. ResultField is the key element to evaluate a ColumnNameExpr. -// After resolving process, every ColumnNameExpr will be resolved to a ResultField. -// During execution, every row retrieved from table will set the row value to -// ResultFields of that table, so ColumnNameExpr resolved to that ResultField can be -// easily evaluated. -type ResultField struct { - Column *model.ColumnInfo - ColumnAsName model.CIStr - // EmptyOrgName indicates whether this field has an empty org_name. A field has an empty org name, if it's an - // expression. It's not sure whether it's safe to use empty string in `.Column.Name`, so a new field is added to - // indicate whether it's empty. - EmptyOrgName bool - - Table *model.TableInfo - TableAsName model.CIStr - DBName model.CIStr -} - // ResultSetNode interface has a ResultFields property, represents a Node that returns result set. // Implementations include SelectStmt, SubqueryExpr, TableSource, TableName, Join and SetOprStmt. type ResultSetNode interface { diff --git a/pkg/parser/ast/ddl.go b/pkg/parser/ast/ddl.go index 0c39cbfceaf9a..92b8a09743632 100644 --- a/pkg/parser/ast/ddl.go +++ b/pkg/parser/ast/ddl.go @@ -2302,17 +2302,9 @@ func (n *ResourceGroupOption) Restore(ctx *format.RestoreCtx) error { return nil } -type RunawayOptionType int - -const ( - RunawayRule RunawayOptionType = iota - RunawayAction - RunawayWatch -) - // ResourceGroupRunawayOption is used for parsing resource group runaway rule option. 
type ResourceGroupRunawayOption struct { - Tp RunawayOptionType + Tp model.RunawayOptionType RuleOption *ResourceGroupRunawayRuleOption ActionOption *ResourceGroupRunawayActionOption WatchOption *ResourceGroupRunawayWatchOption @@ -2320,11 +2312,11 @@ type ResourceGroupRunawayOption struct { func (n *ResourceGroupRunawayOption) Restore(ctx *format.RestoreCtx) error { switch n.Tp { - case RunawayRule: + case model.RunawayRule: n.RuleOption.restore(ctx) - case RunawayAction: + case model.RunawayAction: n.ActionOption.Restore(ctx) - case RunawayWatch: + case model.RunawayWatch: n.WatchOption.restore(ctx) default: return errors.Errorf("invalid ResourceGroupRunawayOption: %d", n.Tp) diff --git a/pkg/parser/ast/misc.go b/pkg/parser/ast/misc.go index 2be542f069cfe..9d163d8d4e4f7 100644 --- a/pkg/parser/ast/misc.go +++ b/pkg/parser/ast/misc.go @@ -2418,40 +2418,6 @@ const ( BDRRoleNone BDRRole = "" ) -// DeniedByBDR checks whether the DDL is denied by BDR. -func DeniedByBDR(role BDRRole, action model.ActionType, job *model.Job) (denied bool) { - ddlType, ok := model.ActionBDRMap[action] - switch role { - case BDRRolePrimary: - if !ok { - return true - } - - // Can't add unique index on primary role. - if job != nil && (action == model.ActionAddIndex || action == model.ActionAddPrimaryKey) && - len(job.Args) >= 1 && job.Args[0].(bool) { - // job.Args[0] is unique when job.Type is ActionAddIndex or ActionAddPrimaryKey. 
- return true - } - - if ddlType == model.SafeDDL || ddlType == model.UnmanagementDDL { - return false - } - case BDRRoleSecondary: - if !ok { - return true - } - if ddlType == model.UnmanagementDDL { - return false - } - default: - // if user do not set bdr role, we will not deny any ddl as `none` - return false - } - - return true -} - type StatementScope int const ( @@ -3751,12 +3717,12 @@ type TableOptimizerHint struct { // See https://dev.mysql.com/doc/refman/5.7/en/optimizer-hints.html#optimizer-hints-execution-time // - MAX_EXECUTION_TIME => uint64 // - MEMORY_QUOTA => int64 - // - QUERY_TYPE => model.CIStr + // - QUERY_TYPE => CIStr // // Time Range is used to hint the time range of inspection tables // e.g: select /*+ time_range('','') */ * from information_schema.inspection_result. // - TIME_RANGE => ast.HintTimeRange - // - READ_FROM_STORAGE => model.CIStr + // - READ_FROM_STORAGE => CIStr // - USE_TOJA => bool // - NTH_PLAN => int64 HintData interface{} diff --git a/pkg/parser/ast/misc_test.go b/pkg/parser/ast/misc_test.go index c720f269b612b..de8c6401c8b64 100644 --- a/pkg/parser/ast/misc_test.go +++ b/pkg/parser/ast/misc_test.go @@ -20,9 +20,7 @@ import ( "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -427,393 +425,6 @@ func TestRedactURL(t *testing.T) { } } -func TestDeniedByBDR(t *testing.T) { - testCases := []struct { - role ast.BDRRole - action model.ActionType - expected bool - }{ - // Roles for ActionCreateSchema - {ast.BDRRolePrimary, model.ActionCreateSchema, false}, - {ast.BDRRoleSecondary, model.ActionCreateSchema, true}, - {ast.BDRRoleNone, model.ActionCreateSchema, false}, - - // Roles for ActionDropSchema - {ast.BDRRolePrimary, model.ActionDropSchema, true}, - {ast.BDRRoleSecondary, 
model.ActionDropSchema, true}, - {ast.BDRRoleNone, model.ActionDropSchema, false}, - - // Roles for ActionCreateTable - {ast.BDRRolePrimary, model.ActionCreateTable, false}, - {ast.BDRRoleSecondary, model.ActionCreateTable, true}, - {ast.BDRRoleNone, model.ActionCreateTable, false}, - - // Roles for ActionDropTable - {ast.BDRRolePrimary, model.ActionDropTable, true}, - {ast.BDRRoleSecondary, model.ActionDropTable, true}, - {ast.BDRRoleNone, model.ActionDropTable, false}, - - // Roles for ActionAddColumn - {ast.BDRRolePrimary, model.ActionAddColumn, false}, - {ast.BDRRoleSecondary, model.ActionAddColumn, true}, - {ast.BDRRoleNone, model.ActionAddColumn, false}, - - // Roles for ActionDropColumn - {ast.BDRRolePrimary, model.ActionDropColumn, true}, - {ast.BDRRoleSecondary, model.ActionDropColumn, true}, - {ast.BDRRoleNone, model.ActionDropColumn, false}, - - // Roles for ActionAddIndex - {ast.BDRRolePrimary, model.ActionAddIndex, false}, - {ast.BDRRoleSecondary, model.ActionAddIndex, true}, - {ast.BDRRoleNone, model.ActionAddIndex, false}, - - // Roles for ActionDropIndex - {ast.BDRRolePrimary, model.ActionDropIndex, false}, - {ast.BDRRoleSecondary, model.ActionDropIndex, true}, - {ast.BDRRoleNone, model.ActionDropIndex, false}, - - // Roles for ActionAddForeignKey - {ast.BDRRolePrimary, model.ActionAddForeignKey, true}, - {ast.BDRRoleSecondary, model.ActionAddForeignKey, true}, - {ast.BDRRoleNone, model.ActionAddForeignKey, false}, - - // Roles for ActionDropForeignKey - {ast.BDRRolePrimary, model.ActionDropForeignKey, true}, - {ast.BDRRoleSecondary, model.ActionDropForeignKey, true}, - {ast.BDRRoleNone, model.ActionDropForeignKey, false}, - - // Roles for ActionTruncateTable - {ast.BDRRolePrimary, model.ActionTruncateTable, true}, - {ast.BDRRoleSecondary, model.ActionTruncateTable, true}, - {ast.BDRRoleNone, model.ActionTruncateTable, false}, - - // Roles for ActionModifyColumn - {ast.BDRRolePrimary, model.ActionModifyColumn, false}, - {ast.BDRRoleSecondary, 
model.ActionModifyColumn, true}, - {ast.BDRRoleNone, model.ActionModifyColumn, false}, - - // Roles for ActionRebaseAutoID - {ast.BDRRolePrimary, model.ActionRebaseAutoID, true}, - {ast.BDRRoleSecondary, model.ActionRebaseAutoID, true}, - {ast.BDRRoleNone, model.ActionRebaseAutoID, false}, - - // Roles for ActionRenameTable - {ast.BDRRolePrimary, model.ActionRenameTable, true}, - {ast.BDRRoleSecondary, model.ActionRenameTable, true}, - {ast.BDRRoleNone, model.ActionRenameTable, false}, - - // Roles for ActionSetDefaultValue - {ast.BDRRolePrimary, model.ActionSetDefaultValue, false}, - {ast.BDRRoleSecondary, model.ActionSetDefaultValue, true}, - {ast.BDRRoleNone, model.ActionSetDefaultValue, false}, - - // Roles for ActionShardRowID - {ast.BDRRolePrimary, model.ActionShardRowID, true}, - {ast.BDRRoleSecondary, model.ActionShardRowID, true}, - {ast.BDRRoleNone, model.ActionShardRowID, false}, - - // Roles for ActionModifyTableComment - {ast.BDRRolePrimary, model.ActionModifyTableComment, false}, - {ast.BDRRoleSecondary, model.ActionModifyTableComment, true}, - {ast.BDRRoleNone, model.ActionModifyTableComment, false}, - - // Roles for ActionRenameIndex - {ast.BDRRolePrimary, model.ActionRenameIndex, false}, - {ast.BDRRoleSecondary, model.ActionRenameIndex, true}, - {ast.BDRRoleNone, model.ActionRenameIndex, false}, - - // Roles for ActionAddTablePartition - {ast.BDRRolePrimary, model.ActionAddTablePartition, false}, - {ast.BDRRoleSecondary, model.ActionAddTablePartition, true}, - {ast.BDRRoleNone, model.ActionAddTablePartition, false}, - - // Roles for ActionDropTablePartition - {ast.BDRRolePrimary, model.ActionDropTablePartition, true}, - {ast.BDRRoleSecondary, model.ActionDropTablePartition, true}, - {ast.BDRRoleNone, model.ActionDropTablePartition, false}, - - // Roles for ActionCreateView - {ast.BDRRolePrimary, model.ActionCreateView, false}, - {ast.BDRRoleSecondary, model.ActionCreateView, true}, - {ast.BDRRoleNone, model.ActionCreateView, false}, - - // Roles 
for ActionModifyTableCharsetAndCollate - {ast.BDRRolePrimary, model.ActionModifyTableCharsetAndCollate, true}, - {ast.BDRRoleSecondary, model.ActionModifyTableCharsetAndCollate, true}, - {ast.BDRRoleNone, model.ActionModifyTableCharsetAndCollate, false}, - - // Roles for ActionTruncateTablePartition - {ast.BDRRolePrimary, model.ActionTruncateTablePartition, true}, - {ast.BDRRoleSecondary, model.ActionTruncateTablePartition, true}, - {ast.BDRRoleNone, model.ActionTruncateTablePartition, false}, - - // Roles for ActionDropView - {ast.BDRRolePrimary, model.ActionDropView, false}, - {ast.BDRRoleSecondary, model.ActionDropView, true}, - {ast.BDRRoleNone, model.ActionDropView, false}, - - // Roles for ActionRecoverTable - {ast.BDRRolePrimary, model.ActionRecoverTable, true}, - {ast.BDRRoleSecondary, model.ActionRecoverTable, true}, - {ast.BDRRoleNone, model.ActionRecoverTable, false}, - - // Roles for ActionModifySchemaCharsetAndCollate - {ast.BDRRolePrimary, model.ActionModifySchemaCharsetAndCollate, true}, - {ast.BDRRoleSecondary, model.ActionModifySchemaCharsetAndCollate, true}, - {ast.BDRRoleNone, model.ActionModifySchemaCharsetAndCollate, false}, - - // Roles for ActionLockTable - {ast.BDRRolePrimary, model.ActionLockTable, true}, - {ast.BDRRoleSecondary, model.ActionLockTable, true}, - {ast.BDRRoleNone, model.ActionLockTable, false}, - - // Roles for ActionUnlockTable - {ast.BDRRolePrimary, model.ActionUnlockTable, true}, - {ast.BDRRoleSecondary, model.ActionUnlockTable, true}, - {ast.BDRRoleNone, model.ActionUnlockTable, false}, - - // Roles for ActionRepairTable - {ast.BDRRolePrimary, model.ActionRepairTable, true}, - {ast.BDRRoleSecondary, model.ActionRepairTable, true}, - {ast.BDRRoleNone, model.ActionRepairTable, false}, - - // Roles for ActionSetTiFlashReplica - {ast.BDRRolePrimary, model.ActionSetTiFlashReplica, true}, - {ast.BDRRoleSecondary, model.ActionSetTiFlashReplica, true}, - {ast.BDRRoleNone, model.ActionSetTiFlashReplica, false}, - - // Roles for 
ActionUpdateTiFlashReplicaStatus - {ast.BDRRolePrimary, model.ActionUpdateTiFlashReplicaStatus, true}, - {ast.BDRRoleSecondary, model.ActionUpdateTiFlashReplicaStatus, true}, - {ast.BDRRoleNone, model.ActionUpdateTiFlashReplicaStatus, false}, - - // Roles for ActionAddPrimaryKey - {ast.BDRRolePrimary, model.ActionAddPrimaryKey, true}, - {ast.BDRRoleSecondary, model.ActionAddPrimaryKey, true}, - {ast.BDRRoleNone, model.ActionAddPrimaryKey, false}, - - // Roles for ActionDropPrimaryKey - {ast.BDRRolePrimary, model.ActionDropPrimaryKey, false}, - {ast.BDRRoleSecondary, model.ActionDropPrimaryKey, true}, - {ast.BDRRoleNone, model.ActionDropPrimaryKey, false}, - - // Roles for ActionCreateSequence - {ast.BDRRolePrimary, model.ActionCreateSequence, true}, - {ast.BDRRoleSecondary, model.ActionCreateSequence, true}, - {ast.BDRRoleNone, model.ActionCreateSequence, false}, - - // Roles for ActionAlterSequence - {ast.BDRRolePrimary, model.ActionAlterSequence, true}, - {ast.BDRRoleSecondary, model.ActionAlterSequence, true}, - {ast.BDRRoleNone, model.ActionAlterSequence, false}, - - // Roles for ActionDropSequence - {ast.BDRRolePrimary, model.ActionDropSequence, true}, - {ast.BDRRoleSecondary, model.ActionDropSequence, true}, - {ast.BDRRoleNone, model.ActionDropSequence, false}, - - // Roles for ActionModifyTableAutoIdCache - {ast.BDRRolePrimary, model.ActionModifyTableAutoIdCache, true}, - {ast.BDRRoleSecondary, model.ActionModifyTableAutoIdCache, true}, - {ast.BDRRoleNone, model.ActionModifyTableAutoIdCache, false}, - - // Roles for ActionRebaseAutoRandomBase - {ast.BDRRolePrimary, model.ActionRebaseAutoRandomBase, true}, - {ast.BDRRoleSecondary, model.ActionRebaseAutoRandomBase, true}, - {ast.BDRRoleNone, model.ActionRebaseAutoRandomBase, false}, - - // Roles for ActionAlterIndexVisibility - {ast.BDRRolePrimary, model.ActionAlterIndexVisibility, false}, - {ast.BDRRoleSecondary, model.ActionAlterIndexVisibility, true}, - {ast.BDRRoleNone, model.ActionAlterIndexVisibility, 
false}, - - // Roles for ActionExchangeTablePartition - {ast.BDRRolePrimary, model.ActionExchangeTablePartition, true}, - {ast.BDRRoleSecondary, model.ActionExchangeTablePartition, true}, - {ast.BDRRoleNone, model.ActionExchangeTablePartition, false}, - - // Roles for ActionAddCheckConstraint - {ast.BDRRolePrimary, model.ActionAddCheckConstraint, true}, - {ast.BDRRoleSecondary, model.ActionAddCheckConstraint, true}, - {ast.BDRRoleNone, model.ActionAddCheckConstraint, false}, - - // Roles for ActionDropCheckConstraint - {ast.BDRRolePrimary, model.ActionDropCheckConstraint, true}, - {ast.BDRRoleSecondary, model.ActionDropCheckConstraint, true}, - {ast.BDRRoleNone, model.ActionDropCheckConstraint, false}, - - // Roles for ActionAlterCheckConstraint - {ast.BDRRolePrimary, model.ActionAlterCheckConstraint, true}, - {ast.BDRRoleSecondary, model.ActionAlterCheckConstraint, true}, - {ast.BDRRoleNone, model.ActionAlterCheckConstraint, false}, - - // Roles for ActionRenameTables - {ast.BDRRolePrimary, model.ActionRenameTables, true}, - {ast.BDRRoleSecondary, model.ActionRenameTables, true}, - {ast.BDRRoleNone, model.ActionRenameTables, false}, - - // Roles for ActionAlterTableAttributes - {ast.BDRRolePrimary, model.ActionAlterTableAttributes, true}, - {ast.BDRRoleSecondary, model.ActionAlterTableAttributes, true}, - {ast.BDRRoleNone, model.ActionAlterTableAttributes, false}, - - // Roles for ActionAlterTablePartitionAttributes - {ast.BDRRolePrimary, model.ActionAlterTablePartitionAttributes, true}, - {ast.BDRRoleSecondary, model.ActionAlterTablePartitionAttributes, true}, - {ast.BDRRoleNone, model.ActionAlterTablePartitionAttributes, false}, - - // Roles for ActionCreatePlacementPolicy - {ast.BDRRolePrimary, model.ActionCreatePlacementPolicy, false}, - {ast.BDRRoleSecondary, model.ActionCreatePlacementPolicy, false}, - {ast.BDRRoleNone, model.ActionCreatePlacementPolicy, false}, - - // Roles for ActionAlterPlacementPolicy - {ast.BDRRolePrimary, 
model.ActionAlterPlacementPolicy, false}, - {ast.BDRRoleSecondary, model.ActionAlterPlacementPolicy, false}, - {ast.BDRRoleNone, model.ActionAlterPlacementPolicy, false}, - - // Roles for ActionDropPlacementPolicy - {ast.BDRRolePrimary, model.ActionDropPlacementPolicy, false}, - {ast.BDRRoleSecondary, model.ActionDropPlacementPolicy, false}, - {ast.BDRRoleNone, model.ActionDropPlacementPolicy, false}, - - // Roles for ActionAlterTablePartitionPlacement - {ast.BDRRolePrimary, model.ActionAlterTablePartitionPlacement, true}, - {ast.BDRRoleSecondary, model.ActionAlterTablePartitionPlacement, true}, - {ast.BDRRoleNone, model.ActionAlterTablePartitionPlacement, false}, - - // Roles for ActionModifySchemaDefaultPlacement - {ast.BDRRolePrimary, model.ActionModifySchemaDefaultPlacement, true}, - {ast.BDRRoleSecondary, model.ActionModifySchemaDefaultPlacement, true}, - {ast.BDRRoleNone, model.ActionModifySchemaDefaultPlacement, false}, - - // Roles for ActionAlterTablePlacement - {ast.BDRRolePrimary, model.ActionAlterTablePlacement, true}, - {ast.BDRRoleSecondary, model.ActionAlterTablePlacement, true}, - {ast.BDRRoleNone, model.ActionAlterTablePlacement, false}, - - // Roles for ActionAlterCacheTable - {ast.BDRRolePrimary, model.ActionAlterCacheTable, true}, - {ast.BDRRoleSecondary, model.ActionAlterCacheTable, true}, - {ast.BDRRoleNone, model.ActionAlterCacheTable, false}, - - // Roles for ActionAlterTableStatsOptions - {ast.BDRRolePrimary, model.ActionAlterTableStatsOptions, true}, - {ast.BDRRoleSecondary, model.ActionAlterTableStatsOptions, true}, - {ast.BDRRoleNone, model.ActionAlterTableStatsOptions, false}, - - // Roles for ActionAlterNoCacheTable - {ast.BDRRolePrimary, model.ActionAlterNoCacheTable, true}, - {ast.BDRRoleSecondary, model.ActionAlterNoCacheTable, true}, - {ast.BDRRoleNone, model.ActionAlterNoCacheTable, false}, - - // Roles for ActionCreateTables - {ast.BDRRolePrimary, model.ActionCreateTables, false}, - {ast.BDRRoleSecondary, 
model.ActionCreateTables, true}, - {ast.BDRRoleNone, model.ActionCreateTables, false}, - - // Roles for ActionMultiSchemaChange - {ast.BDRRolePrimary, model.ActionMultiSchemaChange, true}, - {ast.BDRRoleSecondary, model.ActionMultiSchemaChange, true}, - {ast.BDRRoleNone, model.ActionMultiSchemaChange, false}, - - // Roles for ActionFlashbackCluster - {ast.BDRRolePrimary, model.ActionFlashbackCluster, true}, - {ast.BDRRoleSecondary, model.ActionFlashbackCluster, true}, - {ast.BDRRoleNone, model.ActionFlashbackCluster, false}, - - // Roles for ActionRecoverSchema - {ast.BDRRolePrimary, model.ActionRecoverSchema, true}, - {ast.BDRRoleSecondary, model.ActionRecoverSchema, true}, - {ast.BDRRoleNone, model.ActionRecoverSchema, false}, - - // Roles for ActionReorganizePartition - {ast.BDRRolePrimary, model.ActionReorganizePartition, true}, - {ast.BDRRoleSecondary, model.ActionReorganizePartition, true}, - {ast.BDRRoleNone, model.ActionReorganizePartition, false}, - - // Roles for ActionAlterTTLInfo - {ast.BDRRolePrimary, model.ActionAlterTTLInfo, false}, - {ast.BDRRoleSecondary, model.ActionAlterTTLInfo, true}, - {ast.BDRRoleNone, model.ActionAlterTTLInfo, false}, - - // Roles for ActionAlterTTLRemove - {ast.BDRRolePrimary, model.ActionAlterTTLRemove, false}, - {ast.BDRRoleSecondary, model.ActionAlterTTLRemove, true}, - {ast.BDRRoleNone, model.ActionAlterTTLRemove, false}, - - // Roles for ActionCreateResourceGroup - {ast.BDRRolePrimary, model.ActionCreateResourceGroup, false}, - {ast.BDRRoleSecondary, model.ActionCreateResourceGroup, false}, - {ast.BDRRoleNone, model.ActionCreateResourceGroup, false}, - - // Roles for ActionAlterResourceGroup - {ast.BDRRolePrimary, model.ActionAlterResourceGroup, false}, - {ast.BDRRoleSecondary, model.ActionAlterResourceGroup, false}, - {ast.BDRRoleNone, model.ActionAlterResourceGroup, false}, - - // Roles for ActionDropResourceGroup - {ast.BDRRolePrimary, model.ActionDropResourceGroup, false}, - {ast.BDRRoleSecondary, 
model.ActionDropResourceGroup, false}, - {ast.BDRRoleNone, model.ActionDropResourceGroup, false}, - - // Roles for ActionAlterTablePartitioning - {ast.BDRRolePrimary, model.ActionAlterTablePartitioning, true}, - {ast.BDRRoleSecondary, model.ActionAlterTablePartitioning, true}, - {ast.BDRRoleNone, model.ActionAlterTablePartitioning, false}, - - // Roles for ActionRemovePartitioning - {ast.BDRRolePrimary, model.ActionRemovePartitioning, true}, - {ast.BDRRoleSecondary, model.ActionRemovePartitioning, true}, - {ast.BDRRoleNone, model.ActionRemovePartitioning, false}, - } - - for _, tc := range testCases { - assert.Equal(t, tc.expected, ast.DeniedByBDR(tc.role, tc.action, nil), fmt.Sprintf("role: %v, action: %v", tc.role, tc.action)) - } - - // test special cases - testCases2 := []struct { - role ast.BDRRole - action model.ActionType - job *model.Job - expected bool - }{ - { - role: ast.BDRRolePrimary, - action: model.ActionAddPrimaryKey, - job: &model.Job{ - Type: model.ActionAddPrimaryKey, - Args: []interface{}{true}, - }, - expected: true, - }, - { - role: ast.BDRRolePrimary, - action: model.ActionAddIndex, - job: &model.Job{ - Type: model.ActionAddIndex, - Args: []interface{}{true}, - }, - expected: true, - }, - { - role: ast.BDRRolePrimary, - action: model.ActionAddIndex, - job: &model.Job{ - Type: model.ActionAddIndex, - Args: []interface{}{false}, - }, - expected: false, - }, - } - - for _, tc := range testCases2 { - assert.Equal(t, tc.expected, ast.DeniedByBDR(tc.role, tc.action, tc.job), fmt.Sprintf("role: %v, action: %v", tc.role, tc.action)) - } -} - func TestAddQueryWatchStmtRestore(t *testing.T) { testCases := []NodeRestoreTestCase{ { diff --git a/pkg/parser/model/BUILD.bazel b/pkg/parser/model/BUILD.bazel index 5ad4d0c5d0640..13c7c1aaa5127 100644 --- a/pkg/parser/model/BUILD.bazel +++ b/pkg/parser/model/BUILD.bazel @@ -2,40 +2,17 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "model", - srcs = [ - "ddl.go", - 
"flags.go", - "model.go", - "reorg.go", - ], + srcs = ["model.go"], importpath = "github.com/pingcap/tidb/pkg/parser/model", visibility = ["//visibility:public"], - deps = [ - "//pkg/parser/auth", - "//pkg/parser/charset", - "//pkg/parser/duration", - "//pkg/parser/mysql", - "//pkg/parser/terror", - "//pkg/parser/types", - "@com_github_pingcap_errors//:errors", - ], + deps = ["@com_github_pingcap_errors//:errors"], ) go_test( name = "model_test", timeout = "short", - srcs = [ - "ddl_test.go", - "model_test.go", - ], + srcs = ["model_test.go"], embed = [":model"], flaky = True, - shard_count = 23, - deps = [ - "//pkg/parser/charset", - "//pkg/parser/mysql", - "//pkg/parser/terror", - "//pkg/parser/types", - "@com_github_stretchr_testify//require", - ], + deps = ["@com_github_stretchr_testify//require"], ) diff --git a/pkg/parser/model/ddl_test.go b/pkg/parser/model/ddl_test.go deleted file mode 100644 index 43db16008a190..0000000000000 --- a/pkg/parser/model/ddl_test.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package model_test - -import ( - "testing" - "unsafe" - - "github.com/pingcap/tidb/pkg/parser/model" - "github.com/pingcap/tidb/pkg/parser/terror" - "github.com/stretchr/testify/require" -) - -func TestJobClone(t *testing.T) { - job := &model.Job{ - ID: 100, - Type: model.ActionCreateTable, - SchemaID: 101, - TableID: 102, - SchemaName: "test", - TableName: "t", - State: model.JobStateDone, - MultiSchemaInfo: nil, - } - clone := job.Clone() - require.Equal(t, job.ID, clone.ID) - require.Equal(t, job.Type, clone.Type) - require.Equal(t, job.SchemaID, clone.SchemaID) - require.Equal(t, job.TableID, clone.TableID) - require.Equal(t, job.SchemaName, clone.SchemaName) - require.Equal(t, job.TableName, clone.TableName) - require.Equal(t, job.State, clone.State) - require.Equal(t, job.MultiSchemaInfo, clone.MultiSchemaInfo) -} - -func TestJobSize(t *testing.T) { - msg := `Please make sure that the following methods work as expected: -- SubJob.FromProxyJob() -- SubJob.ToProxyJob() -` - job := model.Job{} - require.Equal(t, 400, int(unsafe.Sizeof(job)), msg) -} - -func TestBackfillMetaCodec(t *testing.T) { - jm := &model.JobMeta{ - SchemaID: 1, - TableID: 2, - Query: "alter table t add index idx(a)", - Priority: 1, - } - bm := &model.BackfillMeta{ - EndInclude: true, - Error: terror.ErrResultUndetermined, - JobMeta: jm, - } - bmBytes, err := bm.Encode() - require.NoError(t, err) - bmRet := &model.BackfillMeta{} - bmRet.Decode(bmBytes) - require.Equal(t, bm, bmRet) -} - -func TestMayNeedReorg(t *testing.T) { - //TODO(bb7133): add more test cases for different ActionType. 
- reorgJobTypes := []model.ActionType{ - model.ActionReorganizePartition, - model.ActionRemovePartitioning, - model.ActionAlterTablePartitioning, - model.ActionAddIndex, - model.ActionAddPrimaryKey, - } - generalJobTypes := []model.ActionType{ - model.ActionCreateTable, - model.ActionDropTable, - } - job := &model.Job{ - ID: 100, - Type: model.ActionCreateTable, - SchemaID: 101, - TableID: 102, - SchemaName: "test", - TableName: "t", - State: model.JobStateDone, - MultiSchemaInfo: nil, - } - for _, jobType := range reorgJobTypes { - job.Type = jobType - require.True(t, job.MayNeedReorg()) - } - for _, jobType := range generalJobTypes { - job.Type = jobType - require.False(t, job.MayNeedReorg()) - } -} - -func TestActionBDRMap(t *testing.T) { - require.Equal(t, len(model.ActionMap), len(model.ActionBDRMap)) - - totalActions := 0 - for bdrType, actions := range model.BDRActionMap { - for _, action := range actions { - require.Equal(t, bdrType, model.ActionBDRMap[action], "action %s", action) - } - totalActions += len(actions) - } - - require.Equal(t, totalActions, len(model.ActionBDRMap)) -} - -func TestInFinalState(t *testing.T) { - for s, v := range map[model.JobState]bool{ - model.JobStateSynced: true, - model.JobStateCancelled: true, - model.JobStatePaused: true, - model.JobStateCancelling: false, - model.JobStateRollbackDone: false, - } { - require.Equal(t, v, (&model.Job{State: s}).InFinalState()) - } -} diff --git a/pkg/parser/model/model.go b/pkg/parser/model/model.go index 6debb8a59a509..37fa732779fbc 100644 --- a/pkg/parser/model/model.go +++ b/pkg/parser/model/model.go @@ -14,649 +14,13 @@ package model import ( - "bytes" "encoding/json" - "fmt" - "strconv" "strings" - "time" "unsafe" "github.com/pingcap/errors" - "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/duration" - "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/parser/types" ) -// SchemaState 
is the state for schema elements. -type SchemaState byte - -const ( - // StateNone means this schema element is absent and can't be used. - StateNone SchemaState = iota - // StateDeleteOnly means we can only delete items for this schema element. - StateDeleteOnly - // StateWriteOnly means we can use any write operation on this schema element, - // but outer can't read the changed data. - StateWriteOnly - // StateWriteReorganization means we are re-organizing whole data after write only state. - StateWriteReorganization - // StateDeleteReorganization means we are re-organizing whole data after delete only state. - StateDeleteReorganization - // StatePublic means this schema element is ok for all write and read operations. - StatePublic - // StateReplicaOnly means we're waiting tiflash replica to be finished. - StateReplicaOnly - // StateGlobalTxnOnly means we can only use global txn for operator on this schema element - StateGlobalTxnOnly - /* - * Please add the new state at the end to keep the values consistent across versions. - */ -) - -// String implements fmt.Stringer interface. -func (s SchemaState) String() string { - switch s { - case StateDeleteOnly: - return "delete only" - case StateWriteOnly: - return "write only" - case StateWriteReorganization: - return "write reorganization" - case StateDeleteReorganization: - return "delete reorganization" - case StatePublic: - return "public" - case StateReplicaOnly: - return "replica only" - case StateGlobalTxnOnly: - return "global txn only" - default: - return "none" - } -} - -const ( - // ColumnInfoVersion0 means the column info version is 0. - ColumnInfoVersion0 = uint64(0) - // ColumnInfoVersion1 means the column info version is 1. - ColumnInfoVersion1 = uint64(1) - // ColumnInfoVersion2 means the column info version is 2. - // This is for v2.1.7 to Compatible with older versions charset problem. - // Old version such as v2.0.8 treat utf8 as utf8mb4, because there is no UTF8 check in v2.0.8. 
- // After version V2.1.2 (PR#8738) , TiDB add UTF8 check, then the user upgrade from v2.0.8 insert some UTF8MB4 characters will got error. - // This is not compatibility for user. Then we try to fix this in PR #9820, and increase the version number. - ColumnInfoVersion2 = uint64(2) - - // CurrLatestColumnInfoVersion means the latest column info in the current TiDB. - CurrLatestColumnInfoVersion = ColumnInfoVersion2 -) - -// ChangeStateInfo is used for recording the information of schema changing. -type ChangeStateInfo struct { - // DependencyColumnOffset is the changing column offset that the current column depends on when executing modify/change column. - DependencyColumnOffset int `json:"relative_col_offset"` -} - -// ColumnInfo provides meta data describing of a table column. -type ColumnInfo struct { - ID int64 `json:"id"` - Name CIStr `json:"name"` - Offset int `json:"offset"` - OriginDefaultValue interface{} `json:"origin_default"` - OriginDefaultValueBit []byte `json:"origin_default_bit"` - DefaultValue interface{} `json:"default"` - DefaultValueBit []byte `json:"default_bit"` - // DefaultIsExpr is indicates the default value string is expr. - DefaultIsExpr bool `json:"default_is_expr"` - GeneratedExprString string `json:"generated_expr_string"` - GeneratedStored bool `json:"generated_stored"` - Dependences map[string]struct{} `json:"dependences"` - FieldType types.FieldType `json:"type"` - State SchemaState `json:"state"` - Comment string `json:"comment"` - // A hidden column is used internally(expression index) and are not accessible by users. - Hidden bool `json:"hidden"` - *ChangeStateInfo `json:"change_state_info"` - // Version means the version of the column info. - // Version = 0: For OriginDefaultValue and DefaultValue of timestamp column will stores the default time in system time zone. - // That is a bug if multiple TiDB servers in different system time zone. 
- // Version = 1: For OriginDefaultValue and DefaultValue of timestamp column will stores the default time in UTC time zone. - // This will fix bug in version 0. For compatibility with version 0, we add version field in column info struct. - Version uint64 `json:"version"` -} - -// IsVirtualGenerated checks the column if it is virtual. -func (c *ColumnInfo) IsVirtualGenerated() bool { - return c.IsGenerated() && !c.GeneratedStored -} - -// Clone clones ColumnInfo. -func (c *ColumnInfo) Clone() *ColumnInfo { - if c == nil { - return nil - } - nc := *c - return &nc -} - -// GetType returns the type of ColumnInfo. -func (c *ColumnInfo) GetType() byte { - return c.FieldType.GetType() -} - -// GetFlag returns the flag of ColumnInfo. -func (c *ColumnInfo) GetFlag() uint { - return c.FieldType.GetFlag() -} - -// GetFlen returns the flen of ColumnInfo. -func (c *ColumnInfo) GetFlen() int { - return c.FieldType.GetFlen() -} - -// GetDecimal returns the decimal of ColumnInfo. -func (c *ColumnInfo) GetDecimal() int { - return c.FieldType.GetDecimal() -} - -// GetCharset returns the charset of ColumnInfo. -func (c *ColumnInfo) GetCharset() string { - return c.FieldType.GetCharset() -} - -// GetCollate returns the collation of ColumnInfo. -func (c *ColumnInfo) GetCollate() string { - return c.FieldType.GetCollate() -} - -// GetElems returns the elems of ColumnInfo. -func (c *ColumnInfo) GetElems() []string { - return c.FieldType.GetElems() -} - -// SetType set the type of ColumnInfo. -func (c *ColumnInfo) SetType(tp byte) { - c.FieldType.SetType(tp) -} - -// SetFlag set the flag of ColumnInfo. -func (c *ColumnInfo) SetFlag(flag uint) { - c.FieldType.SetFlag(flag) -} - -// AddFlag adds the flag of ColumnInfo. -func (c *ColumnInfo) AddFlag(flag uint) { - c.FieldType.AddFlag(flag) -} - -// AndFlag adds a flag to the column. -func (c *ColumnInfo) AndFlag(flag uint) { - c.FieldType.AndFlag(flag) -} - -// ToggleFlag flips the flag according to the value. 
-func (c *ColumnInfo) ToggleFlag(flag uint) { - c.FieldType.ToggleFlag(flag) -} - -// DelFlag removes the flag from the column's flag. -func (c *ColumnInfo) DelFlag(flag uint) { - c.FieldType.DelFlag(flag) -} - -// SetFlen sets the flen of ColumnInfo. -func (c *ColumnInfo) SetFlen(flen int) { - c.FieldType.SetFlen(flen) -} - -// SetDecimal sets the decimal of ColumnInfo. -func (c *ColumnInfo) SetDecimal(decimal int) { - c.FieldType.SetDecimal(decimal) -} - -// SetCharset sets charset of the ColumnInfo -func (c *ColumnInfo) SetCharset(charset string) { - c.FieldType.SetCharset(charset) -} - -// SetCollate sets the collation of the column. -func (c *ColumnInfo) SetCollate(collate string) { - c.FieldType.SetCollate(collate) -} - -// SetElems set the elements of enum column. -func (c *ColumnInfo) SetElems(elems []string) { - c.FieldType.SetElems(elems) -} - -// IsGenerated returns true if the column is generated column. -func (c *ColumnInfo) IsGenerated() bool { - return len(c.GeneratedExprString) != 0 -} - -// SetOriginDefaultValue sets the origin default value. -// For mysql.TypeBit type, the default value storage format must be a string. -// Other value such as int must convert to string format first. -// The mysql.TypeBit type supports the null default value. -func (c *ColumnInfo) SetOriginDefaultValue(value interface{}) error { - c.OriginDefaultValue = value - if c.GetType() == mysql.TypeBit { - if value == nil { - return nil - } - if v, ok := value.(string); ok { - c.OriginDefaultValueBit = []byte(v) - return nil - } - return types.ErrInvalidDefault.GenWithStackByArgs(c.Name) - } - return nil -} - -// GetOriginDefaultValue gets the origin default value. 
-func (c *ColumnInfo) GetOriginDefaultValue() interface{} { - if c.GetType() == mysql.TypeBit && c.OriginDefaultValueBit != nil { - // If the column type is BIT, both `OriginDefaultValue` and `DefaultValue` of ColumnInfo are corrupted, - // because the content before json.Marshal is INCONSISTENT with the content after json.Unmarshal. - return string(c.OriginDefaultValueBit) - } - return c.OriginDefaultValue -} - -// SetDefaultValue sets the default value. -func (c *ColumnInfo) SetDefaultValue(value interface{}) error { - c.DefaultValue = value - if c.GetType() == mysql.TypeBit { - // For mysql.TypeBit type, the default value storage format must be a string. - // Other value such as int must convert to string format first. - // The mysql.TypeBit type supports the null default value. - if value == nil { - return nil - } - if v, ok := value.(string); ok { - c.DefaultValueBit = []byte(v) - return nil - } - return types.ErrInvalidDefault.GenWithStackByArgs(c.Name) - } - return nil -} - -// GetDefaultValue gets the default value of the column. -// Default value use to stored in DefaultValue field, but now, -// bit type default value will store in DefaultValueBit for fix bit default value decode/encode bug. -func (c *ColumnInfo) GetDefaultValue() interface{} { - if c.GetType() == mysql.TypeBit && c.DefaultValueBit != nil { - return string(c.DefaultValueBit) - } - return c.DefaultValue -} - -// GetTypeDesc gets the description for column type. -func (c *ColumnInfo) GetTypeDesc() string { - desc := c.FieldType.CompactStr() - if mysql.HasUnsignedFlag(c.GetFlag()) && c.GetType() != mysql.TypeBit && c.GetType() != mysql.TypeYear { - desc += " unsigned" - } - if mysql.HasZerofillFlag(c.GetFlag()) && c.GetType() != mysql.TypeYear { - desc += " zerofill" - } - return desc -} - -// EmptyColumnInfoSize is the memory usage of ColumnInfoSize -const EmptyColumnInfoSize = int64(unsafe.Sizeof(ColumnInfo{})) - -// FindColumnInfo finds ColumnInfo in cols by name. 
-func FindColumnInfo(cols []*ColumnInfo, name string) *ColumnInfo { - name = strings.ToLower(name) - for _, col := range cols { - if col.Name.L == name { - return col - } - } - return nil -} - -// FindColumnInfoByID finds ColumnInfo in cols by id. -func FindColumnInfoByID(cols []*ColumnInfo, id int64) *ColumnInfo { - for _, col := range cols { - if col.ID == id { - return col - } - } - return nil -} - -// FindIndexInfoByID finds IndexInfo in indices by id. -func FindIndexInfoByID(indices []*IndexInfo, id int64) *IndexInfo { - for _, idx := range indices { - if idx.ID == id { - return idx - } - } - return nil -} - -// FindFKInfoByName finds FKInfo in fks by lowercase name. -func FindFKInfoByName(fks []*FKInfo, name string) *FKInfo { - for _, fk := range fks { - if fk.Name.L == name { - return fk - } - } - return nil -} - -// FindIndexByColumns find IndexInfo in indices which is cover the specified columns. -func FindIndexByColumns(tbInfo *TableInfo, indices []*IndexInfo, cols ...CIStr) *IndexInfo { - for _, index := range indices { - if IsIndexPrefixCovered(tbInfo, index, cols...) { - return index - } - } - return nil -} - -// IsIndexPrefixCovered checks the index's columns beginning with the cols. -func IsIndexPrefixCovered(tbInfo *TableInfo, index *IndexInfo, cols ...CIStr) bool { - if len(index.Columns) < len(cols) { - return false - } - for i := range cols { - if cols[i].L != index.Columns[i].Name.L || - index.Columns[i].Offset >= len(tbInfo.Columns) { - return false - } - colInfo := tbInfo.Columns[index.Columns[i].Offset] - if index.Columns[i].Length != types.UnspecifiedLength && index.Columns[i].Length < colInfo.GetFlen() { - return false - } - } - return true -} - -// ExtraHandleID is the column ID of column which we need to append to schema to occupy the handle's position -// for use of execution phase. -const ExtraHandleID = -1 - -// Deprecated: Use ExtraPhysTblID instead. 
-// const ExtraPidColID = -2 - -// ExtraPhysTblID is the column ID of column that should be filled in with the physical table id. -// Primarily used for table partition dynamic prune mode, to return which partition (physical table id) the row came from. -// If used with a global index, the partition ID decoded from the key value will be filled in. -const ExtraPhysTblID = -3 - -// ExtraRowChecksumID is the column ID of column which holds the row checksum info. -const ExtraRowChecksumID = -4 - -const ( - // TableInfoVersion0 means the table info version is 0. - // Upgrade from v2.1.1 or v2.1.2 to v2.1.3 and later, and then execute a "change/modify column" statement - // that does not specify a charset value for column. Then the following error may be reported: - // ERROR 1105 (HY000): unsupported modify charset from utf8mb4 to utf8. - // To eliminate this error, we will not modify the charset of this column - // when executing a change/modify column statement that does not specify a charset value for column. - // This behavior is not compatible with MySQL. - TableInfoVersion0 = uint16(0) - // TableInfoVersion1 means the table info version is 1. - // When we execute a change/modify column statement that does not specify a charset value for column, - // we set the charset of this column to the charset of table. This behavior is compatible with MySQL. - TableInfoVersion1 = uint16(1) - // TableInfoVersion2 means the table info version is 2. - // This is for v2.1.7 to Compatible with older versions charset problem. - // Old version such as v2.0.8 treat utf8 as utf8mb4, because there is no UTF8 check in v2.0.8. - // After version V2.1.2 (PR#8738) , TiDB add UTF8 check, then the user upgrade from v2.0.8 insert some UTF8MB4 characters will got error. - // This is not compatibility for user. Then we try to fix this in PR #9820, and increase the version number. - TableInfoVersion2 = uint16(2) - // TableInfoVersion3 means the table info version is 3. 
- // This version aims to deal with upper-cased charset name in TableInfo stored by versions prior to TiDB v2.1.9: - // TiDB always suppose all charsets / collations as lower-cased and try to convert them if they're not. - // However, the convert is missed in some scenarios before v2.1.9, so for all those tables prior to TableInfoVersion3, their - // charsets / collations will be converted to lower-case while loading from the storage. - TableInfoVersion3 = uint16(3) - // TableInfoVersion4 is not used. - TableInfoVersion4 = uint16(4) - // TableInfoVersion5 indicates that the auto_increment allocator in TiDB has been separated from - // _tidb_rowid allocator when AUTO_ID_CACHE is 1. This version is introduced to preserve the compatibility of old tables: - // the tables with version <= TableInfoVersion4 still use a single allocator for auto_increment and _tidb_rowid. - // Also see https://github.com/pingcap/tidb/issues/982. - TableInfoVersion5 = uint16(5) - - // CurrLatestTableInfoVersion means the latest table info in the current TiDB. - CurrLatestTableInfoVersion = TableInfoVersion5 -) - -// ExtraHandleName is the name of ExtraHandle Column. -var ExtraHandleName = NewCIStr("_tidb_rowid") - -// Deprecated: Use ExtraPhysTblIdName instead. -// var ExtraPartitionIdName = NewCIStr("_tidb_pid") //nolint:revive - -// ExtraPhysTblIdName is the name of ExtraPhysTblID Column. -var ExtraPhysTblIdName = NewCIStr("_tidb_tid") //nolint:revive - -// TableInfo provides meta data describing a DB table. -type TableInfo struct { - ID int64 `json:"id"` - Name CIStr `json:"name"` - Charset string `json:"charset"` - Collate string `json:"collate"` - // Columns are listed in the order in which they appear in the schema. 
- Columns []*ColumnInfo `json:"cols"` - Indices []*IndexInfo `json:"index_info"` - Constraints []*ConstraintInfo `json:"constraint_info"` - ForeignKeys []*FKInfo `json:"fk_info"` - State SchemaState `json:"state"` - // PKIsHandle is true when primary key is a single integer column. - PKIsHandle bool `json:"pk_is_handle"` - // IsCommonHandle is true when clustered index feature is - // enabled and the primary key is not a single integer column. - IsCommonHandle bool `json:"is_common_handle"` - // CommonHandleVersion is the version of the clustered index. - // 0 for the clustered index created == 5.0.0 RC. - // 1 for the clustered index created > 5.0.0 RC. - CommonHandleVersion uint16 `json:"common_handle_version"` - - Comment string `json:"comment"` - AutoIncID int64 `json:"auto_inc_id"` - - // Only used by BR when: - // 1. SepAutoInc() is true - // 2. The table is nonclustered and has auto_increment column. - // In that case, both auto_increment_id and tidb_rowid need to be backup & recover. - // See also https://github.com/pingcap/tidb/issues/46093 - // - // It should have been named TiDBRowID, but for historial reasons, we do not use separate meta key for _tidb_rowid and auto_increment_id, - // and field `AutoIncID` is used to serve both _tidb_rowid and auto_increment_id. - // If we introduce a TiDBRowID here, it could make furthur misunderstanding: - // in most cases, AutoIncID is _tidb_rowid and TiDBRowID is null - // but in some cases, AutoIncID is auto_increment_id and TiDBRowID is _tidb_rowid - // So let's just use another name AutoIncIDExtra to avoid misconception. 
- AutoIncIDExtra int64 `json:"auto_inc_id_extra,omitempty"` - - AutoIdCache int64 `json:"auto_id_cache"` //nolint:revive - AutoRandID int64 `json:"auto_rand_id"` - MaxColumnID int64 `json:"max_col_id"` - MaxIndexID int64 `json:"max_idx_id"` - MaxForeignKeyID int64 `json:"max_fk_id"` - MaxConstraintID int64 `json:"max_cst_id"` - // UpdateTS is used to record the timestamp of updating the table's schema information. - // These changing schema operations don't include 'truncate table' and 'rename table'. - UpdateTS uint64 `json:"update_timestamp"` - // OldSchemaID : - // Because auto increment ID has schemaID as prefix, - // We need to save original schemaID to keep autoID unchanged - // while renaming a table from one database to another. - // Only set if table has been renamed across schemas - // Old name 'old_schema_id' is kept for backwards compatibility - AutoIDSchemaID int64 `json:"old_schema_id,omitempty"` - - // ShardRowIDBits specify if the implicit row ID is sharded. - ShardRowIDBits uint64 - // MaxShardRowIDBits uses to record the max ShardRowIDBits be used so far. - MaxShardRowIDBits uint64 `json:"max_shard_row_id_bits"` - // AutoRandomBits is used to set the bit number to shard automatically when PKIsHandle. - AutoRandomBits uint64 `json:"auto_random_bits"` - // AutoRandomRangeBits represents the bit number of the int primary key that will be used by TiDB. - AutoRandomRangeBits uint64 `json:"auto_random_range_bits"` - // PreSplitRegions specify the pre-split region when create table. - // The pre-split region num is 2^(PreSplitRegions-1). - // And the PreSplitRegions should less than or equal to ShardRowIDBits. - PreSplitRegions uint64 `json:"pre_split_regions"` - - Partition *PartitionInfo `json:"partition"` - - Compression string `json:"compression"` - - View *ViewInfo `json:"view"` - - Sequence *SequenceInfo `json:"sequence"` - - // Lock represent the table lock info. 
- Lock *TableLockInfo `json:"Lock"` - - // Version means the version of the table info. - Version uint16 `json:"version"` - - // TiFlashReplica means the TiFlash replica info. - TiFlashReplica *TiFlashReplicaInfo `json:"tiflash_replica"` - - // IsColumnar means the table is column-oriented. - // It's true when the engine of the table is TiFlash only. - IsColumnar bool `json:"is_columnar"` - - TempTableType `json:"temp_table_type"` - TableCacheStatusType `json:"cache_table_status"` - PlacementPolicyRef *PolicyRefInfo `json:"policy_ref_info"` - - // StatsOptions is used when do analyze/auto-analyze for each table - StatsOptions *StatsOptions `json:"stats_options"` - - ExchangePartitionInfo *ExchangePartitionInfo `json:"exchange_partition_info"` - - TTLInfo *TTLInfo `json:"ttl_info"` - - // Revision is per table schema's version, it will be increased when the schema changed. - Revision uint64 `json:"revision"` - - DBID int64 `json:"-"` -} - -// TableNameInfo provides meta data describing a table name info. -type TableNameInfo struct { - ID int64 `json:"id"` - Name CIStr `json:"name"` -} - -// SepAutoInc decides whether _rowid and auto_increment id use separate allocator. 
-func (t *TableInfo) SepAutoInc() bool { - return t.Version >= TableInfoVersion5 && t.AutoIdCache == 1 -} - -// TableCacheStatusType is the type of the table cache status -type TableCacheStatusType int - -//revive:disable:exported -const ( - TableCacheStatusDisable TableCacheStatusType = iota - TableCacheStatusEnable - TableCacheStatusSwitching -) - -//revive:enable:exported - -func (t TableCacheStatusType) String() string { - switch t { - case TableCacheStatusDisable: - return "disable" - case TableCacheStatusEnable: - return "enable" - case TableCacheStatusSwitching: - return "switching" - default: - return "" - } -} - -// TempTableType is the type of the temp table -type TempTableType byte - -//revive:disable:exported -const ( - TempTableNone TempTableType = iota - TempTableGlobal - TempTableLocal -) - -//revive:enable:exported - -func (t TempTableType) String() string { - switch t { - case TempTableGlobal: - return "global" - case TempTableLocal: - return "local" - default: - return "" - } -} - -// TableLockInfo provides meta data describing a table lock. -type TableLockInfo struct { - Tp TableLockType - // Use array because there may be multiple sessions holding the same read lock. - Sessions []SessionInfo - State TableLockState - // TS is used to record the timestamp this table lock been locked. - TS uint64 -} - -// SessionInfo contain the session ID and the server ID. -type SessionInfo struct { - ServerID string - SessionID uint64 -} - -func (s SessionInfo) String() string { - return "server: " + s.ServerID + "_session: " + strconv.FormatUint(s.SessionID, 10) -} - -// TableLockTpInfo is composed by schema ID, table ID and table lock type. -type TableLockTpInfo struct { - SchemaID int64 - TableID int64 - Tp TableLockType -} - -// TableLockState is the state for table lock. -type TableLockState byte - -const ( - // TableLockStateNone means this table lock is absent. 
- TableLockStateNone TableLockState = iota - // TableLockStatePreLock means this table lock is pre-lock state. Other session doesn't hold this lock should't do corresponding operation according to the lock type. - TableLockStatePreLock - // TableLockStatePublic means this table lock is public state. - TableLockStatePublic -) - -// String implements fmt.Stringer interface. -func (t TableLockState) String() string { - switch t { - case TableLockStatePreLock: - return "pre-lock" - case TableLockStatePublic: - return "public" - default: - return "none" - } -} - // TableLockType is the type of the table lock. type TableLockType byte @@ -679,6 +43,7 @@ const ( TableLockWriteLocal ) +// String implements fmt.Stringer interface. func (t TableLockType) String() string { switch t { case TableLockNone: @@ -697,352 +62,18 @@ func (t TableLockType) String() string { return "" } -// TiFlashReplicaInfo means the flash replica info. -type TiFlashReplicaInfo struct { - Count uint64 - LocationLabels []string - Available bool - AvailablePartitionIDs []int64 -} - -// IsPartitionAvailable checks whether the partition table replica was available. -func (tr *TiFlashReplicaInfo) IsPartitionAvailable(pid int64) bool { - for _, id := range tr.AvailablePartitionIDs { - if id == pid { - return true - } - } - return false -} - -// GetPartitionInfo returns the partition information. -func (t *TableInfo) GetPartitionInfo() *PartitionInfo { - if t.Partition != nil && t.Partition.Enable { - return t.Partition - } - return nil -} - -// GetUpdateTime gets the table's updating time. -func (t *TableInfo) GetUpdateTime() time.Time { - return TSConvert2Time(t.UpdateTS) -} - -// GetAutoIDSchemaID returns the schema ID that was used to create an allocator. -func (t *TableInfo) GetAutoIDSchemaID(dbID int64) int64 { - if t.AutoIDSchemaID != 0 { - return t.AutoIDSchemaID - } - return dbID -} - -// Clone clones TableInfo. 
-func (t *TableInfo) Clone() *TableInfo { - nt := *t - nt.Columns = make([]*ColumnInfo, len(t.Columns)) - nt.Indices = make([]*IndexInfo, len(t.Indices)) - nt.ForeignKeys = make([]*FKInfo, len(t.ForeignKeys)) - - for i := range t.Columns { - nt.Columns[i] = t.Columns[i].Clone() - } - - for i := range t.Indices { - nt.Indices[i] = t.Indices[i].Clone() - } - - for i := range t.ForeignKeys { - nt.ForeignKeys[i] = t.ForeignKeys[i].Clone() - } - - if t.Partition != nil { - nt.Partition = t.Partition.Clone() - } - if t.TTLInfo != nil { - nt.TTLInfo = t.TTLInfo.Clone() - } - - return &nt -} - -// GetPkName will return the pk name if pk exists. -func (t *TableInfo) GetPkName() CIStr { - for _, colInfo := range t.Columns { - if mysql.HasPriKeyFlag(colInfo.GetFlag()) { - return colInfo.Name - } - } - return CIStr{} -} - -// GetPkColInfo gets the ColumnInfo of pk if exists. -// Make sure PkIsHandle checked before call this method. -func (t *TableInfo) GetPkColInfo() *ColumnInfo { - for _, colInfo := range t.Columns { - if mysql.HasPriKeyFlag(colInfo.GetFlag()) { - return colInfo - } - } - return nil -} - -// GetAutoIncrementColInfo gets the ColumnInfo of auto_increment column if exists. -func (t *TableInfo) GetAutoIncrementColInfo() *ColumnInfo { - for _, colInfo := range t.Columns { - if mysql.HasAutoIncrementFlag(colInfo.GetFlag()) { - return colInfo - } - } - return nil -} - -// IsAutoIncColUnsigned checks whether the auto increment column is unsigned. -func (t *TableInfo) IsAutoIncColUnsigned() bool { - col := t.GetAutoIncrementColInfo() - if col == nil { - return false - } - return mysql.HasUnsignedFlag(col.GetFlag()) -} - -// ContainsAutoRandomBits indicates whether a table contains auto_random column. -func (t *TableInfo) ContainsAutoRandomBits() bool { - return t.AutoRandomBits != 0 -} - -// IsAutoRandomBitColUnsigned indicates whether the auto_random column is unsigned. Make sure the table contains auto_random before calling this method. 
-func (t *TableInfo) IsAutoRandomBitColUnsigned() bool { - if !t.PKIsHandle || t.AutoRandomBits == 0 { - return false - } - return mysql.HasUnsignedFlag(t.GetPkColInfo().GetFlag()) -} - -// Cols returns the columns of the table in public state. -func (t *TableInfo) Cols() []*ColumnInfo { - publicColumns := make([]*ColumnInfo, len(t.Columns)) - maxOffset := -1 - for _, col := range t.Columns { - if col.State != StatePublic { - continue - } - publicColumns[col.Offset] = col - if maxOffset < col.Offset { - maxOffset = col.Offset - } - } - return publicColumns[0 : maxOffset+1] -} - -// GetColumnByID finds the column by ID. -func (t *TableInfo) GetColumnByID(id int64) *ColumnInfo { - for _, col := range t.Columns { - if col.State != StatePublic { - continue - } - if col.ID == id { - return col - } - } - return nil -} - -// FindIndexByName finds index by name. -func (t *TableInfo) FindIndexByName(idxName string) *IndexInfo { - for _, idx := range t.Indices { - if idx.Name.L == idxName { - return idx - } - } - return nil -} - -// FindPublicColumnByName finds the public column by name. -func (t *TableInfo) FindPublicColumnByName(colNameL string) *ColumnInfo { - for _, col := range t.Cols() { - if col.Name.L == colNameL { - return col - } - } - return nil -} - -// IsLocked checks whether the table was locked. -func (t *TableInfo) IsLocked() bool { - return t.Lock != nil && len(t.Lock.Sessions) > 0 -} - -// MoveColumnInfo moves a column to another offset. 
It maintains the offsets of all affects columns and index columns, -func (t *TableInfo) MoveColumnInfo(from, to int) { - if from == to { - return - } - updatedOffsets := make(map[int]int) - src := t.Columns[from] - if from < to { - for i := from; i < to; i++ { - t.Columns[i] = t.Columns[i+1] - t.Columns[i].Offset = i - updatedOffsets[i+1] = i - } - } else if from > to { - for i := from; i > to; i-- { - t.Columns[i] = t.Columns[i-1] - t.Columns[i].Offset = i - updatedOffsets[i-1] = i - } - } - t.Columns[to] = src - t.Columns[to].Offset = to - updatedOffsets[from] = to - for _, idx := range t.Indices { - for _, idxCol := range idx.Columns { - newOffset, ok := updatedOffsets[idxCol.Offset] - if ok { - idxCol.Offset = newOffset - } - } - } -} - -// ClearPlacement clears all table and partitions' placement settings -func (t *TableInfo) ClearPlacement() { - t.PlacementPolicyRef = nil - if t.Partition != nil { - for i := range t.Partition.Definitions { - def := &t.Partition.Definitions[i] - def.PlacementPolicyRef = nil - } - } -} - -// NewExtraHandleColInfo mocks a column info for extra handle column. -func NewExtraHandleColInfo() *ColumnInfo { - colInfo := &ColumnInfo{ - ID: ExtraHandleID, - Name: ExtraHandleName, - } - - colInfo.SetFlag(mysql.PriKeyFlag | mysql.NotNullFlag) - colInfo.SetType(mysql.TypeLonglong) - - flen, decimal := mysql.GetDefaultFieldLengthAndDecimal(mysql.TypeLonglong) - colInfo.SetFlen(flen) - colInfo.SetDecimal(decimal) - - colInfo.SetCharset(charset.CharsetBin) - colInfo.SetCollate(charset.CollationBin) - return colInfo -} - -// NewExtraPhysTblIDColInfo mocks a column info for extra partition id column. 
-func NewExtraPhysTblIDColInfo() *ColumnInfo { - colInfo := &ColumnInfo{ - ID: ExtraPhysTblID, - Name: ExtraPhysTblIdName, - } - colInfo.SetType(mysql.TypeLonglong) - flen, decimal := mysql.GetDefaultFieldLengthAndDecimal(mysql.TypeLonglong) - colInfo.SetFlen(flen) - colInfo.SetDecimal(decimal) - colInfo.SetCharset(charset.CharsetBin) - colInfo.SetCollate(charset.CollationBin) - return colInfo -} - -// GetPrimaryKey extract the primary key in a table and return `IndexInfo` -// The returned primary key could be explicit or implicit. -// If there is no explicit primary key in table, -// the first UNIQUE INDEX on NOT NULL columns will be the implicit primary key. -// For more information about implicit primary key, see -// https://dev.mysql.com/doc/refman/8.0/en/invisible-indexes.html -func (t *TableInfo) GetPrimaryKey() *IndexInfo { - var implicitPK *IndexInfo - - for _, key := range t.Indices { - if key.Primary { - // table has explicit primary key - return key - } - // The case index without any columns should never happen, but still do a check here - if len(key.Columns) == 0 { - continue - } - // find the first unique key with NOT NULL columns - if implicitPK == nil && key.Unique { - // ensure all columns in unique key have NOT NULL flag - allColNotNull := true - skip := false - for _, idxCol := range key.Columns { - col := FindColumnInfo(t.Cols(), idxCol.Name.L) - // This index has a column in DeleteOnly state, - // or it is expression index (it defined on a hidden column), - // it can not be implicit PK, go to next index iterator - if col == nil || col.Hidden { - skip = true - break - } - if !mysql.HasNotNullFlag(col.GetFlag()) { - allColNotNull = false - break - } - } - if skip { - continue - } - if allColNotNull { - implicitPK = key - } - } - } - return implicitPK -} - -// ColumnIsInIndex checks whether c is included in any indices of t. 
-func (t *TableInfo) ColumnIsInIndex(c *ColumnInfo) bool { - for _, index := range t.Indices { - for _, column := range index.Columns { - if column.Name.L == c.Name.L { - return true - } - } - } - return false -} - -// HasClusteredIndex checks whether the table has a clustered index. -func (t *TableInfo) HasClusteredIndex() bool { - return t.PKIsHandle || t.IsCommonHandle -} - -// IsView checks if TableInfo is a view. -func (t *TableInfo) IsView() bool { - return t.View != nil -} - -// IsSequence checks if TableInfo is a sequence. -func (t *TableInfo) IsSequence() bool { - return t.Sequence != nil -} - -// IsBaseTable checks to see the table is neither a view or a sequence. -func (t *TableInfo) IsBaseTable() bool { - return t.Sequence == nil && t.View == nil -} - // ViewAlgorithm is VIEW's SQL ALGORITHM characteristic. // See https://dev.mysql.com/doc/refman/5.7/en/view-algorithms.html type ViewAlgorithm int -//revive:disable:exported +// ViewAlgorithm values. const ( AlgorithmUndefined ViewAlgorithm = iota AlgorithmMerge AlgorithmTemptable ) -//revive:enable:exported - +// String implements fmt.Stringer interface. func (v *ViewAlgorithm) String() string { switch *v { case AlgorithmMerge: @@ -1060,14 +91,13 @@ func (v *ViewAlgorithm) String() string { // See https://dev.mysql.com/doc/refman/5.7/en/create-view.html type ViewSecurity int -//revive:disable:exported +// ViewSecurity values. const ( SecurityDefiner ViewSecurity = iota SecurityInvoker ) -//revive:enable:exported - +// String implements fmt.Stringer interface. 
func (v *ViewSecurity) String() string { switch *v { case SecurityInvoker: @@ -1083,350 +113,65 @@ func (v *ViewSecurity) String() string { // See https://dev.mysql.com/doc/refman/5.7/en/view-check-option.html type ViewCheckOption int -//revive:disable:exported -const ( - CheckOptionLocal ViewCheckOption = iota - CheckOptionCascaded -) - -//revive:enable:exported - -func (v *ViewCheckOption) String() string { - switch *v { - case CheckOptionLocal: - return "LOCAL" - case CheckOptionCascaded: - return "CASCADED" - default: - return "CASCADED" - } -} - -// ViewInfo provides meta data describing a DB view. -// -//revive:disable:exported -type ViewInfo struct { - Algorithm ViewAlgorithm `json:"view_algorithm"` - Definer *auth.UserIdentity `json:"view_definer"` - Security ViewSecurity `json:"view_security"` - SelectStmt string `json:"view_select"` - CheckOption ViewCheckOption `json:"view_checkoption"` - Cols []CIStr `json:"view_cols"` -} - -const ( - DefaultSequenceCacheBool = true - DefaultSequenceCycleBool = false - DefaultSequenceOrderBool = false - DefaultSequenceCacheValue = int64(1000) - DefaultSequenceIncrementValue = int64(1) - DefaultPositiveSequenceStartValue = int64(1) - DefaultNegativeSequenceStartValue = int64(-1) - DefaultPositiveSequenceMinValue = int64(1) - DefaultPositiveSequenceMaxValue = int64(9223372036854775806) - DefaultNegativeSequenceMaxValue = int64(-1) - DefaultNegativeSequenceMinValue = int64(-9223372036854775807) -) - -// SequenceInfo provide meta data describing a DB sequence. 
-type SequenceInfo struct { - Start int64 `json:"sequence_start"` - Cache bool `json:"sequence_cache"` - Cycle bool `json:"sequence_cycle"` - MinValue int64 `json:"sequence_min_value"` - MaxValue int64 `json:"sequence_max_value"` - Increment int64 `json:"sequence_increment"` - CacheValue int64 `json:"sequence_cache_value"` - Comment string `json:"sequence_comment"` -} - -//revive:enable:exported - -// PartitionType is the type for PartitionInfo -type PartitionType int - -// Partition types. -const ( - // Actually non-partitioned, but during DDL keeping the table as - // a single partition - PartitionTypeNone PartitionType = 0 - - PartitionTypeRange PartitionType = 1 - PartitionTypeHash PartitionType = 2 - PartitionTypeList PartitionType = 3 - PartitionTypeKey PartitionType = 4 - PartitionTypeSystemTime PartitionType = 5 -) - -func (p PartitionType) String() string { - switch p { - case PartitionTypeRange: - return "RANGE" - case PartitionTypeHash: - return "HASH" - case PartitionTypeList: - return "LIST" - case PartitionTypeKey: - return "KEY" - case PartitionTypeSystemTime: - return "SYSTEM_TIME" - case PartitionTypeNone: - return "NONE" - default: - return "" - } -} - -// ExchangePartitionInfo provides exchange partition info. -type ExchangePartitionInfo struct { - // It is nt tableID when table which has the info is a partition table, else pt tableID. - ExchangePartitionTableID int64 `json:"exchange_partition_id"` - ExchangePartitionDefID int64 `json:"exchange_partition_def_id"` - // Deprecated, not used - XXXExchangePartitionFlag bool `json:"exchange_partition_flag"` -} - -// UpdateIndexInfo is to carry the entries in the list of indexes in UPDATE INDEXES -// during ALTER TABLE t PARTITION BY ... UPDATE INDEXES (idx_a GLOBAL, idx_b LOCAL...) -type UpdateIndexInfo struct { - IndexName string `json:"index_name"` - Global bool `json:"global"` -} - -// PartitionInfo provides table partition info. 
-type PartitionInfo struct { - Type PartitionType `json:"type"` - Expr string `json:"expr"` - Columns []CIStr `json:"columns"` - - // User may already create table with partition but table partition is not - // yet supported back then. When Enable is true, write/read need use tid - // rather than pid. - Enable bool `json:"enable"` - - // IsEmptyColumns is for syntax like `partition by key()`. - // When IsEmptyColums is true, it will not display column name in `show create table` stmt. - IsEmptyColumns bool `json:"is_empty_columns"` - - Definitions []PartitionDefinition `json:"definitions"` - // AddingDefinitions is filled when adding partitions that is in the mid state. - AddingDefinitions []PartitionDefinition `json:"adding_definitions"` - // DroppingDefinitions is filled when dropping/truncating partitions that is in the mid state. - DroppingDefinitions []PartitionDefinition `json:"dropping_definitions"` - // NewPartitionIDs is filled when truncating partitions that is in the mid state. - NewPartitionIDs []int64 - - States []PartitionState `json:"states"` - Num uint64 `json:"num"` - // Only used during ReorganizePartition so far - DDLState SchemaState `json:"ddl_state"` - // Set during ALTER TABLE ... if the table id needs to change - // like if there is a global index or going between non-partitioned - // and partitioned table, to make the data dropping / range delete - // optimized. - NewTableID int64 `json:"new_table_id"` - // Set during ALTER TABLE ... PARTITION BY ... - // First as the new partition scheme, then in StateDeleteReorg as the old - DDLType PartitionType `json:"ddl_type"` - DDLExpr string `json:"ddl_expr"` - DDLColumns []CIStr `json:"ddl_columns"` - // For ActionAlterTablePartitioning, UPDATE INDEXES - DDLUpdateIndexes []UpdateIndexInfo `json:"ddl_update_indexes"` -} - -// Clone clones itself. 
-func (pi *PartitionInfo) Clone() *PartitionInfo { - newPi := *pi - newPi.Columns = make([]CIStr, len(pi.Columns)) - copy(newPi.Columns, pi.Columns) - - newPi.Definitions = make([]PartitionDefinition, len(pi.Definitions)) - for i := range pi.Definitions { - newPi.Definitions[i] = pi.Definitions[i].Clone() - } - - newPi.AddingDefinitions = make([]PartitionDefinition, len(pi.AddingDefinitions)) - for i := range pi.AddingDefinitions { - newPi.AddingDefinitions[i] = pi.AddingDefinitions[i].Clone() - } - - newPi.DroppingDefinitions = make([]PartitionDefinition, len(pi.DroppingDefinitions)) - for i := range pi.DroppingDefinitions { - newPi.DroppingDefinitions[i] = pi.DroppingDefinitions[i].Clone() - } - - return &newPi -} - -// GetNameByID gets the partition name by ID. -// TODO: Remove the need for this function! -func (pi *PartitionInfo) GetNameByID(id int64) string { - definitions := pi.Definitions - // do not convert this loop to `for _, def := range definitions`. - // see https://github.com/pingcap/parser/pull/1072 for the benchmark. - for i := range definitions { - if id == definitions[i].ID { - return definitions[i].Name.O - } - } - return "" -} - -// GetStateByID gets the partition state by ID. -func (pi *PartitionInfo) GetStateByID(id int64) SchemaState { - for _, pstate := range pi.States { - if pstate.ID == id { - return pstate.State - } - } - return StatePublic -} - -// SetStateByID sets the state of the partition by ID. -func (pi *PartitionInfo) SetStateByID(id int64, state SchemaState) { - newState := PartitionState{ID: id, State: state} - for i, pstate := range pi.States { - if pstate.ID == id { - pi.States[i] = newState - return - } - } - if pi.States == nil { - pi.States = make([]PartitionState, 0, 1) - } - pi.States = append(pi.States, newState) -} - -// GCPartitionStates cleans up the partition state. 
-func (pi *PartitionInfo) GCPartitionStates() { - if len(pi.States) < 1 { - return - } - newStates := make([]PartitionState, 0, len(pi.Definitions)) - for _, state := range pi.States { - found := false - for _, def := range pi.Definitions { - if def.ID == state.ID { - found = true - break - } - } - if found { - newStates = append(newStates, state) - } - } - pi.States = newStates -} - -// HasTruncatingPartitionID checks whether the pid is truncating. -func (pi *PartitionInfo) HasTruncatingPartitionID(pid int64) bool { - for i := range pi.NewPartitionIDs { - if pi.NewPartitionIDs[i] == pid { - return true - } - } - return false -} - -// ClearReorgIntermediateInfo remove intermediate information used during reorganize partition. -func (pi *PartitionInfo) ClearReorgIntermediateInfo() { - pi.DDLType = PartitionTypeNone - pi.DDLExpr = "" - pi.DDLColumns = nil - pi.NewTableID = 0 -} - -// PartitionState is the state of the partition. -type PartitionState struct { - ID int64 `json:"id"` - State SchemaState `json:"state"` -} - -// PartitionDefinition defines a single partition. -type PartitionDefinition struct { - ID int64 `json:"id"` - Name CIStr `json:"name"` - LessThan []string `json:"less_than"` - InValues [][]string `json:"in_values"` - PlacementPolicyRef *PolicyRefInfo `json:"policy_ref_info"` - Comment string `json:"comment,omitempty"` -} - -// Clone clones ConstraintInfo. 
-func (ci *PartitionDefinition) Clone() PartitionDefinition { - nci := *ci - nci.LessThan = make([]string, len(ci.LessThan)) - copy(nci.LessThan, ci.LessThan) - return nci -} - -const emptyPartitionDefinitionSize = int64(unsafe.Sizeof(PartitionState{})) - -// MemoryUsage return the memory usage of PartitionDefinition -func (ci *PartitionDefinition) MemoryUsage() (sum int64) { - if ci == nil { - return - } - - sum = emptyPartitionDefinitionSize + ci.Name.MemoryUsage() - if ci.PlacementPolicyRef != nil { - sum += int64(unsafe.Sizeof(ci.PlacementPolicyRef.ID)) + ci.PlacementPolicyRef.Name.MemoryUsage() - } +// ViewCheckOption values. +const ( + CheckOptionLocal ViewCheckOption = iota + CheckOptionCascaded +) - for _, str := range ci.LessThan { - sum += int64(len(str)) - } - for _, strs := range ci.InValues { - for _, str := range strs { - sum += int64(len(str)) - } +// String implements fmt.Stringer interface. +func (v *ViewCheckOption) String() string { + switch *v { + case CheckOptionLocal: + return "LOCAL" + case CheckOptionCascaded: + return "CASCADED" + default: + return "CASCADED" } - return } -// FindPartitionDefinitionByName finds PartitionDefinition by name. -func (pi *PartitionInfo) FindPartitionDefinitionByName(partitionDefinitionName string) int { - lowConstrName := strings.ToLower(partitionDefinitionName) - definitions := pi.Definitions - for i := range definitions { - if definitions[i].Name.L == lowConstrName { - return i - } - } - return -1 -} +// PartitionType is the type for PartitionInfo +type PartitionType int -// GetPartitionIDByName gets the partition ID by name. -func (pi *PartitionInfo) GetPartitionIDByName(partitionDefinitionName string) int64 { - lowConstrName := strings.ToLower(partitionDefinitionName) - for _, definition := range pi.Definitions { - if definition.Name.L == lowConstrName { - return definition.ID - } - } - return -1 -} +// PartitionType types. 
+const ( + // Actually non-partitioned, but during DDL keeping the table as + // a single partition + PartitionTypeNone PartitionType = 0 -// IndexColumn provides index column info. -type IndexColumn struct { - Name CIStr `json:"name"` // Index name - Offset int `json:"offset"` // Index offset - // Length of prefix when using column prefix - // for indexing; - // UnspecifedLength if not using prefix indexing - Length int `json:"length"` -} + PartitionTypeRange PartitionType = 1 + PartitionTypeHash PartitionType = 2 + PartitionTypeList PartitionType = 3 + PartitionTypeKey PartitionType = 4 + PartitionTypeSystemTime PartitionType = 5 +) -// Clone clones IndexColumn. -func (i *IndexColumn) Clone() *IndexColumn { - ni := *i - return &ni +// String implements fmt.Stringer interface. +func (p PartitionType) String() string { + switch p { + case PartitionTypeRange: + return "RANGE" + case PartitionTypeHash: + return "HASH" + case PartitionTypeList: + return "LIST" + case PartitionTypeKey: + return "KEY" + case PartitionTypeSystemTime: + return "SYSTEM_TIME" + case PartitionTypeNone: + return "NONE" + default: + return "" + } } // PrimaryKeyType is the type of primary key. // Available values are "clustered", "nonclustered", and ""(default). type PrimaryKeyType int8 +// String implements fmt.Stringer interface. func (p PrimaryKeyType) String() string { switch p { case PrimaryKeyTypeClustered: @@ -1438,15 +183,13 @@ func (p PrimaryKeyType) String() string { } } -//revive:disable:exported +// PrimaryKeyType values. const ( PrimaryKeyTypeDefault PrimaryKeyType = iota PrimaryKeyTypeClustered PrimaryKeyTypeNonClustered ) -//revive:enable:exported - // IndexType is the type of index type IndexType int @@ -1475,160 +218,6 @@ const ( IndexTypeHypo ) -// IndexInfo provides meta data describing a DB index. 
-// It corresponds to the statement `CREATE INDEX Name ON Table (Column);` -// See https://dev.mysql.com/doc/refman/5.7/en/create-index.html -type IndexInfo struct { - ID int64 `json:"id"` - Name CIStr `json:"idx_name"` // Index name. - Table CIStr `json:"tbl_name"` // Table name. - Columns []*IndexColumn `json:"idx_cols"` // Index columns. - State SchemaState `json:"state"` - BackfillState BackfillState `json:"backfill_state"` - Comment string `json:"comment"` // Comment - Tp IndexType `json:"index_type"` // Index type: Btree, Hash or Rtree - Unique bool `json:"is_unique"` // Whether the index is unique. - Primary bool `json:"is_primary"` // Whether the index is primary key. - Invisible bool `json:"is_invisible"` // Whether the index is invisible. - Global bool `json:"is_global"` // Whether the index is global. - MVIndex bool `json:"mv_index"` // Whether the index is multivalued index. -} - -// Clone clones IndexInfo. -func (index *IndexInfo) Clone() *IndexInfo { - if index == nil { - return nil - } - ni := *index - ni.Columns = make([]*IndexColumn, len(index.Columns)) - for i := range index.Columns { - ni.Columns[i] = index.Columns[i].Clone() - } - return &ni -} - -// HasPrefixIndex returns whether any columns of this index uses prefix length. -func (index *IndexInfo) HasPrefixIndex() bool { - for _, ic := range index.Columns { - if ic.Length != types.UnspecifiedLength { - return true - } - } - return false -} - -// HasColumnInIndexColumns checks whether the index contains the column with the specified ID. -func (index *IndexInfo) HasColumnInIndexColumns(tblInfo *TableInfo, colID int64) bool { - for _, ic := range index.Columns { - if tblInfo.Columns[ic.Offset].ID == colID { - return true - } - } - return false -} - -// FindColumnByName finds the index column with the specified name. 
-func (index *IndexInfo) FindColumnByName(nameL string) *IndexColumn { - _, ret := FindIndexColumnByName(index.Columns, nameL) - return ret -} - -// IsPublic checks if the index state is public -func (index *IndexInfo) IsPublic() bool { - return index.State == StatePublic -} - -// FindIndexColumnByName finds IndexColumn by name. When IndexColumn is not found, returns (-1, nil). -func FindIndexColumnByName(indexCols []*IndexColumn, nameL string) (int, *IndexColumn) { - for i, ic := range indexCols { - if ic.Name.L == nameL { - return i, ic - } - } - return -1, nil -} - -// ConstraintInfo provides meta data describing check-expression constraint. -type ConstraintInfo struct { - ID int64 `json:"id"` - Name CIStr `json:"constraint_name"` - Table CIStr `json:"tbl_name"` // Table name. - ConstraintCols []CIStr `json:"constraint_cols"` // Depended column names. - Enforced bool `json:"enforced"` - InColumn bool `json:"in_column"` // Indicate whether the constraint is column type check. - ExprString string `json:"expr_string"` - State SchemaState `json:"state"` -} - -// Clone clones ConstraintInfo. -func (ci *ConstraintInfo) Clone() *ConstraintInfo { - nci := *ci - - nci.ConstraintCols = make([]CIStr, len(ci.ConstraintCols)) - copy(nci.ConstraintCols, ci.ConstraintCols) - return &nci -} - -// FindConstraintInfoByName finds constraintInfo by name. -func (t *TableInfo) FindConstraintInfoByName(constrName string) *ConstraintInfo { - lowConstrName := strings.ToLower(constrName) - for _, chk := range t.Constraints { - if chk.Name.L == lowConstrName { - return chk - } - } - return nil -} - -// FindIndexNameByID finds index name by id. -func (t *TableInfo) FindIndexNameByID(id int64) string { - indexInfo := FindIndexInfoByID(t.Indices, id) - if indexInfo != nil { - return indexInfo.Name.L - } - return "" -} - -// FindColumnNameByID finds column name by id. 
-func (t *TableInfo) FindColumnNameByID(id int64) string { - colInfo := FindColumnInfoByID(t.Columns, id) - if colInfo != nil { - return colInfo.Name.L - } - return "" -} - -// FKInfo provides meta data describing a foreign key constraint. -type FKInfo struct { - ID int64 `json:"id"` - Name CIStr `json:"fk_name"` - RefSchema CIStr `json:"ref_schema"` - RefTable CIStr `json:"ref_table"` - RefCols []CIStr `json:"ref_cols"` - Cols []CIStr `json:"cols"` - OnDelete int `json:"on_delete"` - OnUpdate int `json:"on_update"` - State SchemaState `json:"state"` - Version int `json:"version"` -} - -const ( - // FKVersion0 indicate the FKInfo version is 0. - // In FKVersion0, TiDB only supported syntax of foreign key, but the foreign key constraint doesn't take effect. - FKVersion0 = 0 - // FKVersion1 indicate the FKInfo version is 1. - // In FKVersion1, TiDB supports the foreign key constraint. - FKVersion1 = 1 -) - -// ReferredFKInfo provides the cited foreign key in the child table. -type ReferredFKInfo struct { - Cols []CIStr `json:"cols"` - ChildSchema CIStr `json:"child_schema"` - ChildTable CIStr `json:"child_table"` - ChildFKName CIStr `json:"child_fk_name"` -} - // ReferOptionType is the type for refer options. 
type ReferOptionType int @@ -1659,91 +248,6 @@ func (r ReferOptionType) String() string { return "" } -func (fk *FKInfo) String(db, tb string) string { - buf := bytes.Buffer{} - buf.WriteString("`" + db + "`.`") - buf.WriteString(tb + "`, CONSTRAINT `") - buf.WriteString(fk.Name.O + "` FOREIGN KEY (") - for i, col := range fk.Cols { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString("`" + col.O + "`") - } - buf.WriteString(") REFERENCES `") - if fk.RefSchema.L != db { - buf.WriteString(fk.RefSchema.L) - buf.WriteString("`.`") - } - buf.WriteString(fk.RefTable.L) - buf.WriteString("` (") - for i, col := range fk.RefCols { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString("`" + col.O + "`") - } - buf.WriteString(")") - if onDelete := ReferOptionType(fk.OnDelete); onDelete != ReferOptionNoOption { - buf.WriteString(" ON DELETE ") - buf.WriteString(onDelete.String()) - } - if onUpdate := ReferOptionType(fk.OnUpdate); onUpdate != ReferOptionNoOption { - buf.WriteString(" ON UPDATE ") - buf.WriteString(onUpdate.String()) - } - return buf.String() -} - -// Clone clones FKInfo. -func (fk *FKInfo) Clone() *FKInfo { - nfk := *fk - - nfk.RefCols = make([]CIStr, len(fk.RefCols)) - nfk.Cols = make([]CIStr, len(fk.Cols)) - copy(nfk.RefCols, fk.RefCols) - copy(nfk.Cols, fk.Cols) - - return &nfk -} - -// DBInfo provides meta data describing a DB. -type DBInfo struct { - ID int64 `json:"id"` // Database ID - Name CIStr `json:"db_name"` // DB name. - Charset string `json:"charset"` - Collate string `json:"collate"` - Deprecated struct { // Tables is not set in infoschema v2, use infoschema SchemaTableInfos() instead. - Tables []*TableInfo `json:"-"` // Tables in the DB. - } - State SchemaState `json:"state"` - PlacementPolicyRef *PolicyRefInfo `json:"policy_ref_info"` - TableName2ID map[string]int64 `json:"-"` -} - -// Clone clones DBInfo. 
-func (db *DBInfo) Clone() *DBInfo { - newInfo := *db - newInfo.Deprecated.Tables = make([]*TableInfo, len(db.Deprecated.Tables)) - for i := range db.Deprecated.Tables { - newInfo.Deprecated.Tables[i] = db.Deprecated.Tables[i].Clone() - } - return &newInfo -} - -// Copy shallow copies DBInfo. -func (db *DBInfo) Copy() *DBInfo { - newInfo := *db - newInfo.Deprecated.Tables = make([]*TableInfo, len(db.Deprecated.Tables)) - copy(newInfo.Deprecated.Tables, db.Deprecated.Tables) - return &newInfo -} - -// LessDBInfo is used for sorting DBInfo by DBInfo.Name. -func LessDBInfo(a *DBInfo, b *DBInfo) int { - return strings.Compare(a.Name.L, b.Name.L) -} - // CIStr is case insensitive string. type CIStr struct { O string `json:"O"` // Original string. @@ -1790,186 +294,10 @@ func (cis *CIStr) MemoryUsage() (sum int64) { return int64(unsafe.Sizeof(cis.O))*2 + int64(len(cis.O)+len(cis.L)) } -// TableItemID is composed by table ID and column/index ID -type TableItemID struct { - TableID int64 - ID int64 - IsIndex bool - IsSyncLoadFailed bool -} - -// Key is used to generate unique key for TableItemID to use in the syncload -func (t TableItemID) Key() string { - return fmt.Sprintf("%d#%d#%t", t.ID, t.TableID, t.IsIndex) -} - -// StatsLoadItem represents the load unit for statistics's memory loading. -type StatsLoadItem struct { - TableItemID - FullLoad bool -} - -// Key is used to generate unique key for TableItemID to use in the syncload -func (s StatsLoadItem) Key() string { - return fmt.Sprintf("%s#%t", s.TableItemID.Key(), s.FullLoad) -} - -// PolicyRefInfo is the struct to refer the placement policy. 
-type PolicyRefInfo struct { - ID int64 `json:"id"` - Name CIStr `json:"name"` -} - -// PlacementSettings is the settings of the placement -type PlacementSettings struct { - PrimaryRegion string `json:"primary_region"` - Regions string `json:"regions"` - Learners uint64 `json:"learners"` - Followers uint64 `json:"followers"` - Voters uint64 `json:"voters"` - Schedule string `json:"schedule"` - Constraints string `json:"constraints"` - LeaderConstraints string `json:"leader_constraints"` - LearnerConstraints string `json:"learner_constraints"` - FollowerConstraints string `json:"follower_constraints"` - VoterConstraints string `json:"voter_constraints"` - SurvivalPreferences string `json:"survival_preferences"` -} - -// PolicyInfo is the struct to store the placement policy. -type PolicyInfo struct { - *PlacementSettings - ID int64 `json:"id"` - Name CIStr `json:"name"` - State SchemaState `json:"state"` -} - -// Clone clones PolicyInfo. -func (p *PolicyInfo) Clone() *PolicyInfo { - cloned := *p - cloned.PlacementSettings = p.PlacementSettings.Clone() - return &cloned -} - -// DefaultJobInterval sets the default interval between TTL jobs -const DefaultJobInterval = time.Hour - -// TTLInfo records the TTL config -type TTLInfo struct { - ColumnName CIStr `json:"column"` - IntervalExprStr string `json:"interval_expr"` - // `IntervalTimeUnit` is actually ast.TimeUnitType. Use `int` to avoid cycle dependency - IntervalTimeUnit int `json:"interval_time_unit"` - Enable bool `json:"enable"` - // JobInterval is the interval between two TTL scan jobs. 
- // It's suggested to get a duration with `(*TTLInfo).GetJobInterval` - JobInterval string `json:"job_interval"` -} - -// Clone clones TTLInfo -func (t *TTLInfo) Clone() *TTLInfo { - cloned := *t - return &cloned -} - -// GetJobInterval parses the job interval and return -// if the job interval is an empty string, the "1h" will be returned, to keep compatible with 6.5 (in which -// TTL_JOB_INTERVAL attribute doesn't exist) -// Didn't set TTL_JOB_INTERVAL during upgrade and bootstrap because setting default value here is much simpler -// and could avoid bugs blocking users from upgrading or bootstrapping the cluster. -func (t *TTLInfo) GetJobInterval() (time.Duration, error) { - if len(t.JobInterval) == 0 { - return DefaultJobInterval, nil - } - - return duration.ParseDuration(t.JobInterval) -} - -func writeSettingItemToBuilder(sb *strings.Builder, item string, separatorFns ...func()) { - if sb.Len() != 0 { - for _, fn := range separatorFns { - fn() - } - if len(separatorFns) == 0 { - sb.WriteString(" ") - } - } - sb.WriteString(item) -} -func writeSettingStringToBuilder(sb *strings.Builder, item string, value string, separatorFns ...func()) { - writeSettingItemToBuilder(sb, fmt.Sprintf("%s=\"%s\"", item, strings.ReplaceAll(value, "\"", "\\\"")), separatorFns...) -} -func writeSettingIntegerToBuilder(sb *strings.Builder, item string, value uint64, separatorFns ...func()) { - writeSettingItemToBuilder(sb, fmt.Sprintf("%s=%d", item, value), separatorFns...) -} - -func writeSettingDurationToBuilder(sb *strings.Builder, item string, dur time.Duration, separatorFns ...func()) { - writeSettingStringToBuilder(sb, item, dur.String(), separatorFns...) 
-} - -func (p *PlacementSettings) String() string { - sb := new(strings.Builder) - if len(p.PrimaryRegion) > 0 { - writeSettingStringToBuilder(sb, "PRIMARY_REGION", p.PrimaryRegion) - } - - if len(p.Regions) > 0 { - writeSettingStringToBuilder(sb, "REGIONS", p.Regions) - } - - if len(p.Schedule) > 0 { - writeSettingStringToBuilder(sb, "SCHEDULE", p.Schedule) - } - - if len(p.Constraints) > 0 { - writeSettingStringToBuilder(sb, "CONSTRAINTS", p.Constraints) - } - - if len(p.LeaderConstraints) > 0 { - writeSettingStringToBuilder(sb, "LEADER_CONSTRAINTS", p.LeaderConstraints) - } - - if p.Voters > 0 { - writeSettingIntegerToBuilder(sb, "VOTERS", p.Voters) - } - - if len(p.VoterConstraints) > 0 { - writeSettingStringToBuilder(sb, "VOTER_CONSTRAINTS", p.VoterConstraints) - } - - if p.Followers > 0 { - writeSettingIntegerToBuilder(sb, "FOLLOWERS", p.Followers) - } - - if len(p.FollowerConstraints) > 0 { - writeSettingStringToBuilder(sb, "FOLLOWER_CONSTRAINTS", p.FollowerConstraints) - } - - if p.Learners > 0 { - writeSettingIntegerToBuilder(sb, "LEARNERS", p.Learners) - } - - if len(p.LearnerConstraints) > 0 { - writeSettingStringToBuilder(sb, "LEARNER_CONSTRAINTS", p.LearnerConstraints) - } - - if len(p.SurvivalPreferences) > 0 { - writeSettingStringToBuilder(sb, "SURVIVAL_PREFERENCES", p.SurvivalPreferences) - } - - return sb.String() -} - -// Clone clones the placement settings. -func (p *PlacementSettings) Clone() *PlacementSettings { - cloned := *p - return &cloned -} - // RunawayActionType is the type of runaway action. type RunawayActionType int32 -//revive:disable:exported +// RunawayActionType values. const ( RunawayActionNone RunawayActionType = iota RunawayActionDryRun @@ -1980,7 +308,7 @@ const ( // RunawayWatchType is the type of runaway watch. type RunawayWatchType int32 -//revive:disable:exported +// RunawayWatchType values. 
const ( WatchNone RunawayWatchType = iota WatchExact @@ -1988,6 +316,7 @@ const ( WatchPlan ) +// String implements fmt.Stringer interface. func (t RunawayWatchType) String() string { switch t { case WatchExact: @@ -2004,13 +333,14 @@ func (t RunawayWatchType) String() string { // RunawayOptionType is the runaway's option type. type RunawayOptionType int -//revive:disable:exported +// RunawayOptionType values. const ( RunawayRule RunawayOptionType = iota RunawayAction RunawayWatch ) +// String implements fmt.Stringer interface. func (t RunawayActionType) String() string { switch t { case RunawayActionDryRun: @@ -2024,165 +354,10 @@ func (t RunawayActionType) String() string { } } -// ResourceGroupRunawaySettings is the runaway settings of the resource group -type ResourceGroupRunawaySettings struct { - ExecElapsedTimeMs uint64 `json:"exec_elapsed_time_ms"` - Action RunawayActionType `json:"action"` - WatchType RunawayWatchType `json:"watch_type"` - WatchDurationMs int64 `json:"watch_duration_ms"` -} - -type ResourceGroupBackgroundSettings struct { - JobTypes []string `json:"job_types"` -} - -// ResourceGroupSettings is the settings of the resource group -type ResourceGroupSettings struct { - RURate uint64 `json:"ru_per_sec"` - Priority uint64 `json:"priority"` - CPULimiter string `json:"cpu_limit"` - IOReadBandwidth string `json:"io_read_bandwidth"` - IOWriteBandwidth string `json:"io_write_bandwidth"` - BurstLimit int64 `json:"burst_limit"` - Runaway *ResourceGroupRunawaySettings `json:"runaway"` - Background *ResourceGroupBackgroundSettings `json:"background"` -} - -// NewResourceGroupSettings creates a new ResourceGroupSettings. 
-func NewResourceGroupSettings() *ResourceGroupSettings { - return &ResourceGroupSettings{ - RURate: 0, - Priority: MediumPriorityValue, - CPULimiter: "", - IOReadBandwidth: "", - IOWriteBandwidth: "", - BurstLimit: 0, - } -} - -// PriorityValueToName converts the priority value to corresponding name -func PriorityValueToName(value uint64) string { - switch value { - case LowPriorityValue: - return "LOW" - case MediumPriorityValue: - return "MEDIUM" - case HighPriorityValue: - return "HIGH" - default: - return "MEDIUM" - } -} - -//revive:disable:exported -const ( - LowPriorityValue = 1 - MediumPriorityValue = 8 - HighPriorityValue = 16 -) - -func (p *ResourceGroupSettings) String() string { - sb := new(strings.Builder) - separatorFn := func() { - sb.WriteString(", ") - } - if p.RURate != 0 { - writeSettingIntegerToBuilder(sb, "RU_PER_SEC", p.RURate, separatorFn) - } - writeSettingItemToBuilder(sb, "PRIORITY="+PriorityValueToName(p.Priority), separatorFn) - if len(p.CPULimiter) > 0 { - writeSettingStringToBuilder(sb, "CPU", p.CPULimiter, separatorFn) - } - if len(p.IOReadBandwidth) > 0 { - writeSettingStringToBuilder(sb, "IO_READ_BANDWIDTH", p.IOReadBandwidth, separatorFn) - } - if len(p.IOWriteBandwidth) > 0 { - writeSettingStringToBuilder(sb, "IO_WRITE_BANDWIDTH", p.IOWriteBandwidth, separatorFn) - } - // Once burst limit is negative, meaning allow burst with unlimit. 
- if p.BurstLimit < 0 { - writeSettingItemToBuilder(sb, "BURSTABLE", separatorFn) - } - if p.Runaway != nil { - writeSettingDurationToBuilder(sb, "QUERY_LIMIT=(EXEC_ELAPSED", time.Duration(p.Runaway.ExecElapsedTimeMs)*time.Millisecond, separatorFn) - writeSettingItemToBuilder(sb, "ACTION="+p.Runaway.Action.String()) - if p.Runaway.WatchType != WatchNone { - writeSettingItemToBuilder(sb, "WATCH="+p.Runaway.WatchType.String()) - if p.Runaway.WatchDurationMs > 0 { - writeSettingDurationToBuilder(sb, "DURATION", time.Duration(p.Runaway.WatchDurationMs)*time.Millisecond) - } else { - writeSettingItemToBuilder(sb, "DURATION=UNLIMITED") - } - } - sb.WriteString(")") - } - if p.Background != nil { - fmt.Fprintf(sb, ", BACKGROUND=(TASK_TYPES='%s')", strings.Join(p.Background.JobTypes, ",")) - } - - return sb.String() -} - -// Adjust adjusts the resource group settings. -func (p *ResourceGroupSettings) Adjust() { - // Curretly we only support ru_per_sec sytanx, so BurstLimit(capicity) is always same as ru_per_sec except burstable. - if p.BurstLimit >= 0 { - p.BurstLimit = int64(p.RURate) - } -} - -// Clone clones the resource group settings. -func (p *ResourceGroupSettings) Clone() *ResourceGroupSettings { - cloned := *p - return &cloned -} - -// ResourceGroupInfo is the struct to store the resource group. -type ResourceGroupInfo struct { - *ResourceGroupSettings - ID int64 `json:"id"` - Name CIStr `json:"name"` - State SchemaState `json:"state"` -} - -// Clone clones the ResourceGroupInfo. -func (p *ResourceGroupInfo) Clone() *ResourceGroupInfo { - cloned := *p - cloned.ResourceGroupSettings = p.ResourceGroupSettings.Clone() - return &cloned -} - -// StatsOptions is the struct to store the stats options. 
-type StatsOptions struct { - *StatsWindowSettings - AutoRecalc bool `json:"auto_recalc"` - ColumnChoice ColumnChoice `json:"column_choice"` - ColumnList []CIStr `json:"column_list"` - SampleNum uint64 `json:"sample_num"` - SampleRate float64 `json:"sample_rate"` - Buckets uint64 `json:"buckets"` - TopN uint64 `json:"topn"` - Concurrency uint `json:"concurrency"` -} - -// NewStatsOptions creates a new StatsOptions. -func NewStatsOptions() *StatsOptions { - return &StatsOptions{ - AutoRecalc: true, - ColumnChoice: DefaultChoice, - ColumnList: []CIStr{}, - SampleNum: uint64(0), - SampleRate: 0.0, - Buckets: uint64(0), - TopN: uint64(0), - Concurrency: uint(0), - } -} - // ColumnChoice is the type of the column choice. type ColumnChoice byte -//revive:disable:exported +// ColumnChoice values. const ( DefaultChoice ColumnChoice = iota AllColumns @@ -2190,8 +365,7 @@ const ( ColumnList ) -//revive:enable:exported - +// String implements fmt.Stringer interface. func (s ColumnChoice) String() string { switch s { case AllColumns: @@ -2205,46 +379,23 @@ func (s ColumnChoice) String() string { } } -// StatsWindowSettings is the settings of the stats window. -type StatsWindowSettings struct { - WindowStart time.Time `json:"window_start"` - WindowEnd time.Time `json:"window_end"` - RepeatType WindowRepeatType `json:"repeat_type"` - RepeatInterval uint `json:"repeat_interval"` -} - -// WindowRepeatType is the type of the window repeat. -type WindowRepeatType byte - -//revive:disable:exported +// Priority values. 
const ( - Never WindowRepeatType = iota - Day - Week - Month + LowPriorityValue = 1 + MediumPriorityValue = 8 + HighPriorityValue = 16 ) -//revive:enable:exported - -func (s WindowRepeatType) String() string { - switch s { - case Never: - return "Never" - case Day: - return "Day" - case Week: - return "Week" - case Month: - return "Month" +// PriorityValueToName converts the priority value to corresponding name +func PriorityValueToName(value uint64) string { + switch value { + case LowPriorityValue: + return "LOW" + case MediumPriorityValue: + return "MEDIUM" + case HighPriorityValue: + return "HIGH" default: - return "" + return "MEDIUM" } } - -// TraceInfo is the information for trace. -type TraceInfo struct { - // ConnectionID is the id of the connection - ConnectionID uint64 `json:"connection_id"` - // SessionAlias is the alias of session - SessionAlias string `json:"session_alias"` -} diff --git a/pkg/parser/model/model_test.go b/pkg/parser/model/model_test.go index 76bfd0b19790e..15e8112c65e36 100644 --- a/pkg/parser/model/model_test.go +++ b/pkg/parser/model/model_test.go @@ -15,13 +15,8 @@ package model import ( "encoding/json" - "fmt" "testing" - "time" - "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/parser/types" "github.com/stretchr/testify/require" ) @@ -32,544 +27,6 @@ func TestT(t *testing.T) { require.Equal(t, "aBC", abc.String()) } -func newColumnForTest(id int64, offset int) *ColumnInfo { - return &ColumnInfo{ - ID: id, - Name: NewCIStr(fmt.Sprintf("c_%d", id)), - Offset: offset, - } -} - -func newIndexForTest(id int64, cols ...*ColumnInfo) *IndexInfo { - idxCols := make([]*IndexColumn, 0, len(cols)) - for _, c := range cols { - idxCols = append(idxCols, &IndexColumn{Offset: c.Offset, Name: c.Name}) - } - return &IndexInfo{ - ID: id, - Name: NewCIStr(fmt.Sprintf("i_%d", id)), - Columns: idxCols, - } -} - -func checkOffsets(t *testing.T, tbl *TableInfo, ids ...int) { - 
require.Equal(t, len(ids), len(tbl.Columns)) - for i := 0; i < len(ids); i++ { - expected := fmt.Sprintf("c_%d", ids[i]) - require.Equal(t, expected, tbl.Columns[i].Name.L) - require.Equal(t, i, tbl.Columns[i].Offset) - } - for _, col := range tbl.Columns { - for _, idx := range tbl.Indices { - for _, idxCol := range idx.Columns { - if col.Name.L != idxCol.Name.L { - continue - } - // Columns with the same name should have a same offset. - require.Equal(t, col.Offset, idxCol.Offset) - } - } - } -} - -func TestMoveColumnInfo(t *testing.T) { - c0 := newColumnForTest(0, 0) - c1 := newColumnForTest(1, 1) - c2 := newColumnForTest(2, 2) - c3 := newColumnForTest(3, 3) - c4 := newColumnForTest(4, 4) - - i0 := newIndexForTest(0, c0, c1, c2, c3, c4) - i1 := newIndexForTest(1, c4, c2) - i2 := newIndexForTest(2, c0, c4) - i3 := newIndexForTest(3, c1, c2, c3) - i4 := newIndexForTest(4, c3, c2, c1) - - tbl := &TableInfo{ - ID: 1, - Name: NewCIStr("t"), - Columns: []*ColumnInfo{c0, c1, c2, c3, c4}, - Indices: []*IndexInfo{i0, i1, i2, i3, i4}, - } - - // Original offsets: [0, 1, 2, 3, 4] - tbl.MoveColumnInfo(4, 0) - checkOffsets(t, tbl, 4, 0, 1, 2, 3) - tbl.MoveColumnInfo(2, 3) - checkOffsets(t, tbl, 4, 0, 2, 1, 3) - tbl.MoveColumnInfo(3, 2) - checkOffsets(t, tbl, 4, 0, 1, 2, 3) - tbl.MoveColumnInfo(0, 4) - checkOffsets(t, tbl, 0, 1, 2, 3, 4) - tbl.MoveColumnInfo(2, 2) - checkOffsets(t, tbl, 0, 1, 2, 3, 4) - tbl.MoveColumnInfo(0, 0) - checkOffsets(t, tbl, 0, 1, 2, 3, 4) - tbl.MoveColumnInfo(1, 4) - checkOffsets(t, tbl, 0, 2, 3, 4, 1) - tbl.MoveColumnInfo(3, 0) - checkOffsets(t, tbl, 4, 0, 2, 3, 1) -} - -func TestModelBasic(t *testing.T) { - column := &ColumnInfo{ - ID: 1, - Name: NewCIStr("c"), - Offset: 0, - DefaultValue: 0, - FieldType: *types.NewFieldType(0), - Hidden: true, - } - column.AddFlag(mysql.PriKeyFlag) - - index := &IndexInfo{ - Name: NewCIStr("key"), - Table: NewCIStr("t"), - Columns: []*IndexColumn{ - { - Name: NewCIStr("c"), - Offset: 0, - Length: 10, - }}, - 
Unique: true, - Primary: true, - } - - fk := &FKInfo{ - RefCols: []CIStr{NewCIStr("a")}, - Cols: []CIStr{NewCIStr("a")}, - } - - seq := &SequenceInfo{ - Increment: 1, - MinValue: 1, - MaxValue: 100, - } - - table := &TableInfo{ - ID: 1, - Name: NewCIStr("t"), - Charset: "utf8", - Collate: "utf8_bin", - Columns: []*ColumnInfo{column}, - Indices: []*IndexInfo{index}, - ForeignKeys: []*FKInfo{fk}, - PKIsHandle: true, - } - - table2 := &TableInfo{ - ID: 2, - Name: NewCIStr("s"), - Sequence: seq, - } - - dbInfo := &DBInfo{ - ID: 1, - Name: NewCIStr("test"), - Charset: "utf8", - Collate: "utf8_bin", - } - dbInfo.Deprecated.Tables = []*TableInfo{table} - - n := dbInfo.Clone() - require.Equal(t, dbInfo, n) - - pkName := table.GetPkName() - require.Equal(t, NewCIStr("c"), pkName) - newColumn := table.GetPkColInfo() - require.Equal(t, true, newColumn.Hidden) - require.Equal(t, column, newColumn) - inIdx := table.ColumnIsInIndex(column) - require.Equal(t, true, inIdx) - tp := IndexTypeBtree - require.Equal(t, "BTREE", tp.String()) - tp = IndexTypeHash - require.Equal(t, "HASH", tp.String()) - tp = 1e5 - require.Equal(t, "", tp.String()) - has := index.HasPrefixIndex() - require.Equal(t, true, has) - require.Equal(t, TSConvert2Time(table.UpdateTS), table.GetUpdateTime()) - require.True(t, table2.IsSequence()) - require.False(t, table2.IsBaseTable()) - - // Corner cases - column.ToggleFlag(mysql.PriKeyFlag) - pkName = table.GetPkName() - require.Equal(t, NewCIStr(""), pkName) - newColumn = table.GetPkColInfo() - require.Nil(t, newColumn) - anCol := &ColumnInfo{ - Name: NewCIStr("d"), - } - exIdx := table.ColumnIsInIndex(anCol) - require.Equal(t, false, exIdx) - anIndex := &IndexInfo{ - Columns: []*IndexColumn{}, - } - no := anIndex.HasPrefixIndex() - require.Equal(t, false, no) - - extraPK := NewExtraHandleColInfo() - require.Equal(t, mysql.NotNullFlag|mysql.PriKeyFlag, extraPK.GetFlag()) - require.Equal(t, charset.CharsetBin, extraPK.GetCharset()) - require.Equal(t, 
charset.CollationBin, extraPK.GetCollate()) -} - -func TestJobStartTime(t *testing.T) { - job := &Job{ - ID: 123, - BinlogInfo: &HistoryInfo{}, - } - require.Equal(t, TSConvert2Time(job.StartTS), time.Unix(0, 0)) - require.Equal(t, fmt.Sprintf("ID:123, Type:none, State:none, SchemaState:none, SchemaID:0, TableID:0, RowCount:0, ArgLen:0, start time: %s, Err:, ErrCount:0, SnapshotVersion:0, LocalMode: false", time.Unix(0, 0)), job.String()) -} - -func TestJobCodec(t *testing.T) { - type A struct { - Name string - } - tzName, tzOffset := time.Now().In(time.UTC).Zone() - job := &Job{ - ID: 1, - TableID: 2, - SchemaID: 1, - BinlogInfo: &HistoryInfo{}, - Args: []interface{}{NewCIStr("a"), A{Name: "abc"}}, - ReorgMeta: &DDLReorgMeta{ - Location: &TimeZoneLocation{Name: tzName, Offset: tzOffset}, - }, - } - job.BinlogInfo.AddDBInfo(123, &DBInfo{ID: 1, Name: NewCIStr("test_history_db")}) - job.BinlogInfo.AddTableInfo(123, &TableInfo{ID: 1, Name: NewCIStr("test_history_tbl")}) - - // Test IsDependentOn. - // job: table ID is 2 - // job1: table ID is 2 - var err error - job1 := &Job{ - ID: 2, - TableID: 2, - SchemaID: 1, - Type: ActionRenameTable, - BinlogInfo: &HistoryInfo{}, - Args: []interface{}{int64(3), NewCIStr("new_table_name")}, - } - job1.RawArgs, err = json.Marshal(job1.Args) - require.NoError(t, err) - isDependent, err := job.IsDependentOn(job1) - require.NoError(t, err) - require.True(t, isDependent) - // job1: rename table, old schema ID is 3 - // job2: create schema, schema ID is 3 - job2 := &Job{ - ID: 3, - TableID: 3, - SchemaID: 3, - Type: ActionCreateSchema, - BinlogInfo: &HistoryInfo{}, - } - isDependent, err = job2.IsDependentOn(job1) - require.NoError(t, err) - require.True(t, isDependent) - - // Test IsDependentOn for exchange partition with table. - // test ActionCreateSchema and ActionExchangeTablePartition is dependent. 
- job3 := &Job{ - ID: 4, - TableID: 4, - SchemaID: 4, - Type: ActionExchangeTablePartition, - BinlogInfo: &HistoryInfo{}, - Args: []interface{}{int64(6), int64(3), int64(5), "pt", true}, - } - job3.RawArgs, err = json.Marshal(job3.Args) - require.NoError(t, err) - isDependent, err = job3.IsDependentOn(job2) - require.NoError(t, err) - require.True(t, isDependent) - - // test random and ActionExchangeTablePartition is dependent because TableID is same. - job4 := &Job{ - ID: 5, - TableID: 5, - SchemaID: 3, - Type: ActionExchangeTablePartition, - BinlogInfo: &HistoryInfo{}, - Args: []interface{}{6, 4, 2, "pt", true}, - } - job4.RawArgs, err = json.Marshal(job4.Args) - require.NoError(t, err) - isDependent, err = job4.IsDependentOn(job) - require.NoError(t, err) - require.True(t, isDependent) - - // test ActionExchangeTablePartition and ActionExchangeTablePartition is dependent. - job5 := &Job{ - ID: 6, - TableID: 6, - SchemaID: 6, - Type: ActionExchangeTablePartition, - BinlogInfo: &HistoryInfo{}, - Args: []interface{}{2, 6, 5, "pt", true}, - } - job5.RawArgs, err = json.Marshal(job5.Args) - require.NoError(t, err) - isDependent, err = job5.IsDependentOn(job4) - require.NoError(t, err) - require.True(t, isDependent) - - job6 := &Job{ - ID: 7, - TableID: 7, - SchemaID: 7, - Type: ActionExchangeTablePartition, - BinlogInfo: &HistoryInfo{}, - Args: []interface{}{6, 4, 2, "pt", true}, - } - job6.RawArgs, err = json.Marshal(job6.Args) - require.NoError(t, err) - isDependent, err = job6.IsDependentOn(job5) - require.NoError(t, err) - require.True(t, isDependent) - - job7 := &Job{ - ID: 8, - TableID: 8, - SchemaID: 8, - Type: ActionExchangeTablePartition, - BinlogInfo: &HistoryInfo{}, - Args: []interface{}{8, 4, 6, "pt", true}, - } - job7.RawArgs, err = json.Marshal(job7.Args) - require.NoError(t, err) - isDependent, err = job7.IsDependentOn(job6) - require.NoError(t, err) - require.True(t, isDependent) - - job8 := &Job{ - ID: 9, - TableID: 9, - SchemaID: 9, - Type: 
ActionExchangeTablePartition, - BinlogInfo: &HistoryInfo{}, - Args: []interface{}{8, 9, 9, "pt", true}, - } - job8.RawArgs, err = json.Marshal(job8.Args) - require.NoError(t, err) - isDependent, err = job8.IsDependentOn(job7) - require.NoError(t, err) - require.True(t, isDependent) - - job9 := &Job{ - ID: 10, - TableID: 10, - SchemaID: 10, - Type: ActionExchangeTablePartition, - BinlogInfo: &HistoryInfo{}, - Args: []interface{}{10, 10, 8, "pt", true}, - } - job9.RawArgs, err = json.Marshal(job9.Args) - require.NoError(t, err) - isDependent, err = job9.IsDependentOn(job8) - require.NoError(t, err) - require.True(t, isDependent) - - // test ActionDropSchema and ActionExchangeTablePartition is dependent. - job10 := &Job{ - ID: 11, - TableID: 11, - SchemaID: 11, - Type: ActionDropSchema, - BinlogInfo: &HistoryInfo{}, - } - job10.RawArgs, err = json.Marshal(job10.Args) - require.NoError(t, err) - - job11 := &Job{ - ID: 12, - TableID: 12, - SchemaID: 11, - Type: ActionExchangeTablePartition, - BinlogInfo: &HistoryInfo{}, - Args: []interface{}{10, 10, 8, "pt", true}, - } - job11.RawArgs, err = json.Marshal(job11.Args) - require.NoError(t, err) - isDependent, err = job11.IsDependentOn(job10) - require.NoError(t, err) - require.True(t, isDependent) - - // test ActionDropTable and ActionExchangeTablePartition is dependent. 
- job12 := &Job{ - ID: 13, - TableID: 13, - SchemaID: 11, - Type: ActionDropTable, - BinlogInfo: &HistoryInfo{}, - } - job12.RawArgs, err = json.Marshal(job12.Args) - require.NoError(t, err) - isDependent, err = job11.IsDependentOn(job12) - require.NoError(t, err) - require.False(t, isDependent) - - job13 := &Job{ - ID: 14, - TableID: 12, - SchemaID: 14, - Type: ActionDropTable, - BinlogInfo: &HistoryInfo{}, - } - job13.RawArgs, err = json.Marshal(job13.Args) - require.NoError(t, err) - isDependent, err = job11.IsDependentOn(job13) - require.NoError(t, err) - require.True(t, isDependent) - - // test ActionDropTable and ActionExchangeTablePartition is dependent. - job14 := &Job{ - ID: 15, - TableID: 15, - SchemaID: 15, - Type: ActionExchangeTablePartition, - BinlogInfo: &HistoryInfo{}, - Args: []interface{}{16, 17, 12, "pt", true}, - } - job14.RawArgs, err = json.Marshal(job14.Args) - require.NoError(t, err) - isDependent, err = job13.IsDependentOn(job14) - require.NoError(t, err) - require.True(t, isDependent) - - // test ActionFlashbackCluster with other ddl jobs are dependent. 
- job15 := &Job{ - ID: 16, - Type: ActionFlashbackCluster, - BinlogInfo: &HistoryInfo{}, - Args: []interface{}{0, map[string]interface{}{}, "ON", true}, - } - job15.RawArgs, err = json.Marshal(job15.Args) - require.NoError(t, err) - isDependent, err = job.IsDependentOn(job15) - require.NoError(t, err) - require.True(t, isDependent) - - require.Equal(t, false, job.IsCancelled()) - b, err := job.Encode(false) - require.NoError(t, err) - newJob := &Job{} - err = newJob.Decode(b) - require.NoError(t, err) - require.Equal(t, job.BinlogInfo, newJob.BinlogInfo) - name := CIStr{} - a := A{} - err = newJob.DecodeArgs(&name, &a) - require.NoError(t, err) - require.Equal(t, NewCIStr(""), name) - require.Equal(t, A{Name: ""}, a) - require.Greater(t, len(newJob.String()), 0) - require.Equal(t, newJob.ReorgMeta.Location.Name, tzName) - require.Equal(t, newJob.ReorgMeta.Location.Offset, tzOffset) - - job.BinlogInfo.Clean() - b1, err := job.Encode(true) - require.NoError(t, err) - newJob = &Job{} - err = newJob.Decode(b1) - require.NoError(t, err) - require.Equal(t, &HistoryInfo{}, newJob.BinlogInfo) - name = CIStr{} - a = A{} - err = newJob.DecodeArgs(&name, &a) - require.NoError(t, err) - require.Equal(t, NewCIStr("a"), name) - require.Equal(t, A{Name: "abc"}, a) - require.Greater(t, len(newJob.String()), 0) - - b2, err := job.Encode(true) - require.NoError(t, err) - newJob = &Job{} - err = newJob.Decode(b2) - require.NoError(t, err) - name = CIStr{} - // Don't decode to a here. 
- err = newJob.DecodeArgs(&name) - require.NoError(t, err) - require.Equal(t, NewCIStr("a"), name) - require.Greater(t, len(newJob.String()), 0) - - job.State = JobStateDone - require.True(t, job.IsDone()) - require.True(t, job.IsFinished()) - require.False(t, job.IsRunning()) - require.False(t, job.IsSynced()) - require.False(t, job.IsRollbackDone()) - job.SetRowCount(3) - require.Equal(t, int64(3), job.GetRowCount()) -} - -func TestState(t *testing.T) { - schemaTbl := []SchemaState{ - StateDeleteOnly, - StateWriteOnly, - StateWriteReorganization, - StateDeleteReorganization, - StatePublic, - StateGlobalTxnOnly, - } - - for _, state := range schemaTbl { - require.Greater(t, len(state.String()), 0) - } - - jobTbl := []JobState{ - JobStateRunning, - JobStateDone, - JobStateCancelled, - JobStateRollingback, - JobStateRollbackDone, - JobStateSynced, - } - - for _, state := range jobTbl { - require.Greater(t, len(state.String()), 0) - } -} - -func TestString(t *testing.T) { - acts := []struct { - act ActionType - result string - }{ - {ActionNone, "none"}, - {ActionAddForeignKey, "add foreign key"}, - {ActionDropForeignKey, "drop foreign key"}, - {ActionTruncateTable, "truncate table"}, - {ActionModifyColumn, "modify column"}, - {ActionRenameTable, "rename table"}, - {ActionRenameTables, "rename tables"}, - {ActionSetDefaultValue, "set default value"}, - {ActionCreateSchema, "create schema"}, - {ActionDropSchema, "drop schema"}, - {ActionCreateTable, "create table"}, - {ActionDropTable, "drop table"}, - {ActionAddIndex, "add index"}, - {ActionDropIndex, "drop index"}, - {ActionAddColumn, "add column"}, - {ActionDropColumn, "drop column"}, - {ActionModifySchemaCharsetAndCollate, "modify schema charset and collate"}, - {ActionAlterTablePlacement, "alter table placement"}, - {ActionAlterTablePartitionPlacement, "alter table partition placement"}, - {ActionAlterNoCacheTable, "alter table nocache"}, - } - - for _, v := range acts { - str := v.act.String() - require.Equal(t, 
v.result, str) - } -} - func TestUnmarshalCIStr(t *testing.T) { var ci CIStr @@ -588,246 +45,3 @@ func TestUnmarshalCIStr(t *testing.T) { require.Equal(t, str, ci.O) require.Equal(t, "aabb", ci.L) } - -func TestDefaultValue(t *testing.T) { - srcCol := &ColumnInfo{ - ID: 1, - } - randPlainStr := "random_plain_string" - - oldPlainCol := srcCol.Clone() - oldPlainCol.Name = NewCIStr("oldPlainCol") - oldPlainCol.FieldType = *types.NewFieldType(mysql.TypeLong) - oldPlainCol.DefaultValue = randPlainStr - oldPlainCol.OriginDefaultValue = randPlainStr - - newPlainCol := srcCol.Clone() - newPlainCol.Name = NewCIStr("newPlainCol") - newPlainCol.FieldType = *types.NewFieldType(mysql.TypeLong) - err := newPlainCol.SetDefaultValue(1) - require.NoError(t, err) - require.Equal(t, 1, newPlainCol.GetDefaultValue()) - err = newPlainCol.SetDefaultValue(randPlainStr) - require.NoError(t, err) - require.Equal(t, randPlainStr, newPlainCol.GetDefaultValue()) - - randBitStr := string([]byte{25, 185}) - - oldBitCol := srcCol.Clone() - oldBitCol.Name = NewCIStr("oldBitCol") - oldBitCol.FieldType = *types.NewFieldType(mysql.TypeBit) - oldBitCol.DefaultValue = randBitStr - oldBitCol.OriginDefaultValue = randBitStr - - newBitCol := srcCol.Clone() - newBitCol.Name = NewCIStr("newBitCol") - newBitCol.FieldType = *types.NewFieldType(mysql.TypeBit) - err = newBitCol.SetDefaultValue(1) - // Only string type is allowed in BIT column. 
- require.Error(t, err) - require.Contains(t, err.Error(), "Invalid default value") - require.Equal(t, 1, newBitCol.GetDefaultValue()) - err = newBitCol.SetDefaultValue(randBitStr) - require.NoError(t, err) - require.Equal(t, randBitStr, newBitCol.GetDefaultValue()) - - nullBitCol := srcCol.Clone() - nullBitCol.Name = NewCIStr("nullBitCol") - nullBitCol.FieldType = *types.NewFieldType(mysql.TypeBit) - err = nullBitCol.SetOriginDefaultValue(nil) - require.NoError(t, err) - require.Nil(t, nullBitCol.GetOriginDefaultValue()) - - testCases := []struct { - col *ColumnInfo - isConsistent bool - }{ - {oldPlainCol, true}, - {oldBitCol, false}, - {newPlainCol, true}, - {newBitCol, true}, - {nullBitCol, true}, - } - for _, tc := range testCases { - col, isConsistent := tc.col, tc.isConsistent - comment := fmt.Sprintf("%s assertion failed", col.Name.O) - bytes, err := json.Marshal(col) - require.NoError(t, err, comment) - var newCol ColumnInfo - err = json.Unmarshal(bytes, &newCol) - require.NoError(t, err, comment) - if isConsistent { - require.Equal(t, col.GetDefaultValue(), newCol.GetDefaultValue(), comment) - require.Equal(t, col.GetOriginDefaultValue(), newCol.GetOriginDefaultValue(), comment) - } else { - require.NotEqual(t, col.GetDefaultValue(), newCol.GetDefaultValue(), comment) - require.NotEqual(t, col.GetOriginDefaultValue(), newCol.GetOriginDefaultValue(), comment) - } - } -} - -func TestPlacementSettingsString(t *testing.T) { - settings := &PlacementSettings{ - PrimaryRegion: "us-east-1", - Regions: "us-east-1,us-east-2", - Schedule: "EVEN", - } - require.Equal(t, "PRIMARY_REGION=\"us-east-1\" REGIONS=\"us-east-1,us-east-2\" SCHEDULE=\"EVEN\"", settings.String()) - - settings = &PlacementSettings{ - LeaderConstraints: "[+region=bj]", - } - require.Equal(t, "LEADER_CONSTRAINTS=\"[+region=bj]\"", settings.String()) - - settings = &PlacementSettings{ - Voters: 1, - VoterConstraints: "[+region=us-east-1]", - Followers: 2, - FollowerConstraints: "[+disk=ssd]", - 
Learners: 3, - LearnerConstraints: "[+region=us-east-2]", - } - require.Equal(t, "VOTERS=1 VOTER_CONSTRAINTS=\"[+region=us-east-1]\" FOLLOWERS=2 FOLLOWER_CONSTRAINTS=\"[+disk=ssd]\" LEARNERS=3 LEARNER_CONSTRAINTS=\"[+region=us-east-2]\"", settings.String()) - - settings = &PlacementSettings{ - Voters: 3, - Followers: 2, - Learners: 1, - Constraints: "{\"+us-east-1\":1,+us-east-2:1}", - } - require.Equal(t, "CONSTRAINTS=\"{\\\"+us-east-1\\\":1,+us-east-2:1}\" VOTERS=3 FOLLOWERS=2 LEARNERS=1", settings.String()) -} - -func TestPlacementSettingsClone(t *testing.T) { - settings := &PlacementSettings{} - clonedSettings := settings.Clone() - clonedSettings.PrimaryRegion = "r1" - clonedSettings.Regions = "r1,r2" - clonedSettings.Followers = 1 - clonedSettings.Voters = 2 - clonedSettings.Followers = 3 - clonedSettings.Constraints = "[+zone=z1]" - clonedSettings.LearnerConstraints = "[+region=r1]" - clonedSettings.FollowerConstraints = "[+disk=ssd]" - clonedSettings.LeaderConstraints = "[+region=r2]" - clonedSettings.VoterConstraints = "[+zone=z2]" - clonedSettings.Schedule = "even" - require.Equal(t, PlacementSettings{}, *settings) -} - -func TestPlacementPolicyClone(t *testing.T) { - policy := &PolicyInfo{ - PlacementSettings: &PlacementSettings{}, - } - clonedPolicy := policy.Clone() - clonedPolicy.ID = 100 - clonedPolicy.Name = NewCIStr("p2") - clonedPolicy.State = StateDeleteOnly - clonedPolicy.PlacementSettings.Followers = 10 - - require.Equal(t, int64(0), policy.ID) - require.Equal(t, NewCIStr(""), policy.Name) - require.Equal(t, StateNone, policy.State) - require.Equal(t, PlacementSettings{}, *(policy.PlacementSettings)) -} - -func TestLocation(t *testing.T) { - // test offset = 0 - loc := &TimeZoneLocation{} - nLoc, err := loc.GetLocation() - require.NoError(t, err) - require.Equal(t, nLoc.String(), "UTC") - // test loc.location != nil - loc.Name = "Asia/Shanghai" - nLoc, err = loc.GetLocation() - require.NoError(t, err) - require.Equal(t, nLoc.String(), "UTC") - 
// timezone +05:00 - loc1 := &TimeZoneLocation{Name: "UTC", Offset: 18000} - loc1Byte, err := json.Marshal(loc1) - require.NoError(t, err) - loc2 := &TimeZoneLocation{} - err = json.Unmarshal(loc1Byte, loc2) - require.NoError(t, err) - require.Equal(t, loc2.Offset, loc1.Offset) - require.Equal(t, loc2.Name, loc1.Name) - nLoc, err = loc2.GetLocation() - require.NoError(t, err) - require.Equal(t, nLoc.String(), "UTC") - location := time.FixedZone("UTC", loc1.Offset) - require.Equal(t, nLoc, location) -} - -func TestIsIndexPrefixCovered(t *testing.T) { - c0 := newColumnForTest(0, 0) - c1 := newColumnForTest(1, 1) - c2 := newColumnForTest(2, 2) - c3 := newColumnForTest(3, 3) - c4 := newColumnForTest(4, 4) - - i0 := newIndexForTest(0, c0, c1, c2) - i1 := newIndexForTest(1, c4, c2) - - tbl := &TableInfo{ - ID: 1, - Name: NewCIStr("t"), - Columns: []*ColumnInfo{c0, c1, c2, c3, c4}, - Indices: []*IndexInfo{i0, i1}, - } - require.Equal(t, true, IsIndexPrefixCovered(tbl, i0, NewCIStr("c_0"))) - require.Equal(t, true, IsIndexPrefixCovered(tbl, i0, NewCIStr("c_0"), NewCIStr("c_1"), NewCIStr("c_2"))) - require.Equal(t, false, IsIndexPrefixCovered(tbl, i0, NewCIStr("c_1"))) - require.Equal(t, false, IsIndexPrefixCovered(tbl, i0, NewCIStr("c_2"))) - require.Equal(t, false, IsIndexPrefixCovered(tbl, i0, NewCIStr("c_1"), NewCIStr("c_2"))) - require.Equal(t, false, IsIndexPrefixCovered(tbl, i0, NewCIStr("c_0"), NewCIStr("c_2"))) - - require.Equal(t, true, IsIndexPrefixCovered(tbl, i1, NewCIStr("c_4"))) - require.Equal(t, true, IsIndexPrefixCovered(tbl, i1, NewCIStr("c_4"), NewCIStr("c_2"))) - require.Equal(t, false, IsIndexPrefixCovered(tbl, i0, NewCIStr("c_2"))) -} - -func TestTTLInfoClone(t *testing.T) { - ttlInfo := &TTLInfo{ - ColumnName: NewCIStr("test"), - IntervalExprStr: "test_expr", - IntervalTimeUnit: 5, - Enable: true, - } - - clonedTTLInfo := ttlInfo.Clone() - clonedTTLInfo.ColumnName = NewCIStr("test_2") - clonedTTLInfo.IntervalExprStr = "test_expr_2" - 
clonedTTLInfo.IntervalTimeUnit = 9 - clonedTTLInfo.Enable = false - - require.Equal(t, "test", ttlInfo.ColumnName.O) - require.Equal(t, "test_expr", ttlInfo.IntervalExprStr) - require.Equal(t, 5, ttlInfo.IntervalTimeUnit) - require.Equal(t, true, ttlInfo.Enable) -} - -func TestTTLJobInterval(t *testing.T) { - ttlInfo := &TTLInfo{} - - interval, err := ttlInfo.GetJobInterval() - require.NoError(t, err) - require.Equal(t, time.Hour, interval) - - ttlInfo = &TTLInfo{JobInterval: "200h"} - interval, err = ttlInfo.GetJobInterval() - require.NoError(t, err) - require.Equal(t, time.Hour*200, interval) -} - -func TestClearReorgIntermediateInfo(t *testing.T) { - ptInfo := &PartitionInfo{} - ptInfo.DDLType = PartitionTypeHash - ptInfo.DDLExpr = "Test DDL Expr" - ptInfo.NewTableID = 1111 - - ptInfo.ClearReorgIntermediateInfo() - require.Equal(t, PartitionTypeNone, ptInfo.DDLType) - require.Equal(t, "", ptInfo.DDLExpr) - require.Equal(t, true, ptInfo.DDLColumns == nil) - require.Equal(t, int64(0), ptInfo.NewTableID) -} diff --git a/pkg/parser/parser.go b/pkg/parser/parser.go index 5c8c5cc545a09..4e7f854190ac0 100644 --- a/pkg/parser/parser.go +++ b/pkg/parser/parser.go @@ -13375,7 +13375,7 @@ yynewstate: return 1 } parser.yyVAL.item = &ast.ResourceGroupRunawayOption{ - Tp: ast.RunawayRule, + Tp: model.RunawayRule, RuleOption: &ast.ResourceGroupRunawayRuleOption{ ExecElapsed: yyS[yypt-0].ident, }, @@ -13384,7 +13384,7 @@ yynewstate: case 25: { parser.yyVAL.item = &ast.ResourceGroupRunawayOption{ - Tp: ast.RunawayAction, + Tp: model.RunawayAction, ActionOption: yyS[yypt-0].item.(*ast.ResourceGroupRunawayActionOption), } } @@ -13402,7 +13402,7 @@ yynewstate: } } parser.yyVAL.item = &ast.ResourceGroupRunawayOption{ - Tp: ast.RunawayWatch, + Tp: model.RunawayWatch, WatchOption: &ast.ResourceGroupRunawayWatchOption{ Type: yyS[yypt-1].item.(model.RunawayWatchType), Duration: dur, diff --git a/pkg/parser/parser.y b/pkg/parser/parser.y index c5e68b56fd375..60024d376dbde 100644 --- 
a/pkg/parser/parser.y +++ b/pkg/parser/parser.y @@ -1834,7 +1834,7 @@ DirectResourceGroupRunawayOption: return 1 } $$ = &ast.ResourceGroupRunawayOption{ - Tp: ast.RunawayRule, + Tp: model.RunawayRule, RuleOption: &ast.ResourceGroupRunawayRuleOption{ ExecElapsed: $3, }, @@ -1843,7 +1843,7 @@ DirectResourceGroupRunawayOption: | "ACTION" EqOpt ResourceGroupRunawayActionOption { $$ = &ast.ResourceGroupRunawayOption{ - Tp: ast.RunawayAction, + Tp: model.RunawayAction, ActionOption: $3.(*ast.ResourceGroupRunawayActionOption), } } @@ -1861,7 +1861,7 @@ DirectResourceGroupRunawayOption: } } $$ = &ast.ResourceGroupRunawayOption{ - Tp: ast.RunawayWatch, + Tp: model.RunawayWatch, WatchOption: &ast.ResourceGroupRunawayWatchOption{ Type: $3.(model.RunawayWatchType), Duration: dur, diff --git a/pkg/planner/cardinality/BUILD.bazel b/pkg/planner/cardinality/BUILD.bazel index f470a3dc21486..b308d5479ae28 100644 --- a/pkg/planner/cardinality/BUILD.bazel +++ b/pkg/planner/cardinality/BUILD.bazel @@ -18,9 +18,9 @@ go_library( deps = [ "//pkg/expression", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/format", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/planner/context", "//pkg/planner/property", @@ -67,6 +67,7 @@ go_test( "//pkg/expression", "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/model", "//pkg/parser/mysql", diff --git a/pkg/planner/cardinality/row_count_test.go b/pkg/planner/cardinality/row_count_test.go index d1c436f9aede0..d205753e20a32 100644 --- a/pkg/planner/cardinality/row_count_test.go +++ b/pkg/planner/cardinality/row_count_test.go @@ -17,7 +17,7 @@ package cardinality import ( "testing" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/planner/cardinality/selectivity.go b/pkg/planner/cardinality/selectivity.go index 
a23db7fe0e7f5..6241160461aad 100644 --- a/pkg/planner/cardinality/selectivity.go +++ b/pkg/planner/cardinality/selectivity.go @@ -22,8 +22,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/context" planutil "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/planner/util/debugtrace" diff --git a/pkg/planner/cardinality/selectivity_test.go b/pkg/planner/cardinality/selectivity_test.go index f5622d4c7d06f..1f61b3c3089e4 100644 --- a/pkg/planner/cardinality/selectivity_test.go +++ b/pkg/planner/cardinality/selectivity_test.go @@ -28,7 +28,8 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/cardinality" plannercore "github.com/pingcap/tidb/pkg/planner/core" @@ -127,7 +128,7 @@ func TestOutOfRangeEstimation(t *testing.T) { testKit.MustExec("analyze table t with 2000 samples") h := dom.StatsHandle() - table, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) statsTbl := h.GetTableStats(table.Meta()) sctx := mock.NewContext() @@ -230,7 +231,7 @@ func TestEstimationForUnknownValues(t *testing.T) { } require.Nil(t, h.DumpStatsDeltaToKV(true)) require.Nil(t, h.Update(context.Background(), dom.InfoSchema())) - table, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := dom.InfoSchema().TableByName(context.Background(), 
pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) statsTbl := h.GetTableStats(table.Meta()) @@ -260,7 +261,7 @@ func TestEstimationForUnknownValues(t *testing.T) { testKit.MustExec("truncate table t") testKit.MustExec("insert into t values (null, null)") testKit.MustExec("analyze table t") - table, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) statsTbl = h.GetTableStats(table.Meta()) @@ -273,7 +274,7 @@ func TestEstimationForUnknownValues(t *testing.T) { testKit.MustExec("create table t(a int, b int, index idx(b))") testKit.MustExec("insert into t values (1,1)") testKit.MustExec("analyze table t") - table, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) statsTbl = h.GetTableStats(table.Meta()) @@ -296,7 +297,7 @@ func TestEstimationUniqueKeyEqualConds(t *testing.T) { testKit.MustExec("create table t(a int, b int, c int, unique key(b))") testKit.MustExec("insert into t values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7)") testKit.MustExec("analyze table t all columns with 4 cmsketch width, 1 cmsketch depth;") - table, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) statsTbl := dom.StatsHandle().GetTableStats(table.Meta()) @@ -486,7 +487,7 @@ func TestDNFCondSelectivity(t *testing.T) { ctx := context.Background() h := dom.StatsHandle() - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tb, err := 
dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblInfo := tb.Meta() statsTbl := h.GetTableStats(tblInfo) @@ -598,7 +599,7 @@ func TestSmallRangeEstimation(t *testing.T) { testKit.MustExec("analyze table t with 0 topn") h := dom.StatsHandle() - table, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) statsTbl := h.GetTableStats(table.Meta()) sctx := mock.NewContext() @@ -682,7 +683,7 @@ func prepareSelectivity(testKit *testkit.TestKit, dom *domain.Domain) (*statisti testKit.MustExec("create table t(a int primary key, b int, c int, d int, e int, index idx_cd(c, d), index idx_de(d, e))") is := dom.InfoSchema() - tb, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tb, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) if err != nil { return nil, err } @@ -906,7 +907,7 @@ func TestIssue39593(t *testing.T) { testKit.MustExec("drop table if exists t") testKit.MustExec("create table t(a int, b int, index idx(a, b))") is := dom.InfoSchema() - tb, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tb, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblInfo := tb.Meta() @@ -956,7 +957,7 @@ func TestIndexJoinInnerRowCountUpperBound(t *testing.T) { testKit.MustExec("create table t(a int, b int, index idx(b))") require.NoError(t, h.HandleDDLEvent(<-h.DDLEventCh())) is := dom.InfoSchema() - tb, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tb, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblInfo := tb.Meta() @@ -1027,7 
+1028,7 @@ func TestOrderingIdxSelectivityThreshold(t *testing.T) { testKit.MustExec("create table t(a int primary key , b int, c int, index ib(b), index ic(c))") require.NoError(t, h.HandleDDLEvent(<-h.DDLEventCh())) is := dom.InfoSchema() - tb, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tb, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblInfo := tb.Meta() @@ -1110,7 +1111,7 @@ func TestOrderingIdxSelectivityRatio(t *testing.T) { testKit.MustExec("create table t(a int primary key, b int, c int, index ib(b), index ic(c))") require.NoError(t, h.HandleDDLEvent(<-h.DDLEventCh())) is := dom.InfoSchema() - tb, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tb, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblInfo := tb.Meta() @@ -1349,7 +1350,7 @@ func TestBuiltinInEstWithoutStats(t *testing.T) { tk.MustQuery("explain format='brief' select * from t where a in (1, 2, 3, 4, 5, 6, 7, 8)").Check(expectedA) tk.MustQuery("explain format='brief' select * from t where b in (1, 2, 3, 4, 5, 6, 7, 8)").Check(expectedB) require.NoError(t, h.Update(context.Background(), is)) - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) statsTbl, found := h.Get(tbl.Meta().ID) require.True(t, found) diff --git a/pkg/planner/cardinality/trace.go b/pkg/planner/cardinality/trace.go index e5d5a7e83ee01..42513d8c4f742 100644 --- a/pkg/planner/cardinality/trace.go +++ b/pkg/planner/cardinality/trace.go @@ -21,9 +21,9 @@ import ( perrors "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" 
"github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/context" "github.com/pingcap/tidb/pkg/planner/util/debugtrace" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" diff --git a/pkg/planner/cardinality/trace_test.go b/pkg/planner/cardinality/trace_test.go index e1759c6707a32..ac2ce80bc67ca 100644 --- a/pkg/planner/cardinality/trace_test.go +++ b/pkg/planner/cardinality/trace_test.go @@ -26,8 +26,9 @@ import ( "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/cardinality" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -191,7 +192,7 @@ func TestTraceDebugSelectivity(t *testing.T) { require.NoError(t, err) sctx := tk.Session().(sessionctx.Context) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblInfo := tb.Meta() statsTbl := statsHandle.GetTableStats(tblInfo) diff --git a/pkg/planner/cascades/BUILD.bazel b/pkg/planner/cascades/BUILD.bazel index 85e75e02e0023..f6e7fed103137 100644 --- a/pkg/planner/cascades/BUILD.bazel +++ b/pkg/planner/cascades/BUILD.bazel @@ -54,8 +54,8 @@ go_test( "//pkg/domain", "//pkg/expression", "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser", - "//pkg/parser/model", "//pkg/planner/core", "//pkg/planner/core/base", "//pkg/planner/core/operator/logicalop", diff --git a/pkg/planner/cascades/optimize_test.go b/pkg/planner/cascades/optimize_test.go index 417c3ca72094e..12acd356290e9 100644 --- a/pkg/planner/cascades/optimize_test.go +++ 
b/pkg/planner/cascades/optimize_test.go @@ -22,8 +22,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" diff --git a/pkg/planner/cascades/stringer_test.go b/pkg/planner/cascades/stringer_test.go index d4773325ce014..092d3606c6921 100644 --- a/pkg/planner/cascades/stringer_test.go +++ b/pkg/planner/cascades/stringer_test.go @@ -20,8 +20,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/resolve" diff --git a/pkg/planner/cascades/transformation_rules_test.go b/pkg/planner/cascades/transformation_rules_test.go index cbd526c56d4b4..36ee47625635f 100644 --- a/pkg/planner/cascades/transformation_rules_test.go +++ b/pkg/planner/cascades/transformation_rules_test.go @@ -20,8 +20,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/resolve" diff --git a/pkg/planner/context/BUILD.bazel b/pkg/planner/context/BUILD.bazel index d1c00bad2ed94..b0a9cf7a57db6 100644 --- a/pkg/planner/context/BUILD.bazel +++ b/pkg/planner/context/BUILD.bazel @@ -10,7 +10,7 @@ go_library( "//pkg/infoschema/context", 
"//pkg/kv", "//pkg/lock/context", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/sessionctx/variable", "//pkg/util", "//pkg/util/context", diff --git a/pkg/planner/context/context.go b/pkg/planner/context/context.go index e6252f8383c01..a53f9e86d4965 100644 --- a/pkg/planner/context/context.go +++ b/pkg/planner/context/context.go @@ -19,7 +19,7 @@ import ( infoschema "github.com/pingcap/tidb/pkg/infoschema/context" "github.com/pingcap/tidb/pkg/kv" tablelock "github.com/pingcap/tidb/pkg/lock/context" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/util" contextutil "github.com/pingcap/tidb/pkg/util/context" diff --git a/pkg/planner/core/BUILD.bazel b/pkg/planner/core/BUILD.bazel index db230d9e40e04..960ad2f493817 100644 --- a/pkg/planner/core/BUILD.bazel +++ b/pkg/planner/core/BUILD.bazel @@ -112,6 +112,7 @@ go_library( "//pkg/lock", "//pkg/lock/context", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser", "//pkg/parser/ast", @@ -277,6 +278,7 @@ go_test( "//pkg/expression/contextstatic", "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser", "//pkg/parser/ast", diff --git a/pkg/planner/core/access_object.go b/pkg/planner/core/access_object.go index 2be6b44fb1663..fb83b52b5ae92 100644 --- a/pkg/planner/core/access_object.go +++ b/pkg/planner/core/access_object.go @@ -22,7 +22,7 @@ import ( "strings" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tipb/go-tipb" diff --git a/pkg/planner/core/casetest/BUILD.bazel b/pkg/planner/core/casetest/BUILD.bazel index 4373abd827560..c6c0e0644fd65 100644 --- a/pkg/planner/core/casetest/BUILD.bazel +++ b/pkg/planner/core/casetest/BUILD.bazel @@ -16,6 +16,7 @@ go_test( deps 
= [ "//pkg/domain", "//pkg/errno", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/model", "//pkg/planner/core", diff --git a/pkg/planner/core/casetest/cbotest/BUILD.bazel b/pkg/planner/core/casetest/cbotest/BUILD.bazel index 375b5b2d221a5..cd8e47357b17a 100644 --- a/pkg/planner/core/casetest/cbotest/BUILD.bazel +++ b/pkg/planner/core/casetest/cbotest/BUILD.bazel @@ -14,6 +14,7 @@ go_test( deps = [ "//pkg/domain", "//pkg/executor", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/planner", "//pkg/planner/core", diff --git a/pkg/planner/core/casetest/cbotest/cbo_test.go b/pkg/planner/core/casetest/cbotest/cbo_test.go index 0e368d38b0a2e..9e43d42fe7448 100644 --- a/pkg/planner/core/casetest/cbotest/cbo_test.go +++ b/pkg/planner/core/casetest/cbotest/cbo_test.go @@ -24,7 +24,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/executor" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/resolve" @@ -558,7 +559,7 @@ func TestTiFlashCostModel(t *testing.T) { tk.MustExec("create table t (a int, b int, c int, primary key(a))") tk.MustExec("insert into t values(1,1,1), (2,2,2), (3,3,3)") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t", L: "t"}) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t", L: "t"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. 
tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} diff --git a/pkg/planner/core/casetest/dag/BUILD.bazel b/pkg/planner/core/casetest/dag/BUILD.bazel index d48c0875135bf..8df6159b856ef 100644 --- a/pkg/planner/core/casetest/dag/BUILD.bazel +++ b/pkg/planner/core/casetest/dag/BUILD.bazel @@ -13,9 +13,9 @@ go_test( deps = [ "//pkg/domain", "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", - "//pkg/parser/model", "//pkg/planner", "//pkg/planner/core", "//pkg/planner/core/resolve", diff --git a/pkg/planner/core/casetest/dag/dag_test.go b/pkg/planner/core/casetest/dag/dag_test.go index df494985154db..07bc329f1d67a 100644 --- a/pkg/planner/core/casetest/dag/dag_test.go +++ b/pkg/planner/core/casetest/dag/dag_test.go @@ -21,9 +21,9 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/resolve" diff --git a/pkg/planner/core/casetest/enforcempp/BUILD.bazel b/pkg/planner/core/casetest/enforcempp/BUILD.bazel index 4b641844323a2..65369c052c5e4 100644 --- a/pkg/planner/core/casetest/enforcempp/BUILD.bazel +++ b/pkg/planner/core/casetest/enforcempp/BUILD.bazel @@ -12,6 +12,7 @@ go_test( shard_count = 12, deps = [ "//pkg/domain", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/planner/util/coretestsdk", "//pkg/testkit", diff --git a/pkg/planner/core/casetest/enforcempp/enforce_mpp_test.go b/pkg/planner/core/casetest/enforcempp/enforce_mpp_test.go index 4fc102016cab3..d8f3aa5124590 100644 --- a/pkg/planner/core/casetest/enforcempp/enforce_mpp_test.go +++ b/pkg/planner/core/casetest/enforcempp/enforce_mpp_test.go @@ -21,7 +21,8 @@ import ( "time" "github.com/pingcap/tidb/pkg/domain" - 
"github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/util/coretestsdk" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/external" @@ -52,7 +53,7 @@ func TestEnforceMPP(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - db, exists := is.SchemaByName(model.NewCIStr("test")) + db, exists := is.SchemaByName(pmodel.NewCIStr("test")) require.True(t, exists) coretestsdk.SetTiFlashReplica(t, dom, db.Name.L, "t") coretestsdk.SetTiFlashReplica(t, dom, db.Name.L, "s") @@ -130,7 +131,7 @@ func TestEnforceMPPWarning1(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tblInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblInfo.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -169,7 +170,7 @@ func TestEnforceMPPWarning2(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -217,7 +218,7 @@ func TestEnforceMPPWarning3(t *testing.T) { // Create virtual tiflash replica info. 
dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -374,7 +375,7 @@ func TestMPPSkewedGroupDistinctRewrite(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -422,7 +423,7 @@ func TestMPPSingleDistinct3Stage(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, diff --git a/pkg/planner/core/casetest/flatplan/BUILD.bazel b/pkg/planner/core/casetest/flatplan/BUILD.bazel index fcd9da8b6af2b..0943092880c5d 100644 --- a/pkg/planner/core/casetest/flatplan/BUILD.bazel +++ b/pkg/planner/core/casetest/flatplan/BUILD.bazel @@ -13,8 +13,8 @@ go_test( "//pkg/config", "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser", - "//pkg/parser/model", "//pkg/planner", "//pkg/planner/core", "//pkg/planner/core/resolve", diff --git a/pkg/planner/core/casetest/flatplan/flat_plan_test.go b/pkg/planner/core/casetest/flatplan/flat_plan_test.go index f057165bf2f79..41c4cab825505 100644 --- a/pkg/planner/core/casetest/flatplan/flat_plan_test.go +++ b/pkg/planner/core/casetest/flatplan/flat_plan_test.go 
@@ -21,8 +21,8 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/resolve" diff --git a/pkg/planner/core/casetest/hint/BUILD.bazel b/pkg/planner/core/casetest/hint/BUILD.bazel index fb991010adaee..388a30b1ad4db 100644 --- a/pkg/planner/core/casetest/hint/BUILD.bazel +++ b/pkg/planner/core/casetest/hint/BUILD.bazel @@ -13,6 +13,7 @@ go_test( deps = [ "//pkg/config", "//pkg/domain", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/planner/util/coretestsdk", "//pkg/sessionctx/variable", diff --git a/pkg/planner/core/casetest/hint/hint_test.go b/pkg/planner/core/casetest/hint/hint_test.go index aa671b836e4a9..3dbc953e8b364 100644 --- a/pkg/planner/core/casetest/hint/hint_test.go +++ b/pkg/planner/core/casetest/hint/hint_test.go @@ -19,7 +19,8 @@ import ( "testing" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/util/coretestsdk" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" @@ -87,7 +88,7 @@ func TestAllViewHintType(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -270,7 +271,7 @@ func TestOptimizeHintOnPartitionTable(t *testing.T) { // Create virtual tiflash replica info. 
dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, diff --git a/pkg/planner/core/casetest/integration_test.go b/pkg/planner/core/casetest/integration_test.go index 145969843f102..313f9bd48d667 100644 --- a/pkg/planner/core/casetest/integration_test.go +++ b/pkg/planner/core/casetest/integration_test.go @@ -21,7 +21,8 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/util/coretestsdk" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testdata" @@ -87,7 +88,7 @@ func TestIsolationReadTiFlashNotChoosePointGet(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tblInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblInfo.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -175,7 +176,7 @@ func TestIssue31240(t *testing.T) { // since allow-mpp is adjusted to false, there will be no physical plan if TiFlash cop is banned. 
tk.MustExec("set @@session.tidb_allow_tiflash_cop=ON") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t31240", L: "t31240"}) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t31240", L: "t31240"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -228,9 +229,9 @@ func TestIssue32632(t *testing.T) { require.NoError(t, h.HandleDDLEvent(<-h.DDLEventCh())) tk.MustExec("set @@tidb_enforce_mpp = 1") - tbl1, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "partsupp", L: "partsupp"}) + tbl1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "partsupp", L: "partsupp"}) require.NoError(t, err) - tbl2, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "supplier", L: "supplier"}) + tbl2, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "supplier", L: "supplier"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. 
tbl1.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -274,9 +275,9 @@ func TestTiFlashPartitionTableScan(t *testing.T) { tk.MustExec("drop table if exists hp_t;") tk.MustExec("create table rp_t(a int) partition by RANGE (a) (PARTITION p0 VALUES LESS THAN (6),PARTITION p1 VALUES LESS THAN (11), PARTITION p2 VALUES LESS THAN (16), PARTITION p3 VALUES LESS THAN (21));") tk.MustExec("create table hp_t(a int) partition by hash(a) partitions 4;") - tbl1, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "rp_t", L: "rp_t"}) + tbl1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "rp_t", L: "rp_t"}) require.NoError(t, err) - tbl2, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "hp_t", L: "hp_t"}) + tbl2, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "hp_t", L: "hp_t"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. tbl1.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -309,7 +310,7 @@ func TestTiFlashFineGrainedShuffle(t *testing.T) { tk.MustExec("drop table if exists t1;") tk.MustExec("create table t1(c1 int, c2 int)") - tbl1, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t1", L: "t1"}) + tbl1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t1", L: "t1"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. 
tbl1.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -473,7 +474,7 @@ func TestTiFlashExtraColumnPrune(t *testing.T) { tk.MustExec("drop table if exists t1;") tk.MustExec("create table t1(c1 int, c2 int)") - tbl1, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t1", L: "t1"}) + tbl1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t1", L: "t1"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. tbl1.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} diff --git a/pkg/planner/core/casetest/mpp/BUILD.bazel b/pkg/planner/core/casetest/mpp/BUILD.bazel index 052fd0d9e4eaf..7cf46e36a6980 100644 --- a/pkg/planner/core/casetest/mpp/BUILD.bazel +++ b/pkg/planner/core/casetest/mpp/BUILD.bazel @@ -13,6 +13,7 @@ go_test( deps = [ "//pkg/config", "//pkg/domain", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/planner/util/coretestsdk", "//pkg/testkit", diff --git a/pkg/planner/core/casetest/mpp/mpp_test.go b/pkg/planner/core/casetest/mpp/mpp_test.go index 43f3c3b6320e4..bcc8508ddcf9a 100644 --- a/pkg/planner/core/casetest/mpp/mpp_test.go +++ b/pkg/planner/core/casetest/mpp/mpp_test.go @@ -21,7 +21,8 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/util/coretestsdk" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testdata" @@ -89,7 +90,7 @@ func TestMPPLeftSemiJoin(t *testing.T) { // Create virtual tiflash replica info. 
dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -613,7 +614,7 @@ func TestMppAggTopNWithJoin(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -651,7 +652,7 @@ func TestRejectSortForMPP(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -689,7 +690,7 @@ func TestPushDownSelectionForMPP(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -728,7 +729,7 @@ func TestPushDownProjectionForMPP(t *testing.T) { // Create virtual tiflash replica info. 
dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -766,7 +767,7 @@ func TestPushDownAggForMPP(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -803,7 +804,7 @@ func TestMppVersion(t *testing.T) { // Create virtual tiflash replica info. is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, diff --git a/pkg/planner/core/casetest/physicalplantest/BUILD.bazel b/pkg/planner/core/casetest/physicalplantest/BUILD.bazel index bb2eafb9f75da..4dc70207d8987 100644 --- a/pkg/planner/core/casetest/physicalplantest/BUILD.bazel +++ b/pkg/planner/core/casetest/physicalplantest/BUILD.bazel @@ -16,9 +16,9 @@ go_test( "//pkg/domain", "//pkg/executor", "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", - "//pkg/parser/model", "//pkg/planner", "//pkg/planner/core", "//pkg/planner/core/resolve", diff --git a/pkg/planner/core/casetest/physicalplantest/physical_plan_test.go b/pkg/planner/core/casetest/physicalplantest/physical_plan_test.go index ec027c73330bd..925d2acdca769 100644 --- a/pkg/planner/core/casetest/physicalplantest/physical_plan_test.go +++ 
b/pkg/planner/core/casetest/physicalplantest/physical_plan_test.go @@ -25,9 +25,9 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/resolve" diff --git a/pkg/planner/core/casetest/plan_test.go b/pkg/planner/core/casetest/plan_test.go index 23ce5f3316c41..27a6e2d24c533 100644 --- a/pkg/planner/core/casetest/plan_test.go +++ b/pkg/planner/core/casetest/plan_test.go @@ -21,7 +21,8 @@ import ( "testing" "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/testkit" @@ -239,7 +240,7 @@ func TestNormalizedPlanForDiffStore(t *testing.T) { tk.MustExec("drop table if exists t1") tk.MustExec("create table t1 (a int, b int, c int, primary key(a))") tk.MustExec("insert into t1 values(1,1,1), (2,2,2), (3,3,3)") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t1", L: "t1"}) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t1", L: "t1"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. 
tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} diff --git a/pkg/planner/core/casetest/planstats/BUILD.bazel b/pkg/planner/core/casetest/planstats/BUILD.bazel index 3a67afd4fbe2d..e414ad5456309 100644 --- a/pkg/planner/core/casetest/planstats/BUILD.bazel +++ b/pkg/planner/core/casetest/planstats/BUILD.bazel @@ -14,6 +14,7 @@ go_test( "//pkg/config", "//pkg/domain", "//pkg/executor", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/model", "//pkg/planner", diff --git a/pkg/planner/core/casetest/planstats/plan_stats_test.go b/pkg/planner/core/casetest/planstats/plan_stats_test.go index cdd964173dfa1..d2deac5a13691 100644 --- a/pkg/planner/core/casetest/planstats/plan_stats_test.go +++ b/pkg/planner/core/casetest/planstats/plan_stats_test.go @@ -25,8 +25,9 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/executor" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -217,7 +218,7 @@ func TestPlanStatsLoad(t *testing.T) { nodeW := resolve.NewNodeW(stmt) p, _, err := planner.Optimize(context.TODO(), ctx, nodeW, is) require.NoError(t, err) - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := tbl.Meta() testCase.check(p, tableInfo) @@ -269,7 +270,7 @@ func TestPlanStatsLoadTimeout(t *testing.T) { tk.MustExec("analyze table t all columns") is := dom.InfoSchema() require.NoError(t, dom.StatsHandle().Update(context.Background(), is)) - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, 
err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := tbl.Meta() neededColumn := model.StatsLoadItem{TableItemID: model.TableItemID{TableID: tableInfo.ID, ID: tableInfo.Columns[0].ID, IsIndex: false}, FullLoad: true} @@ -365,7 +366,7 @@ func TestCollectDependingVirtualCols(t *testing.T) { tblName2TblID := make(map[string]int64) tblID2Tbl := make(map[int64]table.Table) for _, tblName := range tableNames { - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr(tblName)) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr(tblName)) require.NoError(t, err) tblName2TblID[tblName] = tbl.Meta().ID tblID2Tbl[tbl.Meta().ID] = tbl diff --git a/pkg/planner/core/casetest/pushdown/BUILD.bazel b/pkg/planner/core/casetest/pushdown/BUILD.bazel index a9bfc80ead97c..6288eefb214e5 100644 --- a/pkg/planner/core/casetest/pushdown/BUILD.bazel +++ b/pkg/planner/core/casetest/pushdown/BUILD.bazel @@ -12,6 +12,7 @@ go_test( shard_count = 6, deps = [ "//pkg/domain", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/planner/util/coretestsdk", "//pkg/testkit", diff --git a/pkg/planner/core/casetest/pushdown/push_down_test.go b/pkg/planner/core/casetest/pushdown/push_down_test.go index 646c7bbcc1fd0..32e1ca33a3125 100644 --- a/pkg/planner/core/casetest/pushdown/push_down_test.go +++ b/pkg/planner/core/casetest/pushdown/push_down_test.go @@ -19,7 +19,8 @@ import ( "testing" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/util/coretestsdk" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testdata" @@ -38,7 +39,7 @@ func TestPushDownToTiFlashWithKeepOrder(t *testing.T) { // Create virtual tiflash replica info. 
dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -78,7 +79,7 @@ func TestPushDownToTiFlashWithKeepOrderInFastMode(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -151,7 +152,7 @@ func TestPushDownProjectionForTiFlashCoprocessor(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -187,7 +188,7 @@ func TestSelPushDownTiFlash(t *testing.T) { // Create virtual tiflash replica info. 
dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, diff --git a/pkg/planner/core/collect_column_stats_usage.go b/pkg/planner/core/collect_column_stats_usage.go index 0bdf6be5a66ce..ab8af9b0b126f 100644 --- a/pkg/planner/core/collect_column_stats_usage.go +++ b/pkg/planner/core/collect_column_stats_usage.go @@ -19,7 +19,7 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" "github.com/pingcap/tidb/pkg/sessionctx/variable" diff --git a/pkg/planner/core/collect_column_stats_usage_test.go b/pkg/planner/core/collect_column_stats_usage_test.go index e316eac2ab73c..92802c0dfbb3c 100644 --- a/pkg/planner/core/collect_column_stats_usage_test.go +++ b/pkg/planner/core/collect_column_stats_usage_test.go @@ -22,7 +22,8 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/planner/core/rule" @@ -37,7 +38,7 @@ func getColumnName(t *testing.T, is infoschema.InfoSchema, tblColID model.TableI tblInfo = tbl.Meta() prefix = tblInfo.Name.L + "." 
} else { - db, exists := is.SchemaByName(model.NewCIStr("test")) + db, exists := is.SchemaByName(pmodel.NewCIStr("test")) require.True(t, exists, comment) tblInfos, err := is.SchemaTableInfos(context.Background(), db.Name) require.NoError(t, err) diff --git a/pkg/planner/core/common_plans.go b/pkg/planner/core/common_plans.go index 436e42974f7ed..24376090463dd 100644 --- a/pkg/planner/core/common_plans.go +++ b/pkg/planner/core/common_plans.go @@ -24,8 +24,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/physicalop" @@ -539,7 +540,7 @@ type V2AnalyzeOptions struct { PhyTableID int64 RawOpts map[ast.AnalyzeOptionType]uint64 FilledOpts map[ast.AnalyzeOptionType]uint64 - ColChoice model.ColumnChoice + ColChoice pmodel.ColumnChoice ColumnList []*model.ColumnInfo IsPartition bool } @@ -670,7 +671,7 @@ type SplitRegion struct { baseSchemaProducer TableInfo *model.TableInfo - PartitionNames []model.CIStr + PartitionNames []pmodel.CIStr IndexInfo *model.IndexInfo Lower []types.Datum Upper []types.Datum @@ -692,7 +693,7 @@ type CompactTable struct { ReplicaKind ast.CompactReplicaKind TableInfo *model.TableInfo - PartitionNames []model.CIStr + PartitionNames []pmodel.CIStr } // DDL represents a DDL statement plan. 
diff --git a/pkg/planner/core/debugtrace.go b/pkg/planner/core/debugtrace.go index babda2b1d4551..688f724524cb3 100644 --- a/pkg/planner/core/debugtrace.go +++ b/pkg/planner/core/debugtrace.go @@ -21,8 +21,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/context" "github.com/pingcap/tidb/pkg/planner/core/base" diff --git a/pkg/planner/core/enforce_mpp_test.go b/pkg/planner/core/enforce_mpp_test.go index 896958fa59705..92fc684ae3043 100644 --- a/pkg/planner/core/enforce_mpp_test.go +++ b/pkg/planner/core/enforce_mpp_test.go @@ -21,7 +21,8 @@ import ( "testing" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" ) @@ -39,7 +40,7 @@ func TestRowSizeInMPP(t *testing.T) { // Create virtual tiflash replica info. 
dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - db, exists := is.SchemaByName(model.NewCIStr("test")) + db, exists := is.SchemaByName(pmodel.NewCIStr("test")) require.True(t, exists) tblInfos, err := is.SchemaTableInfos(context.Background(), db.Name) require.NoError(t, err) diff --git a/pkg/planner/core/exhaust_physical_plans.go b/pkg/planner/core/exhaust_physical_plans.go index b4655e9ef9d46..6583a0650fde6 100644 --- a/pkg/planner/core/exhaust_physical_plans.go +++ b/pkg/planner/core/exhaust_physical_plans.go @@ -25,8 +25,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/core/base" diff --git a/pkg/planner/core/expression_rewriter.go b/pkg/planner/core/expression_rewriter.go index 236b8c8ea7a1c..7c75b71e28ef3 100644 --- a/pkg/planner/core/expression_rewriter.go +++ b/pkg/planner/core/expression_rewriter.go @@ -30,9 +30,10 @@ import ( "github.com/pingcap/tidb/pkg/expression/contextopt" "github.com/pingcap/tidb/pkg/infoschema" infoschemactx "github.com/pingcap/tidb/pkg/infoschema/context" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/opcode" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -2573,7 +2574,7 @@ func (er *expressionRewriter) evalDefaultExprWithPlanCtx(planCtx *exprRewriterPl dbName := name.DBName if dbName.O == "" { // if database name is not specified, use current database name - dbName = 
model.NewCIStr(planCtx.builder.ctx.GetSessionVars().CurrentDB) + dbName = pmodel.NewCIStr(planCtx.builder.ctx.GetSessionVars().CurrentDB) } if name.OrigTblName.O == "" { // column is evaluated by some expressions, for example: diff --git a/pkg/planner/core/expression_test.go b/pkg/planner/core/expression_test.go index c85ccc1f2ea17..27b630008533f 100644 --- a/pkg/planner/core/expression_test.go +++ b/pkg/planner/core/expression_test.go @@ -23,10 +23,11 @@ import ( "github.com/pingcap/tidb/pkg/expression/context" "github.com/pingcap/tidb/pkg/expression/contextopt" "github.com/pingcap/tidb/pkg/expression/contextstatic" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit/testutil" @@ -385,7 +386,7 @@ func TestBuildExpression(t *testing.T) { tbl := &model.TableInfo{ Columns: []*model.ColumnInfo{ { - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), Offset: 0, State: model.StatePublic, FieldType: *types.NewFieldType(mysql.TypeString), @@ -393,13 +394,13 @@ func TestBuildExpression(t *testing.T) { DefaultValue: "uuid()", }, { - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), Offset: 1, State: model.StatePublic, FieldType: *types.NewFieldType(mysql.TypeLonglong), }, { - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), Offset: 2, State: model.StatePublic, FieldType: *types.NewFieldType(mysql.TypeLonglong), @@ -410,7 +411,7 @@ func TestBuildExpression(t *testing.T) { ctx := contextstatic.NewStaticExprContext() evalCtx := ctx.GetStaticEvalCtx() - cols, names, err := expression.ColumnInfos2ColumnsAndNames(ctx, model.NewCIStr(""), tbl.Name, tbl.Cols(), tbl) + cols, names, err := 
expression.ColumnInfos2ColumnsAndNames(ctx, pmodel.NewCIStr(""), tbl.Name, tbl.Cols(), tbl) require.NoError(t, err) schema := expression.NewSchema(cols...) diff --git a/pkg/planner/core/find_best_task.go b/pkg/planner/core/find_best_task.go index 48334328d9623..af68575143be2 100644 --- a/pkg/planner/core/find_best_task.go +++ b/pkg/planner/core/find_best_task.go @@ -26,8 +26,8 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -2178,7 +2178,7 @@ func (is *PhysicalIndexScan) initSchema(idxExprCols []*expression.Column, isDoub RetType: types.NewFieldType(mysql.TypeLonglong), ID: model.ExtraPhysTblID, UniqueID: is.SCtx().GetSessionVars().AllocPlanColumnID(), - OrigName: model.ExtraPhysTblIdName.O, + OrigName: model.ExtraPhysTblIDName.O, }) } } @@ -2203,7 +2203,7 @@ func (is *PhysicalIndexScan) addSelectionConditionForGlobalIndex(p *DataSource, } if len(args) != 1 { - return nil, errors.Errorf("Can't find column %s in schema %s", model.ExtraPhysTblIdName.O, is.schema) + return nil, errors.Errorf("Can't find column %s in schema %s", model.ExtraPhysTblIDName.O, is.schema) } // For SQL like 'select x from t partition(p0, p1) use index(idx)', diff --git a/pkg/planner/core/foreign_key.go b/pkg/planner/core/foreign_key.go index 34b96e16b5882..c1cdd302c60c0 100644 --- a/pkg/planner/core/foreign_key.go +++ b/pkg/planner/core/foreign_key.go @@ -21,7 +21,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" 
"github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/physicalop" @@ -36,7 +37,7 @@ type FKCheck struct { ReferredFK *model.ReferredFKInfo Tbl table.Table Idx table.Index - Cols []model.CIStr + Cols []pmodel.CIStr IdxIsPrimaryKey bool IdxIsExclusive bool @@ -122,9 +123,9 @@ func (f *FKCascade) AccessObject() base.AccessObject { func (f *FKCascade) OperatorInfo(bool) string { switch f.Tp { case FKCascadeOnDelete: - return fmt.Sprintf("foreign_key:%s, on_delete:%s", f.FK.Name, model.ReferOptionType(f.FK.OnDelete).String()) + return fmt.Sprintf("foreign_key:%s, on_delete:%s", f.FK.Name, pmodel.ReferOptionType(f.FK.OnDelete).String()) case FKCascadeOnUpdate: - return fmt.Sprintf("foreign_key:%s, on_update:%s", f.FK.Name, model.ReferOptionType(f.FK.OnUpdate).String()) + return fmt.Sprintf("foreign_key:%s, on_update:%s", f.FK.Name, pmodel.ReferOptionType(f.FK.OnUpdate).String()) } return "" } @@ -383,19 +384,19 @@ func buildOnDeleteOrUpdateFKTrigger(ctx base.PlanContext, is infoschema.InfoSche if fk == nil || fk.Version < 1 { return nil, nil, nil } - var fkReferOption model.ReferOptionType + var fkReferOption pmodel.ReferOptionType if fk.State != model.StatePublic { - fkReferOption = model.ReferOptionRestrict + fkReferOption = pmodel.ReferOptionRestrict } else { switch tp { case FKCascadeOnDelete: - fkReferOption = model.ReferOptionType(fk.OnDelete) + fkReferOption = pmodel.ReferOptionType(fk.OnDelete) case FKCascadeOnUpdate: - fkReferOption = model.ReferOptionType(fk.OnUpdate) + fkReferOption = pmodel.ReferOptionType(fk.OnUpdate) } } switch fkReferOption { - case model.ReferOptionCascade, model.ReferOptionSetNull: + case pmodel.ReferOptionCascade, pmodel.ReferOptionSetNull: fkCascade, err := buildFKCascade(ctx, tp, referredFK, childTable, fk) return nil, fkCascade, err default: @@ -404,7 +405,7 @@ func buildOnDeleteOrUpdateFKTrigger(ctx base.PlanContext, is infoschema.InfoSche } } -func isMapContainAnyCols(colsMap 
map[string]struct{}, cols ...model.CIStr) bool { +func isMapContainAnyCols(colsMap map[string]struct{}, cols ...pmodel.CIStr) bool { for _, col := range cols { _, exist := colsMap[col.L] if exist { @@ -439,7 +440,7 @@ func buildFKCheckForReferredFK(ctx base.PlanContext, childTable table.Table, fk return fkCheck, nil } -func buildFKCheck(ctx base.PlanContext, tbl table.Table, cols []model.CIStr, failedErr error) (*FKCheck, error) { +func buildFKCheck(ctx base.PlanContext, tbl table.Table, cols []pmodel.CIStr, failedErr error) (*FKCheck, error) { tblInfo := tbl.Meta() if tblInfo.PKIsHandle && len(cols) == 1 { refColInfo := model.FindColumnInfo(tblInfo.Columns, cols[0].L) diff --git a/pkg/planner/core/indexmerge_path.go b/pkg/planner/core/indexmerge_path.go index 4364be98a3429..62844e1958050 100644 --- a/pkg/planner/core/indexmerge_path.go +++ b/pkg/planner/core/indexmerge_path.go @@ -23,9 +23,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/context" diff --git a/pkg/planner/core/indexmerge_test.go b/pkg/planner/core/indexmerge_test.go index 7d495c24ec318..75068e7e25aa5 100644 --- a/pkg/planner/core/indexmerge_test.go +++ b/pkg/planner/core/indexmerge_test.go @@ -22,8 +22,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/planner/util" diff --git 
a/pkg/planner/core/indexmerge_unfinished_path.go b/pkg/planner/core/indexmerge_unfinished_path.go index c22ea11e4d3b7..d641bdfb7bb29 100644 --- a/pkg/planner/core/indexmerge_unfinished_path.go +++ b/pkg/planner/core/indexmerge_unfinished_path.go @@ -19,7 +19,7 @@ import ( "slices" "github.com/pingcap/tidb/pkg/expression" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/planner/util" ) diff --git a/pkg/planner/core/integration_test.go b/pkg/planner/core/integration_test.go index 9151a28fc11df..d0497f1606bb3 100644 --- a/pkg/planner/core/integration_test.go +++ b/pkg/planner/core/integration_test.go @@ -29,8 +29,9 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/util/coretestsdk" @@ -83,7 +84,7 @@ func TestAggPushDownEngine(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tblInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblInfo.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -127,9 +128,9 @@ func TestIssue15110And49616(t *testing.T) { // Create virtual tiflash replica info. 
dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - db, exists := is.SchemaByName(model.NewCIStr("test")) + db, exists := is.SchemaByName(pmodel.NewCIStr("test")) require.True(t, exists) - tblInfo, err := is.TableByName(context.Background(), db.Name, model.NewCIStr("crm_rd_150m")) + tblInfo, err := is.TableByName(context.Background(), db.Name, pmodel.NewCIStr("crm_rd_150m")) require.NoError(t, err) tblInfo.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -158,12 +159,12 @@ func TestPartitionPruningForEQ(t *testing.T) { tk.MustExec("create table t(a datetime, b int) partition by range(weekday(a)) (partition p0 values less than(10), partition p1 values less than (100))") is := tk.Session().GetInfoSchema().(infoschema.InfoSchema) - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) pt := tbl.(table.PartitionedTable) query, err := expression.ParseSimpleExpr(tk.Session().GetExprCtx(), "a = '2020-01-01 00:00:00'", expression.WithTableInfo("", tbl.Meta())) require.NoError(t, err) - dbName := model.NewCIStr(tk.Session().GetSessionVars().CurrentDB) + dbName := pmodel.NewCIStr(tk.Session().GetSessionVars().CurrentDB) columns, names, err := expression.ColumnInfos2ColumnsAndNames(tk.Session().GetExprCtx(), dbName, tbl.Meta().Name, tbl.Meta().Cols(), tbl.Meta()) require.NoError(t, err) // Even the partition is not monotonous, EQ condition should be prune! @@ -185,7 +186,7 @@ func TestNotReadOnlySQLOnTiFlash(t *testing.T) { // Create virtual tiflash replica info. 
dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tblInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblInfo.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -217,7 +218,7 @@ func TestTimeToSecPushDownToTiFlash(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -247,7 +248,7 @@ func TestRightShiftPushDownToTiFlash(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -683,7 +684,7 @@ func TestReverseUTF8PushDownToTiFlash(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -713,7 +714,7 @@ func TestReversePushDownToTiFlash(t *testing.T) { // Create virtual tiflash replica info. 
dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -743,7 +744,7 @@ func TestSpacePushDownToTiFlash(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -879,7 +880,7 @@ func TestConflictReadFromStorage(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -941,7 +942,7 @@ func TestIssue31202(t *testing.T) { tk.MustExec("use test") tk.MustExec("create table t31202(a int primary key, b int);") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t31202", L: "t31202"}) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t31202", L: "t31202"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. 
tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -998,7 +999,7 @@ func TestTiFlashFineGrainedShuffleWithMaxTiFlashThreads(t *testing.T) { tk.MustExec("set @@tidb_enforce_mpp = on") tk.MustExec("drop table if exists t1;") tk.MustExec("create table t1(c1 int, c2 int)") - tbl1, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t1", L: "t1"}) + tbl1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t1", L: "t1"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. tbl1.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -1187,7 +1188,7 @@ func TestRepeatPushDownToTiFlash(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -1222,7 +1223,7 @@ func TestIssue36194(t *testing.T) { // create virtual tiflash replica. 
is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -1247,7 +1248,7 @@ func TestGetFormatPushDownToTiFlash(t *testing.T) { tk.MustExec("set @@tidb_enforce_mpp=1;") tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash';") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t", L: "t"}) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t", L: "t"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -1274,7 +1275,7 @@ func TestAggWithJsonPushDownToTiFlash(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -1319,7 +1320,7 @@ func TestLeftShiftPushDownToTiFlash(t *testing.T) { // Create virtual tiflash replica info. 
is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -1346,7 +1347,7 @@ func TestHexIntOrStrPushDownToTiFlash(t *testing.T) { tk.MustExec("set @@tidb_allow_mpp=1; set @@tidb_enforce_mpp=1;") tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash'") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t", L: "t"}) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t", L: "t"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -1379,7 +1380,7 @@ func TestBinPushDownToTiFlash(t *testing.T) { tk.MustExec("set @@tidb_allow_mpp=1; set @@tidb_enforce_mpp=1;") tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash'") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t", L: "t"}) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t", L: "t"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -1407,7 +1408,7 @@ func TestEltPushDownToTiFlash(t *testing.T) { // Create virtual tiflash replica info. 
is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -1436,7 +1437,7 @@ func TestRegexpInstrPushDownToTiFlash(t *testing.T) { // Create virtual tiflash replica info. is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -1465,7 +1466,7 @@ func TestRegexpSubstrPushDownToTiFlash(t *testing.T) { // Create virtual tiflash replica info. is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -1494,7 +1495,7 @@ func TestRegexpReplacePushDownToTiFlash(t *testing.T) { // Create virtual tiflash replica info. is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -1527,7 +1528,7 @@ func TestCastTimeAsDurationToTiFlash(t *testing.T) { // Create virtual tiflash replica info. 
is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -1554,7 +1555,7 @@ func TestUnhexPushDownToTiFlash(t *testing.T) { tk.MustExec("set @@tidb_allow_mpp=1; set @@tidb_enforce_mpp=1;") tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash'") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t", L: "t"}) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t", L: "t"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -1587,7 +1588,7 @@ func TestLeastGretestStringPushDownToTiFlash(t *testing.T) { tk.MustExec("set @@tidb_allow_mpp=1; set @@tidb_enforce_mpp=1") tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash'") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t", L: "t"}) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t", L: "t"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -1632,12 +1633,12 @@ func TestTiFlashReadForWriteStmt(t *testing.T) { tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 tidb_enable_tiflash_read_for_write_stmt is always turned on. 
This variable has been deprecated and will be removed in the future releases")) tk.MustQuery("select @@tidb_enable_tiflash_read_for_write_stmt").Check(testkit.Rows("1")) - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t", L: "t"}) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t", L: "t"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} - tbl2, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t2", L: "t2"}) + tbl2, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t2", L: "t2"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. tbl2.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -1693,11 +1694,11 @@ func TestPointGetWithSelectLock(t *testing.T) { tk.MustExec("use test") tk.MustExec("create table t(a int, b int, primary key(a, b));") tk.MustExec("create table t1(c int unique, d int);") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t", L: "t"}) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t", L: "t"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} - tbl1, err := dom.InfoSchema().TableByName(context.Background(), model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t1", L: "t1"}) + tbl1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.CIStr{O: "test", L: "test"}, pmodel.CIStr{O: "t1", L: "t1"}) require.NoError(t, err) // Set the hacked TiFlash replica for explain tests. 
tbl1.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} @@ -1905,7 +1906,7 @@ func TestIsIPv4ToTiFlash(t *testing.T) { // Create virtual tiflash replica info. is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -1936,7 +1937,7 @@ func TestIsIPv6ToTiFlash(t *testing.T) { // Create virtual tiflash replica info. is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, @@ -1994,7 +1995,7 @@ func TestVirtualExprPushDown(t *testing.T) { tk.MustExec("set @@tidb_allow_mpp=1; set @@tidb_enforce_mpp=1") tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash'") is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, diff --git a/pkg/planner/core/logical_datasource.go b/pkg/planner/core/logical_datasource.go index 5433f098cee48..4f4660d5c6b30 100644 --- a/pkg/planner/core/logical_datasource.go +++ b/pkg/planner/core/logical_datasource.go @@ -22,8 +22,9 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" 
"github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -58,9 +59,9 @@ type DataSource struct { table table.Table TableInfo *model.TableInfo Columns []*model.ColumnInfo - DBName model.CIStr + DBName pmodel.CIStr - TableAsName *model.CIStr + TableAsName *pmodel.CIStr // IndexMergeHints are the hint for indexmerge. IndexMergeHints []h.HintedIndex // PushedDownConds are the conditions that will be pushed down to coprocessor. @@ -78,7 +79,7 @@ type DataSource struct { // The data source may be a partition, rather than a real table. PartitionDefIdx *int PhysicalTableID int64 - PartitionNames []model.CIStr + PartitionNames []pmodel.CIStr // handleCol represents the handle column for the datasource, either the // int primary key column or extra handle column. @@ -97,7 +98,7 @@ type DataSource struct { // PreferStoreType means the DataSource is enforced to which storage. PreferStoreType int // PreferPartitions store the map, the key represents store type, the value represents the partition name list. 
- PreferPartitions map[int][]model.CIStr + PreferPartitions map[int][]pmodel.CIStr SampleInfo *tablesampler.TableSampleInfo IS infoschema.InfoSchema // IsForUpdateRead should be true in either of the following situations diff --git a/pkg/planner/core/logical_index_scan.go b/pkg/planner/core/logical_index_scan.go index e400c6733221b..73acfc4e3c0cf 100644 --- a/pkg/planner/core/logical_index_scan.go +++ b/pkg/planner/core/logical_index_scan.go @@ -19,7 +19,7 @@ import ( "fmt" "github.com/pingcap/tidb/pkg/expression" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" diff --git a/pkg/planner/core/logical_plan_builder.go b/pkg/planner/core/logical_plan_builder.go index c6cf39519f89b..da8b65fc65756 100644 --- a/pkg/planner/core/logical_plan_builder.go +++ b/pkg/planner/core/logical_plan_builder.go @@ -34,11 +34,12 @@ import ( exprctx "github.com/pingcap/tidb/pkg/expression/context" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/opcode" "github.com/pingcap/tidb/pkg/parser/terror" @@ -981,7 +982,7 @@ func (b *PlanBuilder) buildSelection(ctx context.Context, p base.LogicalPlan, wh } // buildProjectionFieldNameFromColumns builds the field name, table name and database name when field expression is a column reference. 
-func (*PlanBuilder) buildProjectionFieldNameFromColumns(origField *ast.SelectField, colNameField *ast.ColumnNameExpr, name *types.FieldName) (colName, origColName, tblName, origTblName, dbName model.CIStr) { +func (*PlanBuilder) buildProjectionFieldNameFromColumns(origField *ast.SelectField, colNameField *ast.ColumnNameExpr, name *types.FieldName) (colName, origColName, tblName, origTblName, dbName pmodel.CIStr) { origTblName, origColName, dbName = name.OrigTblName, name.OrigColName, name.DBName if origField.AsName.L == "" { colName = colNameField.Name.Name @@ -997,7 +998,7 @@ func (*PlanBuilder) buildProjectionFieldNameFromColumns(origField *ast.SelectFie } // buildProjectionFieldNameFromExpressions builds the field name when field expression is a normal expression. -func (b *PlanBuilder) buildProjectionFieldNameFromExpressions(_ context.Context, field *ast.SelectField) (model.CIStr, error) { +func (b *PlanBuilder) buildProjectionFieldNameFromExpressions(_ context.Context, field *ast.SelectField) (pmodel.CIStr, error) { if agg, ok := field.Expr.(*ast.AggregateFuncExpr); ok && agg.F == ast.AggFuncFirstRow { // When the query is select t.a from t group by a; The Column Name should be a but not t.a; return agg.Args[0].(*ast.ColumnNameExpr).Name.Name, nil @@ -1010,16 +1011,16 @@ func (b *PlanBuilder) buildProjectionFieldNameFromExpressions(_ context.Context, if isFuncCall && funcCall.FnName.L == ast.NameConst { if v, err := evalAstExpr(b.ctx.GetExprCtx(), funcCall.Args[0]); err == nil { if s, err := v.ToString(); err == nil { - return model.NewCIStr(s), nil + return pmodel.NewCIStr(s), nil } } - return model.NewCIStr(""), plannererrors.ErrWrongArguments.GenWithStackByArgs("NAME_CONST") + return pmodel.NewCIStr(""), plannererrors.ErrWrongArguments.GenWithStackByArgs("NAME_CONST") } valueExpr, isValueExpr := innerExpr.(*driver.ValueExpr) // Non-literal: Output as inputed, except that comments need to be removed. 
if !isValueExpr { - return model.NewCIStr(parser.SpecFieldPattern.ReplaceAllStringFunc(field.Text(), parser.TrimComment)), nil + return pmodel.NewCIStr(parser.SpecFieldPattern.ReplaceAllStringFunc(field.Text(), parser.TrimComment)), nil } // Literal: Need special processing @@ -1035,21 +1036,21 @@ func (b *PlanBuilder) buildProjectionFieldNameFromExpressions(_ context.Context, fieldName := strings.TrimLeftFunc(projName, func(r rune) bool { return !unicode.IsOneOf(mysql.RangeGraph, r) }) - return model.NewCIStr(fieldName), nil + return pmodel.NewCIStr(fieldName), nil case types.KindNull: // See #4053, #3685 - return model.NewCIStr("NULL"), nil + return pmodel.NewCIStr("NULL"), nil case types.KindBinaryLiteral: // Don't rewrite BIT literal or HEX literals - return model.NewCIStr(field.Text()), nil + return pmodel.NewCIStr(field.Text()), nil case types.KindInt64: // See #9683 // TRUE or FALSE can be a int64 if mysql.HasIsBooleanFlag(valueExpr.Type.GetFlag()) { if i := valueExpr.GetValue().(int64); i == 0 { - return model.NewCIStr("FALSE"), nil + return pmodel.NewCIStr("FALSE"), nil } - return model.NewCIStr("TRUE"), nil + return pmodel.NewCIStr("TRUE"), nil } fallthrough @@ -1057,24 +1058,24 @@ func (b *PlanBuilder) buildProjectionFieldNameFromExpressions(_ context.Context, fieldName := field.Text() fieldName = strings.TrimLeft(fieldName, "\t\n +(") fieldName = strings.TrimRight(fieldName, "\t\n )") - return model.NewCIStr(fieldName), nil + return pmodel.NewCIStr(fieldName), nil } } func buildExpandFieldName(ctx expression.EvalContext, expr expression.Expression, name *types.FieldName, genName string) *types.FieldName { _, isCol := expr.(*expression.Column) - var origTblName, origColName, dbName, colName, tblName model.CIStr + var origTblName, origColName, dbName, colName, tblName pmodel.CIStr if genName != "" { // for case like: gid_, gpos_ - colName = model.NewCIStr(expr.StringWithCtx(ctx, errors.RedactLogDisable)) + colName = pmodel.NewCIStr(expr.StringWithCtx(ctx, 
errors.RedactLogDisable)) } else if isCol { // col ref to original col, while its nullability may be changed. origTblName, origColName, dbName = name.OrigTblName, name.OrigColName, name.DBName - colName = model.NewCIStr("ex_" + name.ColName.O) - tblName = model.NewCIStr("ex_" + name.TblName.O) + colName = pmodel.NewCIStr("ex_" + name.ColName.O) + tblName = pmodel.NewCIStr("ex_" + name.TblName.O) } else { // Other: complicated expression. - colName = model.NewCIStr("ex_" + expr.StringWithCtx(ctx, errors.RedactLogDisable)) + colName = pmodel.NewCIStr("ex_" + expr.StringWithCtx(ctx, errors.RedactLogDisable)) } newName := &types.FieldName{ TblName: tblName, @@ -1088,7 +1089,7 @@ func buildExpandFieldName(ctx expression.EvalContext, expr expression.Expression // buildProjectionField builds the field object according to SelectField in projection. func (b *PlanBuilder) buildProjectionField(ctx context.Context, p base.LogicalPlan, field *ast.SelectField, expr expression.Expression) (*expression.Column, *types.FieldName, error) { - var origTblName, tblName, origColName, colName, dbName model.CIStr + var origTblName, tblName, origColName, colName, dbName pmodel.CIStr innerNode := getInnerFromParenthesesAndUnaryPlus(field.Expr) col, isCol := expr.(*expression.Column) // Correlated column won't affect the final output names. So we can put it in any of the three logic block. 
@@ -2277,7 +2278,7 @@ func (a *havingWindowAndOrderbyExprResolver) Leave(n ast.Node) (node ast.Node, o a.selectFields = append(a.selectFields, &ast.SelectField{ Auxiliary: true, Expr: v, - AsName: model.NewCIStr(fmt.Sprintf("sel_agg_%d", len(a.selectFields))), + AsName: pmodel.NewCIStr(fmt.Sprintf("sel_agg_%d", len(a.selectFields))), }) case *ast.WindowFuncExpr: a.inWindowFunc = false @@ -2289,7 +2290,7 @@ func (a *havingWindowAndOrderbyExprResolver) Leave(n ast.Node) (node ast.Node, o a.selectFields = append(a.selectFields, &ast.SelectField{ Auxiliary: true, Expr: v, - AsName: model.NewCIStr(fmt.Sprintf("sel_window_%d", len(a.selectFields))), + AsName: pmodel.NewCIStr(fmt.Sprintf("sel_window_%d", len(a.selectFields))), }) } case *ast.WindowSpec: @@ -2818,7 +2819,7 @@ func (b *PlanBuilder) resolveCorrelatedAggregates(ctx context.Context, sel *ast. sel.Fields.Fields = append(sel.Fields.Fields, &ast.SelectField{ Auxiliary: true, Expr: aggFunc, - AsName: model.NewCIStr(fmt.Sprintf("sel_subq_agg_%d", len(sel.Fields.Fields))), + AsName: pmodel.NewCIStr(fmt.Sprintf("sel_subq_agg_%d", len(sel.Fields.Fields))), }) } return correlatedAggMap, nil @@ -3597,9 +3598,9 @@ func (b *PlanBuilder) addAliasName(ctx context.Context, selectStmt *ast.SelectSt oldName := field.AsName if dup, ok := dedupMap[field.AsName.L]; ok { if dup == 0 { - field.AsName = model.NewCIStr(fmt.Sprintf("Name_exp_%s", field.AsName.O)) + field.AsName = pmodel.NewCIStr(fmt.Sprintf("Name_exp_%s", field.AsName.O)) } else { - field.AsName = model.NewCIStr(fmt.Sprintf("Name_exp_%d_%s", dup, field.AsName.O)) + field.AsName = pmodel.NewCIStr(fmt.Sprintf("Name_exp_%d_%s", dup, field.AsName.O)) } dedupMap[oldName.L] = dup + 1 } else { @@ -4024,7 +4025,7 @@ func (ds *DataSource) AddExtraPhysTblIDColumn() *expression.Column { RetType: types.NewFieldType(mysql.TypeLonglong), UniqueID: ds.SCtx().GetSessionVars().AllocPlanColumnID(), ID: model.ExtraPhysTblID, - OrigName: fmt.Sprintf("%v.%v.%v", ds.DBName, 
ds.TableInfo.Name, model.ExtraPhysTblIdName), + OrigName: fmt.Sprintf("%v.%v.%v", ds.DBName, ds.TableInfo.Name, model.ExtraPhysTblIDName), } ds.Columns = append(ds.Columns, model.NewExtraPhysTblIDColInfo()) @@ -4033,8 +4034,8 @@ func (ds *DataSource) AddExtraPhysTblIDColumn() *expression.Column { ds.SetOutputNames(append(ds.OutputNames(), &types.FieldName{ DBName: ds.DBName, TblName: ds.TableInfo.Name, - ColName: model.ExtraPhysTblIdName, - OrigColName: model.ExtraPhysTblIdName, + ColName: model.ExtraPhysTblIDName, + OrigColName: model.ExtraPhysTblIDName, })) ds.TblCols = append(ds.TblCols, pidCol) return pidCol @@ -4168,7 +4169,7 @@ func getLatestVersionFromStatsTable(ctx sessionctx.Context, tblInfo *model.Table return version } -func (b *PlanBuilder) tryBuildCTE(ctx context.Context, tn *ast.TableName, asName *model.CIStr) (base.LogicalPlan, error) { +func (b *PlanBuilder) tryBuildCTE(ctx context.Context, tn *ast.TableName, asName *pmodel.CIStr) (base.LogicalPlan, error) { for i := len(b.outerCTEs) - 1; i >= 0; i-- { cte := b.outerCTEs[i] if cte.def.Name.L == tn.Name.L { @@ -4314,7 +4315,7 @@ func (b *PlanBuilder) buildDataSourceFromCTEMerge(ctx context.Context, cte *ast. outPutNames := p.OutputNames() for _, name := range outPutNames { name.TblName = cte.Name - name.DBName = model.NewCIStr(b.ctx.GetSessionVars().CurrentDB) + name.DBName = pmodel.NewCIStr(b.ctx.GetSessionVars().CurrentDB) } if len(cte.ColNameList) > 0 { @@ -4329,7 +4330,7 @@ func (b *PlanBuilder) buildDataSourceFromCTEMerge(ctx context.Context, cte *ast. 
return p, nil } -func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, asName *model.CIStr) (base.LogicalPlan, error) { +func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, asName *pmodel.CIStr) (base.LogicalPlan, error) { b.optFlag |= rule.FlagPredicateSimplification dbName := tn.Schema sessionVars := b.ctx.GetSessionVars() @@ -4340,7 +4341,7 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as if err != nil || p != nil { return p, err } - dbName = model.NewCIStr(sessionVars.CurrentDB) + dbName = pmodel.NewCIStr(sessionVars.CurrentDB) } is := b.is @@ -4582,7 +4583,7 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as Columns: make([]*model.ColumnInfo, 0, len(columns)), PartitionNames: tn.PartitionNames, TblCols: make([]*expression.Column, 0, len(columns)), - PreferPartitions: make(map[int][]model.CIStr), + PreferPartitions: make(map[int][]pmodel.CIStr), IS: b.is, IsForUpdateRead: b.isForUpdateRead, }.Init(b.ctx, b.getSelectOffset()) @@ -4744,7 +4745,7 @@ func (b *PlanBuilder) timeRangeForSummaryTable() util.QueryTimeRange { return util.QueryTimeRange{From: from, To: to} } -func (b *PlanBuilder) buildMemTable(_ context.Context, dbName model.CIStr, tableInfo *model.TableInfo) (base.LogicalPlan, error) { +func (b *PlanBuilder) buildMemTable(_ context.Context, dbName pmodel.CIStr, tableInfo *model.TableInfo) (base.LogicalPlan, error) { // We can use the `TableInfo.Columns` directly because the memory table has // a stable schema and there is no online DDL on the memory table. schema := expression.NewSchema(make([]*expression.Column, 0, len(tableInfo.Columns))...) @@ -4857,7 +4858,7 @@ func (b *PlanBuilder) buildMemTable(_ context.Context, dbName model.CIStr, table } // checkRecursiveView checks whether this view is recursively defined. 
-func (b *PlanBuilder) checkRecursiveView(dbName model.CIStr, tableName model.CIStr) (func(), error) { +func (b *PlanBuilder) checkRecursiveView(dbName pmodel.CIStr, tableName pmodel.CIStr) (func(), error) { viewFullName := dbName.L + "." + tableName.L if b.buildingViewStack == nil { b.buildingViewStack = set.NewStringSet() @@ -4879,7 +4880,7 @@ func (b *PlanBuilder) checkRecursiveView(dbName model.CIStr, tableName model.CIS // qbNameMap4View and viewHints are used for the view's hint. // qbNameMap4View maps the query block name to the view table lists. // viewHints group the view hints based on the view's query block name. -func (b *PlanBuilder) BuildDataSourceFromView(ctx context.Context, dbName model.CIStr, tableInfo *model.TableInfo, qbNameMap4View map[string][]ast.HintTable, viewHints map[string][]*ast.TableOptimizerHint) (base.LogicalPlan, error) { +func (b *PlanBuilder) BuildDataSourceFromView(ctx context.Context, dbName pmodel.CIStr, tableInfo *model.TableInfo, qbNameMap4View map[string][]ast.HintTable, viewHints map[string][]*ast.TableOptimizerHint) (base.LogicalPlan, error) { viewDepth := b.ctx.GetSessionVars().StmtCtx.ViewDepth b.ctx.GetSessionVars().StmtCtx.ViewDepth++ deferFunc, err := b.checkRecursiveView(dbName, tableInfo.Name) @@ -4982,7 +4983,7 @@ func (b *PlanBuilder) BuildDataSourceFromView(ctx context.Context, dbName model. !pm.RequestVerification(b.ctx.GetSessionVars().ActiveRoles, dbName.L, tableInfo.Name.L, "", mysql.SelectPriv) { return nil, plannererrors.ErrViewNoExplain } - if tableInfo.View.Security == model.SecurityDefiner { + if tableInfo.View.Security == pmodel.SecurityDefiner { if pm != nil { for _, v := range b.visitInfo { if !pm.RequestVerificationWithUser(v.db, v.table, v.column, v.privilege, tableInfo.View.Definer) { @@ -5005,7 +5006,7 @@ func (b *PlanBuilder) BuildDataSourceFromView(ctx context.Context, dbName model. 
return b.buildProjUponView(ctx, dbName, tableInfo, selectLogicalPlan) } -func (b *PlanBuilder) buildProjUponView(_ context.Context, dbName model.CIStr, tableInfo *model.TableInfo, selectLogicalPlan base.Plan) (base.LogicalPlan, error) { +func (b *PlanBuilder) buildProjUponView(_ context.Context, dbName pmodel.CIStr, tableInfo *model.TableInfo, selectLogicalPlan base.Plan) (base.LogicalPlan, error) { columnInfo := tableInfo.Cols() cols := selectLogicalPlan.Schema().Clone().Columns outputNamesOfUnderlyingSelect := selectLogicalPlan.OutputNames().Shallow() @@ -5403,7 +5404,7 @@ func CheckUpdateList(assignFlags []int, updt *Update, newTblID2Table map[int64]t tbl := newTblID2Table[content.TblID] flags := assignFlags[content.Start:content.End] var update, updatePK, updatePartitionCol bool - var partitionColumnNames []model.CIStr + var partitionColumnNames []pmodel.CIStr if pt, ok := tbl.(table.PartitionedTable); ok && pt != nil { partitionColumnNames = pt.GetPartitionColumnNames() } @@ -5761,7 +5762,7 @@ func (b *PlanBuilder) buildDelete(ctx context.Context, ds *ast.DeleteStmt) (base if !foundMatch { if tn.Schema.L == "" { - name = model.NewCIStr(b.ctx.GetSessionVars().CurrentDB).L + "." + tn.Name.L + name = pmodel.NewCIStr(b.ctx.GetSessionVars().CurrentDB).L + "." + tn.Name.L } else { name = tn.Schema.L + "." 
+ tn.Name.L } @@ -6605,7 +6606,7 @@ func mergeWindowSpec(spec, ref *ast.WindowSpec) error { spec.OrderBy = ref.OrderBy } spec.PartitionBy = ref.PartitionBy - spec.Ref = model.NewCIStr("") + spec.Ref = pmodel.NewCIStr("") return nil } @@ -6647,7 +6648,7 @@ func (u *updatableTableListResolver) Leave(inNode ast.Node) (ast.Node, bool) { if v.AsName.L != "" { newTableName := *s newTableName.Name = v.AsName - newTableName.Schema = model.NewCIStr("") + newTableName.Schema = pmodel.NewCIStr("") u.updatableTableList = append(u.updatableTableList, &newTableName) if tnW := u.resolveCtx.GetTableName(s); tnW != nil { u.resolveCtx.AddTableName(&resolve.TableNameW{ @@ -6727,7 +6728,7 @@ func (e *tableListExtractor) Enter(n ast.Node) (_ ast.Node, skipChildren bool) { if x.AsName.L != "" && e.asName { newTableName := *s newTableName.Name = x.AsName - newTableName.Schema = model.NewCIStr("") + newTableName.Schema = pmodel.NewCIStr("") e.tableNames = append(e.tableNames, &newTableName) if tnW := e.resolveCtx.GetTableName(s); tnW != nil { e.resolveCtx.AddTableName(&resolve.TableNameW{ @@ -6747,7 +6748,7 @@ func (e *tableListExtractor) Enter(n ast.Node) (_ ast.Node, skipChildren bool) { if x.AsName.L != "" && e.asName { newTableName := *innerList[0] newTableName.Name = x.AsName - newTableName.Schema = model.NewCIStr("") + newTableName.Schema = pmodel.NewCIStr("") innerTableName = &newTableName if tnW := e.resolveCtx.GetTableName(innerList[0]); tnW != nil { e.resolveCtx.AddTableName(&resolve.TableNameW{ @@ -6766,7 +6767,7 @@ func (e *tableListExtractor) Enter(n ast.Node) (_ ast.Node, skipChildren bool) { case *ast.ShowStmt: if x.DBName != "" { - e.tableNames = append(e.tableNames, &ast.TableName{Schema: model.NewCIStr(x.DBName)}) + e.tableNames = append(e.tableNames, &ast.TableName{Schema: pmodel.NewCIStr(x.DBName)}) } case *ast.CreateDatabaseStmt: e.tableNames = append(e.tableNames, &ast.TableName{Schema: x.Name}) @@ -6777,7 +6778,7 @@ func (e *tableListExtractor) Enter(n ast.Node) (_ 
ast.Node, skipChildren bool) { case *ast.FlashBackDatabaseStmt: e.tableNames = append(e.tableNames, &ast.TableName{Schema: x.DBName}) - e.tableNames = append(e.tableNames, &ast.TableName{Schema: model.NewCIStr(x.NewName)}) + e.tableNames = append(e.tableNames, &ast.TableName{Schema: pmodel.NewCIStr(x.NewName)}) case *ast.FlashBackToTimestampStmt: if x.DBName.L != "" { e.tableNames = append(e.tableNames, &ast.TableName{Schema: x.DBName}) @@ -6786,15 +6787,15 @@ func (e *tableListExtractor) Enter(n ast.Node) (_ ast.Node, skipChildren bool) { if newName := x.NewName; newName != "" { e.tableNames = append(e.tableNames, &ast.TableName{ Schema: x.Table.Schema, - Name: model.NewCIStr(newName)}) + Name: pmodel.NewCIStr(newName)}) } case *ast.GrantStmt: if x.ObjectType == ast.ObjectTypeTable || x.ObjectType == ast.ObjectTypeNone { if x.Level.Level == ast.GrantLevelDB || x.Level.Level == ast.GrantLevelTable { e.tableNames = append(e.tableNames, &ast.TableName{ - Schema: model.NewCIStr(x.Level.DBName), - Name: model.NewCIStr(x.Level.TableName), + Schema: pmodel.NewCIStr(x.Level.DBName), + Name: pmodel.NewCIStr(x.Level.TableName), }) } } @@ -6802,19 +6803,19 @@ func (e *tableListExtractor) Enter(n ast.Node) (_ ast.Node, skipChildren bool) { if x.ObjectType == ast.ObjectTypeTable || x.ObjectType == ast.ObjectTypeNone { if x.Level.Level == ast.GrantLevelDB || x.Level.Level == ast.GrantLevelTable { e.tableNames = append(e.tableNames, &ast.TableName{ - Schema: model.NewCIStr(x.Level.DBName), - Name: model.NewCIStr(x.Level.TableName), + Schema: pmodel.NewCIStr(x.Level.DBName), + Name: pmodel.NewCIStr(x.Level.TableName), }) } } case *ast.BRIEStmt: if x.Kind == ast.BRIEKindBackup || x.Kind == ast.BRIEKindRestore { for _, v := range x.Schemas { - e.tableNames = append(e.tableNames, &ast.TableName{Schema: model.NewCIStr(v)}) + e.tableNames = append(e.tableNames, &ast.TableName{Schema: pmodel.NewCIStr(v)}) } } case *ast.UseStmt: - e.tableNames = append(e.tableNames, 
&ast.TableName{Schema: model.NewCIStr(x.DBName)}) + e.tableNames = append(e.tableNames, &ast.TableName{Schema: pmodel.NewCIStr(x.DBName)}) case *ast.ExecuteStmt: if v, ok := x.PrepStmt.(*PlanCacheStmt); ok { e.tableNames = append(e.tableNames, innerExtract(v.PreparedAst.Stmt, v.ResolveCtx)...) @@ -7099,7 +7100,7 @@ func (b *PlanBuilder) adjustCTEPlanOutputName(p base.LogicalPlan, def *ast.Commo outPutNames := p.OutputNames() for _, name := range outPutNames { name.TblName = def.Name - name.DBName = model.NewCIStr(b.ctx.GetSessionVars().CurrentDB) + name.DBName = pmodel.NewCIStr(b.ctx.GetSessionVars().CurrentDB) } if len(def.ColNameList) > 0 { if len(def.ColNameList) != len(p.OutputNames()) { diff --git a/pkg/planner/core/logical_plans_test.go b/pkg/planner/core/logical_plans_test.go index 6c8aac29b6a3e..bb8a7a91cc866 100644 --- a/pkg/planner/core/logical_plans_test.go +++ b/pkg/planner/core/logical_plans_test.go @@ -26,10 +26,11 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -509,27 +510,27 @@ func TestTablePartition(t *testing.T) { definitions := []model.PartitionDefinition{ { ID: 41, - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), LessThan: []string{"16"}, }, { ID: 42, - Name: model.NewCIStr("p2"), + Name: pmodel.NewCIStr("p2"), LessThan: []string{"32"}, }, { ID: 43, - Name: model.NewCIStr("p3"), + Name: pmodel.NewCIStr("p3"), LessThan: []string{"64"}, }, { ID: 44, - Name: model.NewCIStr("p4"), + Name: pmodel.NewCIStr("p4"), LessThan: []string{"128"}, }, { ID: 45, - Name: 
model.NewCIStr("p5"), + Name: pmodel.NewCIStr("p5"), LessThan: []string{"maxvalue"}, }, } diff --git a/pkg/planner/core/logical_tikv_single_gather.go b/pkg/planner/core/logical_tikv_single_gather.go index 1fb05e9db4db7..29d8dd326c2ed 100644 --- a/pkg/planner/core/logical_tikv_single_gather.go +++ b/pkg/planner/core/logical_tikv_single_gather.go @@ -18,7 +18,7 @@ import ( "bytes" "github.com/pingcap/tidb/pkg/expression" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" "github.com/pingcap/tidb/pkg/util/plancodec" diff --git a/pkg/planner/core/memtable_infoschema_extractor.go b/pkg/planner/core/memtable_infoschema_extractor.go index 3478b065a348a..123b0e28e829c 100644 --- a/pkg/planner/core/memtable_infoschema_extractor.go +++ b/pkg/planner/core/memtable_infoschema_extractor.go @@ -26,7 +26,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -94,12 +95,12 @@ func (e *InfoSchemaBaseExtractor) GetBase() *InfoSchemaBaseExtractor { } // ListSchemas lists related schemas from predicate. 
-func (e *InfoSchemaBaseExtractor) ListSchemas(is infoschema.InfoSchema) []model.CIStr { +func (e *InfoSchemaBaseExtractor) ListSchemas(is infoschema.InfoSchema) []pmodel.CIStr { ec := e.extractableColumns schemas := e.getSchemaObjectNames(ec.schema) if len(schemas) == 0 { ret := is.AllSchemaNames() - slices.SortFunc(ret, func(a, b model.CIStr) int { + slices.SortFunc(ret, func(a, b pmodel.CIStr) int { return strings.Compare(a.L, b.L) }) return ret @@ -118,14 +119,14 @@ func (e *InfoSchemaBaseExtractor) ListSchemas(is infoschema.InfoSchema) []model. func (e *InfoSchemaBaseExtractor) ListSchemasAndTables( ctx context.Context, is infoschema.InfoSchema, -) ([]model.CIStr, []*model.TableInfo, error) { +) ([]pmodel.CIStr, []*model.TableInfo, error) { ec := e.extractableColumns schemas := e.ListSchemas(is) - var tableNames []model.CIStr + var tableNames []pmodel.CIStr if ec.table != "" { tableNames = e.getSchemaObjectNames(ec.table) } - var tableIDs []model.CIStr + var tableIDs []pmodel.CIStr if ec.tableID != "" { tableIDs = e.getSchemaObjectNames(ec.tableID) if len(tableIDs) > 0 { @@ -502,8 +503,8 @@ func NewInfoSchemaSequenceExtractor() *InfoSchemaSequenceExtractor { // findTablesByID finds tables by table IDs and append them to table map. func findTablesByID( is infoschema.InfoSchema, - tableIDs []model.CIStr, - tableNames []model.CIStr, + tableIDs []pmodel.CIStr, + tableNames []pmodel.CIStr, tables map[int64]*model.TableInfo, ) { tblNameMap := make(map[string]struct{}, len(tableNames)) @@ -532,8 +533,8 @@ func findTablesByID( // findTablesByPartID finds tables by partition IDs and append them to table map. 
func findTablesByPartID( is infoschema.InfoSchema, - partIDs []model.CIStr, - tableNames []model.CIStr, + partIDs []pmodel.CIStr, + tableNames []pmodel.CIStr, tables map[int64]*model.TableInfo, ) { tblNameMap := make(map[string]struct{}, len(tableNames)) @@ -559,11 +560,11 @@ func findTablesByPartID( func findTableAndSchemaByName( ctx context.Context, is infoschema.InfoSchema, - schemas []model.CIStr, - tableNames []model.CIStr, -) ([]model.CIStr, []*model.TableInfo, error) { + schemas []pmodel.CIStr, + tableNames []pmodel.CIStr, +) ([]pmodel.CIStr, []*model.TableInfo, error) { type schemaAndTable struct { - schema model.CIStr + schema pmodel.CIStr table *model.TableInfo } tableMap := make(map[int64]schemaAndTable, len(tableNames)) @@ -584,7 +585,7 @@ func findTableAndSchemaByName( tableMap[tblInfo.ID] = schemaAndTable{s, tblInfo} } } - schemaSlice := make([]model.CIStr, 0, len(tableMap)) + schemaSlice := make([]pmodel.CIStr, 0, len(tableMap)) tableSlice := make([]*model.TableInfo, 0, len(tableMap)) for _, st := range tableMap { schemaSlice = append(schemaSlice, st.schema) @@ -596,9 +597,9 @@ func findTableAndSchemaByName( func listTablesForEachSchema( ctx context.Context, is infoschema.InfoSchema, - schemas []model.CIStr, -) ([]model.CIStr, []*model.TableInfo, error) { - schemaSlice := make([]model.CIStr, 0, 8) + schemas []pmodel.CIStr, +) ([]pmodel.CIStr, []*model.TableInfo, error) { + schemaSlice := make([]pmodel.CIStr, 0, 8) tableSlice := make([]*model.TableInfo, 0, 8) for _, s := range schemas { tables, err := is.SchemaTableInfos(ctx, s) @@ -616,10 +617,10 @@ func listTablesForEachSchema( func findSchemasForTables( ctx context.Context, is infoschema.InfoSchema, - schemas []model.CIStr, + schemas []pmodel.CIStr, tableSlice []*model.TableInfo, -) ([]model.CIStr, []*model.TableInfo, error) { - schemaSlice := make([]model.CIStr, 0, len(tableSlice)) +) ([]pmodel.CIStr, []*model.TableInfo, error) { + schemaSlice := make([]pmodel.CIStr, 0, len(tableSlice)) for i, tbl 
:= range tableSlice { found := false for _, s := range schemas { @@ -650,7 +651,7 @@ func findSchemasForTables( return schemaSlice, remains, nil } -func parseIDs(ids []model.CIStr) []int64 { +func parseIDs(ids []pmodel.CIStr) []int64 { tableIDs := make([]int64, 0, len(ids)) for _, s := range ids { v, err := strconv.ParseInt(s.L, 10, 64) @@ -664,14 +665,14 @@ func parseIDs(ids []model.CIStr) []int64 { } // getSchemaObjectNames gets the schema object names specified in predicate of given column name. -func (e *InfoSchemaBaseExtractor) getSchemaObjectNames(colName string) []model.CIStr { +func (e *InfoSchemaBaseExtractor) getSchemaObjectNames(colName string) []pmodel.CIStr { predVals, ok := e.ColPredicates[colName] if ok && len(predVals) > 0 { - tableNames := make([]model.CIStr, 0, len(predVals)) + tableNames := make([]pmodel.CIStr, 0, len(predVals)) predVals.IterateWith(func(n string) { - tableNames = append(tableNames, model.NewCIStr(n)) + tableNames = append(tableNames, pmodel.NewCIStr(n)) }) - slices.SortFunc(tableNames, func(a, b model.CIStr) int { + slices.SortFunc(tableNames, func(a, b pmodel.CIStr) int { return strings.Compare(a.L, b.L) }) return tableNames @@ -688,12 +689,12 @@ type InfoSchemaTableNameExtractor struct { listTableFunc func( ctx context.Context, - s model.CIStr, + s pmodel.CIStr, is infoschema.InfoSchema, ) ([]*model.TableInfo, error) // table names from predicate, used by `ListTables` - tableNames []model.CIStr + tableNames []pmodel.CIStr // all predicates in lower case colsPredLower map[string]set.StringSet @@ -778,15 +779,15 @@ func (e *InfoSchemaTableNameExtractor) getSchemaNames() ( // Returned schemas is examined by like operators, so there is no need to call Filter again. 
func (e *InfoSchemaTableNameExtractor) ListSchemas( is infoschema.InfoSchema, -) []model.CIStr { +) []pmodel.CIStr { schemaFilters, schemaRegexp, hasPredicates := e.getSchemaNames() // Get all schema names - var schemas []model.CIStr + var schemas []pmodel.CIStr if hasPredicates { - schemas = make([]model.CIStr, 0, len(schemaFilters)) + schemas = make([]pmodel.CIStr, 0, len(schemaFilters)) schemaFilters.IterateWith(func(n string) { - s := model.CIStr{O: n, L: n} + s := pmodel.CIStr{O: n, L: n} if n, ok := is.SchemaByName(s); ok { schemas = append(schemas, n.Name) } @@ -794,12 +795,12 @@ func (e *InfoSchemaTableNameExtractor) ListSchemas( } else { schemas = is.AllSchemaNames() } - slices.SortFunc(schemas, func(a, b model.CIStr) int { + slices.SortFunc(schemas, func(a, b pmodel.CIStr) int { return strings.Compare(a.L, b.L) }) // Filter with regexp - filteredSchemas := make([]model.CIStr, 0, len(schemas)) + filteredSchemas := make([]pmodel.CIStr, 0, len(schemas)) ForLoop: for _, schema := range schemas { for _, re := range schemaRegexp { @@ -826,7 +827,7 @@ ForLoop: // If no table found in predicate, it return all tables. 
func (e *InfoSchemaTableNameExtractor) ListTables( ctx context.Context, - s model.CIStr, + s pmodel.CIStr, is infoschema.InfoSchema, ) ([]*model.TableInfo, error) { allTbls, err := e.listTableFunc(ctx, s, is) @@ -853,7 +854,7 @@ func (e *InfoSchemaTableNameExtractor) ListTables( func (e *InfoSchemaTableNameExtractor) listSchemaTablesByName( ctx context.Context, - s model.CIStr, + s pmodel.CIStr, is infoschema.InfoSchema, ) ([]*model.TableInfo, error) { tbls := make([]*model.TableInfo, 0, len(e.tableNames)) @@ -873,7 +874,7 @@ func (e *InfoSchemaTableNameExtractor) listSchemaTablesByName( func listSchemaTables( ctx context.Context, - s model.CIStr, + s pmodel.CIStr, is infoschema.InfoSchema, ) ([]*model.TableInfo, error) { return is.SchemaTableInfos(ctx, s) diff --git a/pkg/planner/core/mock.go b/pkg/planner/core/mock.go index 8cc2b8d33853a..b83328743fa03 100644 --- a/pkg/planner/core/mock.go +++ b/pkg/planner/core/mock.go @@ -19,8 +19,9 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/types" @@ -52,20 +53,20 @@ func MockSignedTable() *model.TableInfo { indices := []*model.IndexInfo{ { ID: 1, - Name: model.NewCIStr("c_d_e"), + Name: pmodel.NewCIStr("c_d_e"), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("c"), + Name: pmodel.NewCIStr("c"), Length: types.UnspecifiedLength, Offset: 2, }, { - Name: model.NewCIStr("d"), + Name: pmodel.NewCIStr("d"), Length: types.UnspecifiedLength, Offset: 3, }, { - Name: model.NewCIStr("e"), + Name: pmodel.NewCIStr("e"), Length: types.UnspecifiedLength, Offset: 4, }, @@ -75,10 +76,10 @@ func MockSignedTable() *model.TableInfo { }, { ID: 2, - Name: model.NewCIStr("x"), + Name: 
pmodel.NewCIStr("x"), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("e"), + Name: pmodel.NewCIStr("e"), Length: types.UnspecifiedLength, Offset: 4, }, @@ -88,10 +89,10 @@ func MockSignedTable() *model.TableInfo { }, { ID: 3, - Name: model.NewCIStr("f"), + Name: pmodel.NewCIStr("f"), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("f"), + Name: pmodel.NewCIStr("f"), Length: types.UnspecifiedLength, Offset: 8, }, @@ -101,10 +102,10 @@ func MockSignedTable() *model.TableInfo { }, { ID: 4, - Name: model.NewCIStr("g"), + Name: pmodel.NewCIStr("g"), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("g"), + Name: pmodel.NewCIStr("g"), Length: types.UnspecifiedLength, Offset: 9, }, @@ -113,15 +114,15 @@ func MockSignedTable() *model.TableInfo { }, { ID: 5, - Name: model.NewCIStr("f_g"), + Name: pmodel.NewCIStr("f_g"), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("f"), + Name: pmodel.NewCIStr("f"), Length: types.UnspecifiedLength, Offset: 8, }, { - Name: model.NewCIStr("g"), + Name: pmodel.NewCIStr("g"), Length: types.UnspecifiedLength, Offset: 9, }, @@ -131,20 +132,20 @@ func MockSignedTable() *model.TableInfo { }, { ID: 6, - Name: model.NewCIStr("c_d_e_str"), + Name: pmodel.NewCIStr("c_d_e_str"), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("c_str"), + Name: pmodel.NewCIStr("c_str"), Length: types.UnspecifiedLength, Offset: 5, }, { - Name: model.NewCIStr("d_str"), + Name: pmodel.NewCIStr("d_str"), Length: types.UnspecifiedLength, Offset: 6, }, { - Name: model.NewCIStr("e_str"), + Name: pmodel.NewCIStr("e_str"), Length: types.UnspecifiedLength, Offset: 7, }, @@ -153,20 +154,20 @@ func MockSignedTable() *model.TableInfo { }, { ID: 7, - Name: model.NewCIStr("e_d_c_str_prefix"), + Name: pmodel.NewCIStr("e_d_c_str_prefix"), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("e_str"), + Name: pmodel.NewCIStr("e_str"), Length: types.UnspecifiedLength, Offset: 7, }, { - Name: model.NewCIStr("d_str"), + Name: 
pmodel.NewCIStr("d_str"), Length: types.UnspecifiedLength, Offset: 6, }, { - Name: model.NewCIStr("c_str"), + Name: pmodel.NewCIStr("c_str"), Length: 10, Offset: 5, }, @@ -177,84 +178,84 @@ func MockSignedTable() *model.TableInfo { pkColumn := &model.ColumnInfo{ State: model.StatePublic, Offset: 0, - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), FieldType: newLongType(), ID: 1, } col0 := &model.ColumnInfo{ State: model.StatePublic, Offset: 1, - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), FieldType: newLongType(), ID: 2, } col1 := &model.ColumnInfo{ State: model.StatePublic, Offset: 2, - Name: model.NewCIStr("c"), + Name: pmodel.NewCIStr("c"), FieldType: newLongType(), ID: 3, } col2 := &model.ColumnInfo{ State: model.StatePublic, Offset: 3, - Name: model.NewCIStr("d"), + Name: pmodel.NewCIStr("d"), FieldType: newLongType(), ID: 4, } col3 := &model.ColumnInfo{ State: model.StatePublic, Offset: 4, - Name: model.NewCIStr("e"), + Name: pmodel.NewCIStr("e"), FieldType: newLongType(), ID: 5, } colStr1 := &model.ColumnInfo{ State: model.StatePublic, Offset: 5, - Name: model.NewCIStr("c_str"), + Name: pmodel.NewCIStr("c_str"), FieldType: newStringType(), ID: 6, } colStr2 := &model.ColumnInfo{ State: model.StatePublic, Offset: 6, - Name: model.NewCIStr("d_str"), + Name: pmodel.NewCIStr("d_str"), FieldType: newStringType(), ID: 7, } colStr3 := &model.ColumnInfo{ State: model.StatePublic, Offset: 7, - Name: model.NewCIStr("e_str"), + Name: pmodel.NewCIStr("e_str"), FieldType: newStringType(), ID: 8, } col4 := &model.ColumnInfo{ State: model.StatePublic, Offset: 8, - Name: model.NewCIStr("f"), + Name: pmodel.NewCIStr("f"), FieldType: newLongType(), ID: 9, } col5 := &model.ColumnInfo{ State: model.StatePublic, Offset: 9, - Name: model.NewCIStr("g"), + Name: pmodel.NewCIStr("g"), FieldType: newLongType(), ID: 10, } col6 := &model.ColumnInfo{ State: model.StatePublic, Offset: 10, - Name: model.NewCIStr("h"), + Name: pmodel.NewCIStr("h"), FieldType: 
newLongType(), ID: 11, } col7 := &model.ColumnInfo{ State: model.StatePublic, Offset: 11, - Name: model.NewCIStr("i_date"), + Name: pmodel.NewCIStr("i_date"), FieldType: newDateType(), ID: 12, } @@ -270,7 +271,7 @@ func MockSignedTable() *model.TableInfo { ID: 1, Columns: []*model.ColumnInfo{pkColumn, col0, col1, col2, col3, colStr1, colStr2, colStr3, col4, col5, col6, col7}, Indices: indices, - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), PKIsHandle: true, } return table @@ -283,10 +284,10 @@ func MockUnsignedTable() *model.TableInfo { // indeices: b, b_c indices := []*model.IndexInfo{ { - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), Length: types.UnspecifiedLength, Offset: 1, }, @@ -295,15 +296,15 @@ func MockUnsignedTable() *model.TableInfo { Unique: true, }, { - Name: model.NewCIStr("b_c"), + Name: pmodel.NewCIStr("b_c"), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), Length: types.UnspecifiedLength, Offset: 1, }, { - Name: model.NewCIStr("c"), + Name: pmodel.NewCIStr("c"), Length: types.UnspecifiedLength, Offset: 2, }, @@ -314,21 +315,21 @@ func MockUnsignedTable() *model.TableInfo { pkColumn := &model.ColumnInfo{ State: model.StatePublic, Offset: 0, - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), FieldType: newLongType(), ID: 1, } col0 := &model.ColumnInfo{ State: model.StatePublic, Offset: 1, - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), FieldType: newLongType(), ID: 2, } col1 := &model.ColumnInfo{ State: model.StatePublic, Offset: 2, - Name: model.NewCIStr("c"), + Name: pmodel.NewCIStr("c"), FieldType: newLongType(), ID: 3, } @@ -340,7 +341,7 @@ func MockUnsignedTable() *model.TableInfo { ID: 2, Columns: []*model.ColumnInfo{pkColumn, col0, col1}, Indices: indices, - Name: model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2"), PKIsHandle: true, } return table @@ -352,14 +353,14 
@@ func MockNoPKTable() *model.TableInfo { col0 := &model.ColumnInfo{ State: model.StatePublic, Offset: 1, - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), FieldType: newLongType(), ID: 2, } col1 := &model.ColumnInfo{ State: model.StatePublic, Offset: 2, - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), FieldType: newLongType(), ID: 3, } @@ -369,7 +370,7 @@ func MockNoPKTable() *model.TableInfo { table := &model.TableInfo{ ID: 3, Columns: []*model.ColumnInfo{col0, col1}, - Name: model.NewCIStr("t3"), + Name: pmodel.NewCIStr("t3"), PKIsHandle: true, } return table @@ -381,25 +382,25 @@ func MockView() *model.TableInfo { col0 := &model.ColumnInfo{ State: model.StatePublic, Offset: 0, - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), ID: 1, } col1 := &model.ColumnInfo{ State: model.StatePublic, Offset: 1, - Name: model.NewCIStr("c"), + Name: pmodel.NewCIStr("c"), ID: 2, } col2 := &model.ColumnInfo{ State: model.StatePublic, Offset: 2, - Name: model.NewCIStr("d"), + Name: pmodel.NewCIStr("d"), ID: 3, } - view := &model.ViewInfo{SelectStmt: selectStmt, Security: model.SecurityDefiner, Definer: &auth.UserIdentity{Username: "root", Hostname: ""}, Cols: []model.CIStr{col0.Name, col1.Name, col2.Name}} + view := &model.ViewInfo{SelectStmt: selectStmt, Security: pmodel.SecurityDefiner, Definer: &auth.UserIdentity{Username: "root", Hostname: ""}, Cols: []pmodel.CIStr{col0.Name, col1.Name, col2.Name}} table := &model.TableInfo{ ID: 4, - Name: model.NewCIStr("v"), + Name: pmodel.NewCIStr("v"), Columns: []*model.ColumnInfo{col0, col1, col2}, View: view, } @@ -435,12 +436,12 @@ func MockPartitionInfoSchema(definitions []model.PartitionDefinition) infoschema cols = append(cols, &model.ColumnInfo{ State: model.StatePublic, Offset: last.Offset + 1, - Name: model.NewCIStr("ptn"), + Name: pmodel.NewCIStr("ptn"), FieldType: newLongType(), ID: last.ID + 1, }) partition := &model.PartitionInfo{ - Type: model.PartitionTypeRange, + Type: 
pmodel.PartitionTypeRange, Expr: "ptn", Enable: true, Definitions: definitions, @@ -456,30 +457,30 @@ func MockRangePartitionTable() *model.TableInfo { definitions := []model.PartitionDefinition{ { ID: 41, - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), LessThan: []string{"16"}, }, { ID: 42, - Name: model.NewCIStr("p2"), + Name: pmodel.NewCIStr("p2"), LessThan: []string{"32"}, }, } tableInfo := MockSignedTable() tableInfo.ID = 5 - tableInfo.Name = model.NewCIStr("pt1") + tableInfo.Name = pmodel.NewCIStr("pt1") cols := make([]*model.ColumnInfo, 0, len(tableInfo.Columns)) cols = append(cols, tableInfo.Columns...) last := tableInfo.Columns[len(tableInfo.Columns)-1] cols = append(cols, &model.ColumnInfo{ State: model.StatePublic, Offset: last.Offset + 1, - Name: model.NewCIStr("ptn"), + Name: pmodel.NewCIStr("ptn"), FieldType: newLongType(), ID: last.ID + 1, }) partition := &model.PartitionInfo{ - Type: model.PartitionTypeRange, + Type: pmodel.PartitionTypeRange, Expr: "ptn", Enable: true, Definitions: definitions, @@ -494,28 +495,28 @@ func MockHashPartitionTable() *model.TableInfo { definitions := []model.PartitionDefinition{ { ID: 51, - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), }, { ID: 52, - Name: model.NewCIStr("p2"), + Name: pmodel.NewCIStr("p2"), }, } tableInfo := MockSignedTable() tableInfo.ID = 6 - tableInfo.Name = model.NewCIStr("pt2") + tableInfo.Name = pmodel.NewCIStr("pt2") cols := make([]*model.ColumnInfo, 0, len(tableInfo.Columns)) cols = append(cols, tableInfo.Columns...) 
last := tableInfo.Columns[len(tableInfo.Columns)-1] cols = append(cols, &model.ColumnInfo{ State: model.StatePublic, Offset: last.Offset + 1, - Name: model.NewCIStr("ptn"), + Name: pmodel.NewCIStr("ptn"), FieldType: newLongType(), ID: last.ID + 1, }) partition := &model.PartitionInfo{ - Type: model.PartitionTypeHash, + Type: pmodel.PartitionTypeHash, Expr: "ptn", Enable: true, Definitions: definitions, @@ -531,7 +532,7 @@ func MockListPartitionTable() *model.TableInfo { definitions := []model.PartitionDefinition{ { ID: 61, - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), InValues: [][]string{ { "1", @@ -540,7 +541,7 @@ func MockListPartitionTable() *model.TableInfo { }, { ID: 62, - Name: model.NewCIStr("p2"), + Name: pmodel.NewCIStr("p2"), InValues: [][]string{ { "2", @@ -550,19 +551,19 @@ func MockListPartitionTable() *model.TableInfo { } tableInfo := MockSignedTable() tableInfo.ID = 7 - tableInfo.Name = model.NewCIStr("pt3") + tableInfo.Name = pmodel.NewCIStr("pt3") cols := make([]*model.ColumnInfo, 0, len(tableInfo.Columns)) cols = append(cols, tableInfo.Columns...) 
last := tableInfo.Columns[len(tableInfo.Columns)-1] cols = append(cols, &model.ColumnInfo{ State: model.StatePublic, Offset: last.Offset + 1, - Name: model.NewCIStr("ptn"), + Name: pmodel.NewCIStr("ptn"), FieldType: newLongType(), ID: last.ID + 1, }) partition := &model.PartitionInfo{ - Type: model.PartitionTypeList, + Type: pmodel.PartitionTypeList, Expr: "ptn", Enable: true, Definitions: definitions, @@ -580,10 +581,10 @@ func MockStateNoneColumnTable() *model.TableInfo { // indeices: b indices := []*model.IndexInfo{ { - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), Columns: []*model.IndexColumn{ { - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), Length: types.UnspecifiedLength, Offset: 1, }, @@ -595,21 +596,21 @@ func MockStateNoneColumnTable() *model.TableInfo { pkColumn := &model.ColumnInfo{ State: model.StatePublic, Offset: 0, - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), FieldType: newLongType(), ID: 1, } col0 := &model.ColumnInfo{ State: model.StatePublic, Offset: 1, - Name: model.NewCIStr("b"), + Name: pmodel.NewCIStr("b"), FieldType: newLongType(), ID: 2, } col1 := &model.ColumnInfo{ State: model.StateNone, Offset: 2, - Name: model.NewCIStr("c"), + Name: pmodel.NewCIStr("c"), FieldType: newLongType(), ID: 3, } @@ -620,7 +621,7 @@ func MockStateNoneColumnTable() *model.TableInfo { ID: 8, Columns: []*model.ColumnInfo{pkColumn, col0, col1}, Indices: indices, - Name: model.NewCIStr("T_StateNoneColumn"), + Name: pmodel.NewCIStr("T_StateNoneColumn"), PKIsHandle: true, } return table diff --git a/pkg/planner/core/operator/logicalop/BUILD.bazel b/pkg/planner/core/operator/logicalop/BUILD.bazel index 29089a6e6e7d9..684805239568e 100644 --- a/pkg/planner/core/operator/logicalop/BUILD.bazel +++ b/pkg/planner/core/operator/logicalop/BUILD.bazel @@ -35,6 +35,7 @@ go_library( "//pkg/expression/aggregation", "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/auth", "//pkg/parser/model", diff --git 
a/pkg/planner/core/operator/logicalop/logical_mem_table.go b/pkg/planner/core/operator/logicalop/logical_mem_table.go index 3874e19030a33..0648b114a0b4c 100644 --- a/pkg/planner/core/operator/logicalop/logical_mem_table.go +++ b/pkg/planner/core/operator/logicalop/logical_mem_table.go @@ -18,7 +18,8 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util" @@ -42,7 +43,7 @@ type LogicalMemTable struct { LogicalSchemaProducer Extractor base.MemTablePredicateExtractor - DBName model.CIStr + DBName pmodel.CIStr TableInfo *model.TableInfo Columns []*model.ColumnInfo // QueryTimeRange is used to specify the time range for metrics summary tables and inspection tables diff --git a/pkg/planner/core/operator/logicalop/logical_union_scan.go b/pkg/planner/core/operator/logicalop/logical_union_scan.go index 3fbd6deeb950a..3eb493e7e6867 100644 --- a/pkg/planner/core/operator/logicalop/logical_union_scan.go +++ b/pkg/planner/core/operator/logicalop/logical_union_scan.go @@ -19,7 +19,7 @@ import ( "fmt" "github.com/pingcap/tidb/pkg/expression" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util" diff --git a/pkg/planner/core/optimizer.go b/pkg/planner/core/optimizer.go index 7fbab273e52a3..ff914a94a6a3e 100644 --- a/pkg/planner/core/optimizer.go +++ b/pkg/planner/core/optimizer.go @@ -33,9 +33,10 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/lock" tablelock "github.com/pingcap/tidb/pkg/lock/context" + "github.com/pingcap/tidb/pkg/meta/model" 
"github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" @@ -209,7 +210,7 @@ func VisitInfo4PrivCheck(ctx context.Context, is infoschema.InfoSchema, node ast func needCheckTmpTablePriv(ctx context.Context, is infoschema.InfoSchema, v visitInfo) bool { if v.db != "" && v.table != "" { // Other statements on local temporary tables except `CREATE` do not check any privileges. - tb, err := is.TableByName(ctx, model.NewCIStr(v.db), model.NewCIStr(v.table)) + tb, err := is.TableByName(ctx, pmodel.NewCIStr(v.db), pmodel.NewCIStr(v.table)) // If the table doesn't exist, we do not report errors to avoid leaking the existence of the table. if err == nil && tb.Meta().TempTableType == model.TempTableLocal { return false diff --git a/pkg/planner/core/pb_to_plan.go b/pkg/planner/core/pb_to_plan.go index 94002d9228618..7d34541676fc7 100644 --- a/pkg/planner/core/pb_to_plan.go +++ b/pkg/planner/core/pb_to_plan.go @@ -23,8 +23,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/planner/property" diff --git a/pkg/planner/core/physical_plan_test.go b/pkg/planner/core/physical_plan_test.go index c28a465567e87..ac6fa59be8025 100644 --- a/pkg/planner/core/physical_plan_test.go +++ b/pkg/planner/core/physical_plan_test.go @@ -25,9 +25,9 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + 
"github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/planner" "github.com/pingcap/tidb/pkg/planner/core" diff --git a/pkg/planner/core/physical_plans.go b/pkg/planner/core/physical_plans.go index cbf7d5bb8dc88..ab26132ad6676 100644 --- a/pkg/planner/core/physical_plans.go +++ b/pkg/planner/core/physical_plans.go @@ -24,8 +24,9 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -157,7 +158,7 @@ type PhysicalTableReader struct { // PhysPlanPartInfo indicates partition helper info in physical plan. type PhysPlanPartInfo struct { PruningConds []expression.Expression - PartitionNames []model.CIStr + PartitionNames []pmodel.CIStr Columns []*expression.Column ColumnNames types.NameSlice } @@ -724,9 +725,9 @@ type PhysicalIndexScan struct { IdxColLens []int Ranges []*ranger.Range Columns []*model.ColumnInfo `plan-cache-clone:"shallow"` - DBName model.CIStr `plan-cache-clone:"shallow"` + DBName pmodel.CIStr `plan-cache-clone:"shallow"` - TableAsName *model.CIStr `plan-cache-clone:"shallow"` + TableAsName *pmodel.CIStr `plan-cache-clone:"shallow"` // dataSourceSchema is the original schema of DataSource. The schema of index scan in KV and index reader in TiDB // will be different. The schema of index scan will decode all columns of index but the TiDB only need some of them. 
@@ -865,7 +866,7 @@ func AddExtraPhysTblIDColumn(sctx base.PlanContext, columns []*model.ColumnInfo, type PhysicalMemTable struct { physicalSchemaProducer - DBName model.CIStr + DBName pmodel.CIStr Table *model.TableInfo Columns []*model.ColumnInfo Extractor base.MemTablePredicateExtractor @@ -897,10 +898,10 @@ type PhysicalTableScan struct { Table *model.TableInfo `plan-cache-clone:"shallow"` Columns []*model.ColumnInfo `plan-cache-clone:"shallow"` - DBName model.CIStr `plan-cache-clone:"shallow"` + DBName pmodel.CIStr `plan-cache-clone:"shallow"` Ranges []*ranger.Range - TableAsName *model.CIStr `plan-cache-clone:"shallow"` + TableAsName *pmodel.CIStr `plan-cache-clone:"shallow"` physicalTableID int64 @@ -2680,8 +2681,8 @@ type PhysicalCTE struct { SeedPlan base.PhysicalPlan RecurPlan base.PhysicalPlan CTE *logicalop.CTEClass - cteAsName model.CIStr - cteName model.CIStr + cteAsName pmodel.CIStr + cteName pmodel.CIStr readerReceiver *PhysicalExchangeReceiver storageSender *PhysicalExchangeSender diff --git a/pkg/planner/core/plan_cache_test.go b/pkg/planner/core/plan_cache_test.go index d9aaa70cb97da..e30edcdfea9e5 100644 --- a/pkg/planner/core/plan_cache_test.go +++ b/pkg/planner/core/plan_cache_test.go @@ -26,9 +26,9 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/resolve" diff --git a/pkg/planner/core/plan_cacheable_checker.go b/pkg/planner/core/plan_cacheable_checker.go index ee37d86ad6fa1..112dab3200177 100644 --- a/pkg/planner/core/plan_cacheable_checker.go +++ b/pkg/planner/core/plan_cacheable_checker.go @@ -25,8 +25,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" 
"github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" core_metrics "github.com/pingcap/tidb/pkg/planner/core/metrics" diff --git a/pkg/planner/core/plan_cost_ver1_test.go b/pkg/planner/core/plan_cost_ver1_test.go index 2555cbd92e511..5320c0e618091 100644 --- a/pkg/planner/core/plan_cost_ver1_test.go +++ b/pkg/planner/core/plan_cost_ver1_test.go @@ -21,7 +21,8 @@ import ( "testing" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" ) @@ -108,7 +109,7 @@ func TestScanOnSmallTable(t *testing.T) { // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - db, exists := is.SchemaByName(model.NewCIStr("test")) + db, exists := is.SchemaByName(pmodel.NewCIStr("test")) require.True(t, exists) tblInfos, err := is.SchemaTableInfos(context.Background(), db.Name) require.NoError(t, err) diff --git a/pkg/planner/core/plan_cost_ver2.go b/pkg/planner/core/plan_cost_ver2.go index 89a307c4eba58..241f9ac1b7423 100644 --- a/pkg/planner/core/plan_cost_ver2.go +++ b/pkg/planner/core/plan_cost_ver2.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/property" diff --git a/pkg/planner/core/plan_test.go b/pkg/planner/core/plan_test.go index 2e855457bb8d6..97856482388a2 100644 --- a/pkg/planner/core/plan_test.go +++ 
b/pkg/planner/core/plan_test.go @@ -26,8 +26,9 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -370,7 +371,7 @@ func TestExplainFormatHintRecoverableForTiFlashReplica(t *testing.T) { tk.MustExec("create table t(a int)") // Create virtual `tiflash` replica info. is := dom.InfoSchema() - tblInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblInfo.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, diff --git a/pkg/planner/core/plan_to_pb.go b/pkg/planner/core/plan_to_pb.go index 03ef91daac7f3..24401d9af3a7d 100644 --- a/pkg/planner/core/plan_to_pb.go +++ b/pkg/planner/core/plan_to_pb.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" util2 "github.com/pingcap/tidb/pkg/planner/util" diff --git a/pkg/planner/core/plan_to_pb_test.go b/pkg/planner/core/plan_to_pb_test.go index 9f6219f73ab1b..dc055266c2231 100644 --- a/pkg/planner/core/plan_to_pb_test.go +++ b/pkg/planner/core/plan_to_pb_test.go @@ -17,7 +17,7 @@ package core import ( "testing" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" 
"github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" diff --git a/pkg/planner/core/planbuilder.go b/pkg/planner/core/planbuilder.go index 35ac91a5e6768..e79aaeac815d1 100644 --- a/pkg/planner/core/planbuilder.go +++ b/pkg/planner/core/planbuilder.go @@ -32,11 +32,12 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/opcode" "github.com/pingcap/tidb/pkg/parser/terror" @@ -1070,7 +1071,7 @@ func (*PlanBuilder) detectSelectWindow(sel *ast.SelectStmt) bool { return false } -func getPathByIndexName(paths []*util.AccessPath, idxName model.CIStr, tblInfo *model.TableInfo) *util.AccessPath { +func getPathByIndexName(paths []*util.AccessPath, idxName pmodel.CIStr, tblInfo *model.TableInfo) *util.AccessPath { var primaryIdxPath, indexPrefixPath *util.AccessPath prefixMatches := 0 for _, path := range paths { @@ -1100,7 +1101,7 @@ func getPathByIndexName(paths []*util.AccessPath, idxName model.CIStr, tblInfo * return nil } -func isPrimaryIndex(indexName model.CIStr) bool { +func isPrimaryIndex(indexName pmodel.CIStr) bool { return indexName.L == "primary" } @@ -1157,7 +1158,7 @@ func getLatestIndexInfo(ctx base.PlanContext, id int64, startVer int64) (map[int return latestIndexes, true, nil } -func getPossibleAccessPaths(ctx base.PlanContext, tableHints *hint.PlanHints, indexHints []*ast.IndexHint, tbl table.Table, dbName, tblName model.CIStr, check bool, hasFlagPartitionProcessor bool) ([]*util.AccessPath, error) { +func getPossibleAccessPaths(ctx base.PlanContext, tableHints *hint.PlanHints, indexHints 
[]*ast.IndexHint, tbl table.Table, dbName, tblName pmodel.CIStr, check bool, hasFlagPartitionProcessor bool) ([]*util.AccessPath, error) { tblInfo := tbl.Meta() publicPaths := make([]*util.AccessPath, 0, len(tblInfo.Indices)+2) tp := kv.TiKV @@ -1349,7 +1350,7 @@ func getPossibleAccessPaths(ctx base.PlanContext, tableHints *hint.PlanHints, in return available, nil } -func filterPathByIsolationRead(ctx base.PlanContext, paths []*util.AccessPath, tblName model.CIStr, dbName model.CIStr) ([]*util.AccessPath, error) { +func filterPathByIsolationRead(ctx base.PlanContext, paths []*util.AccessPath, tblName pmodel.CIStr, dbName pmodel.CIStr) ([]*util.AccessPath, error) { // TODO: filter paths with isolation read locations. if util2.IsSysDB(dbName.L) { return paths, nil @@ -1599,7 +1600,7 @@ func (b *PlanBuilder) buildAdmin(ctx context.Context, as *ast.AdminStmt) (base.P return ret, nil } -func (b *PlanBuilder) buildPhysicalIndexLookUpReader(_ context.Context, dbName model.CIStr, tbl table.Table, idx *model.IndexInfo) (base.Plan, error) { +func (b *PlanBuilder) buildPhysicalIndexLookUpReader(_ context.Context, dbName pmodel.CIStr, tbl table.Table, idx *model.IndexInfo) (base.Plan, error) { tblInfo := tbl.Meta() physicalID, isPartition := getPhysicalID(tbl, idx.Global) fullExprCols, _, err := expression.TableInfo2SchemaAndNames(b.ctx.GetExprCtx(), dbName, tblInfo) @@ -1759,7 +1760,7 @@ func tryGetPkHandleCol(tblInfo *model.TableInfo, allColSchema *expression.Schema return nil, nil, false } -func (b *PlanBuilder) buildPhysicalIndexLookUpReaders(ctx context.Context, dbName model.CIStr, tbl table.Table, indices []table.Index) ([]base.Plan, []*model.IndexInfo, error) { +func (b *PlanBuilder) buildPhysicalIndexLookUpReaders(ctx context.Context, dbName pmodel.CIStr, tbl table.Table, indices []table.Index) ([]base.Plan, []*model.IndexInfo, error) { tblInfo := tbl.Meta() // get index information indexInfos := make([]*model.IndexInfo, 0, len(tblInfo.Indices)) @@ -1901,7 +1902,7 @@ 
func (b *PlanBuilder) buildCheckIndexSchema(tn *ast.TableName, indexName string) ID: col.ID}) } names = append(names, &types.FieldName{ - ColName: model.NewCIStr("extra_handle"), + ColName: pmodel.NewCIStr("extra_handle"), TblName: tn.Name, DBName: tn.Schema, }) @@ -1990,7 +1991,7 @@ func BuildHandleColsForAnalyze(ctx base.PlanContext, tblInfo *model.TableInfo, a } // GetPhysicalIDsAndPartitionNames returns physical IDs and names of these partitions. -func GetPhysicalIDsAndPartitionNames(tblInfo *model.TableInfo, partitionNames []model.CIStr) ([]int64, []string, error) { +func GetPhysicalIDsAndPartitionNames(tblInfo *model.TableInfo, partitionNames []pmodel.CIStr) ([]int64, []string, error) { pi := tblInfo.GetPartitionInfo() if pi == nil { if len(partitionNames) != 0 { @@ -2126,7 +2127,7 @@ func (b *PlanBuilder) getPredicateColumns(tbl *resolve.TableNameW, cols *calcOnc return cols.data, nil } -func getAnalyzeColumnList(specifiedColumns []model.CIStr, tbl *resolve.TableNameW) ([]*model.ColumnInfo, error) { +func getAnalyzeColumnList(specifiedColumns []pmodel.CIStr, tbl *resolve.TableNameW) ([]*model.ColumnInfo, error) { colList := make([]*model.ColumnInfo, 0, len(specifiedColumns)) for _, colName := range specifiedColumns { colInfo := model.FindColumnInfo(tbl.TableInfo.Columns, colName.L) @@ -2143,23 +2144,23 @@ func getAnalyzeColumnList(specifiedColumns []model.CIStr, tbl *resolve.TableName // be record in mysql.analyze_options(only for the case of analyze table t columns c1, .., cn). 
func (b *PlanBuilder) getFullAnalyzeColumnsInfo( tbl *resolve.TableNameW, - columnChoice model.ColumnChoice, + columnChoice pmodel.ColumnChoice, specifiedCols []*model.ColumnInfo, predicateCols, mustAnalyzedCols *calcOnceMap, mustAllColumns bool, warning bool, ) ([]*model.ColumnInfo, []*model.ColumnInfo, error) { - if mustAllColumns && warning && (columnChoice == model.PredicateColumns || columnChoice == model.ColumnList) { + if mustAllColumns && warning && (columnChoice == pmodel.PredicateColumns || columnChoice == pmodel.ColumnList) { b.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.NewNoStackErrorf("Table %s.%s has version 1 statistics so all the columns must be analyzed to overwrite the current statistics", tbl.Schema.L, tbl.Name.L)) } switch columnChoice { - case model.DefaultChoice: + case pmodel.DefaultChoice: columnOptions := variable.AnalyzeColumnOptions.Load() switch columnOptions { - case model.AllColumns.String(): + case pmodel.AllColumns.String(): return tbl.TableInfo.Columns, nil, nil - case model.PredicateColumns.String(): + case pmodel.PredicateColumns.String(): columns, err := b.getColumnsBasedOnPredicateColumns( tbl, predicateCols, @@ -2175,9 +2176,9 @@ func (b *PlanBuilder) getFullAnalyzeColumnsInfo( logutil.BgLogger().Warn("Unknown default column choice, analyze all columns", zap.String("choice", columnOptions)) return tbl.TableInfo.Columns, nil, nil } - case model.AllColumns: + case pmodel.AllColumns: return tbl.TableInfo.Columns, nil, nil - case model.PredicateColumns: + case pmodel.PredicateColumns: columns, err := b.getColumnsBasedOnPredicateColumns( tbl, predicateCols, @@ -2188,7 +2189,7 @@ func (b *PlanBuilder) getFullAnalyzeColumnsInfo( return nil, nil, err } return columns, nil, nil - case model.ColumnList: + case pmodel.ColumnList: colSet := getColumnSetFromSpecifiedCols(specifiedCols) mustAnalyzed, err := b.getMustAnalyzedColumns(tbl, mustAnalyzedCols) if err != nil { @@ -2469,7 +2470,7 @@ func (b *PlanBuilder) genV2AnalyzeOptions( 
isAnalyzeTable bool, physicalIDs []int64, astOpts map[ast.AnalyzeOptionType]uint64, - astColChoice model.ColumnChoice, + astColChoice pmodel.ColumnChoice, astColList []*model.ColumnInfo, predicateCols, mustAnalyzedCols *calcOnceMap, mustAllColumns bool, @@ -2485,9 +2486,9 @@ func (b *PlanBuilder) genV2AnalyzeOptions( // Because the plan is generated for each partition individually, each partition uses its own statistics; // In dynamic mode, there is no partitioning, and a global plan is generated for the whole table, so a global statistic is needed; dynamicPrune := variable.PartitionPruneMode(b.ctx.GetSessionVars().PartitionPruneMode.Load()) == variable.Dynamic - if !isAnalyzeTable && dynamicPrune && (len(astOpts) > 0 || astColChoice != model.DefaultChoice) { + if !isAnalyzeTable && dynamicPrune && (len(astOpts) > 0 || astColChoice != pmodel.DefaultChoice) { astOpts = make(map[ast.AnalyzeOptionType]uint64, 0) - astColChoice = model.DefaultChoice + astColChoice = pmodel.DefaultChoice astColList = make([]*model.ColumnInfo, 0) b.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.NewNoStackError("Ignore columns and options when analyze partition in dynamic mode")) } @@ -2572,16 +2573,16 @@ func (b *PlanBuilder) genV2AnalyzeOptions( } // getSavedAnalyzeOpts gets the analyze options which are saved in mysql.analyze_options. 
-func (b *PlanBuilder) getSavedAnalyzeOpts(physicalID int64, tblInfo *model.TableInfo) (map[ast.AnalyzeOptionType]uint64, model.ColumnChoice, []*model.ColumnInfo, error) { +func (b *PlanBuilder) getSavedAnalyzeOpts(physicalID int64, tblInfo *model.TableInfo) (map[ast.AnalyzeOptionType]uint64, pmodel.ColumnChoice, []*model.ColumnInfo, error) { analyzeOptions := map[ast.AnalyzeOptionType]uint64{} exec := b.ctx.GetRestrictedSQLExecutor() ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) rows, _, err := exec.ExecRestrictedSQL(ctx, nil, "select sample_num,sample_rate,buckets,topn,column_choice,column_ids from mysql.analyze_options where table_id = %?", physicalID) if err != nil { - return nil, model.DefaultChoice, nil, err + return nil, pmodel.DefaultChoice, nil, err } if len(rows) <= 0 { - return analyzeOptions, model.DefaultChoice, nil, nil + return analyzeOptions, pmodel.DefaultChoice, nil, nil } row := rows[0] @@ -2604,7 +2605,7 @@ func (b *PlanBuilder) getSavedAnalyzeOpts(physicalID int64, tblInfo *model.Table colType := row.GetEnum(4) switch colType.Name { case "ALL": - return analyzeOptions, model.AllColumns, tblInfo.Columns, nil + return analyzeOptions, pmodel.AllColumns, tblInfo.Columns, nil case "LIST": colIDStrs := strings.Split(row.GetString(5), ",") colList := make([]*model.ColumnInfo, 0, len(colIDStrs)) @@ -2615,11 +2616,11 @@ func (b *PlanBuilder) getSavedAnalyzeOpts(physicalID int64, tblInfo *model.Table colList = append(colList, colInfo) } } - return analyzeOptions, model.ColumnList, colList, nil + return analyzeOptions, pmodel.ColumnList, colList, nil case "PREDICATE": - return analyzeOptions, model.PredicateColumns, nil, nil + return analyzeOptions, pmodel.PredicateColumns, nil, nil default: - return analyzeOptions, model.DefaultChoice, nil, nil + return analyzeOptions, pmodel.DefaultChoice, nil, nil } } @@ -2637,8 +2638,8 @@ func mergeAnalyzeOptions(stmtOpts map[ast.AnalyzeOptionType]uint64, savedOpts ma // pickColumnList 
picks the column list to be analyzed. // If the column list is specified in the statement, we will use it. -func pickColumnList(astColChoice model.ColumnChoice, astColList []*model.ColumnInfo, tblSavedColChoice model.ColumnChoice, tblSavedColList []*model.ColumnInfo) (model.ColumnChoice, []*model.ColumnInfo) { - if astColChoice != model.DefaultChoice { +func pickColumnList(astColChoice pmodel.ColumnChoice, astColList []*model.ColumnInfo, tblSavedColChoice pmodel.ColumnChoice, tblSavedColList []*model.ColumnInfo) (pmodel.ColumnChoice, []*model.ColumnInfo) { + if astColChoice != pmodel.DefaultChoice { return astColChoice, astColList } return tblSavedColChoice, tblSavedColList @@ -2675,10 +2676,10 @@ func (b *PlanBuilder) buildAnalyzeTable(as *ast.AnalyzeTableStmt, opts map[ast.A } // Version 1 analyze. - if as.ColumnChoice == model.PredicateColumns { + if as.ColumnChoice == pmodel.PredicateColumns { return nil, errors.Errorf("Only the version 2 of analyze supports analyzing predicate columns") } - if as.ColumnChoice == model.ColumnList { + if as.ColumnChoice == pmodel.ColumnList { return nil, errors.Errorf("Only the version 2 of analyze supports analyzing the specified columns") } for _, idx := range idxInfo { @@ -3247,7 +3248,7 @@ func buildColumnWithName(tableName, name string, tp byte, size int) (*expression fieldType.SetFlag(flag) return &expression.Column{ RetType: fieldType, - }, &types.FieldName{DBName: util2.InformationSchemaName, TblName: model.NewCIStr(tableName), ColName: model.NewCIStr(name)} + }, &types.FieldName{DBName: util2.InformationSchemaName, TblName: pmodel.NewCIStr(tableName), ColName: pmodel.NewCIStr(name)} } type columnsWithNames struct { @@ -4304,7 +4305,7 @@ func (b *PlanBuilder) buildLoadData(ctx context.Context, ld *ast.LoadDataStmt) ( db := b.ctx.GetSessionVars().CurrentDB return nil, infoschema.ErrTableNotExists.FastGenByArgs(db, tableInfo.Name.O) } - schema, names, err := expression.TableInfo2SchemaAndNames(b.ctx.GetExprCtx(), 
model.NewCIStr(""), tableInfo) + schema, names, err := expression.TableInfo2SchemaAndNames(b.ctx.GetExprCtx(), pmodel.NewCIStr(""), tableInfo) if err != nil { return nil, err } @@ -4435,7 +4436,7 @@ func (b *PlanBuilder) buildImportInto(ctx context.Context, ld *ast.ImportIntoStm db := b.ctx.GetSessionVars().CurrentDB return nil, infoschema.ErrTableNotExists.FastGenByArgs(db, tableInfo.Name.O) } - schema, names, err := expression.TableInfo2SchemaAndNames(b.ctx.GetExprCtx(), model.NewCIStr(""), tableInfo) + schema, names, err := expression.TableInfo2SchemaAndNames(b.ctx.GetExprCtx(), pmodel.NewCIStr(""), tableInfo) if err != nil { return nil, err } @@ -4767,7 +4768,7 @@ func (b *PlanBuilder) buildDDL(ctx context.Context, node ast.DDLNode) (base.Plan switch v := node.(type) { case *ast.AlterDatabaseStmt: if v.AlterDefaultDatabase { - v.Name = model.NewCIStr(b.ctx.GetSessionVars().CurrentDB) + v.Name = pmodel.NewCIStr(b.ctx.GetSessionVars().CurrentDB) } if v.Name.O == "" { return nil, plannererrors.ErrNoDB @@ -4931,7 +4932,7 @@ func (b *PlanBuilder) buildDDL(ctx context.Context, node ast.DDLNode) (base.Plan names := plan.OutputNames() if v.Cols == nil { adjustOverlongViewColname(plan.(base.LogicalPlan)) - v.Cols = make([]model.CIStr, len(schema.Columns)) + v.Cols = make([]pmodel.CIStr, len(schema.Columns)) for i, name := range names { v.Cols[i] = name.ColName } @@ -5516,7 +5517,7 @@ func convert2OutputSchemasAndNames(names []string, ftypes []byte, flags []uint) outputNames = make([]*types.FieldName, 0, len(names)) for i := range names { col := &expression.Column{} - outputNames = append(outputNames, &types.FieldName{ColName: model.NewCIStr(names[i])}) + outputNames = append(outputNames, &types.FieldName{ColName: pmodel.NewCIStr(names[i])}) // User varchar as the default return column type. 
tp := mysql.TypeVarchar if len(ftypes) != 0 && ftypes[i] != mysql.TypeUnspecified { @@ -5609,7 +5610,7 @@ func adjustOverlongViewColname(plan base.LogicalPlan) { outputNames := plan.OutputNames() for i := range outputNames { if outputName := outputNames[i].ColName.L; len(outputName) > mysql.MaxColumnNameLength { - outputNames[i].ColName = model.NewCIStr(fmt.Sprintf("name_exp_%d", i+1)) + outputNames[i].ColName = pmodel.NewCIStr(fmt.Sprintf("name_exp_%d", i+1)) } } } diff --git a/pkg/planner/core/planbuilder_test.go b/pkg/planner/core/planbuilder_test.go index 4019d02d1744c..761382b662d2a 100644 --- a/pkg/planner/core/planbuilder_test.go +++ b/pkg/planner/core/planbuilder_test.go @@ -27,9 +27,10 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/resolve" @@ -92,24 +93,24 @@ func TestGetPathByIndexName(t *testing.T) { accessPath := []*util.AccessPath{ {IsIntHandlePath: true}, - {Index: &model.IndexInfo{Name: model.NewCIStr("idx")}}, + {Index: &model.IndexInfo{Name: pmodel.NewCIStr("idx")}}, genTiFlashPath(tblInfo), } - path := getPathByIndexName(accessPath, model.NewCIStr("idx"), tblInfo) + path := getPathByIndexName(accessPath, pmodel.NewCIStr("idx"), tblInfo) require.NotNil(t, path) require.Equal(t, accessPath[1], path) // "id" is a prefix of "idx" - path = getPathByIndexName(accessPath, model.NewCIStr("id"), tblInfo) + path = getPathByIndexName(accessPath, pmodel.NewCIStr("id"), tblInfo) require.NotNil(t, path) require.Equal(t, accessPath[1], path) - path = getPathByIndexName(accessPath, model.NewCIStr("primary"), tblInfo) + path = 
getPathByIndexName(accessPath, pmodel.NewCIStr("primary"), tblInfo) require.NotNil(t, path) require.Equal(t, accessPath[0], path) - path = getPathByIndexName(accessPath, model.NewCIStr("not exists"), tblInfo) + path = getPathByIndexName(accessPath, pmodel.NewCIStr("not exists"), tblInfo) require.Nil(t, path) tblInfo = &model.TableInfo{ @@ -117,7 +118,7 @@ func TestGetPathByIndexName(t *testing.T) { PKIsHandle: false, } - path = getPathByIndexName(accessPath, model.NewCIStr("primary"), tblInfo) + path = getPathByIndexName(accessPath, pmodel.NewCIStr("primary"), tblInfo) require.Nil(t, path) } @@ -682,23 +683,23 @@ func TestGetFullAnalyzeColumnsInfo(t *testing.T) { // Create a new TableName instance. tableName := &ast.TableName{ - Schema: model.NewCIStr("test"), - Name: model.NewCIStr("my_table"), + Schema: pmodel.NewCIStr("test"), + Name: pmodel.NewCIStr("my_table"), } columns := []*model.ColumnInfo{ { ID: 1, - Name: model.NewCIStr("id"), + Name: pmodel.NewCIStr("id"), FieldType: *types.NewFieldType(mysql.TypeLonglong), }, { ID: 2, - Name: model.NewCIStr("name"), + Name: pmodel.NewCIStr("name"), FieldType: *types.NewFieldType(mysql.TypeString), }, { ID: 3, - Name: model.NewCIStr("age"), + Name: pmodel.NewCIStr("age"), FieldType: *types.NewFieldType(mysql.TypeLonglong), }, } @@ -710,7 +711,7 @@ func TestGetFullAnalyzeColumnsInfo(t *testing.T) { } // Test case 1: AllColumns. - cols, _, err := pb.getFullAnalyzeColumnsInfo(tblNameW, model.AllColumns, nil, nil, nil, false, false) + cols, _, err := pb.getFullAnalyzeColumnsInfo(tblNameW, pmodel.AllColumns, nil, nil, nil, false, false) require.NoError(t, err) require.Equal(t, columns, cols) @@ -722,7 +723,7 @@ func TestGetFullAnalyzeColumnsInfo(t *testing.T) { // Test case 3: ColumnList. 
specifiedCols := []*model.ColumnInfo{columns[0], columns[2]} mustAnalyzedCols.data[3] = struct{}{} - cols, _, err = pb.getFullAnalyzeColumnsInfo(tblNameW, model.ColumnList, specifiedCols, nil, mustAnalyzedCols, false, false) + cols, _, err = pb.getFullAnalyzeColumnsInfo(tblNameW, pmodel.ColumnList, specifiedCols, nil, mustAnalyzedCols, false, false) require.NoError(t, err) require.Equal(t, specifiedCols, cols) } @@ -736,12 +737,12 @@ func TestRequireInsertAndSelectPriv(t *testing.T) { tables := []*ast.TableName{ { - Schema: model.NewCIStr("test"), - Name: model.NewCIStr("t1"), + Schema: pmodel.NewCIStr("test"), + Name: pmodel.NewCIStr("t1"), }, { - Schema: model.NewCIStr("test"), - Name: model.NewCIStr("t2"), + Schema: pmodel.NewCIStr("test"), + Name: pmodel.NewCIStr("t2"), }, } diff --git a/pkg/planner/core/point_get_plan.go b/pkg/planner/core/point_get_plan.go index 5fcf93342a71a..2825a4dde1508 100644 --- a/pkg/planner/core/point_get_plan.go +++ b/pkg/planner/core/point_get_plan.go @@ -26,9 +26,10 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/opcode" "github.com/pingcap/tidb/pkg/parser/terror" @@ -79,7 +80,7 @@ type PointGetPlan struct { // Please see comments in PhysicalPlan for details. probeParents []base.PhysicalPlan // explicit partition selection - PartitionNames []model.CIStr + PartitionNames []pmodel.CIStr dbName string schema *expression.Schema @@ -431,7 +432,7 @@ type BatchPointGetPlan struct { // Please see comments in PhysicalPlan for details. 
probeParents []base.PhysicalPlan // explicit partition selection - PartitionNames []model.CIStr + PartitionNames []pmodel.CIStr ctx base.PlanContext dbName string @@ -652,7 +653,7 @@ func (p *BatchPointGetPlan) LoadTableStats(ctx sessionctx.Context) { loadTableStats(ctx, p.TblInfo, p.TblInfo.ID) } -func isInExplicitPartitions(pi *model.PartitionInfo, idx int, names []model.CIStr) bool { +func isInExplicitPartitions(pi *model.PartitionInfo, idx int, names []pmodel.CIStr) bool { if len(names) == 0 { return true } @@ -1456,7 +1457,7 @@ func indexIsAvailableByHints(idxInfo *model.IndexInfo, idxHints []*ast.IndexHint if len(idxHints) == 0 { return true } - match := func(name model.CIStr) bool { + match := func(name pmodel.CIStr) bool { if idxInfo == nil { return name.L == "primary" } @@ -1525,9 +1526,9 @@ func checkFastPlanPrivilege(ctx base.PlanContext, dbName, tableName string, chec } func buildSchemaFromFields( - dbName model.CIStr, + dbName pmodel.CIStr, tbl *model.TableInfo, - tblName model.CIStr, + tblName pmodel.CIStr, fields []*ast.SelectField, ) ( *expression.Schema, @@ -1633,7 +1634,7 @@ func tryExtractRowChecksumColumn(field *ast.SelectField, idx int) (*types.FieldN // getSingleTableNameAndAlias return the ast node of queried table name and the alias string. // `tblName` is `nil` if there are multiple tables in the query. // `tblAlias` will be the real table name if there is no table alias in the query. -func getSingleTableNameAndAlias(tableRefs *ast.TableRefsClause) (tblName *ast.TableName, tblAlias model.CIStr) { +func getSingleTableNameAndAlias(tableRefs *ast.TableRefsClause) (tblName *ast.TableName, tblAlias pmodel.CIStr) { if tableRefs == nil || tableRefs.TableRefs == nil || tableRefs.TableRefs.Right != nil { return nil, tblAlias } @@ -1653,7 +1654,7 @@ func getSingleTableNameAndAlias(tableRefs *ast.TableRefsClause) (tblName *ast.Ta } // getNameValuePairs extracts `column = constant/paramMarker` conditions from expr as name value pairs. 
-func getNameValuePairs(ctx expression.BuildContext, tbl *model.TableInfo, tblName model.CIStr, nvPairs []nameValuePair, expr ast.ExprNode) ( +func getNameValuePairs(ctx expression.BuildContext, tbl *model.TableInfo, tblName pmodel.CIStr, nvPairs []nameValuePair, expr ast.ExprNode) ( pairs []nameValuePair, isTableDual bool) { evalCtx := ctx.GetEvalCtx() binOp, ok := expr.(*ast.BinaryOperationExpr) @@ -2134,12 +2135,12 @@ func buildHandleCols(ctx base.PlanContext, tbl *model.TableInfo, schema *express // TODO: Remove this, by enabling all types of partitioning // and update/add tests -func getHashOrKeyPartitionColumnName(ctx base.PlanContext, tbl *model.TableInfo) *model.CIStr { +func getHashOrKeyPartitionColumnName(ctx base.PlanContext, tbl *model.TableInfo) *pmodel.CIStr { pi := tbl.GetPartitionInfo() if pi == nil { return nil } - if pi.Type != model.PartitionTypeHash && pi.Type != model.PartitionTypeKey { + if pi.Type != pmodel.PartitionTypeHash && pi.Type != pmodel.PartitionTypeKey { return nil } is := ctx.GetInfoSchema().(infoschema.InfoSchema) @@ -2149,7 +2150,7 @@ func getHashOrKeyPartitionColumnName(ctx base.PlanContext, tbl *model.TableInfo) } // PartitionExpr don't need columns and names for hash partition. 
partitionExpr := table.(partitionTable).PartitionExpr() - if pi.Type == model.PartitionTypeKey { + if pi.Type == pmodel.PartitionTypeKey { // used to judge whether the key partition contains only one field if len(pi.Columns) != 1 { return nil diff --git a/pkg/planner/core/preprocess.go b/pkg/planner/core/preprocess.go index 2da3a7e1e415f..708e985c971fd 100644 --- a/pkg/planner/core/preprocess.go +++ b/pkg/planner/core/preprocess.go @@ -26,12 +26,13 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/charset" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/planner/core/base" @@ -495,7 +496,7 @@ func (p *preprocessor) tableByName(tn *ast.TableName) (table.Table, error) { if currentDB == "" { return nil, errors.Trace(plannererrors.ErrNoDB) } - sName := model.NewCIStr(currentDB) + sName := pmodel.NewCIStr(currentDB) is := p.ensureInfoSchema() // for 'SHOW CREATE VIEW/SEQUENCE ...' statement, ignore local temporary tables. 
@@ -851,7 +852,7 @@ func (p *preprocessor) checkAdminCheckTableGrammar(stmt *ast.AdminStmt) { func (p *preprocessor) checkCreateTableGrammar(stmt *ast.CreateTableStmt) { if stmt.ReferTable != nil { - schema := model.NewCIStr(p.sctx.GetSessionVars().CurrentDB) + schema := pmodel.NewCIStr(p.sctx.GetSessionVars().CurrentDB) if stmt.ReferTable.Schema.String() != "" { schema = stmt.ReferTable.Schema } @@ -1027,7 +1028,7 @@ func (p *preprocessor) checkDropTableGrammar(stmt *ast.DropTableStmt) { } func (p *preprocessor) checkDropTemporaryTableGrammar(stmt *ast.DropTableStmt) { - currentDB := model.NewCIStr(p.sctx.GetSessionVars().CurrentDB) + currentDB := pmodel.NewCIStr(p.sctx.GetSessionVars().CurrentDB) for _, t := range stmt.Tables { if util.IsInCorrectIdentifierName(t.Name.String()) { p.err = dbterror.ErrWrongTableName.GenWithStackByArgs(t.Name.String()) @@ -1092,7 +1093,7 @@ func isTableAliasDuplicate(node ast.ResultSetNode, tableAliases map[string]any) if tabName.L == "" { if tableNode, ok := ts.Source.(*ast.TableName); ok { if tableNode.Schema.L != "" { - tabName = model.NewCIStr(fmt.Sprintf("%s.%s", tableNode.Schema.L, tableNode.Name.L)) + tabName = pmodel.NewCIStr(fmt.Sprintf("%s.%s", tableNode.Schema.L, tableNode.Name.L)) } else { tabName = tableNode.Name } @@ -1579,7 +1580,7 @@ func (p *preprocessor) handleTableName(tn *ast.TableName) { return } - tn.Schema = model.NewCIStr(currentDB) + tn.Schema = pmodel.NewCIStr(currentDB) } if p.flag&inCreateOrDropTable > 0 { @@ -1617,7 +1618,7 @@ func (p *preprocessor) handleTableName(tn *ast.TableName) { } if !p.skipLockMDL() { - table, err = tryLockMDLAndUpdateSchemaIfNecessary(p.ctx, p.sctx.GetPlanCtx(), model.NewCIStr(tn.Schema.L), table, p.ensureInfoSchema()) + table, err = tryLockMDLAndUpdateSchemaIfNecessary(p.ctx, p.sctx.GetPlanCtx(), pmodel.NewCIStr(tn.Schema.L), table, p.ensureInfoSchema()) if err != nil { p.err = err return @@ -1678,7 +1679,7 @@ func (p *preprocessor) resolveShowStmt(node *ast.ShowStmt) { 
node.DBName = p.sctx.GetSessionVars().CurrentDB } } else if node.Table != nil && node.Table.Schema.L == "" { - node.Table.Schema = model.NewCIStr(node.DBName) + node.Table.Schema = pmodel.NewCIStr(node.DBName) } if node.User != nil && node.User.CurrentUser { // Fill the Username and Hostname with the current user. @@ -1723,7 +1724,7 @@ func (p *preprocessor) resolveAlterTableStmt(node *ast.AlterTableStmt) { if spec.Tp == ast.AlterTableAddConstraint && spec.Constraint.Refer != nil { table := spec.Constraint.Refer.Table if table.Schema.L == "" && node.Table.Schema.L != "" { - table.Schema = model.NewCIStr(node.Table.Schema.L) + table.Schema = pmodel.NewCIStr(node.Table.Schema.L) } if spec.Constraint.Tp == ast.ConstraintForeignKey { // when foreign_key_checks is off, should ignore err when refer table is not exists. @@ -1838,7 +1839,7 @@ func (p *preprocessor) hasAutoConvertWarning(colDef *ast.ColumnDef) bool { return false } -func tryLockMDLAndUpdateSchemaIfNecessary(ctx context.Context, sctx base.PlanContext, dbName model.CIStr, tbl table.Table, is infoschema.InfoSchema) (table.Table, error) { +func tryLockMDLAndUpdateSchemaIfNecessary(ctx context.Context, sctx base.PlanContext, dbName pmodel.CIStr, tbl table.Table, is infoschema.InfoSchema) (table.Table, error) { if !sctx.GetSessionVars().TxnCtx.EnableMDL { return tbl, nil } diff --git a/pkg/planner/core/preprocess_test.go b/pkg/planner/core/preprocess_test.go index 99b9016cd6487..f3eea5b6f54b9 100644 --- a/pkg/planner/core/preprocess_test.go +++ b/pkg/planner/core/preprocess_test.go @@ -22,9 +22,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/planner/core" 
diff --git a/pkg/planner/core/resolve/BUILD.bazel b/pkg/planner/core/resolve/BUILD.bazel index 8d977499986d7..2c1beb5266085 100644 --- a/pkg/planner/core/resolve/BUILD.bazel +++ b/pkg/planner/core/resolve/BUILD.bazel @@ -2,10 +2,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "resolve", - srcs = ["resolve.go"], + srcs = [ + "resolve.go", + "result.go", + ], importpath = "github.com/pingcap/tidb/pkg/planner/core/resolve", visibility = ["//visibility:public"], deps = [ + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/model", ], diff --git a/pkg/planner/core/resolve/resolve.go b/pkg/planner/core/resolve/resolve.go index aab14ec18f02f..37dc843503fe2 100644 --- a/pkg/planner/core/resolve/resolve.go +++ b/pkg/planner/core/resolve/resolve.go @@ -18,8 +18,8 @@ package resolve import ( + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" ) // TableNameW is a wrapper around ast.TableName to store more information. diff --git a/pkg/planner/core/resolve/result.go b/pkg/planner/core/resolve/result.go new file mode 100644 index 0000000000000..b85d7a6590c94 --- /dev/null +++ b/pkg/planner/core/resolve/result.go @@ -0,0 +1,40 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resolve + +import ( + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" +) + +// ResultField represents a result field which can be a column from a table, +// or an expression in select field. It is a generated property during +// binding process. ResultField is the key element to evaluate a ColumnNameExpr. +// After resolving process, every ColumnNameExpr will be resolved to a ResultField. +// During execution, every row retrieved from table will set the row value to +// ResultFields of that table, so ColumnNameExpr resolved to that ResultField can be +// easily evaluated. +type ResultField struct { + Column *model.ColumnInfo + ColumnAsName pmodel.CIStr + // EmptyOrgName indicates whether this field has an empty org_name. A field has an empty org name, if it's an + // expression. It's not sure whether it's safe to use empty string in `.Column.Name`, so a new field is added to + // indicate whether it's empty. + EmptyOrgName bool + + Table *model.TableInfo + TableAsName pmodel.CIStr + DBName pmodel.CIStr +} diff --git a/pkg/planner/core/rule/util/BUILD.bazel b/pkg/planner/core/rule/util/BUILD.bazel index 0aa9d23ab8723..7325159de4a5f 100644 --- a/pkg/planner/core/rule/util/BUILD.bazel +++ b/pkg/planner/core/rule/util/BUILD.bazel @@ -10,7 +10,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/expression", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", "//pkg/planner/core/base", ], diff --git a/pkg/planner/core/rule/util/build_key_info_misc.go b/pkg/planner/core/rule/util/build_key_info_misc.go index 8d566868a54e9..7ab9e89371f1e 100644 --- a/pkg/planner/core/rule/util/build_key_info_misc.go +++ b/pkg/planner/core/rule/util/build_key_info_misc.go @@ -16,7 +16,7 @@ package util import ( "github.com/pingcap/tidb/pkg/expression" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" 
"github.com/pingcap/tidb/pkg/planner/core/base" ) diff --git a/pkg/planner/core/rule_collect_plan_stats.go b/pkg/planner/core/rule_collect_plan_stats.go index b89919524b600..7646d6d8f5045 100644 --- a/pkg/planner/core/rule_collect_plan_stats.go +++ b/pkg/planner/core/rule_collect_plan_stats.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/util/optimizetrace" "github.com/pingcap/tidb/pkg/sessionctx/variable" diff --git a/pkg/planner/core/rule_column_pruning.go b/pkg/planner/core/rule_column_pruning.go index 43d23b9eaacae..8692a854994b0 100644 --- a/pkg/planner/core/rule_column_pruning.go +++ b/pkg/planner/core/rule_column_pruning.go @@ -20,7 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" diff --git a/pkg/planner/core/rule_max_min_eliminate.go b/pkg/planner/core/rule_max_min_eliminate.go index 571d900cef3d2..ce264efa44e8d 100644 --- a/pkg/planner/core/rule_max_min_eliminate.go +++ b/pkg/planner/core/rule_max_min_eliminate.go @@ -21,8 +21,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" diff --git a/pkg/planner/core/rule_partition_processor.go b/pkg/planner/core/rule_partition_processor.go index 
6319ab51ae2fa..74e9e6d73aa17 100644 --- a/pkg/planner/core/rule_partition_processor.go +++ b/pkg/planner/core/rule_partition_processor.go @@ -26,8 +26,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" @@ -155,7 +156,7 @@ func getPartColumnsForHashPartition(hashExpr expression.Expression) ([]*expressi } func (s *PartitionProcessor) getUsedHashPartitions(ctx base.PlanContext, - tbl table.Table, partitionNames []model.CIStr, columns []*expression.Column, + tbl table.Table, partitionNames []pmodel.CIStr, columns []*expression.Column, conds []expression.Expression, names types.NameSlice) ([]int, error) { pi := tbl.Meta().Partition hashExpr, err := generateHashPartitionExpr(ctx, pi, columns, names) @@ -273,7 +274,7 @@ func (s *PartitionProcessor) getUsedHashPartitions(ctx base.PlanContext, } func (s *PartitionProcessor) getUsedKeyPartitions(ctx base.PlanContext, - tbl table.Table, partitionNames []model.CIStr, columns []*expression.Column, + tbl table.Table, partitionNames []pmodel.CIStr, columns []*expression.Column, conds []expression.Expression, _ types.NameSlice) ([]int, error) { pi := tbl.Meta().Partition partExpr := tbl.(partitionTable).PartitionExpr() @@ -381,9 +382,9 @@ func (s *PartitionProcessor) getUsedKeyPartitions(ctx base.PlanContext, // getUsedPartitions is used to get used partitions for hash or key partition tables func (s *PartitionProcessor) getUsedPartitions(ctx base.PlanContext, tbl table.Table, - partitionNames []model.CIStr, columns []*expression.Column, conds []expression.Expression, - names types.NameSlice, partType model.PartitionType) ([]int, error) { - if partType == 
model.PartitionTypeHash { + partitionNames []pmodel.CIStr, columns []*expression.Column, conds []expression.Expression, + names types.NameSlice, partType pmodel.PartitionType) ([]int, error) { + if partType == pmodel.PartitionTypeHash { return s.getUsedHashPartitions(ctx, tbl, partitionNames, columns, conds, names) } return s.getUsedKeyPartitions(ctx, tbl, partitionNames, columns, conds, names) @@ -392,7 +393,7 @@ func (s *PartitionProcessor) getUsedPartitions(ctx base.PlanContext, tbl table.T // findUsedPartitions is used to get used partitions for hash or key partition tables. // The first returning is the used partition index set pruned by `conds`. func (s *PartitionProcessor) findUsedPartitions(ctx base.PlanContext, - tbl table.Table, partitionNames []model.CIStr, conds []expression.Expression, + tbl table.Table, partitionNames []pmodel.CIStr, conds []expression.Expression, columns []*expression.Column, names types.NameSlice) ([]int, error) { pi := tbl.Meta().Partition used, err := s.getUsedPartitions(ctx, tbl, partitionNames, columns, conds, names, pi.Type) @@ -414,7 +415,7 @@ func (s *PartitionProcessor) findUsedPartitions(ctx base.PlanContext, return ret, nil } -func (s *PartitionProcessor) convertToIntSlice(or partitionRangeOR, pi *model.PartitionInfo, partitionNames []model.CIStr) []int { +func (s *PartitionProcessor) convertToIntSlice(or partitionRangeOR, pi *model.PartitionInfo, partitionNames []pmodel.CIStr) []int { if len(or) == 1 && or[0].start == 0 && or[0].end == len(pi.Definitions) { if len(partitionNames) == 0 { if len(pi.Definitions) == 1 { @@ -448,7 +449,7 @@ func convertToRangeOr(used []int, pi *model.PartitionInfo) partitionRangeOR { } // pruneHashOrKeyPartition is used to prune hash or key partition tables -func (s *PartitionProcessor) pruneHashOrKeyPartition(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, +func (s *PartitionProcessor) pruneHashOrKeyPartition(ctx base.PlanContext, tbl table.Table, partitionNames 
[]pmodel.CIStr, conds []expression.Expression, columns []*expression.Column, names types.NameSlice) ([]int, error) { used, err := s.findUsedPartitions(ctx, tbl, partitionNames, conds, columns, names) if err != nil { @@ -482,8 +483,8 @@ func (*PartitionProcessor) reconstructTableColNames(ds *DataSource) ([]*types.Fi names = append(names, &types.FieldName{ DBName: ds.DBName, TblName: ds.TableInfo.Name, - ColName: model.ExtraPhysTblIdName, - OrigColName: model.ExtraPhysTblIdName, + ColName: model.ExtraPhysTblIDName, + OrigColName: model.ExtraPhysTblIDName, }) continue } @@ -529,12 +530,12 @@ type listPartitionPruner struct { *PartitionProcessor ctx base.PlanContext pi *model.PartitionInfo - partitionNames []model.CIStr + partitionNames []pmodel.CIStr fullRange map[int]struct{} listPrune *tables.ForListPruning } -func newListPartitionPruner(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, s *PartitionProcessor, pruneList *tables.ForListPruning, columns []*expression.Column) *listPartitionPruner { +func newListPartitionPruner(ctx base.PlanContext, tbl table.Table, partitionNames []pmodel.CIStr, s *PartitionProcessor, pruneList *tables.ForListPruning, columns []*expression.Column) *listPartitionPruner { pruneList = pruneList.Clone() for i := range pruneList.PruneExprCols { for j := range columns { @@ -789,7 +790,7 @@ func (l *listPartitionPruner) findUsedListPartitions(conds []expression.Expressi return used, nil } -func (s *PartitionProcessor) findUsedListPartitions(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, +func (s *PartitionProcessor) findUsedListPartitions(ctx base.PlanContext, tbl table.Table, partitionNames []pmodel.CIStr, conds []expression.Expression, columns []*expression.Column) ([]int, error) { pi := tbl.Meta().Partition partExpr := tbl.(partitionTable).PartitionExpr() @@ -817,7 +818,7 @@ func (s *PartitionProcessor) findUsedListPartitions(ctx base.PlanContext, tbl ta return ret, nil } -func (s 
*PartitionProcessor) pruneListPartition(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, +func (s *PartitionProcessor) pruneListPartition(ctx base.PlanContext, tbl table.Table, partitionNames []pmodel.CIStr, conds []expression.Expression, columns []*expression.Column) ([]int, error) { used, err := s.findUsedListPartitions(ctx, tbl, partitionNames, conds, columns) if err != nil { @@ -843,11 +844,11 @@ func (s *PartitionProcessor) prune(ds *DataSource, opt *optimizetrace.LogicalOpt // a = 1 OR a = 2 => for p1 only "a = 1" and for p2 only "a = 2" // since a cannot be 2 in p1 and a cannot be 1 in p2 switch pi.Type { - case model.PartitionTypeRange: + case pmodel.PartitionTypeRange: return s.processRangePartition(ds, pi, opt) - case model.PartitionTypeHash, model.PartitionTypeKey: + case pmodel.PartitionTypeHash, pmodel.PartitionTypeKey: return s.processHashOrKeyPartition(ds, pi, opt) - case model.PartitionTypeList: + case pmodel.PartitionTypeList: return s.processListPartition(ds, pi, opt) } @@ -855,7 +856,7 @@ func (s *PartitionProcessor) prune(ds *DataSource, opt *optimizetrace.LogicalOpt } // findByName checks whether object name exists in list. 
-func (*PartitionProcessor) findByName(partitionNames []model.CIStr, partitionName string) bool { +func (*PartitionProcessor) findByName(partitionNames []pmodel.CIStr, partitionName string) bool { for _, s := range partitionNames { if s.L == partitionName { return true @@ -1728,7 +1729,7 @@ func (*PartitionProcessor) resolveAccessPaths(ds *DataSource) error { return nil } -func (s *PartitionProcessor) resolveOptimizeHint(ds *DataSource, partitionName model.CIStr) error { +func (s *PartitionProcessor) resolveOptimizeHint(ds *DataSource, partitionName pmodel.CIStr) error { // index hint if len(ds.IndexHints) > 0 { newIndexHint := make([]h.HintedIndex, 0, len(ds.IndexHints)) @@ -1794,7 +1795,7 @@ func (s *PartitionProcessor) resolveOptimizeHint(ds *DataSource, partitionName m return s.resolveAccessPaths(ds) } -func checkTableHintsApplicableForPartition(partitions []model.CIStr, partitionSet set.StringSet) []string { +func checkTableHintsApplicableForPartition(partitions []pmodel.CIStr, partitionSet set.StringSet) []string { var unknownPartitions []string for _, p := range partitions { if !partitionSet.Exist(p.L) { diff --git a/pkg/planner/core/runtime_filter_generator_test.go b/pkg/planner/core/runtime_filter_generator_test.go index 96734c3345ec7..4b62e22fb4fc2 100644 --- a/pkg/planner/core/runtime_filter_generator_test.go +++ b/pkg/planner/core/runtime_filter_generator_test.go @@ -22,7 +22,8 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testdata" @@ -52,13 +53,13 @@ func TestRuntimeFilterGenerator(t *testing.T) { // set tiflash replica dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() - tblInfo, err := 
is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) tblInfo.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, Available: true, } - tblInfo, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tblInfo, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) tblInfo.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, diff --git a/pkg/planner/core/stats.go b/pkg/planner/core/stats.go index c80ae25151d18..cf53f55479a6d 100644 --- a/pkg/planner/core/stats.go +++ b/pkg/planner/core/stats.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/cost" diff --git a/pkg/planner/core/util.go b/pkg/planner/core/util.go index 7ad81e06cd5d3..6270f29635b1d 100644 --- a/pkg/planner/core/util.go +++ b/pkg/planner/core/util.go @@ -21,8 +21,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/baseimpl" diff --git a/pkg/planner/implementation/BUILD.bazel b/pkg/planner/implementation/BUILD.bazel index d0af00ea0580f..b3c2224524a8e 100644 --- a/pkg/planner/implementation/BUILD.bazel +++ b/pkg/planner/implementation/BUILD.bazel @@ -14,7 +14,7 @@ go_library( deps = [ "//pkg/expression", "//pkg/kv", - 
"//pkg/parser/model", + "//pkg/meta/model", "//pkg/planner/cardinality", "//pkg/planner/core", "//pkg/planner/core/base", diff --git a/pkg/planner/implementation/datasource.go b/pkg/planner/implementation/datasource.go index e17a4462e8158..6ed32391b0c56 100644 --- a/pkg/planner/implementation/datasource.go +++ b/pkg/planner/implementation/datasource.go @@ -19,7 +19,7 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/planner/cardinality" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/memo" diff --git a/pkg/planner/memo/BUILD.bazel b/pkg/planner/memo/BUILD.bazel index 46ea8736a58f8..7abe6db4b2691 100644 --- a/pkg/planner/memo/BUILD.bazel +++ b/pkg/planner/memo/BUILD.bazel @@ -36,8 +36,8 @@ go_test( "//pkg/domain", "//pkg/expression", "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser", - "//pkg/parser/model", "//pkg/planner/core", "//pkg/planner/core/base", "//pkg/planner/core/operator/logicalop", diff --git a/pkg/planner/memo/group_test.go b/pkg/planner/memo/group_test.go index d0e553b3bf2d9..d6d050ca5a829 100644 --- a/pkg/planner/memo/group_test.go +++ b/pkg/planner/memo/group_test.go @@ -21,8 +21,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" diff --git a/pkg/planner/util/BUILD.bazel b/pkg/planner/util/BUILD.bazel index 8500f83dc55bd..f10b9e3ecd9ed 100644 --- a/pkg/planner/util/BUILD.bazel +++ b/pkg/planner/util/BUILD.bazel @@ -17,6 +17,7 @@ go_library( deps = [ "//pkg/expression", "//pkg/kv", + 
"//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/model", "//pkg/parser/mysql", @@ -49,7 +50,7 @@ go_test( flaky = True, deps = [ "//pkg/domain", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/planner/core", "//pkg/testkit/testsetup", "//pkg/types", diff --git a/pkg/planner/util/coretestsdk/BUILD.bazel b/pkg/planner/util/coretestsdk/BUILD.bazel index 0f5bb3aa34bc7..34f1f16d362b9 100644 --- a/pkg/planner/util/coretestsdk/BUILD.bazel +++ b/pkg/planner/util/coretestsdk/BUILD.bazel @@ -7,6 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/domain", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/store/mockstore", "//pkg/store/mockstore/unistore", diff --git a/pkg/planner/util/coretestsdk/testkit.go b/pkg/planner/util/coretestsdk/testkit.go index c770ca86d8f93..4677839fcf4b7 100644 --- a/pkg/planner/util/coretestsdk/testkit.go +++ b/pkg/planner/util/coretestsdk/testkit.go @@ -22,7 +22,8 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/store/mockstore/unistore" "github.com/stretchr/testify/require" @@ -32,7 +33,7 @@ import ( // SetTiFlashReplica is to set TiFlash replica func SetTiFlashReplica(t *testing.T, dom *domain.Domain, dbName, tableName string) { is := dom.InfoSchema() - tblInfo, err := is.TableByName(context.Background(), model.NewCIStr(dbName), model.NewCIStr(tableName)) + tblInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr(dbName), pmodel.NewCIStr(tableName)) require.NoError(t, err) tblInfo.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, diff --git a/pkg/planner/util/handle_cols.go b/pkg/planner/util/handle_cols.go index 57691f310842a..06d6ad97008a5 100644 --- a/pkg/planner/util/handle_cols.go +++ b/pkg/planner/util/handle_cols.go @@ -20,7 +20,7 
@@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/planner/util/misc.go b/pkg/planner/util/misc.go index 300f45d004517..0cdc728ed9794 100644 --- a/pkg/planner/util/misc.go +++ b/pkg/planner/util/misc.go @@ -23,8 +23,9 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" @@ -46,12 +47,12 @@ func CloneFieldNames(names []*types.FieldName) []*types.FieldName { return cloned } -// CloneCIStrs uses model.CIStr.Clone to clone a slice of model.CIStr. -func CloneCIStrs(strs []model.CIStr) []model.CIStr { +// CloneCIStrs uses ast.CIStr.Clone to clone a slice of ast.CIStr. +func CloneCIStrs(strs []pmodel.CIStr) []pmodel.CIStr { if strs == nil { return nil } - cloned := make([]model.CIStr, 0, len(strs)) + cloned := make([]pmodel.CIStr, 0, len(strs)) cloned = append(cloned, strs...) 
return cloned } @@ -332,7 +333,7 @@ func ExtractTableAlias(p base.Plan, parentOffset int) *h.HintedTable { } dbName := firstName.DBName if dbName.L == "" { - dbName = model.NewCIStr(p.SCtx().GetSessionVars().CurrentDB) + dbName = pmodel.NewCIStr(p.SCtx().GetSessionVars().CurrentDB) } return &h.HintedTable{DBName: dbName, TblName: firstName.TblName, SelectOffset: qbOffset} } diff --git a/pkg/planner/util/path.go b/pkg/planner/util/path.go index a1374c5247fc6..2cfd107488885 100644 --- a/pkg/planner/util/path.go +++ b/pkg/planner/util/path.go @@ -19,8 +19,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/context" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/collate" diff --git a/pkg/planner/util/path_test.go b/pkg/planner/util/path_test.go index 36bc3f852c514..f53f058186cb9 100644 --- a/pkg/planner/util/path_test.go +++ b/pkg/planner/util/path_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/privilege/privileges/BUILD.bazel b/pkg/privilege/privileges/BUILD.bazel index 656a93681784b..3d12422553428 100644 --- a/pkg/privilege/privileges/BUILD.bazel +++ b/pkg/privilege/privileges/BUILD.bazel @@ -15,10 +15,10 @@ go_library( "//pkg/extension", "//pkg/infoschema", "//pkg/kv", - "//pkg/parser/ast", "//pkg/parser/auth", "//pkg/parser/mysql", "//pkg/parser/terror", + "//pkg/planner/core/resolve", "//pkg/privilege", "//pkg/privilege/conn", "//pkg/privilege/privileges/ldap", diff --git a/pkg/privilege/privileges/cache.go b/pkg/privilege/privileges/cache.go index 241e8ee0f96e3..ac240b4e08895 100644 
--- a/pkg/privilege/privileges/cache.go +++ b/pkg/privilege/privileges/cache.go @@ -28,10 +28,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/types" @@ -579,7 +579,7 @@ func (p *MySQLPrivilege) LoadDefaultRoles(ctx sessionctx.Context) error { } func (p *MySQLPrivilege) loadTable(sctx sessionctx.Context, sql string, - decodeTableRow func(chunk.Row, []*ast.ResultField) error) error { + decodeTableRow func(chunk.Row, []*resolve.ResultField) error) error { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) rs, err := sctx.GetSQLExecutor().ExecuteInternal(ctx, sql) if err != nil { @@ -640,7 +640,7 @@ func parseHostIPNet(s string) *net.IPNet { } } -func (record *baseRecord) assignUserOrHost(row chunk.Row, i int, f *ast.ResultField) { +func (record *baseRecord) assignUserOrHost(row chunk.Row, i int, f *resolve.ResultField) { switch f.ColumnAsName.L { case "user": record.User = row.GetString(i) @@ -651,7 +651,7 @@ func (record *baseRecord) assignUserOrHost(row chunk.Row, i int, f *ast.ResultFi } } -func (p *MySQLPrivilege) decodeUserTableRow(row chunk.Row, fs []*ast.ResultField) error { +func (p *MySQLPrivilege) decodeUserTableRow(row chunk.Row, fs []*resolve.ResultField) error { var value UserRecord for i, f := range fs { switch { @@ -739,7 +739,7 @@ func (p *MySQLPrivilege) decodeUserTableRow(row chunk.Row, fs []*ast.ResultField return nil } -func (p *MySQLPrivilege) decodeGlobalPrivTableRow(row chunk.Row, fs []*ast.ResultField) error { +func (p *MySQLPrivilege) decodeGlobalPrivTableRow(row chunk.Row, fs []*resolve.ResultField) error { var value globalPrivRecord for i, f := 
range fs { if f.ColumnAsName.L == "priv" { @@ -776,7 +776,7 @@ func (p *MySQLPrivilege) decodeGlobalPrivTableRow(row chunk.Row, fs []*ast.Resul return nil } -func (p *MySQLPrivilege) decodeGlobalGrantsTableRow(row chunk.Row, fs []*ast.ResultField) error { +func (p *MySQLPrivilege) decodeGlobalGrantsTableRow(row chunk.Row, fs []*resolve.ResultField) error { var value dynamicPrivRecord for i, f := range fs { switch f.ColumnAsName.L { @@ -795,7 +795,7 @@ func (p *MySQLPrivilege) decodeGlobalGrantsTableRow(row chunk.Row, fs []*ast.Res return nil } -func (p *MySQLPrivilege) decodeDBTableRow(row chunk.Row, fs []*ast.ResultField) error { +func (p *MySQLPrivilege) decodeDBTableRow(row chunk.Row, fs []*resolve.ResultField) error { var value dbRecord for i, f := range fs { switch { @@ -819,7 +819,7 @@ func (p *MySQLPrivilege) decodeDBTableRow(row chunk.Row, fs []*ast.ResultField) return nil } -func (p *MySQLPrivilege) decodeTablesPrivTableRow(row chunk.Row, fs []*ast.ResultField) error { +func (p *MySQLPrivilege) decodeTablesPrivTableRow(row chunk.Row, fs []*resolve.ResultField) error { var value tablesPrivRecord for i, f := range fs { switch f.ColumnAsName.L { @@ -839,7 +839,7 @@ func (p *MySQLPrivilege) decodeTablesPrivTableRow(row chunk.Row, fs []*ast.Resul return nil } -func (p *MySQLPrivilege) decodeRoleEdgesTable(row chunk.Row, fs []*ast.ResultField) error { +func (p *MySQLPrivilege) decodeRoleEdgesTable(row chunk.Row, fs []*resolve.ResultField) error { var fromUser, fromHost, toHost, toUser string for i, f := range fs { switch f.ColumnAsName.L { @@ -864,7 +864,7 @@ func (p *MySQLPrivilege) decodeRoleEdgesTable(row chunk.Row, fs []*ast.ResultFie return nil } -func (p *MySQLPrivilege) decodeDefaultRoleTableRow(row chunk.Row, fs []*ast.ResultField) error { +func (p *MySQLPrivilege) decodeDefaultRoleTableRow(row chunk.Row, fs []*resolve.ResultField) error { var value defaultRoleRecord for i, f := range fs { switch f.ColumnAsName.L { @@ -880,7 +880,7 @@ func (p 
*MySQLPrivilege) decodeDefaultRoleTableRow(row chunk.Row, fs []*ast.Resu return nil } -func (p *MySQLPrivilege) decodeColumnsPrivTableRow(row chunk.Row, fs []*ast.ResultField) error { +func (p *MySQLPrivilege) decodeColumnsPrivTableRow(row chunk.Row, fs []*resolve.ResultField) error { var value columnsPrivRecord for i, f := range fs { switch f.ColumnAsName.L { diff --git a/pkg/resourcegroup/tests/BUILD.bazel b/pkg/resourcegroup/tests/BUILD.bazel index f48f0aa7e9b6c..9ef1e1a79bbd2 100644 --- a/pkg/resourcegroup/tests/BUILD.bazel +++ b/pkg/resourcegroup/tests/BUILD.bazel @@ -12,6 +12,7 @@ go_test( "//pkg/domain", "//pkg/domain/infosync", "//pkg/errno", + "//pkg/meta/model", "//pkg/parser/auth", "//pkg/parser/model", "//pkg/server", diff --git a/pkg/resourcegroup/tests/resource_group_test.go b/pkg/resourcegroup/tests/resource_group_test.go index 85ef7c7ddbc95..90668db9e4315 100644 --- a/pkg/resourcegroup/tests/resource_group_test.go +++ b/pkg/resourcegroup/tests/resource_group_test.go @@ -28,8 +28,9 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/domain/infosync" mysql "github.com/pingcap/tidb/pkg/errno" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/server" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/testkit" @@ -104,8 +105,8 @@ func TestResourceGroupBasic(t *testing.T) { re.Equal(uint64(2000), g.RURate) re.Equal(int64(-1), g.BurstLimit) re.Equal(uint64(time.Second*15/time.Millisecond), g.Runaway.ExecElapsedTimeMs) - re.Equal(model.RunawayActionDryRun, g.Runaway.Action) - re.Equal(model.WatchSimilar, g.Runaway.WatchType) + re.Equal(pmodel.RunawayActionDryRun, g.Runaway.Action) + re.Equal(pmodel.WatchSimilar, g.Runaway.WatchType) re.Equal(int64(time.Minute*10/time.Millisecond), g.Runaway.WatchDurationMs) tk.MustExec("alter resource group x 
QUERY_LIMIT=(EXEC_ELAPSED='20s' ACTION DRYRUN WATCH SIMILAR) BURSTABLE=FALSE") @@ -113,8 +114,8 @@ func TestResourceGroupBasic(t *testing.T) { re.Equal(uint64(2000), g.RURate) re.Equal(int64(2000), g.BurstLimit) re.Equal(uint64(time.Second*20/time.Millisecond), g.Runaway.ExecElapsedTimeMs) - re.Equal(model.RunawayActionDryRun, g.Runaway.Action) - re.Equal(model.WatchSimilar, g.Runaway.WatchType) + re.Equal(pmodel.RunawayActionDryRun, g.Runaway.Action) + re.Equal(pmodel.WatchSimilar, g.Runaway.WatchType) re.Equal(int64(0), g.Runaway.WatchDurationMs) tk.MustQuery("select * from information_schema.resource_groups where name = 'x'").Check(testkit.Rows("x 2000 MEDIUM NO EXEC_ELAPSED='20s', ACTION=DRYRUN, WATCH=SIMILAR DURATION=UNLIMITED ")) @@ -124,8 +125,8 @@ func TestResourceGroupBasic(t *testing.T) { re.Equal(uint64(math.MaxInt32), g.RURate) re.Equal(int64(-1), g.BurstLimit) re.Equal(uint64(time.Second*15/time.Millisecond), g.Runaway.ExecElapsedTimeMs) - re.Equal(model.RunawayActionDryRun, g.Runaway.Action) - re.Equal(model.WatchSimilar, g.Runaway.WatchType) + re.Equal(pmodel.RunawayActionDryRun, g.Runaway.Action) + re.Equal(pmodel.WatchSimilar, g.Runaway.WatchType) re.Equal(int64(time.Minute*10/time.Millisecond), g.Runaway.WatchDurationMs) tk.MustQuery("select * from information_schema.resource_groups where name = 'x'").Check(testkit.Rows("x UNLIMITED MEDIUM YES EXEC_ELAPSED='15s', ACTION=DRYRUN, WATCH=SIMILAR DURATION='10m0s' ")) @@ -179,7 +180,7 @@ func TestResourceGroupBasic(t *testing.T) { re.Equal(uint64(5000), groupInfo.RURate) re.Equal(int64(-1), groupInfo.BurstLimit) re.Equal(uint64(time.Second*15/time.Millisecond), groupInfo.Runaway.ExecElapsedTimeMs) - re.Equal(model.RunawayActionKill, groupInfo.Runaway.Action) + re.Equal(pmodel.RunawayActionKill, groupInfo.Runaway.Action) re.Equal(int64(0), groupInfo.Runaway.WatchDurationMs) } g = testResourceGroupNameFromIS(t, tk.Session(), "y") @@ -283,7 +284,7 @@ func testResourceGroupNameFromIS(t *testing.T, ctx 
sessionctx.Context, name stri // Make sure the table schema is the new schema. err := dom.Reload() require.NoError(t, err) - g, _ := dom.InfoSchema().ResourceGroupByName(model.NewCIStr(name)) + g, _ := dom.InfoSchema().ResourceGroupByName(pmodel.NewCIStr(name)) return g } diff --git a/pkg/server/BUILD.bazel b/pkg/server/BUILD.bazel index 4571f5564fbd1..5d543393cdb6c 100644 --- a/pkg/server/BUILD.bazel +++ b/pkg/server/BUILD.bazel @@ -31,13 +31,13 @@ go_library( "//pkg/extension", "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", "//pkg/param", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/auth", "//pkg/parser/charset", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/planner/core", @@ -162,14 +162,15 @@ go_test( "//pkg/extension", "//pkg/keyspace", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", "//pkg/param", - "//pkg/parser/ast", "//pkg/parser/auth", "//pkg/parser/charset", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", + "//pkg/planner/core/resolve", "//pkg/server/internal", "//pkg/server/internal/column", "//pkg/server/internal/handshake", diff --git a/pkg/server/conn.go b/pkg/server/conn.go index 1c30a851c2868..ea6ffb4006fbb 100644 --- a/pkg/server/conn.go +++ b/pkg/server/conn.go @@ -65,11 +65,11 @@ import ( "github.com/pingcap/tidb/pkg/extension" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" diff --git a/pkg/server/driver_tidb_test.go b/pkg/server/driver_tidb_test.go index af824ad912c9f..f916da009076e 100644 --- a/pkg/server/driver_tidb_test.go +++ 
b/pkg/server/driver_tidb_test.go @@ -17,10 +17,11 @@ package server import ( "testing" - "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/server/internal/column" "github.com/pingcap/tidb/pkg/types" "github.com/stretchr/testify/require" @@ -45,17 +46,17 @@ func TestConvertColumnInfo(t *testing.T) { // Test "mysql.TypeBit", for: https://github.com/pingcap/tidb/issues/5405. ftb := types.NewFieldTypeBuilder() ftb.SetType(mysql.TypeBit).SetFlag(mysql.UnsignedFlag).SetFlen(1).SetCharset(charset.CharsetUTF8).SetCollate(charset.CollationUTF8) - resultField := ast.ResultField{ + resultField := resolve.ResultField{ Column: &model.ColumnInfo{ - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), ID: 0, Offset: 0, FieldType: ftb.Build(), Comment: "column a is the first column in table dual", }, - ColumnAsName: model.NewCIStr("a"), - TableAsName: model.NewCIStr("dual"), - DBName: model.NewCIStr("test"), + ColumnAsName: pmodel.NewCIStr("a"), + TableAsName: pmodel.NewCIStr("dual"), + DBName: pmodel.NewCIStr("test"), } colInfo := column.ConvertColumnInfo(&resultField) require.Equal(t, createColumnByTypeAndLen(mysql.TypeBit, 1), colInfo) @@ -63,34 +64,34 @@ func TestConvertColumnInfo(t *testing.T) { // Test "mysql.TypeTiny", for: https://github.com/pingcap/tidb/issues/5405. 
ftpb := types.NewFieldTypeBuilder() ftpb.SetType(mysql.TypeTiny).SetFlag(mysql.UnsignedFlag).SetFlen(1).SetCharset(charset.CharsetUTF8).SetCollate(charset.CollationUTF8) - resultField = ast.ResultField{ + resultField = resolve.ResultField{ Column: &model.ColumnInfo{ - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), ID: 0, Offset: 0, FieldType: ftpb.Build(), Comment: "column a is the first column in table dual", }, - ColumnAsName: model.NewCIStr("a"), - TableAsName: model.NewCIStr("dual"), - DBName: model.NewCIStr("test"), + ColumnAsName: pmodel.NewCIStr("a"), + TableAsName: pmodel.NewCIStr("dual"), + DBName: pmodel.NewCIStr("test"), } colInfo = column.ConvertColumnInfo(&resultField) require.Equal(t, createColumnByTypeAndLen(mysql.TypeTiny, 1), colInfo) ftpb1 := types.NewFieldTypeBuilder() ftpb1.SetType(mysql.TypeYear).SetFlag(mysql.ZerofillFlag).SetFlen(4).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin) - resultField = ast.ResultField{ + resultField = resolve.ResultField{ Column: &model.ColumnInfo{ - Name: model.NewCIStr("a"), + Name: pmodel.NewCIStr("a"), ID: 0, Offset: 0, FieldType: ftpb1.Build(), Comment: "column a is the first column in table dual", }, - ColumnAsName: model.NewCIStr("a"), - TableAsName: model.NewCIStr("dual"), - DBName: model.NewCIStr("test"), + ColumnAsName: pmodel.NewCIStr("a"), + TableAsName: pmodel.NewCIStr("dual"), + DBName: pmodel.NewCIStr("test"), } colInfo = column.ConvertColumnInfo(&resultField) require.Equal(t, uint32(4), colInfo.ColumnLength) diff --git a/pkg/server/handler/BUILD.bazel b/pkg/server/handler/BUILD.bazel index 0aff0b1c56151..b51894e371e13 100644 --- a/pkg/server/handler/BUILD.bazel +++ b/pkg/server/handler/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "//pkg/domain/infosync", "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/terror", "//pkg/session", diff --git a/pkg/server/handler/optimizor/BUILD.bazel b/pkg/server/handler/optimizor/BUILD.bazel index 
727063b320d36..a267c4a2b323f 100644 --- a/pkg/server/handler/optimizor/BUILD.bazel +++ b/pkg/server/handler/optimizor/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "//pkg/domain", "//pkg/domain/infosync", "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/server/handler", diff --git a/pkg/server/handler/optimizor/plan_replayer.go b/pkg/server/handler/optimizor/plan_replayer.go index cbe647214f9fe..590492a57599d 100644 --- a/pkg/server/handler/optimizor/plan_replayer.go +++ b/pkg/server/handler/optimizor/plan_replayer.go @@ -34,7 +34,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/server/handler" "github.com/pingcap/tidb/pkg/statistics/handle" util2 "github.com/pingcap/tidb/pkg/statistics/handle/util" @@ -307,7 +308,7 @@ func loadSchemaMeta(z *zip.Reader, is infoschema.InfoSchema) (map[int64]*tblInfo s := strings.Split(row, ";") databaseName := s[0] tableName := s[1] - t, err := is.TableByName(context.Background(), model.NewCIStr(databaseName), model.NewCIStr(tableName)) + t, err := is.TableByName(context.Background(), pmodel.NewCIStr(databaseName), pmodel.NewCIStr(tableName)) if err != nil { return nil, err } diff --git a/pkg/server/handler/tests/BUILD.bazel b/pkg/server/handler/tests/BUILD.bazel index 687e94eef2ff8..ab9dc935e244a 100644 --- a/pkg/server/handler/tests/BUILD.bazel +++ b/pkg/server/handler/tests/BUILD.bazel @@ -20,6 +20,7 @@ go_test( "//pkg/infoschema", "//pkg/kv", "//pkg/meta", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser/model", "//pkg/parser/mysql", diff --git a/pkg/server/handler/tests/http_handler_test.go b/pkg/server/handler/tests/http_handler_test.go index 06fafdc762116..17bcbd4b0c8c9 100644 --- a/pkg/server/handler/tests/http_handler_test.go 
+++ b/pkg/server/handler/tests/http_handler_test.go @@ -48,7 +48,8 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core" server2 "github.com/pingcap/tidb/pkg/server" @@ -1206,7 +1207,7 @@ func TestWriteDBTablesData(t *testing.T) { // No table in a schema. info := infoschema.MockInfoSchema([]*model.TableInfo{}) rc := httptest.NewRecorder() - tbs, err := info.SchemaTableInfos(context.Background(), model.NewCIStr("test")) + tbs, err := info.SchemaTableInfos(context.Background(), pmodel.NewCIStr("test")) require.NoError(t, err) require.Equal(t, 0, len(tbs)) tikvhandler.WriteDBTablesData(rc, tbs) @@ -1219,7 +1220,7 @@ func TestWriteDBTablesData(t *testing.T) { // One table in a schema. info = infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable()}) rc = httptest.NewRecorder() - tbs, err = info.SchemaTableInfos(context.Background(), model.NewCIStr("test")) + tbs, err = info.SchemaTableInfos(context.Background(), pmodel.NewCIStr("test")) require.NoError(t, err) require.Equal(t, 1, len(tbs)) tikvhandler.WriteDBTablesData(rc, tbs) @@ -1233,7 +1234,7 @@ func TestWriteDBTablesData(t *testing.T) { // Two tables in a schema. 
info = infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) rc = httptest.NewRecorder() - tbs, err = info.SchemaTableInfos(context.Background(), model.NewCIStr("test")) + tbs, err = info.SchemaTableInfos(context.Background(), pmodel.NewCIStr("test")) require.NoError(t, err) require.Equal(t, 2, len(tbs)) tikvhandler.WriteDBTablesData(rc, tbs) diff --git a/pkg/server/handler/tikv_handler.go b/pkg/server/handler/tikv_handler.go index 1a6339e05be6d..03d204a1ca3cb 100644 --- a/pkg/server/handler/tikv_handler.go +++ b/pkg/server/handler/tikv_handler.go @@ -29,7 +29,8 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" derr "github.com/pingcap/tidb/pkg/store/driver/error" @@ -190,7 +191,7 @@ func (t *TikvHandlerTool) GetTable(dbName, tableName string) (table.PhysicalTabl return nil, errors.Trace(err) } tableName, partitionName := ExtractTableAndPartitionName(tableName) - tableVal, err := schema.TableByName(context.Background(), model.NewCIStr(dbName), model.NewCIStr(tableName)) + tableVal, err := schema.TableByName(context.Background(), pmodel.NewCIStr(dbName), pmodel.NewCIStr(tableName)) if err != nil { return nil, errors.Trace(err) } diff --git a/pkg/server/handler/tikvhandler/BUILD.bazel b/pkg/server/handler/tikvhandler/BUILD.bazel index 3f9673d987950..852f339731c40 100644 --- a/pkg/server/handler/tikvhandler/BUILD.bazel +++ b/pkg/server/handler/tikvhandler/BUILD.bazel @@ -14,6 +14,7 @@ go_library( "//pkg/infoschema", "//pkg/kv", "//pkg/meta", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/terror", "//pkg/server/handler", diff --git a/pkg/server/handler/tikvhandler/tikv_handler.go 
b/pkg/server/handler/tikvhandler/tikv_handler.go index 374a05a36cc8f..13c8f8b8cf3d2 100644 --- a/pkg/server/handler/tikvhandler/tikv_handler.go +++ b/pkg/server/handler/tikvhandler/tikv_handler.go @@ -42,7 +42,8 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/server/handler" "github.com/pingcap/tidb/pkg/session" @@ -800,7 +801,7 @@ type SchemaTableStorage struct { DataFree int64 `json:"data_free"` } -func getSchemaTablesStorageInfo(h *SchemaStorageHandler, schema *model.CIStr, table *model.CIStr) (messages []*SchemaTableStorage, err error) { +func getSchemaTablesStorageInfo(h *SchemaStorageHandler, schema *pmodel.CIStr, table *pmodel.CIStr) (messages []*SchemaTableStorage, err error) { var s sessiontypes.Session if s, err = session.CreateSession(h.Store); err != nil { return @@ -871,13 +872,13 @@ func (h SchemaStorageHandler) ServeHTTP(w http.ResponseWriter, req *http.Request params := mux.Vars(req) var ( - dbName *model.CIStr - tableName *model.CIStr + dbName *pmodel.CIStr + tableName *pmodel.CIStr isSingle bool ) if reqDbName, ok := params[handler.DBName]; ok { - cDBName := model.NewCIStr(reqDbName) + cDBName := pmodel.NewCIStr(reqDbName) // all table schemas in a specified database schemaInfo, exists := schema.SchemaByName(cDBName) if !exists { @@ -888,7 +889,7 @@ func (h SchemaStorageHandler) ServeHTTP(w http.ResponseWriter, req *http.Request if reqTableName, ok := params[handler.TableName]; ok { // table schema of a specified table name - cTableName := model.NewCIStr(reqTableName) + cTableName := pmodel.NewCIStr(reqTableName) data, e := schema.TableByName(context.Background(), cDBName, cTableName) if e != nil { handler.WriteError(w, e) @@ -987,10 +988,10 @@ func (h SchemaHandler) 
ServeHTTP(w http.ResponseWriter, req *http.Request) { params := mux.Vars(req) if dbName, ok := params[handler.DBName]; ok { - cDBName := model.NewCIStr(dbName) + cDBName := pmodel.NewCIStr(dbName) if tableName, ok := params[handler.TableName]; ok { // table schema of a specified table name - cTableName := model.NewCIStr(tableName) + cTableName := pmodel.NewCIStr(tableName) data, err := schema.TableByName(context.Background(), cDBName, cTableName) if err != nil { handler.WriteError(w, err) @@ -1086,7 +1087,7 @@ func (h *TableHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } tableName, partitionName := handler.ExtractTableAndPartitionName(tableName) - tableVal, err := schema.TableByName(context.Background(), model.NewCIStr(dbName), model.NewCIStr(tableName)) + tableVal, err := schema.TableByName(context.Background(), pmodel.NewCIStr(dbName), pmodel.NewCIStr(tableName)) if err != nil { handler.WriteError(w, err) return diff --git a/pkg/server/internal/column/BUILD.bazel b/pkg/server/internal/column/BUILD.bazel index 88fb2d7c97b35..9640e099b393c 100644 --- a/pkg/server/internal/column/BUILD.bazel +++ b/pkg/server/internal/column/BUILD.bazel @@ -10,9 +10,9 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/server/internal/column", visibility = ["//pkg/server:__subpackages__"], deps = [ - "//pkg/parser/ast", "//pkg/parser/charset", "//pkg/parser/mysql", + "//pkg/planner/core/resolve", "//pkg/server/err", "//pkg/server/internal/dump", "//pkg/server/internal/util", diff --git a/pkg/server/internal/column/convert.go b/pkg/server/internal/column/convert.go index ffda8db1d1edc..a8459a67f0ec8 100644 --- a/pkg/server/internal/column/convert.go +++ b/pkg/server/internal/column/convert.go @@ -15,14 +15,14 @@ package column import ( - "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/types" ) // 
ConvertColumnInfo converts `*ast.ResultField` to `*Info` -func ConvertColumnInfo(fld *ast.ResultField) (ci *Info) { +func ConvertColumnInfo(fld *resolve.ResultField) (ci *Info) { ci = &Info{ Name: fld.ColumnAsName.O, OrgName: fld.Column.Name.O, diff --git a/pkg/server/internal/testserverclient/BUILD.bazel b/pkg/server/internal/testserverclient/BUILD.bazel index b2b72a651c465..e74b241bdb141 100644 --- a/pkg/server/internal/testserverclient/BUILD.bazel +++ b/pkg/server/internal/testserverclient/BUILD.bazel @@ -8,8 +8,8 @@ go_library( deps = [ "//pkg/errno", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/server", "//pkg/sessionctx/sessionstates", diff --git a/pkg/server/internal/testserverclient/server_client.go b/pkg/server/internal/testserverclient/server_client.go index 2e79094f12fdf..c6d112b1d370d 100644 --- a/pkg/server/internal/testserverclient/server_client.go +++ b/pkg/server/internal/testserverclient/server_client.go @@ -41,8 +41,8 @@ import ( "github.com/pingcap/log" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" tmysql "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/server" "github.com/pingcap/tidb/pkg/sessionctx/sessionstates" diff --git a/pkg/session/BUILD.bazel b/pkg/session/BUILD.bazel index 7c356bbcaaae3..1e3cec58a35a4 100644 --- a/pkg/session/BUILD.bazel +++ b/pkg/session/BUILD.bazel @@ -44,6 +44,7 @@ go_library( "//pkg/infoschema/context", "//pkg/kv", "//pkg/meta", + "//pkg/meta/model", "//pkg/metrics", "//pkg/owner", "//pkg/param", diff --git a/pkg/session/bootstraptest/BUILD.bazel b/pkg/session/bootstraptest/BUILD.bazel index 64612e6f8542c..f770b484eb0d5 100644 --- a/pkg/session/bootstraptest/BUILD.bazel +++ b/pkg/session/bootstraptest/BUILD.bazel @@ -14,7 +14,7 @@ go_test( "//pkg/ddl", "//pkg/kv", "//pkg/meta", - 
"//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/terror", "//pkg/planner/core", "//pkg/server/handler", diff --git a/pkg/session/bootstraptest/bootstrap_upgrade_test.go b/pkg/session/bootstraptest/bootstrap_upgrade_test.go index 28cf75f22a709..445d8c3196379 100644 --- a/pkg/session/bootstraptest/bootstrap_upgrade_test.go +++ b/pkg/session/bootstraptest/bootstrap_upgrade_test.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/server/handler" diff --git a/pkg/session/nontransactional.go b/pkg/session/nontransactional.go index e272fbb74b0fa..6283d36a5b42c 100644 --- a/pkg/session/nontransactional.go +++ b/pkg/session/nontransactional.go @@ -24,10 +24,11 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/errno" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/opcode" "github.com/pingcap/tidb/pkg/planner/core" @@ -65,7 +66,7 @@ type job struct { type statementBuildInfo struct { stmt *ast.NonTransactionalDMLStmt shardColumnType types.FieldType - shardColumnRefer *ast.ResultField + shardColumnRefer *resolve.ResultField originalCondition ast.ExprNode } @@ -260,11 +261,11 @@ func checkReadClauses(limit *ast.Limit, order *ast.OrderByClause) error { func runJobs(ctx context.Context, jobs []job, stmt *ast.NonTransactionalDMLStmt, tableName *resolve.TableNameW, se sessiontypes.Session, originalCondition ast.ExprNode) ([]string, error) 
{ // prepare for the construction of statement - var shardColumnRefer *ast.ResultField + var shardColumnRefer *resolve.ResultField var shardColumnType types.FieldType for _, col := range tableName.TableInfo.Columns { if col.Name.L == stmt.ShardColumn.Name.L { - shardColumnRefer = &ast.ResultField{ + shardColumnRefer = &resolve.ResultField{ Column: col, Table: tableName.TableInfo, DBName: tableName.Schema, @@ -300,7 +301,7 @@ func runJobs(ctx context.Context, jobs []job, stmt *ast.NonTransactionalDMLStmt, // _tidb_rowid if shardColumnRefer == nil { shardColumnType = *types.NewFieldType(mysql.TypeLonglong) - shardColumnRefer = &ast.ResultField{ + shardColumnRefer = &resolve.ResultField{ Column: model.NewExtraHandleColInfo(), Table: tableName.TableInfo, DBName: tableName.Schema, @@ -615,7 +616,7 @@ func selectShardColumn(stmt *ast.NonTransactionalDMLStmt, se sessiontypes.Sessio // the specified table must be in the join tableInJoin := false - var chosenTableName model.CIStr + var chosenTableName pmodel.CIStr for _, tableSource := range tableSources { tableSourceName := tableSource.Source.(*ast.TableName) tableSourceFinalTableName := tableSource.AsName // precedence: alias name, then table name @@ -687,7 +688,7 @@ func collectTableSourcesInJoin(node ast.ResultSetNode, tableSources []*ast.Table // it attempts to auto-select a shard column from handle if not specified, and fills back the corresponding info in the stmt, // making it transparent to following steps func selectShardColumnFromTheOnlyTable(stmt *ast.NonTransactionalDMLStmt, tableName *ast.TableName, - tableAsName model.CIStr, tbl table.Table) ( + tableAsName pmodel.CIStr, tbl table.Table) ( indexed bool, shardColumnInfo *model.ColumnInfo, err error) { if stmt.ShardColumn == nil { return selectShardColumnAutomatically(stmt, tbl, tableName, tableAsName) @@ -732,7 +733,7 @@ func selectShardColumnByGivenName(shardColumnName string, tbl table.Table) ( } func selectShardColumnAutomatically(stmt 
*ast.NonTransactionalDMLStmt, tbl table.Table, - tableName *ast.TableName, tableAsName model.CIStr) (bool, *model.ColumnInfo, error) { + tableName *ast.TableName, tableAsName pmodel.CIStr) (bool, *model.ColumnInfo, error) { // auto-detect shard column var shardColumnInfo *model.ColumnInfo tableInfo := tbl.Meta() @@ -766,7 +767,7 @@ func selectShardColumnAutomatically(stmt *ast.NonTransactionalDMLStmt, tbl table stmt.ShardColumn = &ast.ColumnName{ Schema: tableName.Schema, Table: outputTableName, // so that table alias works - Name: model.NewCIStr(shardColumnName), + Name: pmodel.NewCIStr(shardColumnName), } return true, shardColumnInfo, nil } @@ -779,11 +780,11 @@ func buildDryRunResults(dryRunOption int, results []string, maxChunkSize int) (s fieldName = "query statement" } - resultFields := []*ast.ResultField{{ + resultFields := []*resolve.ResultField{{ Column: &model.ColumnInfo{ FieldType: *types.NewFieldType(mysql.TypeString), }, - ColumnAsName: model.NewCIStr(fieldName), + ColumnAsName: pmodel.NewCIStr(fieldName), }} rows := make([][]any, 0, len(results)) for _, result := range results { @@ -806,18 +807,18 @@ func buildExecuteResults(ctx context.Context, jobs []job, maxChunkSize int, reda } } if len(failedJobs) == 0 { - resultFields := []*ast.ResultField{ + resultFields := []*resolve.ResultField{ { Column: &model.ColumnInfo{ FieldType: *types.NewFieldType(mysql.TypeLong), }, - ColumnAsName: model.NewCIStr("number of jobs"), + ColumnAsName: pmodel.NewCIStr("number of jobs"), }, { Column: &model.ColumnInfo{ FieldType: *types.NewFieldType(mysql.TypeString), }, - ColumnAsName: model.NewCIStr("job status"), + ColumnAsName: pmodel.NewCIStr("job status"), }, } rows := make([][]any, 1) diff --git a/pkg/session/session.go b/pkg/session/session.go index 906b2dbc04e75..00932c3f525ba 100644 --- a/pkg/session/session.go +++ b/pkg/session/session.go @@ -62,6 +62,7 @@ import ( infoschemactx "github.com/pingcap/tidb/pkg/infoschema/context" "github.com/pingcap/tidb/pkg/kv" 
"github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/owner" "github.com/pingcap/tidb/pkg/param" @@ -69,13 +70,14 @@ import ( "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/planner" planctx "github.com/pingcap/tidb/pkg/planner/context" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/core/base" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/plugin" "github.com/pingcap/tidb/pkg/privilege" "github.com/pingcap/tidb/pkg/privilege/conn" @@ -229,7 +231,7 @@ var parserPool = &sync.Pool{New: func() any { return parser.New() }} func (s *session) AddTableLock(locks []model.TableLockTpInfo) { for _, l := range locks { // read only lock is session unrelated, skip it when adding lock to session. - if l.Tp != model.TableLockReadOnly { + if l.Tp != pmodel.TableLockReadOnly { s.lockedTables[l.TableID] = l } } @@ -250,10 +252,10 @@ func (s *session) ReleaseTableLockByTableIDs(tableIDs []int64) { } // CheckTableLocked checks the table lock. -func (s *session) CheckTableLocked(tblID int64) (bool, model.TableLockType) { +func (s *session) CheckTableLocked(tblID int64) (bool, pmodel.TableLockType) { lt, ok := s.lockedTables[tblID] if !ok { - return false, model.TableLockNone + return false, pmodel.TableLockNone } return true, lt.Tp } @@ -424,10 +426,10 @@ func (s *session) UpdateColStatsUsage(predicateColumns []model.TableItemID) { } // FieldList returns fields list of a table. 
-func (s *session) FieldList(tableName string) ([]*ast.ResultField, error) { +func (s *session) FieldList(tableName string) ([]*resolve.ResultField, error) { is := s.GetInfoSchema().(infoschema.InfoSchema) - dbName := model.NewCIStr(s.GetSessionVars().CurrentDB) - tName := model.NewCIStr(tableName) + dbName := pmodel.NewCIStr(s.GetSessionVars().CurrentDB) + tName := pmodel.NewCIStr(tableName) pm := privilege.GetPrivilegeManager(s) if pm != nil && s.sessionVars.User != nil { if !pm.RequestVerification(s.sessionVars.ActiveRoles, dbName.O, tName.O, "", mysql.AllPrivMask) { @@ -447,9 +449,9 @@ func (s *session) FieldList(tableName string) ([]*ast.ResultField, error) { } cols := table.Cols() - fields := make([]*ast.ResultField, 0, len(cols)) + fields := make([]*resolve.ResultField, 0, len(cols)) for _, col := range table.Cols() { - rf := &ast.ResultField{ + rf := &resolve.ResultField{ ColumnAsName: col.Name, TableAsName: tName, DBName: dbName, @@ -1760,7 +1762,7 @@ var _ sqlexec.SQLExecutor = &session{} // ExecRestrictedStmt implements RestrictedSQLExecutor interface. func (s *session) ExecRestrictedStmt(ctx context.Context, stmtNode ast.StmtNode, opts ...sqlexec.OptionFuncAlias) ( - []chunk.Row, []*ast.ResultField, error) { + []chunk.Row, []*resolve.ResultField, error) { defer pprof.SetGoroutineLabels(ctx) execOption := sqlexec.GetExecOption(opts) var se *session @@ -1809,7 +1811,7 @@ func (s *session) ExecRestrictedStmt(ctx context.Context, stmtNode ast.StmtNode, // ExecRestrictedStmt4Test wrapper `(s *session) ExecRestrictedStmt` for test. func ExecRestrictedStmt4Test(ctx context.Context, s types.Session, stmtNode ast.StmtNode, opts ...sqlexec.OptionFuncAlias) ( - []chunk.Row, []*ast.ResultField, error) { + []chunk.Row, []*resolve.ResultField, error) { ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnOthers) return s.(*session).ExecRestrictedStmt(ctx, stmtNode, opts...) 
} @@ -1923,7 +1925,7 @@ func (s *session) getInternalSession(execOption sqlexec.ExecOption) (*session, f }, nil } -func (s *session) withRestrictedSQLExecutor(ctx context.Context, opts []sqlexec.OptionFuncAlias, fn func(context.Context, *session) ([]chunk.Row, []*ast.ResultField, error)) ([]chunk.Row, []*ast.ResultField, error) { +func (s *session) withRestrictedSQLExecutor(ctx context.Context, opts []sqlexec.OptionFuncAlias, fn func(context.Context, *session) ([]chunk.Row, []*resolve.ResultField, error)) ([]chunk.Row, []*resolve.ResultField, error) { execOption := sqlexec.GetExecOption(opts) var se *session var clean func() @@ -1948,8 +1950,8 @@ func (s *session) withRestrictedSQLExecutor(ctx context.Context, opts []sqlexec. return fn(ctx, se) } -func (s *session) ExecRestrictedSQL(ctx context.Context, opts []sqlexec.OptionFuncAlias, sql string, params ...any) ([]chunk.Row, []*ast.ResultField, error) { - return s.withRestrictedSQLExecutor(ctx, opts, func(ctx context.Context, se *session) ([]chunk.Row, []*ast.ResultField, error) { +func (s *session) ExecRestrictedSQL(ctx context.Context, opts []sqlexec.OptionFuncAlias, sql string, params ...any) ([]chunk.Row, []*resolve.ResultField, error) { + return s.withRestrictedSQLExecutor(ctx, opts, func(ctx context.Context, se *session) ([]chunk.Row, []*resolve.ResultField, error) { stmt, err := se.ParseWithParams(ctx, sql, params...) if err != nil { return nil, nil, errors.Trace(err) @@ -2095,7 +2097,7 @@ func (s *session) ExecuteStmt(ctx context.Context, stmtNode ast.StmtNode) (sqlex // infoschema there. if sessVars.StmtCtx.ResourceGroupName != sessVars.ResourceGroupName { // if target resource group doesn't exist, fallback to the origin resource group. 
- if _, ok := domain.GetDomain(s).InfoSchema().ResourceGroupByName(model.NewCIStr(sessVars.StmtCtx.ResourceGroupName)); !ok { + if _, ok := domain.GetDomain(s).InfoSchema().ResourceGroupByName(pmodel.NewCIStr(sessVars.StmtCtx.ResourceGroupName)); !ok { logutil.Logger(ctx).Warn("Unknown resource group from hint", zap.String("name", sessVars.StmtCtx.ResourceGroupName)) sessVars.StmtCtx.ResourceGroupName = sessVars.ResourceGroupName if txn, err := s.Txn(false); err == nil && txn != nil && txn.Valid() { @@ -2430,7 +2432,7 @@ func (s *session) rollbackOnError(ctx context.Context) { } // PrepareStmt is used for executing prepare statement in binary protocol -func (s *session) PrepareStmt(sql string) (stmtID uint32, paramCount int, fields []*ast.ResultField, err error) { +func (s *session) PrepareStmt(sql string) (stmtID uint32, paramCount int, fields []*resolve.ResultField, err error) { defer func() { if s.sessionVars.StmtCtx != nil { s.sessionVars.StmtCtx.DetachMemDiskTracker() @@ -4028,7 +4030,7 @@ func logStmt(execStmt *executor.ExecStmt, s *session) { } case *ast.CreateIndexStmt: isCrucial = true - if stmt.IndexOption != nil && stmt.IndexOption.Tp == model.IndexTypeHypo { + if stmt.IndexOption != nil && stmt.IndexOption.Tp == pmodel.IndexTypeHypo { isCrucial = false } case *ast.CreateUserStmt, *ast.DropUserStmt, *ast.AlterUserStmt, *ast.SetPwdStmt, *ast.GrantStmt, @@ -4498,7 +4500,7 @@ func (s *session) usePipelinedDmlOrWarn(ctx context.Context) bool { } for _, t := range stmtCtx.Tables { // get table schema from current infoschema - tbl, err := is.TableByName(ctx, model.NewCIStr(t.DB), model.NewCIStr(t.Table)) + tbl, err := is.TableByName(ctx, pmodel.NewCIStr(t.DB), pmodel.NewCIStr(t.Table)) if err != nil { stmtCtx.AppendWarning(errors.New("Pipelined DML failed to get table schema. 
Fallback to standard mode")) return false diff --git a/pkg/session/txn.go b/pkg/session/txn.go index 0ec88c4a5667a..4dcbae9f07fc5 100644 --- a/pkg/session/txn.go +++ b/pkg/session/txn.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/session/txninfo" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/session/types/BUILD.bazel b/pkg/session/types/BUILD.bazel index 75a5811641436..debca0f3ef0bf 100644 --- a/pkg/session/types/BUILD.bazel +++ b/pkg/session/types/BUILD.bazel @@ -10,6 +10,7 @@ go_library( "//pkg/extension", "//pkg/parser/ast", "//pkg/parser/auth", + "//pkg/planner/core/resolve", "//pkg/privilege/conn", "//pkg/session/txninfo", "//pkg/sessionctx", diff --git a/pkg/session/types/sesson_interface.go b/pkg/session/types/sesson_interface.go index cbd95438e79aa..d3dfdfa3eca06 100644 --- a/pkg/session/types/sesson_interface.go +++ b/pkg/session/types/sesson_interface.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/tidb/pkg/extension" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/privilege/conn" "github.com/pingcap/tidb/pkg/session/txninfo" "github.com/pingcap/tidb/pkg/sessionctx" @@ -50,7 +51,7 @@ type Session interface { CommitTxn(context.Context) error RollbackTxn(context.Context) // PrepareStmt executes prepare statement in binary protocol. - PrepareStmt(sql string) (stmtID uint32, paramCount int, fields []*ast.ResultField, err error) + PrepareStmt(sql string) (stmtID uint32, paramCount int, fields []*resolve.ResultField, err error) // ExecutePreparedStmt executes a prepared statement. // Deprecated: please use ExecuteStmt, this function is left for testing only. 
// TODO: remove ExecutePreparedStmt. @@ -77,7 +78,7 @@ type Session interface { // PrepareTxnCtx is exported for test. PrepareTxnCtx(context.Context) error // FieldList returns fields list of a table. - FieldList(tableName string) (fields []*ast.ResultField, err error) + FieldList(tableName string) (fields []*resolve.ResultField, err error) SetPort(port string) // SetExtensions sets the `*extension.SessionExtensions` object diff --git a/pkg/sessionctx/BUILD.bazel b/pkg/sessionctx/BUILD.bazel index 2e62fd68abf17..3a67400e4eb3a 100644 --- a/pkg/sessionctx/BUILD.bazel +++ b/pkg/sessionctx/BUILD.bazel @@ -12,8 +12,8 @@ go_library( "//pkg/infoschema/context", "//pkg/kv", "//pkg/lock/context", + "//pkg/meta/model", "//pkg/metrics", - "//pkg/parser/model", "//pkg/planner/context", "//pkg/session/cursor", "//pkg/sessionctx/sessionstates", diff --git a/pkg/sessionctx/context.go b/pkg/sessionctx/context.go index cb4810b303b5b..8a307dc5c9af6 100644 --- a/pkg/sessionctx/context.go +++ b/pkg/sessionctx/context.go @@ -25,8 +25,8 @@ import ( infoschema "github.com/pingcap/tidb/pkg/infoschema/context" "github.com/pingcap/tidb/pkg/kv" tablelock "github.com/pingcap/tidb/pkg/lock/context" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" planctx "github.com/pingcap/tidb/pkg/planner/context" "github.com/pingcap/tidb/pkg/session/cursor" "github.com/pingcap/tidb/pkg/sessionctx/sessionstates" diff --git a/pkg/sessionctx/sessionstates/BUILD.bazel b/pkg/sessionctx/sessionstates/BUILD.bazel index a09cf4288ebd2..5451a8de47875 100644 --- a/pkg/sessionctx/sessionstates/BUILD.bazel +++ b/pkg/sessionctx/sessionstates/BUILD.bazel @@ -10,7 +10,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/errno", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/types", "//pkg/types", "//pkg/util/context", diff --git a/pkg/sessionctx/sessionstates/session_states.go 
b/pkg/sessionctx/sessionstates/session_states.go index 63d3249811feb..3ebebbddd330f 100644 --- a/pkg/sessionctx/sessionstates/session_states.go +++ b/pkg/sessionctx/sessionstates/session_states.go @@ -16,7 +16,7 @@ package sessionstates import ( "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ptypes "github.com/pingcap/tidb/pkg/parser/types" "github.com/pingcap/tidb/pkg/types" contextutil "github.com/pingcap/tidb/pkg/util/context" diff --git a/pkg/sessionctx/stmtctx/BUILD.bazel b/pkg/sessionctx/stmtctx/BUILD.bazel index c13132fa87838..e61dd9b4a7a2b 100644 --- a/pkg/sessionctx/stmtctx/BUILD.bazel +++ b/pkg/sessionctx/stmtctx/BUILD.bazel @@ -8,8 +8,8 @@ go_library( deps = [ "//pkg/distsql/context", "//pkg/errctx", + "//pkg/meta/model", "//pkg/parser", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/resourcegroup", diff --git a/pkg/sessionctx/stmtctx/stmtctx.go b/pkg/sessionctx/stmtctx/stmtctx.go index 04f0e0819922a..d905d8ed993c9 100644 --- a/pkg/sessionctx/stmtctx/stmtctx.go +++ b/pkg/sessionctx/stmtctx/stmtctx.go @@ -27,8 +27,8 @@ import ( distsqlctx "github.com/pingcap/tidb/pkg/distsql/context" "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/resourcegroup" diff --git a/pkg/sessionctx/variable/BUILD.bazel b/pkg/sessionctx/variable/BUILD.bazel index d877052715466..1c52d080747d6 100644 --- a/pkg/sessionctx/variable/BUILD.bazel +++ b/pkg/sessionctx/variable/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "//pkg/errno", "//pkg/keyspace", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser", "//pkg/parser/ast", diff --git a/pkg/sessionctx/variable/session.go b/pkg/sessionctx/variable/session.go index 2d1f3579b9fd9..3f83b365a444f 100644 
--- a/pkg/sessionctx/variable/session.go +++ b/pkg/sessionctx/variable/session.go @@ -36,12 +36,12 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" ptypes "github.com/pingcap/tidb/pkg/parser/types" "github.com/pingcap/tidb/pkg/resourcegroup" diff --git a/pkg/sessiontxn/BUILD.bazel b/pkg/sessiontxn/BUILD.bazel index 8a68b49ed6001..63d9d885051be 100644 --- a/pkg/sessiontxn/BUILD.bazel +++ b/pkg/sessiontxn/BUILD.bazel @@ -35,6 +35,7 @@ go_test( "//pkg/expression", "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/model", "//pkg/sessionctx", diff --git a/pkg/sessiontxn/txn_manager_test.go b/pkg/sessiontxn/txn_manager_test.go index 0772a62e5aa8a..068f563fa75b2 100644 --- a/pkg/sessiontxn/txn_manager_test.go +++ b/pkg/sessiontxn/txn_manager_test.go @@ -22,8 +22,9 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/sessiontxn/internal" @@ -448,7 +449,7 @@ func TestSnapshotInterceptor(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("create temporary table test.tmp1 (id int primary key)") - tbl, err := tk.Session().GetDomainInfoSchema().(infoschema.InfoSchema).TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tmp1")) + tbl, err := 
tk.Session().GetDomainInfoSchema().(infoschema.InfoSchema).TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tmp1")) require.NoError(t, err) require.Equal(t, model.TempTableLocal, tbl.Meta().TempTableType) tblID := tbl.Meta().ID diff --git a/pkg/statistics/BUILD.bazel b/pkg/statistics/BUILD.bazel index dc4fda20fe81f..ddc59761edfe2 100644 --- a/pkg/statistics/BUILD.bazel +++ b/pkg/statistics/BUILD.bazel @@ -25,12 +25,13 @@ go_library( deps = [ "//pkg/expression", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/charset", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/planner/context", + "//pkg/planner/core/resolve", "//pkg/planner/util/debugtrace", "//pkg/sessionctx", "//pkg/sessionctx/stmtctx", @@ -84,9 +85,10 @@ go_test( shard_count = 38, deps = [ "//pkg/config", - "//pkg/parser/ast", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", + "//pkg/planner/core/resolve", "//pkg/sessionctx", "//pkg/sessionctx/stmtctx", "//pkg/testkit", diff --git a/pkg/statistics/asyncload/BUILD.bazel b/pkg/statistics/asyncload/BUILD.bazel index 3c42d1eb6d9aa..269a4d45ac504 100644 --- a/pkg/statistics/asyncload/BUILD.bazel +++ b/pkg/statistics/asyncload/BUILD.bazel @@ -5,5 +5,5 @@ go_library( srcs = ["async_load.go"], importpath = "github.com/pingcap/tidb/pkg/statistics/asyncload", visibility = ["//visibility:public"], - deps = ["//pkg/parser/model"], + deps = ["//pkg/meta/model"], ) diff --git a/pkg/statistics/asyncload/async_load.go b/pkg/statistics/asyncload/async_load.go index 28dace83b371a..219845364565f 100644 --- a/pkg/statistics/asyncload/async_load.go +++ b/pkg/statistics/asyncload/async_load.go @@ -17,7 +17,7 @@ package asyncload import ( "sync" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) // AsyncLoadHistogramNeededItems stores the columns/indices whose Histograms need to be loaded from physical kv layer. 
diff --git a/pkg/statistics/builder_ext_stats.go b/pkg/statistics/builder_ext_stats.go index 868910de457cd..94e29ecaae50d 100644 --- a/pkg/statistics/builder_ext_stats.go +++ b/pkg/statistics/builder_ext_stats.go @@ -20,8 +20,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/util/logutil" "go.uber.org/zap" diff --git a/pkg/statistics/column.go b/pkg/statistics/column.go index 3829dcf0a098b..5cdc39238314f 100644 --- a/pkg/statistics/column.go +++ b/pkg/statistics/column.go @@ -15,7 +15,7 @@ package statistics import ( - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/context" "github.com/pingcap/tidb/pkg/planner/util/debugtrace" diff --git a/pkg/statistics/handle/BUILD.bazel b/pkg/statistics/handle/BUILD.bazel index 59442c07e0c70..30e02e4711a81 100644 --- a/pkg/statistics/handle/BUILD.bazel +++ b/pkg/statistics/handle/BUILD.bazel @@ -12,7 +12,7 @@ go_library( "//pkg/config", "//pkg/infoschema", "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/sessionctx", diff --git a/pkg/statistics/handle/autoanalyze/BUILD.bazel b/pkg/statistics/handle/autoanalyze/BUILD.bazel index 9e508d61f20fa..ebc779a9dd220 100644 --- a/pkg/statistics/handle/autoanalyze/BUILD.bazel +++ b/pkg/statistics/handle/autoanalyze/BUILD.bazel @@ -8,6 +8,7 @@ go_library( deps = [ "//pkg/domain/infosync", "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/terror", "//pkg/sessionctx", diff --git a/pkg/statistics/handle/autoanalyze/autoanalyze.go b/pkg/statistics/handle/autoanalyze/autoanalyze.go index 686a87124219a..4c88277bc74f6 100644 --- 
a/pkg/statistics/handle/autoanalyze/autoanalyze.go +++ b/pkg/statistics/handle/autoanalyze/autoanalyze.go @@ -27,7 +27,8 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/sysproctrack" @@ -400,7 +401,7 @@ func RandomPickOneTableAndTryAutoAnalyze( continue } - tbls, err := is.SchemaTableInfos(context.Background(), model.NewCIStr(db)) + tbls, err := is.SchemaTableInfos(context.Background(), pmodel.NewCIStr(db)) terror.Log(err) // We shuffle dbs and tbls so that the order of iterating tables is random. If the order is fixed and the auto // analyze job of one table fails for some reason, it may always analyze the same table and fail again and again diff --git a/pkg/statistics/handle/autoanalyze/exec/BUILD.bazel b/pkg/statistics/handle/autoanalyze/exec/BUILD.bazel index 1ef6999f7006f..a38903f35db5a 100644 --- a/pkg/statistics/handle/autoanalyze/exec/BUILD.bazel +++ b/pkg/statistics/handle/autoanalyze/exec/BUILD.bazel @@ -7,7 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/metrics", - "//pkg/parser/ast", + "//pkg/planner/core/resolve", "//pkg/sessionctx", "//pkg/sessionctx/sysproctrack", "//pkg/sessionctx/variable", diff --git a/pkg/statistics/handle/autoanalyze/exec/exec.go b/pkg/statistics/handle/autoanalyze/exec/exec.go index 1430a1fc7fd2f..9b4d3ac82c448 100644 --- a/pkg/statistics/handle/autoanalyze/exec/exec.go +++ b/pkg/statistics/handle/autoanalyze/exec/exec.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/sessionctx" 
"github.com/pingcap/tidb/pkg/sessionctx/sysproctrack" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -80,7 +80,7 @@ func RunAnalyzeStmt( statsVer int, sql string, params ...any, -) ([]chunk.Row, []*ast.ResultField, error) { +) ([]chunk.Row, []*resolve.ResultField, error) { pruneMode := sctx.GetSessionVars().PartitionPruneMode.Load() analyzeSnapshot := sctx.GetSessionVars().EnableAnalyzeSnapshot autoAnalyzeTracker := statsutil.NewAutoAnalyzeTracker(sysProcTracker.Track, sysProcTracker.UnTrack) diff --git a/pkg/statistics/handle/autoanalyze/refresher/BUILD.bazel b/pkg/statistics/handle/autoanalyze/refresher/BUILD.bazel index 78c21b268404d..95a50ec77d325 100644 --- a/pkg/statistics/handle/autoanalyze/refresher/BUILD.bazel +++ b/pkg/statistics/handle/autoanalyze/refresher/BUILD.bazel @@ -7,7 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/infoschema", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/sessionctx", "//pkg/sessionctx/sysproctrack", "//pkg/sessionctx/variable", @@ -33,6 +33,7 @@ go_test( shard_count = 14, deps = [ ":refresher", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/statistics", "//pkg/statistics/handle/autoanalyze/priorityqueue", diff --git a/pkg/statistics/handle/autoanalyze/refresher/refresher.go b/pkg/statistics/handle/autoanalyze/refresher/refresher.go index 5fe65236e360f..eeba01d57bfc0 100644 --- a/pkg/statistics/handle/autoanalyze/refresher/refresher.go +++ b/pkg/statistics/handle/autoanalyze/refresher/refresher.go @@ -19,7 +19,7 @@ import ( "time" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/sysproctrack" "github.com/pingcap/tidb/pkg/sessionctx/variable" diff --git a/pkg/statistics/handle/autoanalyze/refresher/refresher_test.go b/pkg/statistics/handle/autoanalyze/refresher/refresher_test.go index 15850ae346166..aee766fd47450 100644 --- 
a/pkg/statistics/handle/autoanalyze/refresher/refresher_test.go +++ b/pkg/statistics/handle/autoanalyze/refresher/refresher_test.go @@ -21,7 +21,8 @@ import ( "testing" "time" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/statistics/handle/autoanalyze/priorityqueue" "github.com/pingcap/tidb/pkg/statistics/handle/autoanalyze/refresher" @@ -144,12 +145,12 @@ func TestIgnoreTinyTable(t *testing.T) { require.NoError(t, handle.DumpStatsDeltaToKV(true)) require.NoError(t, handle.Update(context.Background(), dom.InfoSchema())) // Make sure table stats are not pseudo. - tbl1, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) pid1 := tbl1.Meta().GetPartitionInfo().Definitions[1].ID tblStats1 := handle.GetPartitionStats(tbl1.Meta(), pid1) require.False(t, tblStats1.Pseudo) - tbl2, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tbl2, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) pid2 := tbl2.Meta().GetPartitionInfo().Definitions[1].ID tblStats2 := handle.GetPartitionStats(tbl2.Meta(), pid2) @@ -203,14 +204,14 @@ func TestAnalyzeHighestPriorityTables(t *testing.T) { require.NoError(t, handle.DumpStatsDeltaToKV(true)) require.NoError(t, handle.Update(context.Background(), dom.InfoSchema())) // The table is analyzed. 
- tbl1, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) pid1 := tbl1.Meta().GetPartitionInfo().Definitions[1].ID tblStats1 := handle.GetPartitionStats(tbl1.Meta(), pid1) require.Equal(t, int64(0), tblStats1.ModifyCount) require.Equal(t, int64(12), tblStats1.RealtimeCount) // t2 is not analyzed. - tbl2, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tbl2, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) pid2 := tbl2.Meta().GetPartitionInfo().Definitions[1].ID tblStats2 := handle.GetPartitionStats(tbl2.Meta(), pid2) @@ -265,14 +266,14 @@ func TestAnalyzeHighestPriorityTablesConcurrently(t *testing.T) { require.NoError(t, handle.DumpStatsDeltaToKV(true)) require.NoError(t, handle.Update(context.Background(), dom.InfoSchema())) // Check if t1 and t2 are analyzed (they should be, as they have more new data). 
- tbl1, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) pid1 := tbl1.Meta().GetPartitionInfo().Definitions[1].ID tblStats1 := handle.GetPartitionStats(tbl1.Meta(), pid1) require.Equal(t, int64(0), tblStats1.ModifyCount) require.Equal(t, int64(12), tblStats1.RealtimeCount) - tbl2, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tbl2, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) pid2 := tbl2.Meta().GetPartitionInfo().Definitions[1].ID tblStats2 := handle.GetPartitionStats(tbl2.Meta(), pid2) @@ -280,7 +281,7 @@ func TestAnalyzeHighestPriorityTablesConcurrently(t *testing.T) { require.Equal(t, int64(8), tblStats2.RealtimeCount) // t3 should not be analyzed yet, as it has the least new data. - tbl3, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t3")) + tbl3, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t3")) require.NoError(t, err) pid3 := tbl3.Meta().GetPartitionInfo().Definitions[1].ID tblStats3 := handle.GetPartitionStats(tbl3.Meta(), pid3) @@ -316,7 +317,7 @@ func TestAnalyzeHighestPriorityTablesWithFailedAnalysis(t *testing.T) { r.AnalyzeHighestPriorityTables() // The table is not analyzed. 
is := dom.InfoSchema() - tbl1, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl1, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) pid1 := tbl1.Meta().GetPartitionInfo().Definitions[0].ID tblStats1 := handle.GetPartitionStats(tbl1.Meta(), pid1) @@ -333,7 +334,7 @@ func TestAnalyzeHighestPriorityTablesWithFailedAnalysis(t *testing.T) { }, } r.Jobs.Push(job1) - tbl2, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tbl2, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) job2 := &priorityqueue.NonPartitionedTableAnalysisJob{ TableID: tbl2.Meta().ID, @@ -553,7 +554,7 @@ func TestCheckIndexesNeedAnalyze(t *testing.T) { Indices: []*model.IndexInfo{ { ID: 1, - Name: model.NewCIStr("index1"), + Name: pmodel.NewCIStr("index1"), State: model.StatePublic, }, }, @@ -567,7 +568,7 @@ func TestCheckIndexesNeedAnalyze(t *testing.T) { Indices: []*model.IndexInfo{ { ID: 1, - Name: model.NewCIStr("index1"), + Name: pmodel.NewCIStr("index1"), State: model.StatePublic, }, }, @@ -623,7 +624,7 @@ func TestCalculateIndicatorsForPartitions(t *testing.T) { Indices: []*model.IndexInfo{ { ID: 1, - Name: model.NewCIStr("index1"), + Name: pmodel.NewCIStr("index1"), State: model.StatePublic, }, }, @@ -661,11 +662,11 @@ func TestCalculateIndicatorsForPartitions(t *testing.T) { defs: []model.PartitionDefinition{ { ID: 1, - Name: model.NewCIStr("p0"), + Name: pmodel.NewCIStr("p0"), }, { ID: 2, - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), }, }, autoAnalyzeRatio: 0.5, @@ -681,7 +682,7 @@ func TestCalculateIndicatorsForPartitions(t *testing.T) { Indices: []*model.IndexInfo{ { ID: 1, - Name: model.NewCIStr("index1"), + Name: pmodel.NewCIStr("index1"), State: model.StatePublic, }, }, @@ -743,11 +744,11 @@ func TestCalculateIndicatorsForPartitions(t *testing.T) { defs: 
[]model.PartitionDefinition{ { ID: 1, - Name: model.NewCIStr("p0"), + Name: pmodel.NewCIStr("p0"), }, { ID: 2, - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), }, }, autoAnalyzeRatio: 0.5, @@ -763,7 +764,7 @@ func TestCalculateIndicatorsForPartitions(t *testing.T) { Indices: []*model.IndexInfo{ { ID: 1, - Name: model.NewCIStr("index1"), + Name: pmodel.NewCIStr("index1"), State: model.StatePublic, }, }, @@ -825,11 +826,11 @@ func TestCalculateIndicatorsForPartitions(t *testing.T) { defs: []model.PartitionDefinition{ { ID: 1, - Name: model.NewCIStr("p0"), + Name: pmodel.NewCIStr("p0"), }, { ID: 2, - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), }, }, autoAnalyzeRatio: 0.5, @@ -869,12 +870,12 @@ func TestCheckNewlyAddedIndexesNeedAnalyzeForPartitionedTable(t *testing.T) { Indices: []*model.IndexInfo{ { ID: 1, - Name: model.NewCIStr("index1"), + Name: pmodel.NewCIStr("index1"), State: model.StatePublic, }, { ID: 2, - Name: model.NewCIStr("index2"), + Name: pmodel.NewCIStr("index2"), State: model.StatePublic, }, }, diff --git a/pkg/statistics/handle/bootstrap.go b/pkg/statistics/handle/bootstrap.go index 936f4f34793a2..89fe1c019ee2e 100644 --- a/pkg/statistics/handle/bootstrap.go +++ b/pkg/statistics/handle/bootstrap.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/statistics/handle/cache/BUILD.bazel b/pkg/statistics/handle/cache/BUILD.bazel index ee1c0d9a039bf..b8953e61f8ba8 100644 --- a/pkg/statistics/handle/cache/BUILD.bazel +++ b/pkg/statistics/handle/cache/BUILD.bazel @@ -12,8 +12,8 @@ go_library( deps = [ "//pkg/config", "//pkg/infoschema", + "//pkg/meta/model", "//pkg/metrics", - "//pkg/parser/model", 
"//pkg/sessionctx", "//pkg/sessionctx/variable", "//pkg/statistics", diff --git a/pkg/statistics/handle/cache/internal/testutil/BUILD.bazel b/pkg/statistics/handle/cache/internal/testutil/BUILD.bazel index fbe51e74eaa7f..bca31c934cc04 100644 --- a/pkg/statistics/handle/cache/internal/testutil/BUILD.bazel +++ b/pkg/statistics/handle/cache/internal/testutil/BUILD.bazel @@ -6,7 +6,7 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/statistics/handle/cache/internal/testutil", visibility = ["//pkg/statistics/handle/cache:__subpackages__"], deps = [ - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", "//pkg/statistics", "//pkg/types", diff --git a/pkg/statistics/handle/cache/internal/testutil/testutil.go b/pkg/statistics/handle/cache/internal/testutil/testutil.go index 19ba8b31dc702..6578a660cecc2 100644 --- a/pkg/statistics/handle/cache/internal/testutil/testutil.go +++ b/pkg/statistics/handle/cache/internal/testutil/testutil.go @@ -15,7 +15,7 @@ package testutil import ( - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/statistics/handle/cache/stats_table_row_cache.go b/pkg/statistics/handle/cache/stats_table_row_cache.go index 5d7b3bc8b7de5..9b4360282b2ac 100644 --- a/pkg/statistics/handle/cache/stats_table_row_cache.go +++ b/pkg/statistics/handle/cache/stats_table_row_cache.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics/handle/util" "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/statistics/handle/ddl/BUILD.bazel b/pkg/statistics/handle/ddl/BUILD.bazel index 9718a439cee76..b833cf1204fc0 100644 --- a/pkg/statistics/handle/ddl/BUILD.bazel +++ b/pkg/statistics/handle/ddl/BUILD.bazel @@ -13,7 +13,7 @@ 
go_library( visibility = ["//visibility:public"], deps = [ "//pkg/infoschema", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/sessionctx", "//pkg/sessionctx/variable", "//pkg/statistics/handle/lockstats", @@ -33,6 +33,7 @@ go_test( flaky = True, shard_count = 27, deps = [ + "//pkg/meta/model", "//pkg/parser/model", "//pkg/planner/cardinality", "//pkg/statistics/handle/util", diff --git a/pkg/statistics/handle/ddl/ddl.go b/pkg/statistics/handle/ddl/ddl.go index 3a2278f0fa8cd..6b70f73968cb1 100644 --- a/pkg/statistics/handle/ddl/ddl.go +++ b/pkg/statistics/handle/ddl/ddl.go @@ -16,7 +16,7 @@ package ddl import ( "github.com/pingcap/errors" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/statistics/handle/lockstats" diff --git a/pkg/statistics/handle/ddl/ddl_test.go b/pkg/statistics/handle/ddl/ddl_test.go index 38673c83a0ec8..df036e1d9b499 100644 --- a/pkg/statistics/handle/ddl/ddl_test.go +++ b/pkg/statistics/handle/ddl/ddl_test.go @@ -19,7 +19,8 @@ import ( "fmt" "testing" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/statistics/handle/util" "github.com/pingcap/tidb/pkg/testkit" @@ -35,7 +36,7 @@ func TestDDLAfterLoad(t *testing.T) { testKit.MustExec("create table t (c1 int, c2 int, index idx(c1, c2))") testKit.MustExec("analyze table t") is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := tbl.Meta() statsTbl := do.StatsHandle().GetTableStats(tableInfo) @@ -50,7 +51,7 @@ func TestDDLAfterLoad(t *testing.T) { // add column 
testKit.MustExec("alter table t add column c10 int") is = do.InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo = tbl.Meta() @@ -67,7 +68,7 @@ func TestDDLTable(t *testing.T) { testKit.MustExec("use test") testKit.MustExec("create table t (c1 int, c2 int)") is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := tbl.Meta() h := do.StatsHandle() @@ -79,7 +80,7 @@ func TestDDLTable(t *testing.T) { testKit.MustExec("create table t1 (c1 int, c2 int, index idx(c1))") is = do.InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) tableInfo = tbl.Meta() err = h.HandleDDLEvent(<-h.DDLEventCh()) @@ -92,7 +93,7 @@ func TestDDLTable(t *testing.T) { // https://github.com/pingcap/tidb/issues/53652 testKit.MustExec("create table t_parent (id int primary key)") is = do.InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t_parent")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t_parent")) require.NoError(t, err) tableInfo = tbl.Meta() err = h.HandleDDLEvent(<-h.DDLEventCh()) @@ -103,7 +104,7 @@ func TestDDLTable(t *testing.T) { testKit.MustExec("create table t_child (id int primary key, pid int, foreign key (pid) references t_parent(id) on delete cascade on update cascade);") is = do.InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t_child")) + tbl, err = 
is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t_child")) require.NoError(t, err) tableInfo = tbl.Meta() err = h.HandleDDLEvent(<-h.DDLEventCh()) @@ -120,7 +121,7 @@ func TestCreateASystemTable(t *testing.T) { // Test create a system table. testKit.MustExec("create table mysql.test (c1 int, c2 int)") is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("mysql"), model.NewCIStr("test")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("mysql"), pmodel.NewCIStr("test")) require.NoError(t, err) tableInfo := tbl.Meta() h := do.StatsHandle() @@ -139,7 +140,7 @@ func TestTruncateASystemTable(t *testing.T) { testKit.MustExec("create table mysql.test (c1 int, c2 int)") testKit.MustExec("truncate table mysql.test") is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("mysql"), model.NewCIStr("test")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("mysql"), pmodel.NewCIStr("test")) require.NoError(t, err) tableInfo := tbl.Meta() h := do.StatsHandle() @@ -159,7 +160,7 @@ func TestDropASystemTable(t *testing.T) { // Test drop a system table. 
testKit.MustExec("create table mysql.test (c1 int, c2 int)") is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("mysql"), model.NewCIStr("test")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("mysql"), pmodel.NewCIStr("test")) require.NoError(t, err) tableInfo := tbl.Meta() tableID := tableInfo.ID @@ -182,7 +183,7 @@ func TestAddColumnToASystemTable(t *testing.T) { testKit.MustExec("create table mysql.test (c1 int, c2 int)") testKit.MustExec("alter table mysql.test add column c3 int") is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("mysql"), model.NewCIStr("test")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("mysql"), pmodel.NewCIStr("test")) require.NoError(t, err) tableInfo := tbl.Meta() h := do.StatsHandle() @@ -205,7 +206,7 @@ func TestModifyColumnOfASystemTable(t *testing.T) { testKit.MustExec("insert into mysql.test values ('1',2)") testKit.MustExec("alter table mysql.test modify column c1 int") is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("mysql"), model.NewCIStr("test")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("mysql"), pmodel.NewCIStr("test")) require.NoError(t, err) tableInfo := tbl.Meta() h := do.StatsHandle() @@ -227,7 +228,7 @@ func TestAddNewPartitionToASystemTable(t *testing.T) { // Add partition p1. testKit.MustExec("alter table mysql.test add partition (partition p1 values less than (11))") is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("mysql"), model.NewCIStr("test")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("mysql"), pmodel.NewCIStr("test")) require.NoError(t, err) tableInfo := tbl.Meta() h := do.StatsHandle() @@ -256,7 +257,7 @@ func TestDropPartitionOfASystemTable(t *testing.T) { // Drop partition p1. 
testKit.MustExec("alter table mysql.test drop partition p1") is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("mysql"), model.NewCIStr("test")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("mysql"), pmodel.NewCIStr("test")) require.NoError(t, err) tableInfo := tbl.Meta() // Find the drop partition event. @@ -296,7 +297,7 @@ func TestExchangePartitionWithASystemTable(t *testing.T) { require.NoError(t, err) is := do.InfoSchema() require.Nil(t, h.Update(context.Background(), is)) - tbl, err := is.TableByName(context.Background(), model.NewCIStr("mysql"), model.NewCIStr("test")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("mysql"), pmodel.NewCIStr("test")) require.NoError(t, err) tableInfo := tbl.Meta() require.NoError(t, err) @@ -315,7 +316,7 @@ func TestRemovePartitioningOfASystemTable(t *testing.T) { // Remove partitioning. testKit.MustExec("alter table mysql.test remove partitioning") is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("mysql"), model.NewCIStr("test")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("mysql"), pmodel.NewCIStr("test")) require.NoError(t, err) tableInfo := tbl.Meta() // Find the remove partitioning event. @@ -337,7 +338,7 @@ func TestTruncateAPartitionOfASystemTable(t *testing.T) { // Truncate partition p1. testKit.MustExec("alter table mysql.test truncate partition p1") is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("mysql"), model.NewCIStr("test")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("mysql"), pmodel.NewCIStr("test")) require.NoError(t, err) tableInfo := tbl.Meta() // Find the truncate partition event. 
@@ -361,7 +362,7 @@ func TestTruncateTable(t *testing.T) { testKit.MustExec("use test") testKit.MustExec("create table t (c1 int, c2 int, index idx(c1, c2))") is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := tbl.Meta() h := do.StatsHandle() @@ -391,7 +392,7 @@ func TestTruncateTable(t *testing.T) { // Get new table info. is = do.InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) newTableInfo := tbl.Meta() // Get new added table's stats meta. @@ -431,7 +432,7 @@ func TestTruncateAPartitionedTable(t *testing.T) { testKit.MustExec("analyze table t") is := do.InfoSchema() tbl, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo := tbl.Meta() @@ -464,7 +465,7 @@ func TestTruncateAPartitionedTable(t *testing.T) { // Get new table info. is = do.InfoSchema() - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) newTableInfo := tbl.Meta() // Get all new added partitions ID. 
@@ -504,7 +505,7 @@ func TestDDLHistogram(t *testing.T) { require.NoError(t, err) is := do.InfoSchema() require.Nil(t, h.Update(context.Background(), is)) - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := tbl.Meta() statsTbl := do.StatsHandle().GetTableStats(tableInfo) @@ -519,7 +520,7 @@ func TestDDLHistogram(t *testing.T) { require.NoError(t, err) is = do.InfoSchema() require.Nil(t, h.Update(context.Background(), is)) - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo = tbl.Meta() statsTbl = do.StatsHandle().GetTableStats(tableInfo) @@ -539,7 +540,7 @@ func TestDDLHistogram(t *testing.T) { require.NoError(t, err) is = do.InfoSchema() require.Nil(t, h.Update(context.Background(), is)) - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo = tbl.Meta() statsTbl = do.StatsHandle().GetTableStats(tableInfo) @@ -552,7 +553,7 @@ func TestDDLHistogram(t *testing.T) { require.NoError(t, err) is = do.InfoSchema() require.Nil(t, h.Update(context.Background(), is)) - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo = tbl.Meta() statsTbl = do.StatsHandle().GetTableStats(tableInfo) @@ -566,20 +567,20 @@ func TestDDLHistogram(t *testing.T) { require.NoError(t, err) is = do.InfoSchema() require.Nil(t, h.Update(context.Background(), is)) - tbl, err = is.TableByName(context.Background(), 
model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo = tbl.Meta() statsTbl = do.StatsHandle().GetTableStats(tableInfo) require.False(t, statsTbl.Pseudo) testKit.MustExec("create index i on t(c2, c1)") - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo = tbl.Meta() statsTbl = do.StatsHandle().GetTableStats(tableInfo) require.False(t, statsTbl.ColAndIdxExistenceMap.HasAnalyzed(2, true)) testKit.MustExec("analyze table t") - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo = tbl.Meta() statsTbl = do.StatsHandle().GetTableStats(tableInfo) @@ -614,7 +615,7 @@ PARTITION BY RANGE ( a ) ( )` testKit.MustExec(createTable) is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := tbl.Meta() err = h.HandleDDLEvent(<-h.DDLEventCh()) @@ -633,7 +634,7 @@ PARTITION BY RANGE ( a ) ( require.NoError(t, err) is = do.InfoSchema() require.Nil(t, h.Update(context.Background(), is)) - tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo = tbl.Meta() pi = tableInfo.GetPartitionInfo() @@ -646,7 +647,7 @@ PARTITION BY RANGE ( a ) ( addPartition := "alter table t add partition (partition p4 values less than (26))" testKit.MustExec(addPartition) is = do.InfoSchema() - 
tbl, err = is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo = tbl.Meta() err = h.HandleDDLEvent(<-h.DDLEventCh()) @@ -684,7 +685,7 @@ func TestReorgPartitions(t *testing.T) { testKit.MustExec("analyze table t") is := do.InfoSchema() tbl, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo := tbl.Meta() @@ -741,7 +742,7 @@ func TestIncreasePartitionCountOfHashPartitionTable(t *testing.T) { testKit.MustExec("analyze table t") is := do.InfoSchema() tbl, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo := tbl.Meta() @@ -775,7 +776,7 @@ func TestIncreasePartitionCountOfHashPartitionTable(t *testing.T) { // Check new partitions are added. is = do.InfoSchema() tbl, err = is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo = tbl.Meta() @@ -809,7 +810,7 @@ func TestDecreasePartitionCountOfHashPartitionTable(t *testing.T) { testKit.MustExec("analyze table t") is := do.InfoSchema() tbl, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo := tbl.Meta() @@ -849,7 +850,7 @@ func TestDecreasePartitionCountOfHashPartitionTable(t *testing.T) { // Check new partitions are added. 
is = do.InfoSchema() tbl, err = is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo = tbl.Meta() @@ -898,7 +899,7 @@ func TestTruncateAPartition(t *testing.T) { testKit.MustExec("analyze table t") is := do.InfoSchema() tbl, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo := tbl.Meta() @@ -960,7 +961,7 @@ func TestTruncateAHashPartition(t *testing.T) { testKit.MustExec("analyze table t") is := do.InfoSchema() tbl, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo := tbl.Meta() @@ -1028,7 +1029,7 @@ func TestTruncatePartitions(t *testing.T) { testKit.MustExec("analyze table t") is := do.InfoSchema() tbl, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo := tbl.Meta() @@ -1099,7 +1100,7 @@ func TestDropAPartition(t *testing.T) { testKit.MustExec("analyze table t") is := do.InfoSchema() tbl, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo := tbl.Meta() @@ -1166,7 +1167,7 @@ func TestDropPartitions(t *testing.T) { testKit.MustExec("analyze table t") is := do.InfoSchema() tbl, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo := tbl.Meta() @@ -1244,7 +1245,7 @@ func TestExchangeAPartition(t *testing.T) { testKit.MustExec("analyze table t") is := do.InfoSchema() tbl, err := is.TableByName(context.Background(), - model.NewCIStr("test"), 
model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo := tbl.Meta() @@ -1264,7 +1265,7 @@ func TestExchangeAPartition(t *testing.T) { testKit.MustExec("analyze table t1") is = do.InfoSchema() tbl1, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t1"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t1"), ) require.NoError(t, err) tableInfo1 := tbl1.Meta() @@ -1300,7 +1301,7 @@ func TestExchangeAPartition(t *testing.T) { testKit.MustExec("analyze table t2") is = do.InfoSchema() tbl2, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t2"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t2"), ) require.NoError(t, err) tableInfo2 := tbl2.Meta() @@ -1344,7 +1345,7 @@ func TestExchangeAPartition(t *testing.T) { testKit.MustExec("analyze table t3") is = do.InfoSchema() tbl3, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t3"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t3"), ) require.NoError(t, err) tableInfo3 := tbl3.Meta() @@ -1396,7 +1397,7 @@ func TestRemovePartitioning(t *testing.T) { testKit.MustExec("analyze table t") is := do.InfoSchema() tbl, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo := tbl.Meta() @@ -1432,7 +1433,7 @@ func TestRemovePartitioning(t *testing.T) { // Get new table id after remove partitioning. 
is = do.InfoSchema() tbl, err = is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo = tbl.Meta() @@ -1474,7 +1475,7 @@ func TestAddPartitioning(t *testing.T) { testKit.MustExec("analyze table t") is := do.InfoSchema() tbl, err := is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo := tbl.Meta() @@ -1495,7 +1496,7 @@ func TestAddPartitioning(t *testing.T) { // Get new table id after remove partitioning. is = do.InfoSchema() tbl, err = is.TableByName(context.Background(), - model.NewCIStr("test"), model.NewCIStr("t"), + pmodel.NewCIStr("test"), pmodel.NewCIStr("t"), ) require.NoError(t, err) tableInfo = tbl.Meta() diff --git a/pkg/statistics/handle/ddl/exchange_partition.go b/pkg/statistics/handle/ddl/exchange_partition.go index 2d057e8689071..c09b19c3cc00f 100644 --- a/pkg/statistics/handle/ddl/exchange_partition.go +++ b/pkg/statistics/handle/ddl/exchange_partition.go @@ -17,7 +17,7 @@ package ddl import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics/handle/logutil" "github.com/pingcap/tidb/pkg/statistics/handle/storage" diff --git a/pkg/statistics/handle/ddl/truncate_partition.go b/pkg/statistics/handle/ddl/truncate_partition.go index 3286bfcb9d059..1dc73b61376fb 100644 --- a/pkg/statistics/handle/ddl/truncate_partition.go +++ b/pkg/statistics/handle/ddl/truncate_partition.go @@ -17,7 +17,7 @@ package ddl import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" 
"github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/statistics/handle/lockstats" diff --git a/pkg/statistics/handle/globalstats/BUILD.bazel b/pkg/statistics/handle/globalstats/BUILD.bazel index 7683d826f669e..c6b0b1abdc330 100644 --- a/pkg/statistics/handle/globalstats/BUILD.bazel +++ b/pkg/statistics/handle/globalstats/BUILD.bazel @@ -12,8 +12,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser/ast", - "//pkg/parser/model", "//pkg/sessionctx", "//pkg/sessionctx/stmtctx", "//pkg/statistics", diff --git a/pkg/statistics/handle/globalstats/global_stats.go b/pkg/statistics/handle/globalstats/global_stats.go index 1e9fbdc201d85..9ba5544994f7e 100644 --- a/pkg/statistics/handle/globalstats/global_stats.go +++ b/pkg/statistics/handle/globalstats/global_stats.go @@ -19,8 +19,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics" statslogutil "github.com/pingcap/tidb/pkg/statistics/handle/logutil" diff --git a/pkg/statistics/handle/globalstats/global_stats_async.go b/pkg/statistics/handle/globalstats/global_stats_async.go index 2ef30c6cdba24..5536620674a37 100644 --- a/pkg/statistics/handle/globalstats/global_stats_async.go +++ b/pkg/statistics/handle/globalstats/global_stats_async.go @@ -23,8 +23,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/statistics" diff --git a/pkg/statistics/handle/handle.go b/pkg/statistics/handle/handle.go 
index 9b62f8b789d1b..3350d6b983dec 100644 --- a/pkg/statistics/handle/handle.go +++ b/pkg/statistics/handle/handle.go @@ -17,7 +17,7 @@ package handle import ( "time" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/sysproctrack" "github.com/pingcap/tidb/pkg/statistics" diff --git a/pkg/statistics/handle/handletest/lockstats/BUILD.bazel b/pkg/statistics/handle/handletest/lockstats/BUILD.bazel index 8623e0b7fb6f2..f97197ce34b67 100644 --- a/pkg/statistics/handle/handletest/lockstats/BUILD.bazel +++ b/pkg/statistics/handle/handletest/lockstats/BUILD.bazel @@ -14,6 +14,7 @@ go_test( "//pkg/config", "//pkg/domain", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/statistics", "//pkg/testkit", diff --git a/pkg/statistics/handle/handletest/lockstats/lock_partition_stats_test.go b/pkg/statistics/handle/handletest/lockstats/lock_partition_stats_test.go index 3b184012efa24..7f3ff5ff660c7 100644 --- a/pkg/statistics/handle/handletest/lockstats/lock_partition_stats_test.go +++ b/pkg/statistics/handle/handletest/lockstats/lock_partition_stats_test.go @@ -23,7 +23,8 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" @@ -537,7 +538,7 @@ func setupTestEnvironmentWithPartitionedTableT(t *testing.T) (kv.Storage, *domai tk.MustExec("drop table if exists t") tk.MustExec("create table t(a int, b varchar(10), index idx_b (b)) partition by range(a) (partition p0 values less than (10), partition p1 values less than (20))") tk.MustExec("analyze table test.t") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, 
err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.Nil(t, err) return store, dom, tk, tbl.Meta() diff --git a/pkg/statistics/handle/handletest/lockstats/lock_table_stats_test.go b/pkg/statistics/handle/handletest/lockstats/lock_table_stats_test.go index 1bcbce207b270..69854b2dc7ba8 100644 --- a/pkg/statistics/handle/handletest/lockstats/lock_table_stats_test.go +++ b/pkg/statistics/handle/handletest/lockstats/lock_table_stats_test.go @@ -24,7 +24,8 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" @@ -167,9 +168,9 @@ func TestLockAndUnlockTablesStats(t *testing.T) { tk.MustExec("create table t1(a int, b varchar(10), index idx_b (b))") tk.MustExec("create table t2(a int, b varchar(10), index idx_b (b))") tk.MustExec("analyze table test.t1, test.t2") - tbl1, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl1, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.Nil(t, err) - tbl2, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tbl2, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.Nil(t, err) handle := domain.GetDomain(tk.Session()).StatsHandle() @@ -360,7 +361,7 @@ func setupTestEnvironmentWithTableT(t *testing.T) (kv.Storage, *domain.Domain, * tk.MustExec("drop table if exists t") tk.MustExec("create table t(a int, b varchar(10), index idx_b (b))") tk.MustExec("analyze table test.t") - tbl, err := 
dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.Nil(t, err) return store, dom, tk, tbl.Meta() diff --git a/pkg/statistics/handle/history/BUILD.bazel b/pkg/statistics/handle/history/BUILD.bazel index ab62dd729d37a..1a1a44f789cea 100644 --- a/pkg/statistics/handle/history/BUILD.bazel +++ b/pkg/statistics/handle/history/BUILD.bazel @@ -6,7 +6,7 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/statistics/handle/history", visibility = ["//visibility:public"], deps = [ - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/sessionctx", "//pkg/statistics/handle/cache", "//pkg/statistics/handle/storage", diff --git a/pkg/statistics/handle/history/history_stats.go b/pkg/statistics/handle/history/history_stats.go index 03171f8e6d422..54e109b78b2ea 100644 --- a/pkg/statistics/handle/history/history_stats.go +++ b/pkg/statistics/handle/history/history_stats.go @@ -18,7 +18,7 @@ import ( "time" "github.com/pingcap/errors" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics/handle/cache" "github.com/pingcap/tidb/pkg/statistics/handle/storage" diff --git a/pkg/statistics/handle/storage/BUILD.bazel b/pkg/statistics/handle/storage/BUILD.bazel index b89e2135e4466..f8a6f26b0b03c 100644 --- a/pkg/statistics/handle/storage/BUILD.bazel +++ b/pkg/statistics/handle/storage/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "//pkg/config", "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/model", "//pkg/parser/mysql", @@ -61,6 +62,7 @@ go_test( deps = [ ":storage", "//pkg/domain", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/planner/cardinality", "//pkg/sessionctx/variable", diff --git a/pkg/statistics/handle/storage/dump_test.go 
b/pkg/statistics/handle/storage/dump_test.go index 82bf11d1b9dac..08470b647b9df 100644 --- a/pkg/statistics/handle/storage/dump_test.go +++ b/pkg/statistics/handle/storage/dump_test.go @@ -25,7 +25,8 @@ import ( "testing" "github.com/pingcap/tidb/pkg/domain" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/statistics/handle/internal" "github.com/pingcap/tidb/pkg/statistics/handle/storage" @@ -96,7 +97,7 @@ func TestConversion(t *testing.T) { require.Nil(t, h.DumpStatsDeltaToKV(true)) require.Nil(t, h.Update(context.Background(), is)) - tableInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tableInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) jsonTbl, err := h.DumpStatsToJSON("test", tableInfo.Meta(), nil, true) require.NoError(t, err) @@ -121,7 +122,7 @@ func getStatsJSON(t *testing.T, dom *domain.Domain, db, tableName string) *handl is := dom.InfoSchema() h := dom.StatsHandle() require.Nil(t, h.Update(context.Background(), is)) - table, err := is.TableByName(context.Background(), model.NewCIStr(db), model.NewCIStr(tableName)) + table, err := is.TableByName(context.Background(), pmodel.NewCIStr(db), pmodel.NewCIStr(tableName)) require.NoError(t, err) tableInfo := table.Meta() jsonTbl, err := h.DumpStatsToJSON("test", tableInfo, nil, true) @@ -133,7 +134,7 @@ func persistStats(ctx context.Context, t *testing.T, dom *domain.Domain, db, tab is := dom.InfoSchema() h := dom.StatsHandle() require.Nil(t, h.Update(context.Background(), is)) - table, err := is.TableByName(context.Background(), model.NewCIStr(db), model.NewCIStr(tableName)) + table, err := is.TableByName(context.Background(), pmodel.NewCIStr(db), pmodel.NewCIStr(tableName)) require.NoError(t, err) tableInfo := table.Meta() err = 
h.PersistStatsBySnapshot(ctx, "test", tableInfo, math.MaxUint64, persist) @@ -207,7 +208,7 @@ func TestLoadPartitionStats(t *testing.T) { tk.MustExec("insert into t values " + strings.Join(vals, ",")) tk.MustExec("analyze table t") - table, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := table.Meta() jsonTbl, err := dom.StatsHandle().DumpStatsToJSON("test", tableInfo, nil, true) @@ -253,7 +254,7 @@ func TestLoadPredicateColumns(t *testing.T) { require.NoError(t, h.DumpColStatsUsageToKV()) tk.MustExec("analyze table t") - table, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := table.Meta() jsonTbl, err := h.DumpStatsToJSON("test", tableInfo, nil, true) @@ -299,7 +300,7 @@ func TestLoadPartitionStatsErrPanic(t *testing.T) { tk.MustExec("insert into t values " + strings.Join(vals, ",")) tk.MustExec("analyze table t") - table, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := table.Meta() jsonTbl, err := dom.StatsHandle().DumpStatsToJSON("test", tableInfo, nil, true) @@ -338,7 +339,7 @@ PARTITION BY RANGE ( a ) ( h := dom.StatsHandle() require.Nil(t, h.Update(context.Background(), is)) - table, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := table.Meta() jsonTbl, err := h.DumpStatsToJSON("test", tableInfo, nil, true) 
@@ -374,7 +375,7 @@ func TestDumpAlteredTable(t *testing.T) { tk.MustExec("create table t(a int, b int)") tk.MustExec("analyze table t") tk.MustExec("alter table t drop column a") - table, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) _, err = h.DumpStatsToJSON("test", table.Meta(), nil, true) require.NoError(t, err) @@ -391,7 +392,7 @@ func TestDumpCMSketchWithTopN(t *testing.T) { testKit.MustExec("analyze table t") is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := tbl.Meta() h := dom.StatsHandle() @@ -433,7 +434,7 @@ func TestDumpPseudoColumns(t *testing.T) { testKit.MustExec("analyze table t index idx") is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) h := dom.StatsHandle() _, err = h.DumpStatsToJSON("test", tbl.Meta(), nil, true) @@ -454,7 +455,7 @@ func TestDumpExtendedStats(t *testing.T) { tk.MustExec("analyze table t") is := dom.InfoSchema() - tableInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tableInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tbl := h.GetTableStats(tableInfo.Meta()) jsonTbl, err := h.DumpStatsToJSON("test", tableInfo.Meta(), nil, true) @@ -491,7 +492,7 @@ func TestDumpVer2Stats(t *testing.T) { tk.MustExec("analyze table t with 2 topn") h := dom.StatsHandle() is := dom.InfoSchema() - tableInfo, err := is.TableByName(context.Background(), 
model.NewCIStr("test"), model.NewCIStr("t")) + tableInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) storageTbl, err := h.TableStatsFromStorage(tableInfo.Meta(), tableInfo.Meta().ID, false, 0) @@ -543,7 +544,7 @@ func TestLoadStatsForNewCollation(t *testing.T) { tk.MustExec("analyze table t with 2 topn") h := dom.StatsHandle() is := dom.InfoSchema() - tableInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tableInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) storageTbl, err := h.TableStatsFromStorage(tableInfo.Meta(), tableInfo.Meta().ID, false, 0) @@ -594,7 +595,7 @@ func TestJSONTableToBlocks(t *testing.T) { tk.MustExec("analyze table t with 2 topn") h := dom.StatsHandle() is := dom.InfoSchema() - tableInfo, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tableInfo, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) dumpJSONTable, err := h.DumpStatsToJSON("test", tableInfo.Meta(), nil, true) @@ -668,7 +669,7 @@ func TestLoadStatsFromOldVersion(t *testing.T) { jsonTbl := &handleutil.JSONTable{} require.NoError(t, json.Unmarshal([]byte(statsJSONFromOldVersion), jsonTbl)) require.NoError(t, h.LoadStatsFromJSON(context.Background(), is, jsonTbl, 0)) - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) statsTbl := h.GetTableStats(tbl.Meta()) statsTbl.ForEachColumnImmutable(func(i int64, col *statistics.Column) bool { diff --git a/pkg/statistics/handle/storage/json.go b/pkg/statistics/handle/storage/json.go index 88529945bde14..b7d4eb1f96816 100644 --- a/pkg/statistics/handle/storage/json.go +++ 
b/pkg/statistics/handle/storage/json.go @@ -21,7 +21,7 @@ import ( "github.com/klauspost/compress/gzip" "github.com/pingcap/errors" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics" diff --git a/pkg/statistics/handle/storage/read.go b/pkg/statistics/handle/storage/read.go index bd189ed9a593d..e101e85e3f03e 100644 --- a/pkg/statistics/handle/storage/read.go +++ b/pkg/statistics/handle/storage/read.go @@ -24,8 +24,8 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics" diff --git a/pkg/statistics/handle/storage/stats_read_writer.go b/pkg/statistics/handle/storage/stats_read_writer.go index 7f9b1cd9553ee..d020d39e690a7 100644 --- a/pkg/statistics/handle/storage/stats_read_writer.go +++ b/pkg/statistics/handle/storage/stats_read_writer.go @@ -25,7 +25,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" @@ -638,7 +639,7 @@ func (s *statsReadWriter) LoadStatsFromJSONConcurrently( // LoadStatsFromJSONNoUpdate will load statistic from JSONTable, and save it to the storage. 
func (s *statsReadWriter) LoadStatsFromJSONNoUpdate(ctx context.Context, is infoschema.InfoSchema, jsonTbl *util.JSONTable, concurrencyForPartition int) error { - table, err := is.TableByName(context.Background(), model.NewCIStr(jsonTbl.DatabaseName), model.NewCIStr(jsonTbl.TableName)) + table, err := is.TableByName(context.Background(), pmodel.NewCIStr(jsonTbl.DatabaseName), pmodel.NewCIStr(jsonTbl.TableName)) if err != nil { return errors.Trace(err) } diff --git a/pkg/statistics/handle/syncload/BUILD.bazel b/pkg/statistics/handle/syncload/BUILD.bazel index 163a56a0d5d66..97a428d3a8323 100644 --- a/pkg/statistics/handle/syncload/BUILD.bazel +++ b/pkg/statistics/handle/syncload/BUILD.bazel @@ -9,8 +9,8 @@ go_library( "//pkg/config", "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/sessionctx", "//pkg/sessionctx/stmtctx", @@ -39,6 +39,7 @@ go_test( deps = [ ":syncload", "//pkg/config", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/sessionctx", "//pkg/sessionctx/stmtctx", diff --git a/pkg/statistics/handle/syncload/stats_syncload.go b/pkg/statistics/handle/syncload/stats_syncload.go index 7eeb6f88efba1..a49e5b707bb8a 100644 --- a/pkg/statistics/handle/syncload/stats_syncload.go +++ b/pkg/statistics/handle/syncload/stats_syncload.go @@ -25,8 +25,8 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" diff --git a/pkg/statistics/handle/syncload/stats_syncload_test.go b/pkg/statistics/handle/syncload/stats_syncload_test.go index 3b759a5c39e78..51cf1ff8d98f6 100644 --- a/pkg/statistics/handle/syncload/stats_syncload_test.go +++ 
b/pkg/statistics/handle/syncload/stats_syncload_test.go @@ -21,7 +21,8 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/config" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/statistics/handle/syncload" @@ -72,7 +73,7 @@ func TestConcurrentLoadHist(t *testing.T) { testKit.MustExec("analyze table t") is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := tbl.Meta() h := dom.StatsHandle() @@ -115,7 +116,7 @@ func TestConcurrentLoadHistTimeout(t *testing.T) { testKit.MustExec("analyze table t") is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := tbl.Meta() h := dom.StatsHandle() @@ -167,7 +168,7 @@ func TestConcurrentLoadHistWithPanicAndFail(t *testing.T) { testKit.MustExec("analyze table t") is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := tbl.Meta() h := dom.StatsHandle() @@ -282,7 +283,7 @@ func TestRetry(t *testing.T) { testKit.MustExec("analyze table t") is := dom.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tableInfo := tbl.Meta() diff --git 
a/pkg/statistics/handle/types/BUILD.bazel b/pkg/statistics/handle/types/BUILD.bazel index df7a6ea2acfa1..997bc1fcd8349 100644 --- a/pkg/statistics/handle/types/BUILD.bazel +++ b/pkg/statistics/handle/types/BUILD.bazel @@ -7,8 +7,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser/ast", - "//pkg/parser/model", "//pkg/sessionctx", "//pkg/sessionctx/stmtctx", "//pkg/statistics", diff --git a/pkg/statistics/handle/types/interfaces.go b/pkg/statistics/handle/types/interfaces.go index 20c724b25a578..9b8aea83c6ba7 100644 --- a/pkg/statistics/handle/types/interfaces.go +++ b/pkg/statistics/handle/types/interfaces.go @@ -20,8 +20,8 @@ import ( "time" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/statistics" diff --git a/pkg/statistics/handle/usage/BUILD.bazel b/pkg/statistics/handle/usage/BUILD.bazel index 3ea0a7ed2e01d..1f8ea04e0b14c 100644 --- a/pkg/statistics/handle/usage/BUILD.bazel +++ b/pkg/statistics/handle/usage/BUILD.bazel @@ -11,8 +11,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/infoschema", + "//pkg/meta/model", "//pkg/metrics", - "//pkg/parser/model", "//pkg/sessionctx", "//pkg/sessionctx/variable", "//pkg/statistics/handle/storage", @@ -39,6 +39,7 @@ go_test( flaky = True, shard_count = 10, deps = [ + "//pkg/meta/model", "//pkg/parser/model", "//pkg/statistics/handle/usage/indexusage", "//pkg/testkit", diff --git a/pkg/statistics/handle/usage/index_usage.go b/pkg/statistics/handle/usage/index_usage.go index c440d79b46daa..3da412f4106de 100644 --- a/pkg/statistics/handle/usage/index_usage.go +++ b/pkg/statistics/handle/usage/index_usage.go @@ -18,7 +18,7 @@ import ( "context" "github.com/pingcap/tidb/pkg/infoschema" - 
"github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics/handle/usage/indexusage" "github.com/pingcap/tidb/pkg/statistics/handle/util" diff --git a/pkg/statistics/handle/usage/index_usage_integration_test.go b/pkg/statistics/handle/usage/index_usage_integration_test.go index 7ab1f63c81eb3..d9fb767f8fb4a 100644 --- a/pkg/statistics/handle/usage/index_usage_integration_test.go +++ b/pkg/statistics/handle/usage/index_usage_integration_test.go @@ -19,7 +19,8 @@ import ( "fmt" "testing" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/statistics/handle/usage/indexusage" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" @@ -51,7 +52,7 @@ func TestGCIndexUsage(t *testing.T) { c := dom.StatsHandle().NewSessionIndexUsageCollector() is := tk.Session().GetDomainInfoSchema() - db, ok := is.SchemaByName(model.NewCIStr("test")) + db, ok := is.SchemaByName(pmodel.NewCIStr("test")) require.True(t, ok) tblInfos, err := is.SchemaTableInfos(context.Background(), db.Name) diff --git a/pkg/statistics/handle/usage/indexusage/BUILD.bazel b/pkg/statistics/handle/usage/indexusage/BUILD.bazel index d1f9216cba450..731370c58dc67 100644 --- a/pkg/statistics/handle/usage/indexusage/BUILD.bazel +++ b/pkg/statistics/handle/usage/indexusage/BUILD.bazel @@ -6,7 +6,7 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/statistics/handle/usage/indexusage", visibility = ["//visibility:public"], deps = [ - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/statistics/handle/usage/collector", ], ) diff --git a/pkg/statistics/handle/usage/indexusage/collector.go b/pkg/statistics/handle/usage/indexusage/collector.go index 3bb381f4db1a3..b21d9d4aa73cc 100644 --- a/pkg/statistics/handle/usage/indexusage/collector.go +++ 
b/pkg/statistics/handle/usage/indexusage/collector.go @@ -18,7 +18,7 @@ import ( "sync" "time" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/statistics/handle/usage/collector" ) diff --git a/pkg/statistics/handle/usage/predicate_column.go b/pkg/statistics/handle/usage/predicate_column.go index 2531bc23bae92..d179d91138aa3 100644 --- a/pkg/statistics/handle/usage/predicate_column.go +++ b/pkg/statistics/handle/usage/predicate_column.go @@ -17,7 +17,7 @@ package usage import ( "time" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" statstypes "github.com/pingcap/tidb/pkg/statistics/handle/types" "github.com/pingcap/tidb/pkg/statistics/handle/usage/indexusage" diff --git a/pkg/statistics/handle/usage/predicatecolumn/BUILD.bazel b/pkg/statistics/handle/usage/predicatecolumn/BUILD.bazel index d93d212de06b1..34c6efb651ec0 100644 --- a/pkg/statistics/handle/usage/predicatecolumn/BUILD.bazel +++ b/pkg/statistics/handle/usage/predicatecolumn/BUILD.bazel @@ -7,7 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/infoschema", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", "//pkg/sessionctx", "//pkg/statistics", diff --git a/pkg/statistics/handle/usage/predicatecolumn/predicate_column.go b/pkg/statistics/handle/usage/predicatecolumn/predicate_column.go index 7bc88e11bc884..057784993a79c 100644 --- a/pkg/statistics/handle/usage/predicatecolumn/predicate_column.go +++ b/pkg/statistics/handle/usage/predicatecolumn/predicate_column.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics" diff --git 
a/pkg/statistics/handle/usage/session_stats_collect.go b/pkg/statistics/handle/usage/session_stats_collect.go index da783971b4651..928e76756582a 100644 --- a/pkg/statistics/handle/usage/session_stats_collect.go +++ b/pkg/statistics/handle/usage/session_stats_collect.go @@ -23,8 +23,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/statistics/handle/storage" diff --git a/pkg/statistics/handle/util/BUILD.bazel b/pkg/statistics/handle/util/BUILD.bazel index c02f1b21f1a1e..3ebb2632a4585 100644 --- a/pkg/statistics/handle/util/BUILD.bazel +++ b/pkg/statistics/handle/util/BUILD.bazel @@ -16,9 +16,9 @@ go_library( "//pkg/ddl/util", "//pkg/infoschema", "//pkg/kv", - "//pkg/parser/ast", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/terror", + "//pkg/planner/core/resolve", "//pkg/sessionctx", "//pkg/sessionctx/sysproctrack", "//pkg/sessionctx/variable", @@ -46,6 +46,7 @@ go_test( embed = [":util"], flaky = True, deps = [ + "//pkg/meta/model", "//pkg/parser/model", "@com_github_stretchr_testify//require", ], diff --git a/pkg/statistics/handle/util/ddl_event.go b/pkg/statistics/handle/util/ddl_event.go index 9917a451bb663..aed9dcf76edd6 100644 --- a/pkg/statistics/handle/util/ddl_event.go +++ b/pkg/statistics/handle/util/ddl_event.go @@ -19,7 +19,7 @@ import ( ddlutil "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics/handle/logutil" "github.com/pingcap/tidb/pkg/util" diff --git a/pkg/statistics/handle/util/ddl_event_test.go b/pkg/statistics/handle/util/ddl_event_test.go index 
d564bf3312fa1..ac9c953aa4e1d 100644 --- a/pkg/statistics/handle/util/ddl_event_test.go +++ b/pkg/statistics/handle/util/ddl_event_test.go @@ -17,7 +17,8 @@ package util import ( "testing" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) @@ -28,7 +29,7 @@ func TestEventString(t *testing.T) { schemaID: 1, tableInfo: &model.TableInfo{ ID: 1, - Name: model.NewCIStr("Table1"), + Name: pmodel.NewCIStr("Table1"), }, partInfo: &model.PartitionInfo{ Definitions: []model.PartitionDefinition{ @@ -38,7 +39,7 @@ func TestEventString(t *testing.T) { }, oldTableInfo: &model.TableInfo{ ID: 4, - Name: model.NewCIStr("Table2"), + Name: pmodel.NewCIStr("Table2"), }, oldPartInfo: &model.PartitionInfo{ Definitions: []model.PartitionDefinition{ @@ -47,8 +48,8 @@ func TestEventString(t *testing.T) { }, }, columnInfos: []*model.ColumnInfo{ - {ID: 7, Name: model.NewCIStr("Column1")}, - {ID: 8, Name: model.NewCIStr("Column2")}, + {ID: 7, Name: pmodel.NewCIStr("Column1")}, + {ID: 8, Name: pmodel.NewCIStr("Column2")}, }, } diff --git a/pkg/statistics/handle/util/util.go b/pkg/statistics/handle/util/util.go index 0a1aa5db8279a..ef4840ceb8fbb 100644 --- a/pkg/statistics/handle/util/util.go +++ b/pkg/statistics/handle/util/util.go @@ -23,8 +23,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/terror" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/util" @@ -222,7 +222,7 @@ func Exec(sctx sessionctx.Context, sql string, args ...any) (sqlexec.RecordSet, } // ExecRows is a helper function to execute sql and return rows and fields. 
-func ExecRows(sctx sessionctx.Context, sql string, args ...any) (rows []chunk.Row, fields []*ast.ResultField, err error) { +func ExecRows(sctx sessionctx.Context, sql string, args ...any) (rows []chunk.Row, fields []*resolve.ResultField, err error) { if intest.InTest { if v := sctx.Value(mock.RestrictedSQLExecutorKey{}); v != nil { return v.(*mock.MockRestrictedSQLExecutor).ExecRestrictedSQL(StatsCtx, @@ -235,7 +235,7 @@ func ExecRows(sctx sessionctx.Context, sql string, args ...any) (rows []chunk.Ro } // ExecWithOpts is a helper function to execute sql and return rows and fields. -func ExecWithOpts(sctx sessionctx.Context, opts []sqlexec.OptionFuncAlias, sql string, args ...any) (rows []chunk.Row, fields []*ast.ResultField, err error) { +func ExecWithOpts(sctx sessionctx.Context, opts []sqlexec.OptionFuncAlias, sql string, args ...any) (rows []chunk.Row, fields []*resolve.ResultField, err error) { sqlExec := sctx.GetRestrictedSQLExecutor() return sqlExec.ExecRestrictedSQL(StatsCtx, opts, sql, args...) 
} diff --git a/pkg/statistics/histogram_test.go b/pkg/statistics/histogram_test.go index d51b367a3dc46..cf26b1cfd97d1 100644 --- a/pkg/statistics/histogram_test.go +++ b/pkg/statistics/histogram_test.go @@ -19,7 +19,8 @@ import ( "testing" "time" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/codec" @@ -538,7 +539,7 @@ func TestMergeBucketNDV(t *testing.T) { func TestIndexQueryBytes(t *testing.T) { ctx := mock.NewContext() sc := ctx.GetSessionVars().StmtCtx - idx := &Index{Info: &model.IndexInfo{Columns: []*model.IndexColumn{{Name: model.NewCIStr("a"), Offset: 0}}}} + idx := &Index{Info: &model.IndexInfo{Columns: []*model.IndexColumn{{Name: pmodel.NewCIStr("a"), Offset: 0}}}} idx.Histogram = *NewHistogram(0, 15, 0, 0, types.NewFieldType(mysql.TypeBlob), 0, 0) low, err1 := codec.EncodeKey(sc.TimeZone(), nil, types.NewBytesDatum([]byte("0"))) require.NoError(t, err1) diff --git a/pkg/statistics/index.go b/pkg/statistics/index.go index d882b54e03397..c961613241b3e 100644 --- a/pkg/statistics/index.go +++ b/pkg/statistics/index.go @@ -15,7 +15,7 @@ package statistics import ( - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/context" "github.com/pingcap/tidb/pkg/planner/util/debugtrace" diff --git a/pkg/statistics/sample.go b/pkg/statistics/sample.go index 77d444e2163af..8b6e02f8f92e0 100644 --- a/pkg/statistics/sample.go +++ b/pkg/statistics/sample.go @@ -22,9 +22,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" + "github.com/pingcap/tidb/pkg/planner/core/resolve" 
"github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/types" @@ -273,7 +273,7 @@ func (s SampleBuilder) CollectColumnStats() ([]*SampleCollector, *SortedBuilder, } // RowToDatums converts row to datum slice. -func RowToDatums(row chunk.Row, fields []*ast.ResultField) []types.Datum { +func RowToDatums(row chunk.Row, fields []*resolve.ResultField) []types.Datum { datums := make([]types.Datum, len(fields)) for i, f := range fields { datums[i] = row.GetDatum(i, &f.Column.FieldType) diff --git a/pkg/statistics/statistics_test.go b/pkg/statistics/statistics_test.go index ca192db2f0d24..566c72eff2aee 100644 --- a/pkg/statistics/statistics_test.go +++ b/pkg/statistics/statistics_test.go @@ -21,9 +21,9 @@ import ( "time" "github.com/pingcap/errors" - "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" @@ -48,17 +48,17 @@ type recordSet struct { data []types.Datum count int cursor int - fields []*ast.ResultField + fields []*resolve.ResultField } -func (r *recordSet) Fields() []*ast.ResultField { +func (r *recordSet) Fields() []*resolve.ResultField { return r.fields } func (r *recordSet) setFields(tps ...uint8) { - r.fields = make([]*ast.ResultField, len(tps)) + r.fields = make([]*resolve.ResultField, len(tps)) for i := 0; i < len(tps); i++ { - rf := new(ast.ResultField) + rf := new(resolve.ResultField) rf.Column = new(model.ColumnInfo) rf.Column.FieldType = *types.NewFieldType(tps[i]) r.fields[i] = rf diff --git a/pkg/statistics/table.go b/pkg/statistics/table.go index 5f4d8f705e8f9..7c9fe60e1f6a1 100644 --- a/pkg/statistics/table.go +++ b/pkg/statistics/table.go @@ -22,7 +22,7 @@ import ( "strings" 
"github.com/pingcap/tidb/pkg/expression" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/context" "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/store/driver/BUILD.bazel b/pkg/store/driver/BUILD.bazel index 8ea468e176415..a220d9fb786a4 100644 --- a/pkg/store/driver/BUILD.bazel +++ b/pkg/store/driver/BUILD.bazel @@ -47,7 +47,7 @@ go_test( deps = [ "//pkg/domain", "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/session", "//pkg/store/copr", "//pkg/store/mockstore", diff --git a/pkg/store/driver/client_test.go b/pkg/store/driver/client_test.go index cb0c4abaa20c1..2576fce5426bc 100644 --- a/pkg/store/driver/client_test.go +++ b/pkg/store/driver/client_test.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/util/tracing" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" diff --git a/pkg/store/driver/txn/BUILD.bazel b/pkg/store/driver/txn/BUILD.bazel index fdc6d834f0bcc..77243dd690bcd 100644 --- a/pkg/store/driver/txn/BUILD.bazel +++ b/pkg/store/driver/txn/BUILD.bazel @@ -16,7 +16,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", "//pkg/sessionctx/binloginfo", "//pkg/store/driver/error", diff --git a/pkg/store/driver/txn/error.go b/pkg/store/driver/txn/error.go index 58f3aabac8e29..8d53ea6fc4561 100644 --- a/pkg/store/driver/txn/error.go +++ b/pkg/store/driver/txn/error.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" derr 
"github.com/pingcap/tidb/pkg/store/driver/error" "github.com/pingcap/tidb/pkg/table/tables" diff --git a/pkg/store/driver/txn/txn_driver.go b/pkg/store/driver/txn/txn_driver.go index 03c288852a70f..d9a4ab5110567 100644 --- a/pkg/store/driver/txn/txn_driver.go +++ b/pkg/store/driver/txn/txn_driver.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx/binloginfo" derr "github.com/pingcap/tidb/pkg/store/driver/error" "github.com/pingcap/tidb/pkg/store/driver/options" diff --git a/pkg/store/gcworker/BUILD.bazel b/pkg/store/gcworker/BUILD.bazel index d7ee3ca29e63f..3044a32492718 100644 --- a/pkg/store/gcworker/BUILD.bazel +++ b/pkg/store/gcworker/BUILD.bazel @@ -12,8 +12,8 @@ go_library( "//pkg/ddl/util", "//pkg/domain/infosync", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", - "//pkg/parser/model", "//pkg/parser/terror", "//pkg/privilege", "//pkg/session", @@ -59,7 +59,7 @@ go_test( "//pkg/domain", "//pkg/domain/infosync", "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/session", "//pkg/store/mockstore", "//pkg/testkit/testmain", diff --git a/pkg/store/gcworker/gc_worker.go b/pkg/store/gcworker/gc_worker.go index 593abbb9c4fde..9db350dc81922 100644 --- a/pkg/store/gcworker/gc_worker.go +++ b/pkg/store/gcworker/gc_worker.go @@ -38,8 +38,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/privilege" "github.com/pingcap/tidb/pkg/session" diff --git a/pkg/store/gcworker/gc_worker_test.go b/pkg/store/gcworker/gc_worker_test.go index 8d6aab291bc7c..e4c8c2e347e2b 
100644 --- a/pkg/store/gcworker/gc_worker_test.go +++ b/pkg/store/gcworker/gc_worker_test.go @@ -36,7 +36,7 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/stretchr/testify/require" diff --git a/pkg/store/helper/BUILD.bazel b/pkg/store/helper/BUILD.bazel index a65d2b88624ec..aa4ff8b708b26 100644 --- a/pkg/store/helper/BUILD.bazel +++ b/pkg/store/helper/BUILD.bazel @@ -8,7 +8,7 @@ go_library( deps = [ "//pkg/infoschema/context", "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/store/driver/error", "//pkg/tablecodec", "//pkg/util", @@ -38,6 +38,7 @@ go_test( shard_count = 6, deps = [ "//pkg/infoschema/context", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/store/mockstore", "//pkg/tablecodec", diff --git a/pkg/store/helper/helper.go b/pkg/store/helper/helper.go index 20d7ec3e472e8..b39afc1a31387 100644 --- a/pkg/store/helper/helper.go +++ b/pkg/store/helper/helper.go @@ -32,7 +32,7 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" infoschema "github.com/pingcap/tidb/pkg/infoschema/context" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" derr "github.com/pingcap/tidb/pkg/store/driver/error" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util" diff --git a/pkg/store/helper/helper_test.go b/pkg/store/helper/helper_test.go index b339ad5656744..a22e04fc810fa 100644 --- a/pkg/store/helper/helper_test.go +++ b/pkg/store/helper/helper_test.go @@ -29,7 +29,8 @@ import ( "github.com/gorilla/mux" "github.com/pingcap/log" infoschema "github.com/pingcap/tidb/pkg/infoschema/context" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel 
"github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/store/helper" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/tablecodec" @@ -66,7 +67,7 @@ func TestHotRegion(t *testing.T) { require.Equal(t, expected, regionMetric) dbInfo := &model.DBInfo{ - Name: model.NewCIStr("test"), + Name: pmodel.NewCIStr("test"), } require.NoError(t, err) @@ -214,7 +215,7 @@ func mockHotRegionResponse(w http.ResponseWriter, _ *http.Request) { } func getMockRegionsTableInfoSchema() []*model.DBInfo { - dbInfo := &model.DBInfo{Name: model.NewCIStr("test")} + dbInfo := &model.DBInfo{Name: pmodel.NewCIStr("test")} dbInfo.Deprecated.Tables = []*model.TableInfo{ { ID: 41, diff --git a/pkg/store/mockstore/mockcopr/BUILD.bazel b/pkg/store/mockstore/mockcopr/BUILD.bazel index 669bb6b795fde..b95b7c77e29d8 100644 --- a/pkg/store/mockstore/mockcopr/BUILD.bazel +++ b/pkg/store/mockstore/mockcopr/BUILD.bazel @@ -18,11 +18,11 @@ go_library( "//pkg/expression", "//pkg/expression/aggregation", "//pkg/kv", - "//pkg/parser/ast", + "//pkg/meta/model", "//pkg/parser/charset", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", + "//pkg/planner/core/resolve", "//pkg/sessionctx", "//pkg/sessionctx/stmtctx", "//pkg/sessionctx/variable", diff --git a/pkg/store/mockstore/mockcopr/analyze.go b/pkg/store/mockstore/mockcopr/analyze.go index b58793ec0bb10..869f0d7ed4aba 100644 --- a/pkg/store/mockstore/mockcopr/analyze.go +++ b/pkg/store/mockstore/mockcopr/analyze.go @@ -21,9 +21,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/coprocessor" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/types" @@ -131,7 +131,7 
@@ func (h coprHandler) handleAnalyzeIndexReq(req *coprocessor.Request, analyzeReq type analyzeColumnsExec struct { tblExec *tableScanExec - fields []*ast.ResultField + fields []*resolve.ResultField } func (h coprHandler) handleAnalyzeColumnsReq(req *coprocessor.Request, analyzeReq *tipb.AnalyzeReq) (_ *coprocessor.Response, err error) { @@ -186,9 +186,9 @@ func (h coprHandler) handleAnalyzeColumnsReq(req *coprocessor.Request, analyzeRe rd: rd, }, } - e.fields = make([]*ast.ResultField, len(columns)) + e.fields = make([]*resolve.ResultField, len(columns)) for i := range e.fields { - rf := new(ast.ResultField) + rf := new(resolve.ResultField) rf.Column = new(model.ColumnInfo) ft := types.FieldType{} ft.SetType(mysql.TypeBlob) @@ -252,7 +252,7 @@ func (h coprHandler) handleAnalyzeColumnsReq(req *coprocessor.Request, analyzeRe } // Fields implements the sqlexec.RecordSet Fields interface. -func (e *analyzeColumnsExec) Fields() []*ast.ResultField { +func (e *analyzeColumnsExec) Fields() []*resolve.ResultField { return e.fields } diff --git a/pkg/store/mockstore/mockcopr/cop_handler_dag.go b/pkg/store/mockstore/mockcopr/cop_handler_dag.go index c87d169516c66..8e56e54564647 100644 --- a/pkg/store/mockstore/mockcopr/cop_handler_dag.go +++ b/pkg/store/mockstore/mockcopr/cop_handler_dag.go @@ -28,8 +28,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/store/mockstore/mockcopr/executor.go b/pkg/store/mockstore/mockcopr/executor.go index 4ad3f196eb0f7..0b9bea665c161 100644 --- a/pkg/store/mockstore/mockcopr/executor.go +++ b/pkg/store/mockstore/mockcopr/executor.go @@ -24,7 +24,7 @@ import ( 
"github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/store/mockstore/unistore/cophandler/BUILD.bazel b/pkg/store/mockstore/unistore/cophandler/BUILD.bazel index 05aaa880c84d3..34df6dbad4c4a 100644 --- a/pkg/store/mockstore/unistore/cophandler/BUILD.bazel +++ b/pkg/store/mockstore/unistore/cophandler/BUILD.bazel @@ -16,11 +16,11 @@ go_library( "//pkg/expression", "//pkg/expression/aggregation", "//pkg/kv", - "//pkg/parser/ast", + "//pkg/meta/model", "//pkg/parser/charset", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", + "//pkg/planner/core/resolve", "//pkg/sessionctx", "//pkg/sessionctx/stmtctx", "//pkg/sessionctx/variable", diff --git a/pkg/store/mockstore/unistore/cophandler/analyze.go b/pkg/store/mockstore/unistore/cophandler/analyze.go index 3a5a20b7a525c..48db874028ea2 100644 --- a/pkg/store/mockstore/unistore/cophandler/analyze.go +++ b/pkg/store/mockstore/unistore/cophandler/analyze.go @@ -27,10 +27,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/coprocessor" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/statistics" @@ -270,7 +270,7 @@ type analyzeColumnsExec struct { decoder *rowcodec.ChunkDecoder req *chunk.Chunk evalCtx *evalContext - fields []*ast.ResultField + fields []*resolve.ResultField } func buildBaseAnalyzeColumnsExec(dbReader 
*dbreader.DBReader, rans []kv.KeyRange, analyzeReq *tipb.AnalyzeReq, startTS uint64) (*analyzeColumnsExec, *statistics.SampleBuilder, int64, error) { @@ -297,9 +297,9 @@ func buildBaseAnalyzeColumnsExec(dbReader *dbreader.DBReader, rans []kv.KeyRange decoder: decoder, evalCtx: evalCtx, } - e.fields = make([]*ast.ResultField, len(columns)) + e.fields = make([]*resolve.ResultField, len(columns)) for i := range e.fields { - rf := new(ast.ResultField) + rf := new(resolve.ResultField) rf.Column = new(model.ColumnInfo) ft := types.FieldType{} ft.SetType(mysql.TypeBlob) @@ -404,9 +404,9 @@ func handleAnalyzeFullSamplingReq( decoder: decoder, evalCtx: evalCtx, } - e.fields = make([]*ast.ResultField, len(columns)) + e.fields = make([]*resolve.ResultField, len(columns)) for i := range e.fields { - rf := new(ast.ResultField) + rf := new(resolve.ResultField) rf.Column = new(model.ColumnInfo) ft := types.FieldType{} ft.SetType(mysql.TypeBlob) @@ -460,7 +460,7 @@ func handleAnalyzeFullSamplingReq( } // Fields implements the sqlexec.RecordSet Fields interface. 
-func (e *analyzeColumnsExec) Fields() []*ast.ResultField { +func (e *analyzeColumnsExec) Fields() []*resolve.ResultField { return e.fields } diff --git a/pkg/store/mockstore/unistore/cophandler/closure_exec.go b/pkg/store/mockstore/unistore/cophandler/closure_exec.go index 7ab29cfa7be15..eb23b84ab07e8 100644 --- a/pkg/store/mockstore/unistore/cophandler/closure_exec.go +++ b/pkg/store/mockstore/unistore/cophandler/closure_exec.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/store/mockstore/unistore/cophandler/cop_handler.go b/pkg/store/mockstore/unistore/cophandler/cop_handler.go index 7097f07104476..1835d0b536351 100644 --- a/pkg/store/mockstore/unistore/cophandler/cop_handler.go +++ b/pkg/store/mockstore/unistore/cophandler/cop_handler.go @@ -30,8 +30,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/store/mockstore/unistore/cophandler/mpp.go b/pkg/store/mockstore/unistore/cophandler/mpp.go index 6d37d152c3d80..c516af69cdd89 100644 --- a/pkg/store/mockstore/unistore/cophandler/mpp.go +++ b/pkg/store/mockstore/unistore/cophandler/mpp.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/kvproto/pkg/mpp" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" - "github.com/pingcap/tidb/pkg/parser/model" + 
"github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/store/mockstore/unistore/client" diff --git a/pkg/table/BUILD.bazel b/pkg/table/BUILD.bazel index 90d494dd7ad75..d730e6c966292 100644 --- a/pkg/table/BUILD.bazel +++ b/pkg/table/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "//pkg/expression/context", "//pkg/kv", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/charset", @@ -58,6 +59,7 @@ go_test( "//pkg/errctx", "//pkg/errno", "//pkg/expression", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/charset", "//pkg/parser/model", diff --git a/pkg/table/column.go b/pkg/table/column.go index 46ebdf71d5435..cba250b1480e4 100644 --- a/pkg/table/column.go +++ b/pkg/table/column.go @@ -28,10 +28,10 @@ import ( "github.com/pingcap/tidb/pkg/errctx" "github.com/pingcap/tidb/pkg/expression" exprctx "github.com/pingcap/tidb/pkg/expression/context" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" field_types "github.com/pingcap/tidb/pkg/parser/types" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" diff --git a/pkg/table/column_test.go b/pkg/table/column_test.go index 25b0e0db9befb..b308f786b1841 100644 --- a/pkg/table/column_test.go +++ b/pkg/table/column_test.go @@ -21,9 +21,10 @@ import ( "github.com/pingcap/tidb/pkg/errctx" "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" @@ -521,7 +522,7 
@@ func TestGetDefaultValue(t *testing.T) { func newCol(name string) *Column { return ToColumn(&model.ColumnInfo{ - Name: model.NewCIStr(name), + Name: pmodel.NewCIStr(name), State: model.StatePublic, }) } diff --git a/pkg/table/constraint.go b/pkg/table/constraint.go index f3a13a78b77ae..21bf2f8d0fba6 100644 --- a/pkg/table/constraint.go +++ b/pkg/table/constraint.go @@ -17,8 +17,9 @@ package table import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/util/dbterror" "github.com/pingcap/tidb/pkg/util/logutil" @@ -174,7 +175,7 @@ func (checker *checkConstraintChecker) Leave(in ast.Node) (out ast.Node, ok bool } // ContainsAutoIncrementCol checks if there is auto-increment col in given cols -func ContainsAutoIncrementCol(cols []model.CIStr, tblInfo *model.TableInfo) bool { +func ContainsAutoIncrementCol(cols []pmodel.CIStr, tblInfo *model.TableInfo) bool { if autoIncCol := tblInfo.GetAutoIncrementColInfo(); autoIncCol != nil { for _, col := range cols { if col.L == autoIncCol.Name.L { @@ -186,7 +187,7 @@ func ContainsAutoIncrementCol(cols []model.CIStr, tblInfo *model.TableInfo) bool } // HasForeignKeyRefAction checks if there is foreign key with referential action in check constraints -func HasForeignKeyRefAction(fkInfos []*model.FKInfo, constraints []*ast.Constraint, checkConstr *ast.Constraint, dependedCols []model.CIStr) error { +func HasForeignKeyRefAction(fkInfos []*model.FKInfo, constraints []*ast.Constraint, checkConstr *ast.Constraint, dependedCols []pmodel.CIStr) error { if fkInfos != nil { return checkForeignKeyRefActionByFKInfo(fkInfos, checkConstr, dependedCols) } @@ -195,8 +196,8 @@ func HasForeignKeyRefAction(fkInfos []*model.FKInfo, constraints []*ast.Constrai continue } refCol 
:= cons.Refer - if refCol.OnDelete.ReferOpt != model.ReferOptionNoOption || refCol.OnUpdate.ReferOpt != model.ReferOptionNoOption { - var fkCols []model.CIStr + if refCol.OnDelete.ReferOpt != pmodel.ReferOptionNoOption || refCol.OnUpdate.ReferOpt != pmodel.ReferOptionNoOption { + var fkCols []pmodel.CIStr for _, key := range cons.Keys { fkCols = append(fkCols, key.Column.Name) } @@ -210,7 +211,7 @@ func HasForeignKeyRefAction(fkInfos []*model.FKInfo, constraints []*ast.Constrai return nil } -func checkForeignKeyRefActionByFKInfo(fkInfos []*model.FKInfo, checkConstr *ast.Constraint, dependedCols []model.CIStr) error { +func checkForeignKeyRefActionByFKInfo(fkInfos []*model.FKInfo, checkConstr *ast.Constraint, dependedCols []pmodel.CIStr) error { for _, fkInfo := range fkInfos { if fkInfo.OnDelete != 0 || fkInfo.OnUpdate != 0 { for _, col := range dependedCols { @@ -223,7 +224,7 @@ func checkForeignKeyRefActionByFKInfo(fkInfos []*model.FKInfo, checkConstr *ast. return nil } -func hasSpecifiedCol(cols []model.CIStr, col model.CIStr) bool { +func hasSpecifiedCol(cols []pmodel.CIStr, col pmodel.CIStr) bool { for _, c := range cols { if c.L == col.L { return true diff --git a/pkg/table/context/BUILD.bazel b/pkg/table/context/BUILD.bazel index c49178bd63055..21fe8ee0715ee 100644 --- a/pkg/table/context/BUILD.bazel +++ b/pkg/table/context/BUILD.bazel @@ -14,7 +14,7 @@ go_library( "//pkg/infoschema/context", "//pkg/kv", "//pkg/meta/autoid", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/sessionctx/stmtctx", "//pkg/sessionctx/variable", "//pkg/tablecodec", diff --git a/pkg/table/context/table.go b/pkg/table/context/table.go index 06c06359c2ef0..63ed251a0959a 100644 --- a/pkg/table/context/table.go +++ b/pkg/table/context/table.go @@ -18,7 +18,7 @@ import ( exprctx "github.com/pingcap/tidb/pkg/expression/context" infoschema "github.com/pingcap/tidb/pkg/infoschema/context" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + 
"github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/util/rowcodec" diff --git a/pkg/table/contextimpl/BUILD.bazel b/pkg/table/contextimpl/BUILD.bazel index 1f39291ead924..da643f143cddc 100644 --- a/pkg/table/contextimpl/BUILD.bazel +++ b/pkg/table/contextimpl/BUILD.bazel @@ -9,7 +9,7 @@ go_library( "//pkg/expression/context", "//pkg/infoschema/context", "//pkg/meta/autoid", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/sessionctx", "//pkg/sessionctx/stmtctx", "//pkg/sessionctx/variable", @@ -27,7 +27,7 @@ go_test( flaky = True, deps = [ ":contextimpl", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/sessionctx/binloginfo", "//pkg/sessionctx/variable", "//pkg/table", diff --git a/pkg/table/contextimpl/table.go b/pkg/table/contextimpl/table.go index 329bba43f60d0..a13c25f61df27 100644 --- a/pkg/table/contextimpl/table.go +++ b/pkg/table/contextimpl/table.go @@ -19,7 +19,7 @@ import ( exprctx "github.com/pingcap/tidb/pkg/expression/context" infoschema "github.com/pingcap/tidb/pkg/infoschema/context" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" diff --git a/pkg/table/contextimpl/table_test.go b/pkg/table/contextimpl/table_test.go index a50ea689ed332..d3bef9b4ecc9e 100644 --- a/pkg/table/contextimpl/table_test.go +++ b/pkg/table/contextimpl/table_test.go @@ -17,7 +17,7 @@ package contextimpl_test import ( "testing" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx/binloginfo" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" diff --git a/pkg/table/index.go b/pkg/table/index.go index 10f9c2b1bffe3..a372884f2314f 
100644 --- a/pkg/table/index.go +++ b/pkg/table/index.go @@ -19,7 +19,7 @@ import ( "github.com/pingcap/tidb/pkg/errctx" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/types" ) diff --git a/pkg/table/table.go b/pkg/table/table.go index a1736445a0867..28a02dbdffd32 100644 --- a/pkg/table/table.go +++ b/pkg/table/table.go @@ -27,7 +27,8 @@ import ( exprctx "github.com/pingcap/tidb/pkg/expression/context" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" tbctx "github.com/pingcap/tidb/pkg/table/context" @@ -454,7 +455,7 @@ type PartitionedTable interface { GetPartitionIdxByRow(expression.EvalContext, []types.Datum) (int, error) GetAllPartitionIDs() []int64 GetPartitionColumnIDs() []int64 - GetPartitionColumnNames() []model.CIStr + GetPartitionColumnNames() []pmodel.CIStr CheckForExchangePartition(ctx expression.EvalContext, pi *model.PartitionInfo, r []types.Datum, partID, ntID int64) error } diff --git a/pkg/table/tables/BUILD.bazel b/pkg/table/tables/BUILD.bazel index d292d9753072b..d9c7aeff2be4e 100644 --- a/pkg/table/tables/BUILD.bazel +++ b/pkg/table/tables/BUILD.bazel @@ -22,6 +22,7 @@ go_library( "//pkg/kv", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser", "//pkg/parser/ast", @@ -87,6 +88,7 @@ go_test( "//pkg/lightning/backend/encode", "//pkg/lightning/backend/kv", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser", "//pkg/parser/ast", diff --git a/pkg/table/tables/index.go b/pkg/table/tables/index.go index bb2c6626f85db..8be28248092e7 100644 --- a/pkg/table/tables/index.go +++ b/pkg/table/tables/index.go @@ -22,7 +22,7 @@ import ( 
"github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/errctx" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/table/tables/index_test.go b/pkg/table/tables/index_test.go index 7819f6805dfa6..be91f0987d844 100644 --- a/pkg/table/tables/index_test.go +++ b/pkg/table/tables/index_test.go @@ -24,9 +24,10 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/lightning/backend/encode" lkv "github.com/pingcap/tidb/pkg/lightning/backend/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" @@ -322,7 +323,7 @@ func TestTableOperationsInDDLDropIndexWriteOnly(t *testing.T) { for { time.Sleep(20 * time.Millisecond) // wait the DDL state change to `StateWriteOnly` - tblInfo, err := do.InfoSchema().TableInfoByName(model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err := do.InfoSchema().TableInfoByName(pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) if state := tblInfo.Indices[0].State; state != model.StatePublic { require.Equal(t, model.StateWriteOnly, state) @@ -345,7 +346,7 @@ func TestTableOperationsInDDLDropIndexWriteOnly(t *testing.T) { // update some rows: 1 in storage, 1 in memory buffer. tk2.MustExec("update t set a = a + 10 where a in (2, 6)") // should be tested in `StateWriteOnly` state. 
- tblInfo, err := tk2.Session().GetInfoSchema().TableInfoByName(model.NewCIStr("test"), model.NewCIStr("t")) + tblInfo, err := tk2.Session().GetInfoSchema().TableInfoByName(pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) require.Equal(t, model.StateWriteOnly, tblInfo.Indices[0].State) // commit should success without any assertion fail. diff --git a/pkg/table/tables/mutation_checker.go b/pkg/table/tables/mutation_checker.go index 33f18ea37cb95..a63ca32eaf2f9 100644 --- a/pkg/table/tables/mutation_checker.go +++ b/pkg/table/tables/mutation_checker.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/table/tables/mutation_checker_test.go b/pkg/table/tables/mutation_checker_test.go index a9407ee173367..fc069c02c0fbf 100644 --- a/pkg/table/tables/mutation_checker_test.go +++ b/pkg/table/tables/mutation_checker_test.go @@ -20,7 +20,8 @@ import ( "time" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" @@ -75,12 +76,12 @@ func TestCompareIndexData(t *testing.T) { cols := make([]*table.Column, 0) indexCols := make([]*model.IndexColumn, 0) for i, ft := range data.fts { - cols = append(cols, &table.Column{ColumnInfo: &model.ColumnInfo{Name: model.NewCIStr(fmt.Sprintf("c%d", i)), FieldType: *ft}}) + cols = append(cols, &table.Column{ColumnInfo: &model.ColumnInfo{Name: pmodel.NewCIStr(fmt.Sprintf("c%d", i)), FieldType: *ft}}) indexCols = append(indexCols, &model.IndexColumn{Offset: i, Length: 
data.indexLength[i]}) } - indexInfo := &model.IndexInfo{Name: model.NewCIStr("i0"), Columns: indexCols} + indexInfo := &model.IndexInfo{Name: pmodel.NewCIStr("i0"), Columns: indexCols} - err := compareIndexData(tc, cols, data.indexData, data.inputData, indexInfo, &model.TableInfo{Name: model.NewCIStr("t")}) + err := compareIndexData(tc, cols, data.indexData, data.inputData, indexInfo, &model.TableInfo{Name: pmodel.NewCIStr("t")}) require.Equal(t, data.correct, err == nil, "case id = %v", caseID) } } @@ -256,7 +257,7 @@ func TestCheckIndexKeysAndCheckHandleConsistency(t *testing.T) { tc = tc.WithLocation(lc) tableInfo := model.TableInfo{ ID: 1, - Name: model.NewCIStr("t"), + Name: pmodel.NewCIStr("t"), Columns: columnInfos, Indices: indexInfos, PKIsHandle: false, diff --git a/pkg/table/tables/partition.go b/pkg/table/tables/partition.go index 87a759d63d9eb..8dc157b50afd4 100644 --- a/pkg/table/tables/partition.go +++ b/pkg/table/tables/partition.go @@ -31,9 +31,10 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/contextstatic" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" @@ -259,24 +260,24 @@ func NewPartitionExprBuildCtx() expression.BuildContext { ) } -func newPartitionExpr(tblInfo *model.TableInfo, tp model.PartitionType, expr string, partCols []model.CIStr, defs []model.PartitionDefinition) (*PartitionExpr, error) { +func newPartitionExpr(tblInfo *model.TableInfo, tp pmodel.PartitionType, expr string, partCols []pmodel.CIStr, defs []model.PartitionDefinition) (*PartitionExpr, error) { ctx := NewPartitionExprBuildCtx() - dbName := model.NewCIStr(ctx.GetEvalCtx().CurrentDB()) + dbName := 
pmodel.NewCIStr(ctx.GetEvalCtx().CurrentDB()) columns, names, err := expression.ColumnInfos2ColumnsAndNames(ctx, dbName, tblInfo.Name, tblInfo.Cols(), tblInfo) if err != nil { return nil, err } switch tp { - case model.PartitionTypeNone: + case pmodel.PartitionTypeNone: // Nothing to do return nil, nil - case model.PartitionTypeRange: + case pmodel.PartitionTypeRange: return generateRangePartitionExpr(ctx, expr, partCols, defs, columns, names) - case model.PartitionTypeHash: + case pmodel.PartitionTypeHash: return generateHashPartitionExpr(ctx, expr, columns, names) - case model.PartitionTypeKey: + case pmodel.PartitionTypeKey: return generateKeyPartitionExpr(ctx, expr, partCols, columns, names) - case model.PartitionTypeList: + case pmodel.PartitionTypeList: return generateListPartitionExpr(ctx, tblInfo, expr, partCols, defs, columns, names) } panic("cannot reach here") @@ -676,7 +677,7 @@ func fixOldVersionPartitionInfo(sctx expression.BuildContext, str string) (int64 return ret, true } -func rangePartitionExprStrings(cols []model.CIStr, expr string) []string { +func rangePartitionExprStrings(cols []pmodel.CIStr, expr string) []string { var s []string if len(cols) > 0 { s = make([]string, 0, len(cols)) @@ -689,7 +690,7 @@ func rangePartitionExprStrings(cols []model.CIStr, expr string) []string { return s } -func generateKeyPartitionExpr(ctx expression.BuildContext, expr string, partCols []model.CIStr, +func generateKeyPartitionExpr(ctx expression.BuildContext, expr string, partCols []pmodel.CIStr, columns []*expression.Column, names types.NameSlice) (*PartitionExpr, error) { ret := &PartitionExpr{ ForKeyPruning: &ForKeyPruning{}, @@ -704,7 +705,7 @@ func generateKeyPartitionExpr(ctx expression.BuildContext, expr string, partCols return ret, nil } -func generateRangePartitionExpr(ctx expression.BuildContext, expr string, partCols []model.CIStr, +func generateRangePartitionExpr(ctx expression.BuildContext, expr string, partCols []pmodel.CIStr, defs 
[]model.PartitionDefinition, columns []*expression.Column, names types.NameSlice) (*PartitionExpr, error) { // The caller should assure partition info is not nil. p := parser.New() @@ -794,7 +795,7 @@ func findIdxByColUniqueID(cols []*expression.Column, col *expression.Column) int return -1 } -func extractPartitionExprColumns(ctx expression.BuildContext, expr string, partCols []model.CIStr, columns []*expression.Column, names types.NameSlice) (expression.Expression, []*expression.Column, []int, error) { +func extractPartitionExprColumns(ctx expression.BuildContext, expr string, partCols []pmodel.CIStr, columns []*expression.Column, names types.NameSlice) (expression.Expression, []*expression.Column, []int, error) { var cols []*expression.Column var partExpr expression.Expression if len(partCols) == 0 { @@ -825,7 +826,7 @@ func extractPartitionExprColumns(ctx expression.BuildContext, expr string, partC return partExpr, deDupCols, offset, nil } -func generateListPartitionExpr(ctx expression.BuildContext, tblInfo *model.TableInfo, expr string, partCols []model.CIStr, +func generateListPartitionExpr(ctx expression.BuildContext, tblInfo *model.TableInfo, expr string, partCols []pmodel.CIStr, defs []model.PartitionDefinition, columns []*expression.Column, names types.NameSlice) (*PartitionExpr, error) { // The caller should assure partition info is not nil. partExpr, exprCols, offset, err := extractPartitionExprColumns(ctx, expr, partCols, columns, names) @@ -902,7 +903,7 @@ func (lp *ForListPruning) buildListPruner(ctx expression.BuildContext, exprStr s } func (lp *ForListPruning) buildListColumnsPruner(ctx expression.BuildContext, - tblInfo *model.TableInfo, partCols []model.CIStr, defs []model.PartitionDefinition, + tblInfo *model.TableInfo, partCols []pmodel.CIStr, defs []model.PartitionDefinition, columns []*expression.Column, names types.NameSlice) error { schema := expression.NewSchema(columns...) 
p := parser.New() @@ -1266,13 +1267,13 @@ func (t *partitionedTable) GetPartitionColumnIDs() []int64 { return colIDs } -func (t *partitionedTable) GetPartitionColumnNames() []model.CIStr { +func (t *partitionedTable) GetPartitionColumnNames() []pmodel.CIStr { pi := t.Meta().Partition if len(pi.Columns) > 0 { return pi.Columns } colIDs := t.GetPartitionColumnIDs() - colNames := make([]model.CIStr, 0, len(colIDs)) + colNames := make([]pmodel.CIStr, 0, len(colIDs)) for _, colID := range colIDs { for _, col := range t.Cols() { if col.ID == colID { @@ -1301,24 +1302,24 @@ func (t *partitionedTable) CheckForExchangePartition(ctx expression.EvalContext, } // locatePartitionCommon returns the partition idx of the input record. -func (t *partitionedTable) locatePartitionCommon(ctx expression.EvalContext, tp model.PartitionType, partitionExpr *PartitionExpr, num uint64, columnsPartitioned bool, r []types.Datum) (int, error) { +func (t *partitionedTable) locatePartitionCommon(ctx expression.EvalContext, tp pmodel.PartitionType, partitionExpr *PartitionExpr, num uint64, columnsPartitioned bool, r []types.Datum) (int, error) { var err error var idx int switch tp { - case model.PartitionTypeRange: + case pmodel.PartitionTypeRange: if columnsPartitioned { idx, err = t.locateRangeColumnPartition(ctx, partitionExpr, r) } else { idx, err = t.locateRangePartition(ctx, partitionExpr, r) } - case model.PartitionTypeHash: + case pmodel.PartitionTypeHash: // Note that only LIST and RANGE supports REORGANIZE PARTITION idx, err = t.locateHashPartition(ctx, partitionExpr, num, r) - case model.PartitionTypeKey: + case pmodel.PartitionTypeKey: idx, err = partitionExpr.LocateKeyPartition(num, r) - case model.PartitionTypeList: + case pmodel.PartitionTypeList: idx, err = partitionExpr.locateListPartition(ctx, r) - case model.PartitionTypeNone: + case pmodel.PartitionTypeNone: idx = 0 } if err != nil { diff --git a/pkg/table/tables/tables.go b/pkg/table/tables/tables.go index 
623e7ef729b9f..57f80ffd9cedb 100644 --- a/pkg/table/tables/tables.go +++ b/pkg/table/tables/tables.go @@ -34,8 +34,8 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/binloginfo" diff --git a/pkg/table/tables/tables_test.go b/pkg/table/tables/tables_test.go index c0bfb70c8cfcd..4f1ac053cfe17 100644 --- a/pkg/table/tables/tables_test.go +++ b/pkg/table/tables/tables_test.go @@ -27,8 +27,9 @@ import ( "github.com/pingcap/tidb/pkg/errctx" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx" @@ -80,7 +81,7 @@ func TestBasic(t *testing.T) { require.Nil(t, sessiontxn.NewTxn(context.Background(), tk.Session())) txn, err := tk.Session().Txn(true) require.NoError(t, err) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) require.Greater(t, tb.Meta().ID, int64(0)) require.Equal(t, "t", tb.Meta().Name.L) @@ -180,7 +181,7 @@ func TestTypes(t *testing.T) { tk := testkit.NewTestKit(t, store) _, err := tk.Session().Execute(context.Background(), "CREATE TABLE test.t (c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 text, c6 blob, c7 varchar(64), c8 time, c9 timestamp null default CURRENT_TIMESTAMP, c10 decimal(10,1))") require.NoError(t, err) - _, err = 
dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + _, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) _, err = tk.Session().Execute(ctx, "insert test.t values (1, 2, 3, 4, '5', '6', '7', '10:10:10', null, 1.4)") require.NoError(t, err) @@ -234,7 +235,7 @@ func TestUniqueIndexMultipleNullEntries(t *testing.T) { require.NoError(t, err) _, err = tk.Session().Execute(ctx, "CREATE TABLE test.t (a int primary key auto_increment, b varchar(255) unique)") require.NoError(t, err) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) require.Greater(t, tb.Meta().ID, int64(0)) require.Equal(t, "t", tb.Meta().Name.L) @@ -314,7 +315,7 @@ func TestUnsignedPK(t *testing.T) { require.NoError(t, err) _, err = tk.Session().Execute(context.Background(), "CREATE TABLE test.tPK (a bigint unsigned primary key, b varchar(255))") require.NoError(t, err) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tPK")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tPK")) require.NoError(t, err) require.Nil(t, sessiontxn.NewTxn(context.Background(), tk.Session())) txn, err := tk.Session().Txn(true) @@ -342,7 +343,7 @@ func TestIterRecords(t *testing.T) { require.Nil(t, sessiontxn.NewTxn(context.Background(), tk.Session())) txn, err := tk.Session().Txn(true) require.NoError(t, err) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tIter")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tIter")) require.NoError(t, err) totalCount := 0 err = tables.IterRecords(tb, 
tk.Session(), tb.Cols(), func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) { @@ -363,7 +364,7 @@ func TestTableFromMeta(t *testing.T) { require.Nil(t, sessiontxn.NewTxn(context.Background(), tk.Session())) _, err := tk.Session().Txn(true) require.NoError(t, err) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("meta")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("meta")) require.NoError(t, err) tbInfo := tb.Meta().Clone() @@ -385,7 +386,7 @@ func TestTableFromMeta(t *testing.T) { require.Error(t, err) tk.MustExec(`create table t_mock (id int) partition by range (id) (partition p0 values less than maxvalue)`) - tb, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t_mock")) + tb, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t_mock")) require.NoError(t, err) tt := table.MockTableFromMeta(tb.Meta()) _, ok := tt.(table.PartitionedTable) @@ -394,7 +395,7 @@ func TestTableFromMeta(t *testing.T) { require.Equal(t, table.NormalTable, tt.Type()) tk.MustExec("create table t_meta (a int) shard_row_id_bits = 15") - tb, err = domain.GetDomain(tk.Session()).InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t_meta")) + tb, err = domain.GetDomain(tk.Session()).InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t_meta")) require.NoError(t, err) _, err = tables.AllocHandle(context.Background(), tk.Session().GetTableCtx(), tb) require.NoError(t, err) @@ -415,7 +416,7 @@ func TestHiddenColumn(t *testing.T) { tk.MustExec("USE test_hidden;") tk.MustExec("CREATE TABLE t (a int primary key, b int as (a+1), c int, d int as (c+1) stored, e int, f tinyint as (a+1));") tk.MustExec("insert into t values (1, default, 3, default, 5, default);") - tb, err := 
dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test_hidden"), model.NewCIStr("t")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test_hidden"), pmodel.NewCIStr("t")) require.NoError(t, err) colInfo := tb.Meta().Columns // Set column b, d, f to hidden @@ -580,7 +581,7 @@ func TestAddRecordWithCtx(t *testing.T) { require.NoError(t, err) _, err = tk.Session().Execute(context.Background(), "CREATE TABLE test.tRecord (a bigint unsigned primary key, b varchar(255))") require.NoError(t, err) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("tRecord")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("tRecord")) require.NoError(t, err) defer func() { _, err := tk.Session().Execute(context.Background(), "DROP TABLE test.tRecord") @@ -943,7 +944,7 @@ func TestSkipWriteUntouchedIndices(t *testing.T) { tk.MustExec("insert into t values(4, 5, 6)") defer tk.MustExec("rollback") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) ctx := tk.Session().GetTableCtx() @@ -1017,7 +1018,7 @@ func TestDupKeyCheckMode(t *testing.T) { require.NoError(t, err) return txn } - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) ctx := tk.Session().GetTableCtx() getHandleFlags := func(h kv.Handle, memBuffer kv.MemBuffer) kv.KeyFlags { diff --git a/pkg/table/tables/test/partition/BUILD.bazel b/pkg/table/tables/test/partition/BUILD.bazel index 527f7c26c443e..e09b6d24b732c 100644 --- a/pkg/table/tables/test/partition/BUILD.bazel +++ 
b/pkg/table/tables/test/partition/BUILD.bazel @@ -12,6 +12,7 @@ go_test( deps = [ "//pkg/domain", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/sessiontxn", "//pkg/table", diff --git a/pkg/table/tables/test/partition/partition_test.go b/pkg/table/tables/test/partition/partition_test.go index 4aea56803e93e..1700d0cb3efb6 100644 --- a/pkg/table/tables/test/partition/partition_test.go +++ b/pkg/table/tables/test/partition/partition_test.go @@ -26,7 +26,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" @@ -57,11 +58,11 @@ PARTITION BY RANGE ( id ) ( require.NoError(t, err) _, err = tk.Session().Execute(ctx, createTable1) require.NoError(t, err) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) tbInfo := tb.Meta() p0 := tbInfo.Partition.Definitions[0] - require.Equal(t, model.NewCIStr("p0"), p0.Name) + require.Equal(t, pmodel.NewCIStr("p0"), p0.Name) require.Nil(t, sessiontxn.NewTxn(ctx, tk.Session())) txn, err := tk.Session().Txn(true) require.NoError(t, err) @@ -108,7 +109,7 @@ PARTITION BY RANGE ( id ) ( require.Nil(t, sessiontxn.NewTxn(ctx, tk.Session())) txn, err = tk.Session().Txn(true) require.NoError(t, err) - tb, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tb, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) _, err = tb.AddRecord(tk.Session().GetTableCtx(), txn, types.MakeDatums(22)) 
require.NoError(t, err) @@ -122,7 +123,7 @@ PARTITION BY RANGE ( id ) ( require.Nil(t, sessiontxn.NewTxn(ctx, tk.Session())) txn, err = tk.Session().Txn(true) require.NoError(t, err) - tb, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t3")) + tb, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t3")) require.NoError(t, err) _, err = tb.AddRecord(tk.Session().GetTableCtx(), txn, types.MakeDatums(11)) require.True(t, table.ErrNoPartitionForGivenValue.Equal(err)) @@ -140,7 +141,7 @@ PARTITION BY RANGE ( id ) ( require.Nil(t, sessiontxn.NewTxn(ctx, tk.Session())) txn, err = tk.Session().Txn(true) require.NoError(t, err) - tb, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t4")) + tb, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t4")) require.NoError(t, err) _, err = tb.AddRecord(tk.Session().GetTableCtx(), txn, types.MakeDatums(1, 11)) require.True(t, table.ErrNoPartitionForGivenValue.Equal(err)) @@ -157,7 +158,7 @@ func TestHashPartitionAddRecord(t *testing.T) { require.NoError(t, err) _, err = tk.Session().Execute(context.Background(), `CREATE TABLE test.t1 (id int(11), index(id)) PARTITION BY HASH (id) partitions 4;`) require.NoError(t, err) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) tbInfo := tb.Meta() p0 := tbInfo.Partition.Definitions[0] @@ -194,7 +195,7 @@ func TestHashPartitionAddRecord(t *testing.T) { // Test for partition expression is negative number. 
_, err = tk.Session().Execute(context.Background(), `CREATE TABLE test.t2 (id int(11), index(id)) PARTITION BY HASH (id) partitions 11;`) require.NoError(t, err) - tb, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tb, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) tbInfo = tb.Meta() for i := 0; i < 11; i++ { @@ -228,7 +229,7 @@ PARTITION BY RANGE ( id ) ( require.NoError(t, err) _, err = tk.Session().Execute(context.Background(), createTable1) require.NoError(t, err) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) tbInfo := tb.Meta() ps := tbInfo.GetPartitionInfo() @@ -254,7 +255,7 @@ func TestGeneratePartitionExpr(t *testing.T) { partition p3 values less than maxvalue)`) require.NoError(t, err) - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) type partitionExpr interface { PartitionExpr() *tables.PartitionExpr @@ -361,14 +362,14 @@ func TestIssue31629(t *testing.T) { require.NoError(t, err) tk.MustQuery("show warnings").Check(testkit.Rows()) - tb, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("Issue31629"), model.NewCIStr("t1")) + tb, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("Issue31629"), pmodel.NewCIStr("t1")) require.NoError(t, err) tbp, ok := tb.(table.PartitionedTable) require.Truef(t, ok, "test %d does not generate a table.PartitionedTable: %s (%T, %+v)", i, createTable, tb, tb) colNames := tbp.GetPartitionColumnNames() - checkNames := 
[]model.CIStr{model.NewCIStr(tt.cols[0])} + checkNames := []pmodel.CIStr{pmodel.NewCIStr(tt.cols[0])} for i := 1; i < len(tt.cols); i++ { - checkNames = append(checkNames, model.NewCIStr(tt.cols[i])) + checkNames = append(checkNames, pmodel.NewCIStr(tt.cols[i])) } require.ElementsMatchf(t, colNames, checkNames, "test %d %s", i, createTable) tk.MustExec("drop table t1") @@ -2675,7 +2676,7 @@ func checkDMLInAllStates(t *testing.T, tk, tk2 *testkit.TestKit, schemaName, alt transitions := 0 var currTbl table.Table currSchema := sessiontxn.GetTxnManager(tk2.Session()).GetTxnInfoSchema() - prevTbl, err := currSchema.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr("t")) + prevTbl, err := currSchema.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr("t")) require.NoError(t, err) var hookErr error testfailpoint.EnableCall(t, "github.com/pingcap/tidb/pkg/ddl/onJobRunBefore", func(job *model.Job) { @@ -2723,7 +2724,7 @@ func checkDMLInAllStates(t *testing.T, tk, tk2 *testkit.TestKit, schemaName, alt tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0")) tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0")) currSchema = sessiontxn.GetTxnManager(tk2.Session()).GetTxnInfoSchema() - currTbl, hookErr = currSchema.TableByName(context.Background(), model.NewCIStr(schemaName), model.NewCIStr("t")) + currTbl, hookErr = currSchema.TableByName(context.Background(), pmodel.NewCIStr(schemaName), pmodel.NewCIStr("t")) require.True(t, tables.SwapReorgPartFields(currTbl, prevTbl)) // Now using previous schema version diff --git a/pkg/table/temptable/BUILD.bazel b/pkg/table/temptable/BUILD.bazel index 22984092c9d88..ff7d98f7656de 100644 --- a/pkg/table/temptable/BUILD.bazel +++ b/pkg/table/temptable/BUILD.bazel @@ -14,6 +14,7 @@ go_library( "//pkg/kv", "//pkg/meta", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser/ast", 
"//pkg/parser/model", "//pkg/sessionctx", @@ -42,6 +43,7 @@ go_test( "//pkg/infoschema", "//pkg/kv", "//pkg/meta/autoid", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/sessionctx", diff --git a/pkg/table/temptable/ddl.go b/pkg/table/temptable/ddl.go index 9872244359e7a..feee5890ebbb9 100644 --- a/pkg/table/temptable/ddl.go +++ b/pkg/table/temptable/ddl.go @@ -23,8 +23,9 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" @@ -36,8 +37,8 @@ import ( // TemporaryTableDDL is an interface providing ddl operations for temporary table type TemporaryTableDDL interface { CreateLocalTemporaryTable(db *model.DBInfo, info *model.TableInfo) error - DropLocalTemporaryTable(schema model.CIStr, tblName model.CIStr) error - TruncateLocalTemporaryTable(schema model.CIStr, tblName model.CIStr) error + DropLocalTemporaryTable(schema pmodel.CIStr, tblName pmodel.CIStr) error + TruncateLocalTemporaryTable(schema pmodel.CIStr, tblName pmodel.CIStr) error } // temporaryTableDDL implements temptable.TemporaryTableDDL @@ -58,7 +59,7 @@ func (d *temporaryTableDDL) CreateLocalTemporaryTable(db *model.DBInfo, info *mo return ensureLocalTemporaryTables(d.sctx).AddTable(db, tbl) } -func (d *temporaryTableDDL) DropLocalTemporaryTable(schema model.CIStr, tblName model.CIStr) error { +func (d *temporaryTableDDL) DropLocalTemporaryTable(schema pmodel.CIStr, tblName pmodel.CIStr) error { tbl, err := checkLocalTemporaryExistsAndReturn(d.sctx, schema, tblName) if err != nil { return err @@ -68,7 +69,7 @@ func (d *temporaryTableDDL) DropLocalTemporaryTable(schema model.CIStr, tblName return 
d.clearTemporaryTableRecords(tbl.Meta().ID) } -func (d *temporaryTableDDL) TruncateLocalTemporaryTable(schema model.CIStr, tblName model.CIStr) error { +func (d *temporaryTableDDL) TruncateLocalTemporaryTable(schema pmodel.CIStr, tblName pmodel.CIStr) error { oldTbl, err := checkLocalTemporaryExistsAndReturn(d.sctx, schema, tblName) if err != nil { return err @@ -123,7 +124,7 @@ func (d *temporaryTableDDL) clearTemporaryTableRecords(tblID int64) error { return nil } -func checkLocalTemporaryExistsAndReturn(sctx sessionctx.Context, schema model.CIStr, tblName model.CIStr) (table.Table, error) { +func checkLocalTemporaryExistsAndReturn(sctx sessionctx.Context, schema pmodel.CIStr, tblName pmodel.CIStr) (table.Table, error) { ident := ast.Ident{Schema: schema, Name: tblName} localTemporaryTables := getLocalTemporaryTables(sctx) if localTemporaryTables == nil { diff --git a/pkg/table/temptable/ddl_test.go b/pkg/table/temptable/ddl_test.go index b757ffa951f90..cc3fc5cc915ae 100644 --- a/pkg/table/temptable/ddl_test.go +++ b/pkg/table/temptable/ddl_test.go @@ -20,7 +20,8 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/store/mockstore" @@ -63,7 +64,7 @@ func TestAddLocalTemporaryTable(t *testing.T) { require.NotNil(t, sessVars.LocalTemporaryTables) require.NotNil(t, sessVars.TemporaryTableData) require.Equal(t, int64(1), tbl1.ID) - got, exists := sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), model.NewCIStr("db1"), model.NewCIStr("t1")) + got, exists := sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), pmodel.NewCIStr("db1"), pmodel.NewCIStr("t1")) require.True(t, exists) require.Equal(t, got.Meta(), 
tbl1) @@ -71,7 +72,7 @@ func TestAddLocalTemporaryTable(t *testing.T) { err = ddl.CreateLocalTemporaryTable(db1, tbl2) require.NoError(t, err) require.Equal(t, int64(2), tbl2.ID) - got, exists = sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), model.NewCIStr("db1"), model.NewCIStr("t2")) + got, exists = sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), pmodel.NewCIStr("db1"), pmodel.NewCIStr("t2")) require.True(t, exists) require.Equal(t, got.Meta(), tbl2) @@ -88,20 +89,20 @@ func TestAddLocalTemporaryTable(t *testing.T) { tbl1x := newMockTable("t1") err = ddl.CreateLocalTemporaryTable(db1, tbl1x) require.True(t, infoschema.ErrTableExists.Equal(err)) - got, exists = sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), model.NewCIStr("db1"), model.NewCIStr("t1")) + got, exists = sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), pmodel.NewCIStr("db1"), pmodel.NewCIStr("t1")) require.True(t, exists) require.Equal(t, got.Meta(), tbl1) // insert should be success for same table name in different db err = ddl.CreateLocalTemporaryTable(db2, tbl1x) require.NoError(t, err) - got, exists = sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), model.NewCIStr("db2"), model.NewCIStr("t1")) + got, exists = sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), pmodel.NewCIStr("db2"), pmodel.NewCIStr("t1")) require.Equal(t, int64(4), got.Meta().ID) require.True(t, exists) require.Equal(t, got.Meta(), tbl1x) // tbl1 still exist - got, exists = sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), model.NewCIStr("db1"), model.NewCIStr("t1")) + got, exists = sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), pmodel.NewCIStr("db1"), pmodel.NewCIStr("t1")) 
require.True(t, exists) require.Equal(t, got.Meta(), tbl1) } @@ -113,7 +114,7 @@ func TestRemoveLocalTemporaryTable(t *testing.T) { db1 := newMockSchema("db1") // remove when empty - err := ddl.DropLocalTemporaryTable(model.NewCIStr("db1"), model.NewCIStr("t1")) + err := ddl.DropLocalTemporaryTable(pmodel.NewCIStr("db1"), pmodel.NewCIStr("t1")) require.True(t, infoschema.ErrTableNotExists.Equal(err)) // add one table @@ -126,11 +127,11 @@ func TestRemoveLocalTemporaryTable(t *testing.T) { require.NoError(t, err) // remove failed when table not found - err = ddl.DropLocalTemporaryTable(model.NewCIStr("db1"), model.NewCIStr("t2")) + err = ddl.DropLocalTemporaryTable(pmodel.NewCIStr("db1"), pmodel.NewCIStr("t2")) require.True(t, infoschema.ErrTableNotExists.Equal(err)) // remove failed when table not found (same table name in different db) - err = ddl.DropLocalTemporaryTable(model.NewCIStr("db2"), model.NewCIStr("t1")) + err = ddl.DropLocalTemporaryTable(pmodel.NewCIStr("db2"), pmodel.NewCIStr("t1")) require.True(t, infoschema.ErrTableNotExists.Equal(err)) // check failed remove should have no effects @@ -142,9 +143,9 @@ func TestRemoveLocalTemporaryTable(t *testing.T) { require.Equal(t, []byte("v1"), val) // remove success - err = ddl.DropLocalTemporaryTable(model.NewCIStr("db1"), model.NewCIStr("t1")) + err = ddl.DropLocalTemporaryTable(pmodel.NewCIStr("db1"), pmodel.NewCIStr("t1")) require.NoError(t, err) - got, exists = sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), model.NewCIStr("db1"), model.NewCIStr("t1")) + got, exists = sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), pmodel.NewCIStr("db1"), pmodel.NewCIStr("t1")) require.Nil(t, got) require.False(t, exists) val, err = sessVars.TemporaryTableData.Get(context.Background(), k) @@ -159,7 +160,7 @@ func TestTruncateLocalTemporaryTable(t *testing.T) { db1 := newMockSchema("db1") // truncate when empty - err := 
ddl.TruncateLocalTemporaryTable(model.NewCIStr("db1"), model.NewCIStr("t1")) + err := ddl.TruncateLocalTemporaryTable(pmodel.NewCIStr("db1"), pmodel.NewCIStr("t1")) require.True(t, infoschema.ErrTableNotExists.Equal(err)) require.Nil(t, sessVars.LocalTemporaryTables) require.Nil(t, sessVars.TemporaryTableData) @@ -174,13 +175,13 @@ func TestTruncateLocalTemporaryTable(t *testing.T) { require.NoError(t, err) // truncate failed for table not exist - err = ddl.TruncateLocalTemporaryTable(model.NewCIStr("db1"), model.NewCIStr("t2")) + err = ddl.TruncateLocalTemporaryTable(pmodel.NewCIStr("db1"), pmodel.NewCIStr("t2")) require.True(t, infoschema.ErrTableNotExists.Equal(err)) - err = ddl.TruncateLocalTemporaryTable(model.NewCIStr("db2"), model.NewCIStr("t1")) + err = ddl.TruncateLocalTemporaryTable(pmodel.NewCIStr("db2"), pmodel.NewCIStr("t1")) require.True(t, infoschema.ErrTableNotExists.Equal(err)) // check failed should have no effects - got, exists := sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), model.NewCIStr("db1"), model.NewCIStr("t1")) + got, exists := sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), pmodel.NewCIStr("db1"), pmodel.NewCIStr("t1")) require.True(t, exists) require.Equal(t, got.Meta(), tbl1) val, err := sessVars.TemporaryTableData.Get(context.Background(), k) @@ -197,9 +198,9 @@ func TestTruncateLocalTemporaryTable(t *testing.T) { require.NoError(t, err) // truncate success - err = ddl.TruncateLocalTemporaryTable(model.NewCIStr("db1"), model.NewCIStr("t1")) + err = ddl.TruncateLocalTemporaryTable(pmodel.NewCIStr("db1"), pmodel.NewCIStr("t1")) require.NoError(t, err) - got, exists = sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), model.NewCIStr("db1"), model.NewCIStr("t1")) + got, exists = sessVars.LocalTemporaryTables.(*infoschema.SessionTables).TableByName(context.Background(), pmodel.NewCIStr("db1"), 
pmodel.NewCIStr("t1")) require.True(t, exists) require.NotEqual(t, got.Meta(), tbl1) require.Equal(t, int64(3), got.Meta().ID) @@ -214,13 +215,13 @@ func TestTruncateLocalTemporaryTable(t *testing.T) { } func newMockTable(tblName string) *model.TableInfo { - c1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeLonglong)} - c2 := &model.ColumnInfo{ID: 2, Name: model.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeVarchar)} + c1 := &model.ColumnInfo{ID: 1, Name: pmodel.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeLonglong)} + c2 := &model.ColumnInfo{ID: 2, Name: pmodel.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeVarchar)} - tblInfo := &model.TableInfo{Name: model.NewCIStr(tblName), Columns: []*model.ColumnInfo{c1, c2}, PKIsHandle: true} + tblInfo := &model.TableInfo{Name: pmodel.NewCIStr(tblName), Columns: []*model.ColumnInfo{c1, c2}, PKIsHandle: true} return tblInfo } func newMockSchema(schemaName string) *model.DBInfo { - return &model.DBInfo{ID: 10, Name: model.NewCIStr(schemaName), State: model.StatePublic} + return &model.DBInfo{ID: 10, Name: pmodel.NewCIStr(schemaName), State: model.StatePublic} } diff --git a/pkg/table/temptable/interceptor.go b/pkg/table/temptable/interceptor.go index ed741ceedd89a..1918e77748bc2 100644 --- a/pkg/table/temptable/interceptor.go +++ b/pkg/table/temptable/interceptor.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/store/driver/txn" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/table/temptable/interceptor_test.go b/pkg/table/temptable/interceptor_test.go 
index 59c30a939fb75..4988beff6ba33 100644 --- a/pkg/table/temptable/interceptor_test.go +++ b/pkg/table/temptable/interceptor_test.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/store/driver/txn" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util/codec" diff --git a/pkg/table/temptable/main_test.go b/pkg/table/temptable/main_test.go index dd5e5fdd26c58..359aee02463a6 100644 --- a/pkg/table/temptable/main_test.go +++ b/pkg/table/temptable/main_test.go @@ -24,7 +24,8 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/testkit/testsetup" @@ -75,10 +76,10 @@ func (is *mockedInfoSchema) TableByID(_ context.Context, tblID int64) (table.Tab tblInfo := &model.TableInfo{ ID: tblID, - Name: model.NewCIStr(fmt.Sprintf("tb%d", tblID)), + Name: pmodel.NewCIStr(fmt.Sprintf("tb%d", tblID)), Columns: []*model.ColumnInfo{{ ID: 1, - Name: model.NewCIStr("col1"), + Name: pmodel.NewCIStr("col1"), Offset: 0, FieldType: *types.NewFieldType(mysql.TypeLonglong), State: model.StatePublic, diff --git a/pkg/tablecodec/BUILD.bazel b/pkg/tablecodec/BUILD.bazel index c51b894fed44b..efa8ac0777c3d 100644 --- a/pkg/tablecodec/BUILD.bazel +++ b/pkg/tablecodec/BUILD.bazel @@ -8,8 +8,8 @@ go_library( deps = [ "//pkg/errno", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/charset", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/structure", diff --git a/pkg/tablecodec/tablecodec.go b/pkg/tablecodec/tablecodec.go index 042f620f2e416..ff7819b6f9e01 100644 --- 
a/pkg/tablecodec/tablecodec.go +++ b/pkg/tablecodec/tablecodec.go @@ -26,8 +26,8 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/structure" diff --git a/pkg/testkit/BUILD.bazel b/pkg/testkit/BUILD.bazel index 4c95a1311dcc5..8fc0f08c9fddf 100644 --- a/pkg/testkit/BUILD.bazel +++ b/pkg/testkit/BUILD.bazel @@ -22,6 +22,7 @@ go_library( "//pkg/parser/auth", "//pkg/parser/terror", "//pkg/planner/core", + "//pkg/planner/core/resolve", "//pkg/resourcemanager", "//pkg/session", "//pkg/session/txninfo", diff --git a/pkg/testkit/ddlhelper/BUILD.bazel b/pkg/testkit/ddlhelper/BUILD.bazel index 50ccd41d37437..deaaf3d50ba55 100644 --- a/pkg/testkit/ddlhelper/BUILD.bazel +++ b/pkg/testkit/ddlhelper/BUILD.bazel @@ -7,7 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/ddl", + "//pkg/meta/model", "//pkg/parser/ast", - "//pkg/parser/model", ], ) diff --git a/pkg/testkit/ddlhelper/helper.go b/pkg/testkit/ddlhelper/helper.go index fcbfb8ad442cb..fcaf79b8faf91 100644 --- a/pkg/testkit/ddlhelper/helper.go +++ b/pkg/testkit/ddlhelper/helper.go @@ -16,8 +16,8 @@ package ddlhelper import ( "github.com/pingcap/tidb/pkg/ddl" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" ) // BuildTableInfoFromAST builds model.TableInfo from a SQL statement. 
diff --git a/pkg/testkit/testkit.go b/pkg/testkit/testkit.go index cd7a355550a66..dbc6ef83a0622 100644 --- a/pkg/testkit/testkit.go +++ b/pkg/testkit/testkit.go @@ -36,6 +36,7 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/terror" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/session" sessiontypes "github.com/pingcap/tidb/pkg/session/types" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -687,7 +688,7 @@ func (m MockPumpClient) PullBinlogs(ctx context.Context, in *binlog.PullBinlogRe var _ sqlexec.RecordSet = &rowsRecordSet{} type rowsRecordSet struct { - fields []*ast.ResultField + fields []*resolve.ResultField rows []chunk.Row idx int @@ -696,7 +697,7 @@ type rowsRecordSet struct { err error } -func (r *rowsRecordSet) Fields() []*ast.ResultField { +func (r *rowsRecordSet) Fields() []*resolve.ResultField { return r.fields } diff --git a/pkg/timer/tablestore/BUILD.bazel b/pkg/timer/tablestore/BUILD.bazel index 1443720588261..d329d57126083 100644 --- a/pkg/timer/tablestore/BUILD.bazel +++ b/pkg/timer/tablestore/BUILD.bazel @@ -39,9 +39,9 @@ go_test( shard_count = 8, deps = [ "//pkg/kv", - "//pkg/parser/ast", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", + "//pkg/planner/core/resolve", "//pkg/sessionctx", "//pkg/sessionctx/variable", "//pkg/timer/api", diff --git a/pkg/timer/tablestore/sql_test.go b/pkg/timer/tablestore/sql_test.go index f2f6db8aca255..6ab4141ed4393 100644 --- a/pkg/timer/tablestore/sql_test.go +++ b/pkg/timer/tablestore/sql_test.go @@ -24,9 +24,9 @@ import ( "github.com/ngaut/pools" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/sessionctx" 
"github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/timer/api" @@ -652,7 +652,7 @@ func TestTakeSession(t *testing.T) { // Get returns a session pool.On("Get").Return(se, nil).Once() rs := &sqlexec.SimpleRecordSet{ - ResultFields: []*ast.ResultField{{ + ResultFields: []*resolve.ResultField{{ Column: &model.ColumnInfo{ FieldType: *types.NewFieldType(mysql.TypeString), }, diff --git a/pkg/ttl/cache/BUILD.bazel b/pkg/ttl/cache/BUILD.bazel index 12fc545e6b34f..f8f82617d89be 100644 --- a/pkg/ttl/cache/BUILD.bazel +++ b/pkg/ttl/cache/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "//pkg/expression/contextstatic", "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/charset", "//pkg/parser/model", @@ -55,6 +56,7 @@ go_test( deps = [ "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/model", "//pkg/parser/mysql", diff --git a/pkg/ttl/cache/infoschema.go b/pkg/ttl/cache/infoschema.go index c9ca50eeaa760..7f6803445e7cd 100644 --- a/pkg/ttl/cache/infoschema.go +++ b/pkg/ttl/cache/infoschema.go @@ -18,7 +18,8 @@ import ( "time" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/ttl/session" "github.com/pingcap/tidb/pkg/util/logutil" "go.uber.org/zap" @@ -91,7 +92,7 @@ func (isc *InfoSchemaCache) Update(se session.Session) error { return nil } -func (isc *InfoSchemaCache) newTable(schema model.CIStr, tblInfo *model.TableInfo, +func (isc *InfoSchemaCache) newTable(schema pmodel.CIStr, tblInfo *model.TableInfo, par *model.PartitionDefinition) (*PhysicalTable, error) { id := tblInfo.ID if par != nil { @@ -105,7 +106,7 @@ func (isc *InfoSchemaCache) newTable(schema model.CIStr, tblInfo *model.TableInf } } - partitionName := model.NewCIStr("") + partitionName := pmodel.NewCIStr("") if par != nil { partitionName = par.Name } diff --git 
a/pkg/ttl/cache/table.go b/pkg/ttl/cache/table.go index 0763d19712cfd..2b97f6c8ef49d 100644 --- a/pkg/ttl/cache/table.go +++ b/pkg/ttl/cache/table.go @@ -26,9 +26,10 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/contextstatic" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/table/tables" @@ -98,10 +99,10 @@ type PhysicalTable struct { // ID is the physical ID of the table ID int64 // Schema is the database name of the table - Schema model.CIStr + Schema pmodel.CIStr *model.TableInfo // Partition is the partition name - Partition model.CIStr + Partition pmodel.CIStr // PartitionDef is the partition definition PartitionDef *model.PartitionDefinition // KeyColumns is the cluster index key columns for the table @@ -113,9 +114,9 @@ type PhysicalTable struct { } // NewBasePhysicalTable create a new PhysicalTable with specific timeColumn. 
-func NewBasePhysicalTable(schema model.CIStr, +func NewBasePhysicalTable(schema pmodel.CIStr, tbl *model.TableInfo, - partition model.CIStr, + partition pmodel.CIStr, timeColumn *model.ColumnInfo, ) (*PhysicalTable, error) { if tbl.State != model.StatePublic { @@ -166,7 +167,7 @@ func NewBasePhysicalTable(schema model.CIStr, } // NewPhysicalTable create a new PhysicalTable -func NewPhysicalTable(schema model.CIStr, tbl *model.TableInfo, partition model.CIStr) (*PhysicalTable, error) { +func NewPhysicalTable(schema pmodel.CIStr, tbl *model.TableInfo, partition pmodel.CIStr) (*PhysicalTable, error) { ttlInfo := tbl.TTLInfo if ttlInfo == nil { return nil, errors.Errorf("table '%s.%s' is not a ttl table", schema, tbl.Name) diff --git a/pkg/ttl/cache/table_test.go b/pkg/ttl/cache/table_test.go index 060884ffad17b..d26627c3d04cb 100644 --- a/pkg/ttl/cache/table_test.go +++ b/pkg/ttl/cache/table_test.go @@ -20,8 +20,9 @@ import ( "testing" "time" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/ttl/cache" "github.com/pingcap/tidb/pkg/ttl/session" @@ -94,12 +95,12 @@ func TestNewTTLTable(t *testing.T) { for _, c := range cases { is := do.InfoSchema() - tbl, err := is.TableByName(context.Background(), model.NewCIStr(c.db), model.NewCIStr(c.tbl)) + tbl, err := is.TableByName(context.Background(), pmodel.NewCIStr(c.db), pmodel.NewCIStr(c.tbl)) require.NoError(t, err) tblInfo := tbl.Meta() var physicalTbls []*cache.PhysicalTable if tblInfo.Partition == nil { - ttlTbl, err := cache.NewPhysicalTable(model.NewCIStr(c.db), tblInfo, model.NewCIStr("")) + ttlTbl, err := cache.NewPhysicalTable(pmodel.NewCIStr(c.db), tblInfo, pmodel.NewCIStr("")) if c.timeCol == "" { require.Error(t, err) continue @@ -108,7 +109,7 @@ func TestNewTTLTable(t *testing.T) { physicalTbls = append(physicalTbls, 
ttlTbl) } else { for _, partition := range tblInfo.Partition.Definitions { - ttlTbl, err := cache.NewPhysicalTable(model.NewCIStr(c.db), tblInfo, partition.Name) + ttlTbl, err := cache.NewPhysicalTable(pmodel.NewCIStr(c.db), tblInfo, partition.Name) if c.timeCol == "" { require.Error(t, err) continue @@ -169,10 +170,10 @@ func TestTableEvalTTLExpireTime(t *testing.T) { tk.MustExec("set @@time_zone='Asia/Tokyo'") tk.MustExec("create table test.t(a int, t datetime) ttl = `t` + interval 1 month") - tb, err := do.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tb, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblInfo := tb.Meta() - ttlTbl, err := cache.NewPhysicalTable(model.NewCIStr("test"), tblInfo, model.NewCIStr("")) + ttlTbl, err := cache.NewPhysicalTable(pmodel.NewCIStr("test"), tblInfo, pmodel.NewCIStr("")) require.NoError(t, err) se := session.NewSession(tk.Session(), tk.Session(), nil) @@ -193,10 +194,10 @@ func TestTableEvalTTLExpireTime(t *testing.T) { // should support a string format interval tk.MustExec("create table test.t2(a int, t datetime) ttl = `t` + interval '1:3' hour_minute") - tb2, err := do.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tb2, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) tblInfo2 := tb2.Meta() - ttlTbl2, err := cache.NewPhysicalTable(model.NewCIStr("test"), tblInfo2, model.NewCIStr("")) + ttlTbl2, err := cache.NewPhysicalTable(pmodel.NewCIStr("test"), tblInfo2, pmodel.NewCIStr("")) require.NoError(t, err) now, err = time.ParseInLocation(time.DateTime, "2020-01-01 15:00:00", tz1) require.NoError(t, err) diff --git a/pkg/ttl/sqlbuilder/BUILD.bazel b/pkg/ttl/sqlbuilder/BUILD.bazel index 936719355c613..4096caae7dc4c 100644 --- a/pkg/ttl/sqlbuilder/BUILD.bazel +++ 
b/pkg/ttl/sqlbuilder/BUILD.bazel @@ -6,9 +6,9 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/ttl/sqlbuilder", visibility = ["//visibility:public"], deps = [ + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/format", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/ttl/cache", "//pkg/types", @@ -29,6 +29,7 @@ go_test( deps = [ ":sqlbuilder", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/model", diff --git a/pkg/ttl/sqlbuilder/sql.go b/pkg/ttl/sqlbuilder/sql.go index e1329bd268781..4c05679eb8c9d 100644 --- a/pkg/ttl/sqlbuilder/sql.go +++ b/pkg/ttl/sqlbuilder/sql.go @@ -23,9 +23,9 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/ttl/cache" "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/ttl/sqlbuilder/sql_test.go b/pkg/ttl/sqlbuilder/sql_test.go index 1ee61285e9483..83c415eae9bde 100644 --- a/pkg/ttl/sqlbuilder/sql_test.go +++ b/pkg/ttl/sqlbuilder/sql_test.go @@ -22,9 +22,10 @@ import ( "time" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/testkit" @@ -38,19 +39,19 @@ import ( func TestEscape(t *testing.T) { tb := &cache.PhysicalTable{ - Schema: model.NewCIStr("testp;\"';123`456"), + Schema: pmodel.NewCIStr("testp;\"';123`456"), TableInfo: &model.TableInfo{ - Name: model.NewCIStr("tp\"';123`456"), + Name: pmodel.NewCIStr("tp\"';123`456"), }, KeyColumns: []*model.ColumnInfo{ - {Name: model.NewCIStr("col1\"';123`456"), FieldType: 
*types.NewFieldType(mysql.TypeString)}, + {Name: pmodel.NewCIStr("col1\"';123`456"), FieldType: *types.NewFieldType(mysql.TypeString)}, }, TimeColumn: &model.ColumnInfo{ - Name: model.NewCIStr("time\"';123`456"), + Name: pmodel.NewCIStr("time\"';123`456"), FieldType: *types.NewFieldType(mysql.TypeDatetime), }, PartitionDef: &model.PartitionDefinition{ - Name: model.NewCIStr("p1\"';123`456"), + Name: pmodel.NewCIStr("p1\"';123`456"), }, } @@ -356,7 +357,7 @@ func TestFormatSQLDatum(t *testing.T) { sb.WriteString("\n);") tk.MustExec(sb.String()) - tbl, err := do.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) for i, c := range cases { @@ -403,43 +404,43 @@ func TestSQLBuilder(t *testing.T) { var b *sqlbuilder.SQLBuilder t1 := &cache.PhysicalTable{ - Schema: model.NewCIStr("test"), + Schema: pmodel.NewCIStr("test"), TableInfo: &model.TableInfo{ - Name: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("t1"), }, KeyColumns: []*model.ColumnInfo{ - {Name: model.NewCIStr("id"), FieldType: *types.NewFieldType(mysql.TypeVarchar)}, + {Name: pmodel.NewCIStr("id"), FieldType: *types.NewFieldType(mysql.TypeVarchar)}, }, TimeColumn: &model.ColumnInfo{ - Name: model.NewCIStr("time"), + Name: pmodel.NewCIStr("time"), FieldType: *types.NewFieldType(mysql.TypeDatetime), }, } t2 := &cache.PhysicalTable{ - Schema: model.NewCIStr("test2"), + Schema: pmodel.NewCIStr("test2"), TableInfo: &model.TableInfo{ - Name: model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2"), }, KeyColumns: []*model.ColumnInfo{ - {Name: model.NewCIStr("a"), FieldType: *types.NewFieldType(mysql.TypeVarchar)}, - {Name: model.NewCIStr("b"), FieldType: *types.NewFieldType(mysql.TypeInt24)}, + {Name: pmodel.NewCIStr("a"), FieldType: *types.NewFieldType(mysql.TypeVarchar)}, + {Name: pmodel.NewCIStr("b"), FieldType: 
*types.NewFieldType(mysql.TypeInt24)}, }, TimeColumn: &model.ColumnInfo{ - Name: model.NewCIStr("time"), + Name: pmodel.NewCIStr("time"), FieldType: *types.NewFieldType(mysql.TypeDatetime), }, } tp := &cache.PhysicalTable{ - Schema: model.NewCIStr("testp"), + Schema: pmodel.NewCIStr("testp"), TableInfo: &model.TableInfo{ - Name: model.NewCIStr("tp"), + Name: pmodel.NewCIStr("tp"), }, KeyColumns: t1.KeyColumns, TimeColumn: t1.TimeColumn, PartitionDef: &model.PartitionDefinition{ - Name: model.NewCIStr("p1"), + Name: pmodel.NewCIStr("p1"), }, } @@ -579,31 +580,31 @@ func TestSQLBuilder(t *testing.T) { func TestScanQueryGenerator(t *testing.T) { t1 := &cache.PhysicalTable{ - Schema: model.NewCIStr("test"), + Schema: pmodel.NewCIStr("test"), TableInfo: &model.TableInfo{ - Name: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("t1"), }, KeyColumns: []*model.ColumnInfo{ - {Name: model.NewCIStr("id"), FieldType: *types.NewFieldType(mysql.TypeInt24)}, + {Name: pmodel.NewCIStr("id"), FieldType: *types.NewFieldType(mysql.TypeInt24)}, }, TimeColumn: &model.ColumnInfo{ - Name: model.NewCIStr("time"), + Name: pmodel.NewCIStr("time"), FieldType: *types.NewFieldType(mysql.TypeDatetime), }, } t2 := &cache.PhysicalTable{ - Schema: model.NewCIStr("test2"), + Schema: pmodel.NewCIStr("test2"), TableInfo: &model.TableInfo{ - Name: model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2"), }, KeyColumns: []*model.ColumnInfo{ - {Name: model.NewCIStr("a"), FieldType: *types.NewFieldType(mysql.TypeInt24)}, - {Name: model.NewCIStr("b"), FieldType: *types.NewFieldType(mysql.TypeVarchar)}, - {Name: model.NewCIStr("c"), FieldType: types.NewFieldTypeBuilder().SetType(mysql.TypeString).SetFlag(mysql.BinaryFlag).Build()}, + {Name: pmodel.NewCIStr("a"), FieldType: *types.NewFieldType(mysql.TypeInt24)}, + {Name: pmodel.NewCIStr("b"), FieldType: *types.NewFieldType(mysql.TypeVarchar)}, + {Name: pmodel.NewCIStr("c"), FieldType: 
types.NewFieldTypeBuilder().SetType(mysql.TypeString).SetFlag(mysql.BinaryFlag).Build()}, }, TimeColumn: &model.ColumnInfo{ - Name: model.NewCIStr("time"), + Name: pmodel.NewCIStr("time"), FieldType: *types.NewFieldType(mysql.TypeDatetime), }, } @@ -863,30 +864,30 @@ func TestScanQueryGenerator(t *testing.T) { func TestBuildDeleteSQL(t *testing.T) { t1 := &cache.PhysicalTable{ - Schema: model.NewCIStr("test"), + Schema: pmodel.NewCIStr("test"), TableInfo: &model.TableInfo{ - Name: model.NewCIStr("t1"), + Name: pmodel.NewCIStr("t1"), }, KeyColumns: []*model.ColumnInfo{ - {Name: model.NewCIStr("id"), FieldType: *types.NewFieldType(mysql.TypeInt24)}, + {Name: pmodel.NewCIStr("id"), FieldType: *types.NewFieldType(mysql.TypeInt24)}, }, TimeColumn: &model.ColumnInfo{ - Name: model.NewCIStr("time"), + Name: pmodel.NewCIStr("time"), FieldType: *types.NewFieldType(mysql.TypeDatetime), }, } t2 := &cache.PhysicalTable{ - Schema: model.NewCIStr("test2"), + Schema: pmodel.NewCIStr("test2"), TableInfo: &model.TableInfo{ - Name: model.NewCIStr("t2"), + Name: pmodel.NewCIStr("t2"), }, KeyColumns: []*model.ColumnInfo{ - {Name: model.NewCIStr("a"), FieldType: *types.NewFieldType(mysql.TypeInt24)}, - {Name: model.NewCIStr("b"), FieldType: *types.NewFieldType(mysql.TypeVarchar)}, + {Name: pmodel.NewCIStr("a"), FieldType: *types.NewFieldType(mysql.TypeInt24)}, + {Name: pmodel.NewCIStr("b"), FieldType: *types.NewFieldType(mysql.TypeVarchar)}, }, TimeColumn: &model.ColumnInfo{ - Name: model.NewCIStr("time"), + Name: pmodel.NewCIStr("time"), FieldType: *types.NewFieldType(mysql.TypeDatetime), }, } diff --git a/pkg/ttl/ttlworker/BUILD.bazel b/pkg/ttl/ttlworker/BUILD.bazel index 6b69d9a73fc10..5358c4a6172c6 100644 --- a/pkg/ttl/ttlworker/BUILD.bazel +++ b/pkg/ttl/ttlworker/BUILD.bazel @@ -19,6 +19,7 @@ go_library( deps = [ "//pkg/infoschema", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser/model", "//pkg/parser/terror", @@ -75,6 +76,7 @@ go_test( "//pkg/infoschema", 
"//pkg/infoschema/context", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser/ast", "//pkg/parser/model", diff --git a/pkg/ttl/ttlworker/job_manager_integration_test.go b/pkg/ttl/ttlworker/job_manager_integration_test.go index 8458df879f146..685f5afc4be45 100644 --- a/pkg/ttl/ttlworker/job_manager_integration_test.go +++ b/pkg/ttl/ttlworker/job_manager_integration_test.go @@ -30,8 +30,9 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" dbsession "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/testkit" @@ -163,7 +164,7 @@ func TestFinishJob(t *testing.T) { sessionFactory := sessionFactory(t, store) - testTable := &cache.PhysicalTable{ID: 2, Schema: model.NewCIStr("db1"), TableInfo: &model.TableInfo{ID: 1, Name: model.NewCIStr("t1"), TTLInfo: &model.TTLInfo{IntervalExprStr: "1", IntervalTimeUnit: int(ast.TimeUnitDay)}}} + testTable := &cache.PhysicalTable{ID: 2, Schema: pmodel.NewCIStr("db1"), TableInfo: &model.TableInfo{ID: 1, Name: pmodel.NewCIStr("t1"), TTLInfo: &model.TTLInfo{IntervalExprStr: "1", IntervalTimeUnit: int(ast.TimeUnitDay)}}} tk.MustExec("insert into mysql.tidb_ttl_table_status(table_id) values (2)") @@ -280,7 +281,7 @@ func TestTriggerTTLJob(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("create table t(id int primary key, t timestamp) TTL=`t` + INTERVAL 1 DAY") - tbl, err := do.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) tblID := tbl.Meta().ID require.NoError(t, err) @@ -335,13 +336,13 @@ func TestTTLDeleteWithTimeZoneChange(t *testing.T) { 
tk.MustExec("set @@global.tidb_ttl_running_tasks=32") tk.MustExec("create table t1(id int primary key, t datetime) TTL=`t` + INTERVAL 1 DAY TTL_ENABLE='OFF'") - tbl1, err := do.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl1, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) tblID1 := tbl1.Meta().ID tk.MustExec("insert into t1 values(1, NOW()), (2, NOW() - INTERVAL 31 HOUR), (3, NOW() - INTERVAL 33 HOUR)") tk.MustExec("create table t2(id int primary key, t timestamp) TTL=`t` + INTERVAL 1 DAY TTL_ENABLE='OFF'") - tbl2, err := do.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t2")) + tbl2, err := do.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t2")) require.NoError(t, err) tblID2 := tbl2.Meta().ID tk.MustExec("insert into t2 values(1, NOW()), (2, NOW() - INTERVAL 31 HOUR), (3, NOW() - INTERVAL 33 HOUR)") @@ -461,7 +462,7 @@ func TestSubmitJob(t *testing.T) { "PARTITION p0 VALUES LESS THAN (10)," + "PARTITION p1 VALUES LESS THAN (100)" + ")") - table, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ttlp1")) + table, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ttlp1")) require.NoError(t, err) tableID := table.Meta().ID var physicalID int64 @@ -537,7 +538,7 @@ func TestRescheduleJobs(t *testing.T) { now := time.Now() tk.MustExec("create table test.t (id int, created_at datetime) ttl = `created_at` + interval 1 minute ttl_job_interval = '1m'") - table, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) ctx := kv.WithInternalSourceType(context.Background(), 
kv.InternalTxnTTL) @@ -596,7 +597,7 @@ func TestRescheduleJobsAfterTableDropped(t *testing.T) { now := time.Now() createTableSQL := "create table test.t (id int, created_at datetime) ttl = `created_at` + interval 1 minute ttl_job_interval = '1m'" tk.MustExec("create table test.t (id int, created_at datetime) ttl = `created_at` + interval 1 minute ttl_job_interval = '1m'") - table, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnTTL) @@ -636,7 +637,7 @@ func TestRescheduleJobsAfterTableDropped(t *testing.T) { // resume the table tk.MustExec(rb.resume) - table, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) m.DoGC(context.TODO(), se) } @@ -650,7 +651,7 @@ func TestJobTimeout(t *testing.T) { waitAndStopTTLManager(t, dom) tk.MustExec("create table test.t (id int, created_at datetime) ttl = `created_at` + interval 1 minute ttl_job_interval = '1m'") - table, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) tableID := table.Meta().ID require.NoError(t, err) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnTTL) @@ -718,7 +719,7 @@ func TestTriggerScanTask(t *testing.T) { waitAndStopTTLManager(t, dom) tk.MustExec("create table test.t (id int, created_at datetime) ttl = `created_at` + interval 1 minute ttl_job_interval = '1m'") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, 
err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) tblID := tbl.Meta().ID @@ -907,7 +908,7 @@ func TestJobMetrics(t *testing.T) { waitAndStopTTLManager(t, dom) tk.MustExec("create table test.t (id int, created_at datetime) ttl = `created_at` + interval 1 minute ttl_job_interval = '1m'") - table, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + table, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnTTL) @@ -1079,19 +1080,19 @@ func TestManagerJobAdapterCanSubmitJob(t *testing.T) { // not ttl table tk.MustExec("create table t1(t timestamp)") - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t1")) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t1")) require.NoError(t, err) require.False(t, adapter.CanSubmitJob(tbl.Meta().ID, tbl.Meta().ID)) // ttl table tk.MustExec("create table ttl1(t timestamp) TTL=`t`+interval 1 DAY") - tbl, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ttl1")) + tbl, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ttl1")) require.NoError(t, err) require.True(t, adapter.CanSubmitJob(tbl.Meta().ID, tbl.Meta().ID)) // ttl table but disabled tk.MustExec("create table ttl2(t timestamp) TTL=`t`+interval 1 DAY TTL_ENABLE='OFF'") - tbl, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ttl2")) + tbl, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ttl2")) require.NoError(t, err) require.False(t, adapter.CanSubmitJob(tbl.Meta().ID, tbl.Meta().ID)) @@ -1100,7 +1101,7 @@ 
func TestManagerJobAdapterCanSubmitJob(t *testing.T) { "PARTITION p0 VALUES LESS THAN (10)," + "PARTITION p1 VALUES LESS THAN (100)" + ")") - tbl, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ttlp1")) + tbl, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ttlp1")) require.NoError(t, err) for _, def := range tbl.Meta().Partition.Definitions { require.True(t, adapter.CanSubmitJob(tbl.Meta().ID, def.ID)) @@ -1127,7 +1128,7 @@ func TestManagerJobAdapterCanSubmitJob(t *testing.T) { tk.MustExec("update mysql.tidb_ttl_task set status='finished' where job_id=?", jobID) } } - tbl, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("ttl1")) + tbl, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("ttl1")) require.NoError(t, err) require.True(t, adapter.CanSubmitJob(tbl.Meta().ID, tbl.Meta().ID)) tk.MustExec("update mysql.tidb_ttl_task set status='running' where job_id='8'") diff --git a/pkg/ttl/ttlworker/job_manager_test.go b/pkg/ttl/ttlworker/job_manager_test.go index 775744f67672f..f509d26e3dbb1 100644 --- a/pkg/ttl/ttlworker/job_manager_test.go +++ b/pkg/ttl/ttlworker/job_manager_test.go @@ -21,8 +21,9 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" timerapi "github.com/pingcap/tidb/pkg/timer/api" "github.com/pingcap/tidb/pkg/ttl/cache" @@ -369,7 +370,7 @@ func TestLockTable(t *testing.T) { oldJobExpireTime := now.Add(-time.Hour) oldJobStartTime := now.Add(-30 * time.Minute) - testPhysicalTable := &cache.PhysicalTable{ID: 1, Schema: model.NewCIStr("test"), TableInfo: &model.TableInfo{ID: 1, Name: model.NewCIStr("t1"), TTLInfo: &model.TTLInfo{ColumnName: 
model.NewCIStr("test"), IntervalExprStr: "1", IntervalTimeUnit: int(ast.TimeUnitMinute), JobInterval: "1h"}}} + testPhysicalTable := &cache.PhysicalTable{ID: 1, Schema: pmodel.NewCIStr("test"), TableInfo: &model.TableInfo{ID: 1, Name: pmodel.NewCIStr("t1"), TTLInfo: &model.TTLInfo{ColumnName: pmodel.NewCIStr("test"), IntervalExprStr: "1", IntervalTimeUnit: int(ast.TimeUnitMinute), JobInterval: "1h"}}} type executeInfo struct { sql string diff --git a/pkg/ttl/ttlworker/session_test.go b/pkg/ttl/ttlworker/session_test.go index 8b7f79a83da1b..b9022802b1814 100644 --- a/pkg/ttl/ttlworker/session_test.go +++ b/pkg/ttl/ttlworker/session_test.go @@ -25,8 +25,9 @@ import ( "github.com/ngaut/pools" "github.com/pingcap/tidb/pkg/infoschema" infoschemactx "github.com/pingcap/tidb/pkg/infoschema/context" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -42,18 +43,18 @@ var idAllocator atomic.Int64 func newMockTTLTbl(t *testing.T, name string) *cache.PhysicalTable { tblInfo := &model.TableInfo{ ID: idAllocator.Add(1), - Name: model.NewCIStr(name), + Name: pmodel.NewCIStr(name), Columns: []*model.ColumnInfo{ { ID: 1, - Name: model.NewCIStr("time"), + Name: pmodel.NewCIStr("time"), Offset: 0, FieldType: *types.NewFieldType(mysql.TypeDatetime), State: model.StatePublic, }, }, TTLInfo: &model.TTLInfo{ - ColumnName: model.NewCIStr("time"), + ColumnName: pmodel.NewCIStr("time"), IntervalExprStr: "1", IntervalTimeUnit: int(ast.TimeUnitSecond), Enable: true, @@ -62,7 +63,7 @@ func newMockTTLTbl(t *testing.T, name string) *cache.PhysicalTable { State: model.StatePublic, } - tbl, err := cache.NewPhysicalTable(model.NewCIStr("test"), tblInfo, model.NewCIStr("")) + tbl, err := cache.NewPhysicalTable(pmodel.NewCIStr("test"), 
tblInfo, pmodel.NewCIStr("")) require.NoError(t, err) return tbl } @@ -303,7 +304,7 @@ func TestValidateTTLWork(t *testing.T) { // test table name changed tbl2 = tbl.TableInfo.Clone() - tbl2.Name = model.NewCIStr("testcc") + tbl2.Name = pmodel.NewCIStr("testcc") s.sessionInfoSchema = newMockInfoSchema(tbl2) err = validateTTLWork(ctx, s, tbl, expire) require.EqualError(t, err, "[schema:1146]Table 'test.t1' doesn't exist") @@ -318,8 +319,8 @@ func TestValidateTTLWork(t *testing.T) { // test time column name changed tbl2 = tbl.TableInfo.Clone() tbl2.Columns[0] = tbl2.Columns[0].Clone() - tbl2.Columns[0].Name = model.NewCIStr("time2") - tbl2.TTLInfo.ColumnName = model.NewCIStr("time2") + tbl2.Columns[0].Name = pmodel.NewCIStr("time2") + tbl2.TTLInfo.ColumnName = pmodel.NewCIStr("time2") s.sessionInfoSchema = newMockInfoSchema(tbl2) err = validateTTLWork(ctx, s, tbl, expire) require.EqualError(t, err, "time column name changed") @@ -354,14 +355,14 @@ func TestValidateTTLWork(t *testing.T) { tp := tbl.TableInfo.Clone() tp.Partition = &model.PartitionInfo{ Definitions: []model.PartitionDefinition{ - {ID: 1023, Name: model.NewCIStr("p0")}, + {ID: 1023, Name: pmodel.NewCIStr("p0")}, }, } - tbl, err = cache.NewPhysicalTable(model.NewCIStr("test"), tp, model.NewCIStr("p0")) + tbl, err = cache.NewPhysicalTable(pmodel.NewCIStr("test"), tp, pmodel.NewCIStr("p0")) require.NoError(t, err) tbl2 = tp.Clone() tbl2.Partition = tp.Partition.Clone() - tbl2.Partition.Definitions[0].Name = model.NewCIStr("p1") + tbl2.Partition.Definitions[0].Name = pmodel.NewCIStr("p1") s.sessionInfoSchema = newMockInfoSchema(tbl2) err = validateTTLWork(ctx, s, tbl, expire) require.EqualError(t, err, "partition 'p0' is not found in ttl table 'test.t1'") diff --git a/pkg/ttl/ttlworker/timer_sync.go b/pkg/ttl/ttlworker/timer_sync.go index bc18b94241d84..92501c63f1837 100644 --- a/pkg/ttl/ttlworker/timer_sync.go +++ b/pkg/ttl/ttlworker/timer_sync.go @@ -23,8 +23,9 @@ import ( "github.com/pingcap/errors" 
"github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" timerapi "github.com/pingcap/tidb/pkg/timer/api" "github.com/pingcap/tidb/pkg/ttl/cache" "github.com/pingcap/tidb/pkg/ttl/session" @@ -222,7 +223,7 @@ func (g *TTLTimersSyncer) SyncTimers(ctx context.Context, is infoschema.InfoSche } } -func (g *TTLTimersSyncer) syncTimersForTable(ctx context.Context, se session.Session, schema model.CIStr, tblInfo *model.TableInfo) []string { +func (g *TTLTimersSyncer) syncTimersForTable(ctx context.Context, se session.Session, schema pmodel.CIStr, tblInfo *model.TableInfo) []string { if tblInfo.Partition == nil { key := buildTimerKey(tblInfo, nil) if _, err := g.syncOneTimer(ctx, se, schema, tblInfo, nil, false); err != nil { @@ -244,7 +245,7 @@ func (g *TTLTimersSyncer) syncTimersForTable(ctx context.Context, se session.Ses return keys } -func (g *TTLTimersSyncer) shouldSyncTimer(timer *timerapi.TimerRecord, schema model.CIStr, tblInfo *model.TableInfo, partition *model.PartitionDefinition) bool { +func (g *TTLTimersSyncer) shouldSyncTimer(timer *timerapi.TimerRecord, schema pmodel.CIStr, tblInfo *model.TableInfo, partition *model.PartitionDefinition) bool { if timer == nil { return true } @@ -256,7 +257,7 @@ func (g *TTLTimersSyncer) shouldSyncTimer(timer *timerapi.TimerRecord, schema mo timer.SchedPolicyExpr != ttlInfo.JobInterval } -func (g *TTLTimersSyncer) syncOneTimer(ctx context.Context, se session.Session, schema model.CIStr, tblInfo *model.TableInfo, partition *model.PartitionDefinition, skipCache bool) (*timerapi.TimerRecord, error) { +func (g *TTLTimersSyncer) syncOneTimer(ctx context.Context, se session.Session, schema pmodel.CIStr, tblInfo *model.TableInfo, partition *model.PartitionDefinition, skipCache bool) (*timerapi.TimerRecord, error) { key := buildTimerKey(tblInfo, partition) tags := 
getTimerTags(schema, tblInfo, partition) ttlInfo := tblInfo.TTLInfo @@ -351,7 +352,7 @@ func (g *TTLTimersSyncer) syncOneTimer(ctx context.Context, se session.Session, return timer, nil } -func getTimerTags(schema model.CIStr, tblInfo *model.TableInfo, partition *model.PartitionDefinition) []string { +func getTimerTags(schema pmodel.CIStr, tblInfo *model.TableInfo, partition *model.PartitionDefinition) []string { dbTag := fmt.Sprintf("db=%s", schema.O) tblTag := fmt.Sprintf("table=%s", tblInfo.Name.O) if partition != nil { diff --git a/pkg/util/BUILD.bazel b/pkg/util/BUILD.bazel index 1b20bcd29ea1f..d4be539c6ffb4 100644 --- a/pkg/util/BUILD.bazel +++ b/pkg/util/BUILD.bazel @@ -29,6 +29,7 @@ go_library( "//pkg/config", "//pkg/infoschema/context", "//pkg/kv", + "//pkg/meta/model", "//pkg/metrics", "//pkg/parser", "//pkg/parser/auth", @@ -82,6 +83,7 @@ go_test( shard_count = 50, deps = [ "//pkg/kv", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/model", "//pkg/parser/mysql", diff --git a/pkg/util/dbutil/BUILD.bazel b/pkg/util/dbutil/BUILD.bazel index 098cd4915ac0f..c6880117717d4 100644 --- a/pkg/util/dbutil/BUILD.bazel +++ b/pkg/util/dbutil/BUILD.bazel @@ -17,10 +17,10 @@ go_library( deps = [ "//pkg/errno", "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/auth", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/types", "//pkg/types/parser_driver", @@ -48,8 +48,8 @@ go_test( deps = [ "//pkg/errno", "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/types", "//pkg/util/dbutil/dbutiltest", diff --git a/pkg/util/dbutil/common.go b/pkg/util/dbutil/common.go index d9928c8041006..b21564ba2ed86 100644 --- a/pkg/util/dbutil/common.go +++ b/pkg/util/dbutil/common.go @@ -29,8 +29,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - 
"github.com/pingcap/tidb/pkg/parser/model" tmysql "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" diff --git a/pkg/util/dbutil/common_test.go b/pkg/util/dbutil/common_test.go index 44290d34c8e04..0187059d7e49d 100644 --- a/pkg/util/dbutil/common_test.go +++ b/pkg/util/dbutil/common_test.go @@ -23,7 +23,7 @@ import ( "github.com/go-sql-driver/mysql" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" pmysql "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/stretchr/testify/require" diff --git a/pkg/util/dbutil/dbutiltest/BUILD.bazel b/pkg/util/dbutil/dbutiltest/BUILD.bazel index 31486006ae64d..08609c1c0c282 100644 --- a/pkg/util/dbutil/dbutiltest/BUILD.bazel +++ b/pkg/util/dbutil/dbutiltest/BUILD.bazel @@ -7,6 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/ddl", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/model", diff --git a/pkg/util/dbutil/dbutiltest/utils.go b/pkg/util/dbutil/dbutiltest/utils.go index 90c75e2fe5f37..c6149398721b7 100644 --- a/pkg/util/dbutil/dbutiltest/utils.go +++ b/pkg/util/dbutil/dbutiltest/utils.go @@ -21,9 +21,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/ddl" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" _ "github.com/pingcap/tidb/pkg/planner/core" // to setup expression.EvalAstExpr. 
See: https://github.com/pingcap/tidb/blob/a94cff903cd1e7f3b050db782da84273ef5592f4/planner/core/optimizer.go#L202 "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/dbutil" @@ -60,11 +61,11 @@ func GetTableInfoBySQL(createTableSQL string, parser2 *parser.Parser) (table *mo // put primary key in indices if table.PKIsHandle { pkIndex := &model.IndexInfo{ - Name: model.NewCIStr("PRIMARY"), + Name: pmodel.NewCIStr("PRIMARY"), Primary: true, State: model.StatePublic, Unique: true, - Tp: model.IndexTypeBtree, + Tp: pmodel.IndexTypeBtree, Columns: []*model.IndexColumn{ { Name: table.GetPkName(), diff --git a/pkg/util/dbutil/index.go b/pkg/util/dbutil/index.go index 5c4fb980f0bff..1155ea3551c99 100644 --- a/pkg/util/dbutil/index.go +++ b/pkg/util/dbutil/index.go @@ -21,7 +21,7 @@ import ( "strconv" "github.com/pingcap/errors" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) // IndexInfo contains information of table index. diff --git a/pkg/util/dbutil/table.go b/pkg/util/dbutil/table.go index 6dfd3d62e4bab..00cf954cc4ae3 100644 --- a/pkg/util/dbutil/table.go +++ b/pkg/util/dbutil/table.go @@ -17,7 +17,7 @@ package dbutil import ( "strings" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" _ "github.com/pingcap/tidb/pkg/types/parser_driver" // for parser driver ) diff --git a/pkg/util/dbutil/table_test.go b/pkg/util/dbutil/table_test.go index 0f2a1ccbeaaf1..d9febec38c949 100644 --- a/pkg/util/dbutil/table_test.go +++ b/pkg/util/dbutil/table_test.go @@ -18,8 +18,8 @@ import ( "fmt" "testing" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/dbutil" diff --git a/pkg/util/deadlockhistory/BUILD.bazel b/pkg/util/deadlockhistory/BUILD.bazel index 25a478a4526d9..f9415cb09dd07 100644 --- 
a/pkg/util/deadlockhistory/BUILD.bazel +++ b/pkg/util/deadlockhistory/BUILD.bazel @@ -25,6 +25,7 @@ go_test( embed = [":deadlockhistory"], flaky = True, deps = [ + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/model", "//pkg/testkit/testsetup", diff --git a/pkg/util/deadlockhistory/deadlock_history_test.go b/pkg/util/deadlockhistory/deadlock_history_test.go index a8154b19e1ec9..65354687698eb 100644 --- a/pkg/util/deadlockhistory/deadlock_history_test.go +++ b/pkg/util/deadlockhistory/deadlock_history_test.go @@ -20,8 +20,9 @@ import ( "github.com/pingcap/kvproto/pkg/deadlock" "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tipb/go-tipb" "github.com/stretchr/testify/assert" @@ -196,15 +197,15 @@ func TestGetDatum(t *testing.T) { }) dummyColumnInfo := []*model.ColumnInfo{ - {Name: model.NewCIStr(ColDeadlockIDStr)}, - {Name: model.NewCIStr(ColOccurTimeStr)}, - {Name: model.NewCIStr(ColRetryableStr)}, - {Name: model.NewCIStr(ColTryLockTrxIDStr)}, - {Name: model.NewCIStr(ColCurrentSQLDigestStr)}, - {Name: model.NewCIStr(ColCurrentSQLDigestTextStr)}, - {Name: model.NewCIStr(ColKeyStr)}, - {Name: model.NewCIStr(ColKeyInfoStr)}, - {Name: model.NewCIStr(ColTrxHoldingLockStr)}, + {Name: pmodel.NewCIStr(ColDeadlockIDStr)}, + {Name: pmodel.NewCIStr(ColOccurTimeStr)}, + {Name: pmodel.NewCIStr(ColRetryableStr)}, + {Name: pmodel.NewCIStr(ColTryLockTrxIDStr)}, + {Name: pmodel.NewCIStr(ColCurrentSQLDigestStr)}, + {Name: pmodel.NewCIStr(ColCurrentSQLDigestTextStr)}, + {Name: pmodel.NewCIStr(ColKeyStr)}, + {Name: pmodel.NewCIStr(ColKeyInfoStr)}, + {Name: pmodel.NewCIStr(ColTrxHoldingLockStr)}, } res := getAllDatum(h, dummyColumnInfo) diff --git a/pkg/util/domainutil/BUILD.bazel b/pkg/util/domainutil/BUILD.bazel index 32ccbf9120e5d..81e4bf40464c0 100644 --- 
a/pkg/util/domainutil/BUILD.bazel +++ b/pkg/util/domainutil/BUILD.bazel @@ -5,5 +5,5 @@ go_library( srcs = ["repair_vars.go"], importpath = "github.com/pingcap/tidb/pkg/util/domainutil", visibility = ["//visibility:public"], - deps = ["//pkg/parser/model"], + deps = ["//pkg/meta/model"], ) diff --git a/pkg/util/domainutil/repair_vars.go b/pkg/util/domainutil/repair_vars.go index 1bd3bc5ff4758..07cf86f1dd14d 100644 --- a/pkg/util/domainutil/repair_vars.go +++ b/pkg/util/domainutil/repair_vars.go @@ -18,7 +18,7 @@ import ( "strings" "sync" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) type repairInfo struct { diff --git a/pkg/util/gcutil/BUILD.bazel b/pkg/util/gcutil/BUILD.bazel index 16a0fbcf4e916..8a95024685b05 100644 --- a/pkg/util/gcutil/BUILD.bazel +++ b/pkg/util/gcutil/BUILD.bazel @@ -7,7 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/sessionctx", "//pkg/sessionctx/variable", "@com_github_pingcap_errors//:errors", diff --git a/pkg/util/gcutil/gcutil.go b/pkg/util/gcutil/gcutil.go index 751dfefe4ed7b..05c9e64e0d445 100644 --- a/pkg/util/gcutil/gcutil.go +++ b/pkg/util/gcutil/gcutil.go @@ -19,7 +19,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/tikv/client-go/v2/oracle" diff --git a/pkg/util/generatedexpr/BUILD.bazel b/pkg/util/generatedexpr/BUILD.bazel index 18a7323de356d..460546c8809f2 100644 --- a/pkg/util/generatedexpr/BUILD.bazel +++ b/pkg/util/generatedexpr/BUILD.bazel @@ -6,10 +6,10 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/util/generatedexpr", visibility = ["//visibility:public"], deps = [ + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/charset", - "//pkg/parser/model", 
"//pkg/util", "@com_github_pingcap_errors//:errors", ], diff --git a/pkg/util/generatedexpr/generated_expr.go b/pkg/util/generatedexpr/generated_expr.go index 0a407a504adec..7c966e97f4ea7 100644 --- a/pkg/util/generatedexpr/generated_expr.go +++ b/pkg/util/generatedexpr/generated_expr.go @@ -18,10 +18,10 @@ import ( "fmt" "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/charset" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/util" ) diff --git a/pkg/util/hint/BUILD.bazel b/pkg/util/hint/BUILD.bazel index b3fb39aadfe8d..9d345f43d1ae3 100644 --- a/pkg/util/hint/BUILD.bazel +++ b/pkg/util/hint/BUILD.bazel @@ -11,6 +11,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/errno", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/format", diff --git a/pkg/util/hint/hint.go b/pkg/util/hint/hint.go index b28e5c4dc1777..02b240711ab80 100644 --- a/pkg/util/hint/hint.go +++ b/pkg/util/hint/hint.go @@ -22,9 +22,10 @@ import ( "github.com/pingcap/errors" mysql "github.com/pingcap/tidb/pkg/errno" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/dbterror" ) @@ -290,7 +291,7 @@ func (sh *StmtHints) addHypoIndex(db, tbl, idx string, idxInfo *model.IndexInfo) // ParseStmtHints parses statement hints. 
func ParseStmtHints(hints []*ast.TableOptimizerHint, setVarHintChecker func(varName, hint string) (ok bool, warning error), - hypoIndexChecker func(db, tbl, col model.CIStr) (colOffset int, err error), + hypoIndexChecker func(db, tbl, col pmodel.CIStr) (colOffset int, err error), currentDB string, replicaReadFollower byte) ( // to avoid cycle import stmtHints StmtHints, offs []int, warns []error) { if len(hints) == 0 { @@ -343,12 +344,12 @@ func ParseStmtHints(hints []*ast.TableOptimizerHint, } tbl := hint.Tables[0].TableName idx := hint.Tables[1].TableName - var colNames []model.CIStr + var colNames []pmodel.CIStr var cols []*model.IndexColumn invalid := false for i := 2; i < len(hint.Tables); i++ { colNames = append(colNames, hint.Tables[i].TableName) - offset, err := hypoIndexChecker(model.NewCIStr(db), tbl, hint.Tables[i].TableName) + offset, err := hypoIndexChecker(pmodel.NewCIStr(db), tbl, hint.Tables[i].TableName) if err != nil { invalid = true warns = append(warns, errors.NewNoStackErrorf("invalid HYPO_INDEX hint: %v", err)) @@ -367,7 +368,7 @@ func ParseStmtHints(hints []*ast.TableOptimizerHint, Name: idx, Columns: cols, State: model.StatePublic, - Tp: model.IndexTypeHypo, + Tp: pmodel.IndexTypeHypo, } stmtHints.addHypoIndex(db, tbl.L, idx.L, idxInfo) case "set_var": @@ -551,18 +552,18 @@ type PlanHints struct { // HintedTable indicates which table this hint should take effect on. 
type HintedTable struct { - DBName model.CIStr // the database name - TblName model.CIStr // the table name - Partitions []model.CIStr // partition information - SelectOffset int // the select block offset of this hint - Matched bool // whether this hint is applied successfully + DBName pmodel.CIStr // the database name + TblName pmodel.CIStr // the table name + Partitions []pmodel.CIStr // partition information + SelectOffset int // the select block offset of this hint + Matched bool // whether this hint is applied successfully } // HintedIndex indicates which index this hint should take effect on. type HintedIndex struct { - DBName model.CIStr // the database name - TblName model.CIStr // the table name - Partitions []model.CIStr // partition information + DBName pmodel.CIStr // the database name + TblName pmodel.CIStr // the table name + Partitions []pmodel.CIStr // partition information IndexHint *ast.IndexHint // the original parser index hint structure // Matched indicates whether this index hint // has been successfully applied to a DataSource. @@ -572,7 +573,7 @@ type HintedIndex struct { } // Match checks whether the hint is matched with the given dbName and tblName. -func (hint *HintedIndex) Match(dbName, tblName model.CIStr) bool { +func (hint *HintedIndex) Match(dbName, tblName pmodel.CIStr) bool { return hint.TblName.L == tblName.L && (hint.DBName.L == dbName.L || hint.DBName.L == "*") // for universal bindings, e.g. 
*.t @@ -809,7 +810,7 @@ func ParsePlanHints(hints []*ast.TableOptimizerHint, case HintUseIndex, HintIgnoreIndex, HintForceIndex, HintOrderIndex, HintNoOrderIndex: dbName := hint.Tables[0].DBName if dbName.L == "" { - dbName = model.NewCIStr(currentDB) + dbName = pmodel.NewCIStr(currentDB) } var hintType ast.IndexHintType switch hint.HintName.L { @@ -835,7 +836,7 @@ func ParsePlanHints(hints []*ast.TableOptimizerHint, }, }) case HintReadFromStorage: - switch hint.HintData.(model.CIStr).L { + switch hint.HintData.(pmodel.CIStr).L { case HintTiFlash: tiflashTables = append(tiflashTables, tableNames2HintTableInfo(currentDB, hint.HintName.L, hint.Tables, hintProcessor, currentLevel, warnHandler)...) case HintTiKV: @@ -844,7 +845,7 @@ func ParsePlanHints(hints []*ast.TableOptimizerHint, case HintIndexMerge: dbName := hint.Tables[0].DBName if dbName.L == "" { - dbName = model.NewCIStr(currentDB) + dbName = pmodel.NewCIStr(currentDB) } indexMergeHintList = append(indexMergeHintList, HintedIndex{ DBName: dbName, @@ -945,7 +946,7 @@ func tableNames2HintTableInfo(currentDB, hintName string, hintTables []ast.HintT return nil } hintTableInfos := make([]HintedTable, 0, len(hintTables)) - defaultDBName := model.NewCIStr(currentDB) + defaultDBName := pmodel.NewCIStr(currentDB) isInapplicable := false for _, hintTable := range hintTables { tableInfo := HintedTable{ diff --git a/pkg/util/keydecoder/BUILD.bazel b/pkg/util/keydecoder/BUILD.bazel index bf8759d232273..8d7d53a5f03e2 100644 --- a/pkg/util/keydecoder/BUILD.bazel +++ b/pkg/util/keydecoder/BUILD.bazel @@ -8,7 +8,7 @@ go_library( deps = [ "//pkg/infoschema", "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/tablecodec", "//pkg/util/logutil", "@com_github_pingcap_errors//:errors", @@ -27,6 +27,7 @@ go_test( flaky = True, deps = [ "//pkg/infoschema", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/planner/core", "//pkg/sessionctx/stmtctx", diff --git a/pkg/util/keydecoder/keydecoder.go 
b/pkg/util/keydecoder/keydecoder.go index 115845c29f21f..01235de7f6644 100644 --- a/pkg/util/keydecoder/keydecoder.go +++ b/pkg/util/keydecoder/keydecoder.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util/logutil" "go.uber.org/zap" diff --git a/pkg/util/keydecoder/keydecoder_test.go b/pkg/util/keydecoder/keydecoder_test.go index 380472525a0a0..e37e674882bff 100644 --- a/pkg/util/keydecoder/keydecoder_test.go +++ b/pkg/util/keydecoder/keydecoder_test.go @@ -18,7 +18,8 @@ import ( "testing" "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" _ "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/table" @@ -35,28 +36,28 @@ func TestDecodeKey(t *testing.T) { table.MockTableFromMeta = tables.MockTableFromMeta tableInfo1 := &model.TableInfo{ ID: 1, - Name: model.NewCIStr("table1"), + Name: pmodel.NewCIStr("table1"), Indices: []*model.IndexInfo{ - {ID: 1, Name: model.NewCIStr("index1"), State: model.StatePublic}, + {ID: 1, Name: pmodel.NewCIStr("index1"), State: model.StatePublic}, }, } - tableInfo2 := &model.TableInfo{ID: 2, Name: model.NewCIStr("table2")} + tableInfo2 := &model.TableInfo{ID: 2, Name: pmodel.NewCIStr("table2")} tableInfo3 := &model.TableInfo{ ID: 3, - Name: model.NewCIStr("table3"), + Name: pmodel.NewCIStr("table3"), Columns: []*model.ColumnInfo{ - {ID: 10, Name: model.NewCIStr("col"), State: model.StatePublic}, + {ID: 10, Name: pmodel.NewCIStr("col"), State: model.StatePublic}, }, Indices: []*model.IndexInfo{ - {ID: 4, Name: model.NewCIStr("index4"), State: model.StatePublic}, + {ID: 4, Name: pmodel.NewCIStr("index4"), 
State: model.StatePublic}, }, Partition: &model.PartitionInfo{ - Type: model.PartitionTypeRange, + Type: pmodel.PartitionTypeRange, Expr: "`col`", Enable: true, Definitions: []model.PartitionDefinition{ - {ID: 5, Name: model.NewCIStr("p0"), LessThan: []string{"10"}}, - {ID: 6, Name: model.NewCIStr("p1"), LessThan: []string{"MAXVALUE"}}, + {ID: 5, Name: pmodel.NewCIStr("p0"), LessThan: []string{"10"}}, + {ID: 6, Name: pmodel.NewCIStr("p1"), LessThan: []string{"MAXVALUE"}}, }, }, } diff --git a/pkg/util/logutil/BUILD.bazel b/pkg/util/logutil/BUILD.bazel index 88f8c13fea867..f073b405d8ce1 100644 --- a/pkg/util/logutil/BUILD.bazel +++ b/pkg/util/logutil/BUILD.bazel @@ -11,7 +11,7 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/util/logutil", visibility = ["//visibility:public"], deps = [ - "//pkg/parser/model", + "//pkg/meta/model", "@com_github_golang_protobuf//proto", "@com_github_grpc_ecosystem_go_grpc_middleware//logging/zap", "@com_github_opentracing_opentracing_go//:opentracing-go", @@ -38,7 +38,7 @@ go_test( flaky = True, deps = [ "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/testkit/testsetup", "@com_github_google_uuid//:uuid", "@com_github_pingcap_kvproto//pkg/metapb", diff --git a/pkg/util/logutil/consistency/BUILD.bazel b/pkg/util/logutil/consistency/BUILD.bazel index f251a22a8c198..3e0c759417749 100644 --- a/pkg/util/logutil/consistency/BUILD.bazel +++ b/pkg/util/logutil/consistency/BUILD.bazel @@ -8,7 +8,7 @@ go_library( deps = [ "//pkg/errno", "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/store/helper", "//pkg/tablecodec", "//pkg/types", diff --git a/pkg/util/logutil/consistency/reporter.go b/pkg/util/logutil/consistency/reporter.go index 0727bc0ce4579..7aeb63c798441 100644 --- a/pkg/util/logutil/consistency/reporter.go +++ b/pkg/util/logutil/consistency/reporter.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" - 
"github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/store/helper" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/util/logutil/log.go b/pkg/util/logutil/log.go index 5f7fee6c28a5d..9ae1b0d44f027 100644 --- a/pkg/util/logutil/log.go +++ b/pkg/util/logutil/log.go @@ -28,7 +28,7 @@ import ( tlog "github.com/opentracing/opentracing-go/log" "github.com/pingcap/errors" "github.com/pingcap/log" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/tikv/client-go/v2/tikv" "go.uber.org/zap" "go.uber.org/zap/zapcore" diff --git a/pkg/util/logutil/log_test.go b/pkg/util/logutil/log_test.go index c39eac811c5a3..428fefc478b00 100644 --- a/pkg/util/logutil/log_test.go +++ b/pkg/util/logutil/log_test.go @@ -28,7 +28,7 @@ import ( "github.com/google/uuid" "github.com/pingcap/log" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/stretchr/testify/require" "go.uber.org/zap" "go.uber.org/zap/zapcore" diff --git a/pkg/util/misc.go b/pkg/util/misc.go index c36dc284f1c1a..2c0d2ae16429e 100644 --- a/pkg/util/misc.go +++ b/pkg/util/misc.go @@ -40,9 +40,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/config" infoschema "github.com/pingcap/tidb/pkg/infoschema/context" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/util/collate" @@ -179,11 +180,11 @@ func SyntaxWarn(err error) error { var ( // InformationSchemaName is the `INFORMATION_SCHEMA` database name. 
- InformationSchemaName = model.NewCIStr("INFORMATION_SCHEMA") + InformationSchemaName = pmodel.NewCIStr("INFORMATION_SCHEMA") // PerformanceSchemaName is the `PERFORMANCE_SCHEMA` database name. - PerformanceSchemaName = model.NewCIStr("PERFORMANCE_SCHEMA") + PerformanceSchemaName = pmodel.NewCIStr("PERFORMANCE_SCHEMA") // MetricSchemaName is the `METRICS_SCHEMA` database name. - MetricSchemaName = model.NewCIStr("METRICS_SCHEMA") + MetricSchemaName = pmodel.NewCIStr("METRICS_SCHEMA") // ClusterTableInstanceColumnName is the `INSTANCE` column name of the cluster table. ClusterTableInstanceColumnName = "INSTANCE" ) @@ -448,7 +449,7 @@ func init() { } // GetSequenceByName could be used in expression package without import cycle problem. -var GetSequenceByName func(is infoschema.MetaOnlyInfoSchema, schema, sequence model.CIStr) (SequenceTable, error) +var GetSequenceByName func(is infoschema.MetaOnlyInfoSchema, schema, sequence pmodel.CIStr) (SequenceTable, error) // SequenceTable is implemented by tableCommon, // and it is specialised in handling sequence operation. 
diff --git a/pkg/util/misc_test.go b/pkg/util/misc_test.go index 27550fadfa8fa..4908ccd50667a 100644 --- a/pkg/util/misc_test.go +++ b/pkg/util/misc_test.go @@ -21,8 +21,9 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" @@ -155,7 +156,7 @@ func TestBasicFuncRandomBuf(t *testing.T) { func TestToPB(t *testing.T) { column := &model.ColumnInfo{ ID: 1, - Name: model.NewCIStr("c"), + Name: pmodel.NewCIStr("c"), Offset: 0, DefaultValue: 0, FieldType: *types.NewFieldType(0), @@ -165,7 +166,7 @@ func TestToPB(t *testing.T) { column2 := &model.ColumnInfo{ ID: 1, - Name: model.NewCIStr("c"), + Name: pmodel.NewCIStr("c"), Offset: 0, DefaultValue: 0, FieldType: *types.NewFieldType(0), diff --git a/pkg/util/mock/BUILD.bazel b/pkg/util/mock/BUILD.bazel index a9dc5cb09a2ba..3c14502716741 100644 --- a/pkg/util/mock/BUILD.bazel +++ b/pkg/util/mock/BUILD.bazel @@ -18,10 +18,12 @@ go_library( "//pkg/extension", "//pkg/infoschema/context", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/model", "//pkg/parser/terror", "//pkg/planner/context", + "//pkg/planner/core/resolve", "//pkg/session/cursor", "//pkg/sessionctx", "//pkg/sessionctx/sessionstates", diff --git a/pkg/util/mock/context.go b/pkg/util/mock/context.go index 1c87a2678afe9..0d7dc5a698e21 100644 --- a/pkg/util/mock/context.go +++ b/pkg/util/mock/context.go @@ -28,10 +28,12 @@ import ( "github.com/pingcap/tidb/pkg/extension" infoschema "github.com/pingcap/tidb/pkg/infoschema/context" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" 
"github.com/pingcap/tidb/pkg/parser/terror" planctx "github.com/pingcap/tidb/pkg/planner/context" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/session/cursor" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/sessionstates" @@ -161,12 +163,12 @@ func (*Context) ParseWithParams(_ context.Context, _ string, _ ...any) (ast.Stmt } // ExecRestrictedStmt implements sqlexec.RestrictedSQLExecutor ExecRestrictedStmt interface. -func (*Context) ExecRestrictedStmt(_ context.Context, _ ast.StmtNode, _ ...sqlexec.OptionFuncAlias) ([]chunk.Row, []*ast.ResultField, error) { +func (*Context) ExecRestrictedStmt(_ context.Context, _ ast.StmtNode, _ ...sqlexec.OptionFuncAlias) ([]chunk.Row, []*resolve.ResultField, error) { return nil, nil, errors.Errorf("Not Supported") } // ExecRestrictedSQL implements sqlexec.RestrictedSQLExecutor ExecRestrictedSQL interface. -func (*Context) ExecRestrictedSQL(_ context.Context, _ []sqlexec.OptionFuncAlias, _ string, _ ...any) ([]chunk.Row, []*ast.ResultField, error) { +func (*Context) ExecRestrictedSQL(_ context.Context, _ []sqlexec.OptionFuncAlias, _ string, _ ...any) ([]chunk.Row, []*resolve.ResultField, error) { return nil, nil, errors.Errorf("Not Supported") } @@ -506,8 +508,8 @@ func (*Context) ReleaseTableLockByTableIDs(_ []int64) { } // CheckTableLocked implements the sessionctx.Context interface. -func (*Context) CheckTableLocked(_ int64) (bool, model.TableLockType) { - return false, model.TableLockNone +func (*Context) CheckTableLocked(_ int64) (bool, pmodel.TableLockType) { + return false, pmodel.TableLockNone } // GetAllTableLocks implements the sessionctx.Context interface. 
diff --git a/pkg/util/ranger/BUILD.bazel b/pkg/util/ranger/BUILD.bazel index 7790e2da046d8..e2d98b515f10c 100644 --- a/pkg/util/ranger/BUILD.bazel +++ b/pkg/util/ranger/BUILD.bazel @@ -15,10 +15,10 @@ go_library( "//pkg/errctx", "//pkg/expression", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/ast", "//pkg/parser/charset", "//pkg/parser/format", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/planner/context", diff --git a/pkg/util/ranger/detacher.go b/pkg/util/ranger/detacher.go index 38292d475d52f..d7ae0d07b56df 100644 --- a/pkg/util/ranger/detacher.go +++ b/pkg/util/ranger/detacher.go @@ -19,8 +19,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/util/fixcontrol" "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/util/rowDecoder/BUILD.bazel b/pkg/util/rowDecoder/BUILD.bazel index 4afcd1eec0de9..f5f541bc65d39 100644 --- a/pkg/util/rowDecoder/BUILD.bazel +++ b/pkg/util/rowDecoder/BUILD.bazel @@ -9,7 +9,7 @@ go_library( "//pkg/expression", "//pkg/expression/context", "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/table", "//pkg/table/tables", "//pkg/tablecodec", @@ -31,6 +31,7 @@ go_test( ":rowDecoder", "//pkg/expression", "//pkg/kv", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/planner/core", diff --git a/pkg/util/rowDecoder/decoder.go b/pkg/util/rowDecoder/decoder.go index 330ffa2771617..b42973db34396 100644 --- a/pkg/util/rowDecoder/decoder.go +++ b/pkg/util/rowDecoder/decoder.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/tidb/pkg/expression" exprctx "github.com/pingcap/tidb/pkg/expression/context" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" 
"github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/util/rowDecoder/decoder_test.go b/pkg/util/rowDecoder/decoder_test.go index b51ebed181638..152e8387a394d 100644 --- a/pkg/util/rowDecoder/decoder_test.go +++ b/pkg/util/rowDecoder/decoder_test.go @@ -20,7 +20,8 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" _ "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" @@ -38,13 +39,13 @@ import ( func TestRowDecoder(t *testing.T) { defer view.Stop() - c1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeLonglong)} - c2 := &model.ColumnInfo{ID: 2, Name: model.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeVarchar)} - c3 := &model.ColumnInfo{ID: 3, Name: model.NewCIStr("c3"), State: model.StatePublic, Offset: 2, FieldType: *types.NewFieldType(mysql.TypeNewDecimal)} - c4 := &model.ColumnInfo{ID: 4, Name: model.NewCIStr("c4"), State: model.StatePublic, Offset: 3, FieldType: *types.NewFieldType(mysql.TypeTimestamp)} - c5 := &model.ColumnInfo{ID: 5, Name: model.NewCIStr("c5"), State: model.StatePublic, Offset: 4, FieldType: *types.NewFieldType(mysql.TypeDuration), OriginDefaultValue: "02:00:02"} - c6 := &model.ColumnInfo{ID: 6, Name: model.NewCIStr("c6"), State: model.StatePublic, Offset: 5, FieldType: *types.NewFieldType(mysql.TypeTimestamp), GeneratedExprString: "c4+c5"} - c7 := &model.ColumnInfo{ID: 7, Name: model.NewCIStr("c7"), State: model.StatePublic, Offset: 6, FieldType: *types.NewFieldType(mysql.TypeLonglong)} + c1 := &model.ColumnInfo{ID: 1, Name: pmodel.NewCIStr("c1"), State: model.StatePublic, 
Offset: 0, FieldType: *types.NewFieldType(mysql.TypeLonglong)} + c2 := &model.ColumnInfo{ID: 2, Name: pmodel.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeVarchar)} + c3 := &model.ColumnInfo{ID: 3, Name: pmodel.NewCIStr("c3"), State: model.StatePublic, Offset: 2, FieldType: *types.NewFieldType(mysql.TypeNewDecimal)} + c4 := &model.ColumnInfo{ID: 4, Name: pmodel.NewCIStr("c4"), State: model.StatePublic, Offset: 3, FieldType: *types.NewFieldType(mysql.TypeTimestamp)} + c5 := &model.ColumnInfo{ID: 5, Name: pmodel.NewCIStr("c5"), State: model.StatePublic, Offset: 4, FieldType: *types.NewFieldType(mysql.TypeDuration), OriginDefaultValue: "02:00:02"} + c6 := &model.ColumnInfo{ID: 6, Name: pmodel.NewCIStr("c6"), State: model.StatePublic, Offset: 5, FieldType: *types.NewFieldType(mysql.TypeTimestamp), GeneratedExprString: "c4+c5"} + c7 := &model.ColumnInfo{ID: 7, Name: pmodel.NewCIStr("c7"), State: model.StatePublic, Offset: 6, FieldType: *types.NewFieldType(mysql.TypeLonglong)} c7.AddFlag(mysql.PriKeyFlag) cols := []*model.ColumnInfo{c1, c2, c3, c4, c5, c6, c7} @@ -148,14 +149,14 @@ func TestRowDecoder(t *testing.T) { func TestClusterIndexRowDecoder(t *testing.T) { defer view.Stop() - c1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeLonglong)} - c2 := &model.ColumnInfo{ID: 2, Name: model.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeVarchar)} - c3 := &model.ColumnInfo{ID: 3, Name: model.NewCIStr("c3"), State: model.StatePublic, Offset: 2, FieldType: *types.NewFieldType(mysql.TypeNewDecimal)} + c1 := &model.ColumnInfo{ID: 1, Name: pmodel.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeLonglong)} + c2 := &model.ColumnInfo{ID: 2, Name: pmodel.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeVarchar)} + c3 
:= &model.ColumnInfo{ID: 3, Name: pmodel.NewCIStr("c3"), State: model.StatePublic, Offset: 2, FieldType: *types.NewFieldType(mysql.TypeNewDecimal)} c1.AddFlag(mysql.PriKeyFlag) c2.AddFlag(mysql.PriKeyFlag) - pk := &model.IndexInfo{ID: 1, Name: model.NewCIStr("primary"), State: model.StatePublic, Primary: true, Columns: []*model.IndexColumn{ - {Name: model.NewCIStr("c1"), Offset: 0}, - {Name: model.NewCIStr("c2"), Offset: 1}, + pk := &model.IndexInfo{ID: 1, Name: pmodel.NewCIStr("primary"), State: model.StatePublic, Primary: true, Columns: []*model.IndexColumn{ + {Name: pmodel.NewCIStr("c1"), Offset: 0}, + {Name: pmodel.NewCIStr("c2"), Offset: 1}, }} cols := []*model.ColumnInfo{c1, c2, c3} diff --git a/pkg/util/rowcodec/BUILD.bazel b/pkg/util/rowcodec/BUILD.bazel index 22f366b5ac02e..c4f65ed621f92 100644 --- a/pkg/util/rowcodec/BUILD.bazel +++ b/pkg/util/rowcodec/BUILD.bazel @@ -12,7 +12,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", "//pkg/parser/types", "//pkg/types", @@ -35,7 +35,7 @@ go_test( flaky = True, deps = [ "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/parser/mysql", "//pkg/sessionctx/stmtctx", "//pkg/tablecodec", diff --git a/pkg/util/rowcodec/bench_test.go b/pkg/util/rowcodec/bench_test.go index ebd601a8b9c23..cfc78e3434fcd 100644 --- a/pkg/util/rowcodec/bench_test.go +++ b/pkg/util/rowcodec/bench_test.go @@ -19,7 +19,7 @@ import ( "time" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/util/rowcodec/common.go b/pkg/util/rowcodec/common.go index 518dd16fa9681..1732af635065e 100644 --- a/pkg/util/rowcodec/common.go +++ b/pkg/util/rowcodec/common.go @@ -23,7 +23,7 @@ import ( "unsafe" "github.com/pingcap/errors" - 
"github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/types" data "github.com/pingcap/tidb/pkg/types" diff --git a/pkg/util/rowcodec/decoder.go b/pkg/util/rowcodec/decoder.go index 2d633853cc422..be1b46210d32b 100644 --- a/pkg/util/rowcodec/decoder.go +++ b/pkg/util/rowcodec/decoder.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" diff --git a/pkg/util/rowcodec/rowcodec_test.go b/pkg/util/rowcodec/rowcodec_test.go index b9837c3b20bb1..e6c0e106eb1eb 100644 --- a/pkg/util/rowcodec/rowcodec_test.go +++ b/pkg/util/rowcodec/rowcodec_test.go @@ -24,7 +24,7 @@ import ( "time" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/pkg/util/schemacmp/BUILD.bazel b/pkg/util/schemacmp/BUILD.bazel index 1399538d41388..3a3087abfa191 100644 --- a/pkg/util/schemacmp/BUILD.bazel +++ b/pkg/util/schemacmp/BUILD.bazel @@ -11,6 +11,7 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/util/schemacmp", visibility = ["//visibility:public"], deps = [ + "//pkg/meta/model", "//pkg/parser/charset", "//pkg/parser/format", "//pkg/parser/model", @@ -32,9 +33,9 @@ go_test( deps = [ ":schemacmp", "//pkg/ddl", + "//pkg/meta/model", "//pkg/parser", "//pkg/parser/ast", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/planner", "//pkg/sessionctx", diff --git a/pkg/util/schemacmp/table.go b/pkg/util/schemacmp/table.go index 95ccb59e2d0aa..e0135c36305c8 100644 --- a/pkg/util/schemacmp/table.go +++ b/pkg/util/schemacmp/table.go @@ 
-19,8 +19,9 @@ import ( "slices" "strings" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/format" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/types" ) @@ -114,7 +115,7 @@ func encodeImplicitPrimaryKeyToLattice(ci *model.ColumnInfo) Tuple { EqualitySingleton(indexColumnSlice{indexColumn{colName: ci.Name.L, length: types.UnspecifiedLength}}), Bool(false), Bool(false), - Singleton(model.IndexTypeBtree), + Singleton(pmodel.IndexTypeBtree), } } @@ -132,7 +133,7 @@ func restoreIndexInfoFromUnwrapped(ctx *format.RestoreCtx, index []any, keyName ctx.WriteName(keyName) } - if tp := index[indexInfoTupleIndexType].(model.IndexType); tp != model.IndexTypeBtree { + if tp := index[indexInfoTupleIndexType].(pmodel.IndexType); tp != pmodel.IndexTypeBtree { ctx.WriteKeyWord(" USING ") ctx.WriteKeyWord(tp.String()) } diff --git a/pkg/util/schemacmp/table_test.go b/pkg/util/schemacmp/table_test.go index 670c9600f3252..cd974a34d234b 100644 --- a/pkg/util/schemacmp/table_test.go +++ b/pkg/util/schemacmp/table_test.go @@ -20,9 +20,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/ddl" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" _ "github.com/pingcap/tidb/pkg/planner" "github.com/pingcap/tidb/pkg/sessionctx" diff --git a/pkg/util/sqlexec/BUILD.bazel b/pkg/util/sqlexec/BUILD.bazel index 565ebd18615fe..28c05ae6b02e0 100644 --- a/pkg/util/sqlexec/BUILD.bazel +++ b/pkg/util/sqlexec/BUILD.bazel @@ -12,6 +12,7 @@ go_library( "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/terror", + "//pkg/planner/core/resolve", "//pkg/sessionctx/sysproctrack", "//pkg/sessionctx/variable", "//pkg/types", diff --git a/pkg/util/sqlexec/mock/BUILD.bazel 
b/pkg/util/sqlexec/mock/BUILD.bazel index 6a4e668ccae27..b4504e53e2791 100644 --- a/pkg/util/sqlexec/mock/BUILD.bazel +++ b/pkg/util/sqlexec/mock/BUILD.bazel @@ -10,6 +10,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/parser/ast", + "//pkg/planner/core/resolve", "//pkg/sessionctx", "//pkg/util/chunk", "//pkg/util/mock", diff --git a/pkg/util/sqlexec/mock/restricted_sql_executor_mock.go b/pkg/util/sqlexec/mock/restricted_sql_executor_mock.go index 7db258957e7ab..0ee7f88c8b64b 100644 --- a/pkg/util/sqlexec/mock/restricted_sql_executor_mock.go +++ b/pkg/util/sqlexec/mock/restricted_sql_executor_mock.go @@ -14,6 +14,7 @@ import ( reflect "reflect" ast "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/planner/core/resolve" chunk "github.com/pingcap/tidb/pkg/util/chunk" sqlexec "github.com/pingcap/tidb/pkg/util/sqlexec" gomock "go.uber.org/mock/gomock" @@ -48,7 +49,7 @@ func (m *MockRestrictedSQLExecutor) ISGOMOCK() struct{} { } // ExecRestrictedSQL mocks base method. -func (m *MockRestrictedSQLExecutor) ExecRestrictedSQL(arg0 context.Context, arg1 []func(*sqlexec.ExecOption), arg2 string, arg3 ...any) ([]chunk.Row, []*ast.ResultField, error) { +func (m *MockRestrictedSQLExecutor) ExecRestrictedSQL(arg0 context.Context, arg1 []func(*sqlexec.ExecOption), arg2 string, arg3 ...any) ([]chunk.Row, []*resolve.ResultField, error) { m.ctrl.T.Helper() varargs := []any{arg0, arg1, arg2} for _, a := range arg3 { @@ -56,7 +57,7 @@ func (m *MockRestrictedSQLExecutor) ExecRestrictedSQL(arg0 context.Context, arg1 } ret := m.ctrl.Call(m, "ExecRestrictedSQL", varargs...) ret0, _ := ret[0].([]chunk.Row) - ret1, _ := ret[1].([]*ast.ResultField) + ret1, _ := ret[1].([]*resolve.ResultField) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } @@ -69,7 +70,7 @@ func (mr *MockRestrictedSQLExecutorMockRecorder) ExecRestrictedSQL(arg0, arg1, a } // ExecRestrictedStmt mocks base method. 
-func (m *MockRestrictedSQLExecutor) ExecRestrictedStmt(arg0 context.Context, arg1 ast.StmtNode, arg2 ...func(*sqlexec.ExecOption)) ([]chunk.Row, []*ast.ResultField, error) { +func (m *MockRestrictedSQLExecutor) ExecRestrictedStmt(arg0 context.Context, arg1 ast.StmtNode, arg2 ...func(*sqlexec.ExecOption)) ([]chunk.Row, []*resolve.ResultField, error) { m.ctrl.T.Helper() varargs := []any{arg0, arg1} for _, a := range arg2 { @@ -77,7 +78,7 @@ func (m *MockRestrictedSQLExecutor) ExecRestrictedStmt(arg0 context.Context, arg } ret := m.ctrl.Call(m, "ExecRestrictedStmt", varargs...) ret0, _ := ret[0].([]chunk.Row) - ret1, _ := ret[1].([]*ast.ResultField) + ret1, _ := ret[1].([]*resolve.ResultField) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } diff --git a/pkg/util/sqlexec/restricted_sql_executor.go b/pkg/util/sqlexec/restricted_sql_executor.go index a1dd7142afa0c..72b05bf3f29b4 100644 --- a/pkg/util/sqlexec/restricted_sql_executor.go +++ b/pkg/util/sqlexec/restricted_sql_executor.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/terror" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/sessionctx/sysproctrack" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/util/chunk" @@ -48,9 +49,9 @@ type RestrictedSQLExecutor interface { // This function only saves you from processing potentially unsafe parameters. ParseWithParams(ctx context.Context, sql string, args ...any) (ast.StmtNode, error) // ExecRestrictedStmt run sql statement in ctx with some restrictions. - ExecRestrictedStmt(ctx context.Context, stmt ast.StmtNode, opts ...OptionFuncAlias) ([]chunk.Row, []*ast.ResultField, error) + ExecRestrictedStmt(ctx context.Context, stmt ast.StmtNode, opts ...OptionFuncAlias) ([]chunk.Row, []*resolve.ResultField, error) // ExecRestrictedSQL run sql string in ctx with internal session. 
- ExecRestrictedSQL(ctx context.Context, opts []OptionFuncAlias, sql string, args ...any) ([]chunk.Row, []*ast.ResultField, error) + ExecRestrictedSQL(ctx context.Context, opts []OptionFuncAlias, sql string, args ...any) ([]chunk.Row, []*resolve.ResultField, error) } // ExecOption is a struct defined for ExecRestrictedStmt/SQL option. @@ -186,7 +187,7 @@ type Statement interface { // RecordSet is an abstract result set interface to help get data from Plan. type RecordSet interface { // Fields gets result fields. - Fields() []*ast.ResultField + Fields() []*resolve.ResultField // Next reads records into chunk. Next(ctx context.Context, req *chunk.Chunk) error diff --git a/pkg/util/sqlexec/simple_record_set.go b/pkg/util/sqlexec/simple_record_set.go index 716d350eb0065..0236b151b5ba8 100644 --- a/pkg/util/sqlexec/simple_record_set.go +++ b/pkg/util/sqlexec/simple_record_set.go @@ -17,21 +17,21 @@ package sqlexec import ( "context" - "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" ) // SimpleRecordSet is a simple implementation of RecordSet. All values are known when creating SimpleRecordSet. type SimpleRecordSet struct { - ResultFields []*ast.ResultField + ResultFields []*resolve.ResultField Rows [][]any MaxChunkSize int idx int } // Fields implements the sqlexec.RecordSet interface. 
-func (r *SimpleRecordSet) Fields() []*ast.ResultField { +func (r *SimpleRecordSet) Fields() []*resolve.ResultField { return r.ResultFields } diff --git a/pkg/util/stmtsummary/BUILD.bazel b/pkg/util/stmtsummary/BUILD.bazel index 0c57f7db72af9..efa5bc123b98c 100644 --- a/pkg/util/stmtsummary/BUILD.bazel +++ b/pkg/util/stmtsummary/BUILD.bazel @@ -10,8 +10,8 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/util/stmtsummary", visibility = ["//visibility:public"], deps = [ + "//pkg/meta/model", "//pkg/parser/auth", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/sessionctx/stmtctx", "//pkg/types", @@ -41,6 +41,7 @@ go_test( flaky = True, shard_count = 24, deps = [ + "//pkg/meta/model", "//pkg/parser/auth", "//pkg/parser/model", "//pkg/parser/mysql", diff --git a/pkg/util/stmtsummary/reader.go b/pkg/util/stmtsummary/reader.go index fd33210e70733..df7682484051c 100644 --- a/pkg/util/stmtsummary/reader.go +++ b/pkg/util/stmtsummary/reader.go @@ -19,8 +19,8 @@ import ( "strings" "time" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/logutil" diff --git a/pkg/util/stmtsummary/statement_summary_test.go b/pkg/util/stmtsummary/statement_summary_test.go index 67929b23a1afd..433b4c8a589ab 100644 --- a/pkg/util/stmtsummary/statement_summary_test.go +++ b/pkg/util/stmtsummary/statement_summary_test.go @@ -22,8 +22,9 @@ import ( "testing" "time" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" @@ -803,7 +804,7 @@ func newStmtSummaryReaderForTest(ssMap *stmtSummaryByDigestMap) *stmtSummaryRead for i := range columnNames { 
cols[i] = &model.ColumnInfo{ ID: int64(i), - Name: model.NewCIStr(columnNames[i]), + Name: pmodel.NewCIStr(columnNames[i]), Offset: i, } } diff --git a/pkg/util/stmtsummary/v2/BUILD.bazel b/pkg/util/stmtsummary/v2/BUILD.bazel index b6657548db165..cb98ce2f6265e 100644 --- a/pkg/util/stmtsummary/v2/BUILD.bazel +++ b/pkg/util/stmtsummary/v2/BUILD.bazel @@ -13,8 +13,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/config", + "//pkg/meta/model", "//pkg/parser/auth", - "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/sessionctx/stmtctx", "//pkg/types", @@ -50,6 +50,7 @@ go_test( flaky = True, shard_count = 13, deps = [ + "//pkg/meta/model", "//pkg/parser/auth", "//pkg/parser/model", "//pkg/testkit/testsetup", diff --git a/pkg/util/stmtsummary/v2/column.go b/pkg/util/stmtsummary/v2/column.go index 0ae3dfb3ed99b..641fe3956dfca 100644 --- a/pkg/util/stmtsummary/v2/column.go +++ b/pkg/util/stmtsummary/v2/column.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/logutil" diff --git a/pkg/util/stmtsummary/v2/column_test.go b/pkg/util/stmtsummary/v2/column_test.go index 9e7af9cc09805..f2b613d3e708c 100644 --- a/pkg/util/stmtsummary/v2/column_test.go +++ b/pkg/util/stmtsummary/v2/column_test.go @@ -19,23 +19,24 @@ import ( "testing" "time" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) func TestColumn(t *testing.T) { columns := []*model.ColumnInfo{ - {Name: model.NewCIStr(ClusterTableInstanceColumnNameStr)}, - {Name: model.NewCIStr(StmtTypeStr)}, - {Name: model.NewCIStr(SchemaNameStr)}, - {Name: model.NewCIStr(DigestStr)}, - {Name: model.NewCIStr(DigestTextStr)}, - {Name: model.NewCIStr(TableNamesStr)}, - {Name: 
model.NewCIStr(IndexNamesStr)}, - {Name: model.NewCIStr(SampleUserStr)}, - {Name: model.NewCIStr(ExecCountStr)}, - {Name: model.NewCIStr(SumLatencyStr)}, - {Name: model.NewCIStr(MaxLatencyStr)}, + {Name: pmodel.NewCIStr(ClusterTableInstanceColumnNameStr)}, + {Name: pmodel.NewCIStr(StmtTypeStr)}, + {Name: pmodel.NewCIStr(SchemaNameStr)}, + {Name: pmodel.NewCIStr(DigestStr)}, + {Name: pmodel.NewCIStr(DigestTextStr)}, + {Name: pmodel.NewCIStr(TableNamesStr)}, + {Name: pmodel.NewCIStr(IndexNamesStr)}, + {Name: pmodel.NewCIStr(SampleUserStr)}, + {Name: pmodel.NewCIStr(ExecCountStr)}, + {Name: pmodel.NewCIStr(SumLatencyStr)}, + {Name: pmodel.NewCIStr(MaxLatencyStr)}, } factories := makeColumnFactories(columns) info := GenerateStmtExecInfo4Test("digest") diff --git a/pkg/util/stmtsummary/v2/reader.go b/pkg/util/stmtsummary/v2/reader.go index f5e7c9742a6c8..c33b5a3327a44 100644 --- a/pkg/util/stmtsummary/v2/reader.go +++ b/pkg/util/stmtsummary/v2/reader.go @@ -29,8 +29,8 @@ import ( "time" "github.com/pingcap/tidb/pkg/config" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/logutil" diff --git a/pkg/util/stmtsummary/v2/reader_test.go b/pkg/util/stmtsummary/v2/reader_test.go index 533bc7ba2ba72..cf5dded90c7ad 100644 --- a/pkg/util/stmtsummary/v2/reader_test.go +++ b/pkg/util/stmtsummary/v2/reader_test.go @@ -22,8 +22,9 @@ import ( "testing" "time" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/set" @@ -227,8 +228,8 @@ func TestMemReader(t *testing.T) { timeLocation, err := time.LoadLocation("Asia/Shanghai") require.NoError(t, err) columns := 
[]*model.ColumnInfo{ - {Name: model.NewCIStr(DigestStr)}, - {Name: model.NewCIStr(ExecCountStr)}, + {Name: pmodel.NewCIStr(DigestStr)}, + {Name: pmodel.NewCIStr(ExecCountStr)}, } ss := NewStmtSummary4Test(3) @@ -281,8 +282,8 @@ func TestHistoryReader(t *testing.T) { timeLocation, err := time.LoadLocation("Asia/Shanghai") require.NoError(t, err) columns := []*model.ColumnInfo{ - {Name: model.NewCIStr(DigestStr)}, - {Name: model.NewCIStr(ExecCountStr)}, + {Name: pmodel.NewCIStr(DigestStr)}, + {Name: pmodel.NewCIStr(ExecCountStr)}, } func() { @@ -429,8 +430,8 @@ func TestHistoryReaderInvalidLine(t *testing.T) { timeLocation, err := time.LoadLocation("Asia/Shanghai") require.NoError(t, err) columns := []*model.ColumnInfo{ - {Name: model.NewCIStr(DigestStr)}, - {Name: model.NewCIStr(ExecCountStr)}, + {Name: pmodel.NewCIStr(DigestStr)}, + {Name: pmodel.NewCIStr(ExecCountStr)}, } reader, err := NewHistoryReader(context.Background(), columns, "", timeLocation, nil, false, nil, nil, 2) diff --git a/pkg/util/tableutil/BUILD.bazel b/pkg/util/tableutil/BUILD.bazel index bafed296d54ae..bc0fca534f478 100644 --- a/pkg/util/tableutil/BUILD.bazel +++ b/pkg/util/tableutil/BUILD.bazel @@ -7,6 +7,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/meta/autoid", - "//pkg/parser/model", + "//pkg/meta/model", ], ) diff --git a/pkg/util/tableutil/tableutil.go b/pkg/util/tableutil/tableutil.go index f8a3bf117539d..e878ec3cc6f4d 100644 --- a/pkg/util/tableutil/tableutil.go +++ b/pkg/util/tableutil/tableutil.go @@ -16,7 +16,7 @@ package tableutil import ( "github.com/pingcap/tidb/pkg/meta/autoid" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) // TempTable is used to store transaction-specific or session-specific information for global / local temporary tables. 
diff --git a/pkg/util/tracing/BUILD.bazel b/pkg/util/tracing/BUILD.bazel index 88842d5e47532..0b88f052dd209 100644 --- a/pkg/util/tracing/BUILD.bazel +++ b/pkg/util/tracing/BUILD.bazel @@ -9,7 +9,7 @@ go_library( importpath = "github.com/pingcap/tidb/pkg/util/tracing", visibility = ["//visibility:public"], deps = [ - "//pkg/parser/model", + "//pkg/meta/model", "@com_github_opentracing_basictracer_go//:basictracer-go", "@com_github_opentracing_opentracing_go//:opentracing-go", ], @@ -27,7 +27,7 @@ go_test( embed = [":tracing"], flaky = True, deps = [ - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/testkit/testsetup", "@com_github_opentracing_basictracer_go//:basictracer-go", "@com_github_opentracing_opentracing_go//:opentracing-go", diff --git a/pkg/util/tracing/util.go b/pkg/util/tracing/util.go index 74e4df97dfda6..4e5346a503a78 100644 --- a/pkg/util/tracing/util.go +++ b/pkg/util/tracing/util.go @@ -20,7 +20,7 @@ import ( "github.com/opentracing/basictracer-go" "github.com/opentracing/opentracing-go" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" ) // TiDBTrace is set as Baggage on traces which are used for tidb tracing. 
diff --git a/pkg/util/tracing/util_test.go b/pkg/util/tracing/util_test.go index a7f825de569ea..7b4e4a1a8fc5a 100644 --- a/pkg/util/tracing/util_test.go +++ b/pkg/util/tracing/util_test.go @@ -20,7 +20,7 @@ import ( "github.com/opentracing/basictracer-go" "github.com/opentracing/opentracing-go" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/util/tracing" "github.com/stretchr/testify/require" ) diff --git a/tests/realtikvtest/addindextest/BUILD.bazel b/tests/realtikvtest/addindextest/BUILD.bazel index d1180149494c1..2dc7716205007 100644 --- a/tests/realtikvtest/addindextest/BUILD.bazel +++ b/tests/realtikvtest/addindextest/BUILD.bazel @@ -15,7 +15,7 @@ go_test( "//pkg/config", "//pkg/ddl", "//pkg/ddl/ingest", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/testkit", "//pkg/testkit/testfailpoint", "//tests/realtikvtest", diff --git a/tests/realtikvtest/addindextest/add_index_test.go b/tests/realtikvtest/addindextest/add_index_test.go index 86b6232f12b54..168817d760e82 100644 --- a/tests/realtikvtest/addindextest/add_index_test.go +++ b/tests/realtikvtest/addindextest/add_index_test.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/ddl/ingest" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" "github.com/pingcap/tidb/tests/realtikvtest" diff --git a/tests/realtikvtest/addindextest1/BUILD.bazel b/tests/realtikvtest/addindextest1/BUILD.bazel index 5560686d8c7fe..de6f27ec5ac04 100644 --- a/tests/realtikvtest/addindextest1/BUILD.bazel +++ b/tests/realtikvtest/addindextest1/BUILD.bazel @@ -13,7 +13,7 @@ go_test( "//pkg/ddl/ingest", "//pkg/disttask/framework/storage", "//pkg/errno", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/sessionctx/variable", "//pkg/testkit", 
"//pkg/testkit/testfailpoint", diff --git a/tests/realtikvtest/addindextest1/disttask_test.go b/tests/realtikvtest/addindextest1/disttask_test.go index 170c2c2746420..c5ddb4a1ac24b 100644 --- a/tests/realtikvtest/addindextest1/disttask_test.go +++ b/tests/realtikvtest/addindextest1/disttask_test.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl/ingest" "github.com/pingcap/tidb/pkg/disttask/framework/storage" "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" diff --git a/tests/realtikvtest/addindextest2/BUILD.bazel b/tests/realtikvtest/addindextest2/BUILD.bazel index d41d6dc3940cc..3590e9da6f6ff 100644 --- a/tests/realtikvtest/addindextest2/BUILD.bazel +++ b/tests/realtikvtest/addindextest2/BUILD.bazel @@ -14,7 +14,7 @@ go_test( "//pkg/disttask/framework/scheduler", "//pkg/kv", "//pkg/lightning/backend/external", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/sessionctx/variable", "//pkg/store/helper", "//pkg/tablecodec", diff --git a/tests/realtikvtest/addindextest2/global_sort_test.go b/tests/realtikvtest/addindextest2/global_sort_test.go index f3d13be48cf4b..b86d7aea3244a 100644 --- a/tests/realtikvtest/addindextest2/global_sort_test.go +++ b/tests/realtikvtest/addindextest2/global_sort_test.go @@ -29,7 +29,7 @@ import ( "github.com/pingcap/tidb/pkg/disttask/framework/scheduler" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/lightning/backend/external" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/store/helper" "github.com/pingcap/tidb/pkg/tablecodec" diff --git a/tests/realtikvtest/addindextest3/BUILD.bazel b/tests/realtikvtest/addindextest3/BUILD.bazel index ac0fcc528e572..ec295f54d4990 100644 --- 
a/tests/realtikvtest/addindextest3/BUILD.bazel +++ b/tests/realtikvtest/addindextest3/BUILD.bazel @@ -21,6 +21,7 @@ go_test( "//pkg/errno", "//pkg/kv", "//pkg/lightning/backend/local", + "//pkg/meta/model", "//pkg/parser/model", "//pkg/sessionctx", "//pkg/sessionctx/variable", diff --git a/tests/realtikvtest/addindextest3/functional_test.go b/tests/realtikvtest/addindextest3/functional_test.go index 7a3e29a4d3206..0e9af59522793 100644 --- a/tests/realtikvtest/addindextest3/functional_test.go +++ b/tests/realtikvtest/addindextest3/functional_test.go @@ -22,7 +22,8 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/ddl/ingest" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" @@ -43,7 +44,7 @@ func TestDDLTestEstimateTableRowSize(t *testing.T) { ctx = util.WithInternalSourceType(ctx, "estimate_row_size") tkSess := tk.Session() exec := tkSess.GetRestrictedSQLExecutor() - tbl, err := dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err := dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) size := ddl.EstimateTableRowSizeForTest(ctx, store, exec, tbl) @@ -67,7 +68,7 @@ func TestDDLTestEstimateTableRowSize(t *testing.T) { tk.MustQuery("split table t between (0) and (1000000) regions 2;").Check(testkit.Rows("4 1")) tk.MustExec("set global tidb_analyze_skip_column_types=`json,blob,mediumblob,longblob`") tk.MustExec("analyze table t all columns;") - tbl, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t")) + tbl, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("test"), pmodel.NewCIStr("t")) require.NoError(t, err) size = 
ddl.EstimateTableRowSizeForTest(ctx, store, exec, tbl) require.Equal(t, 19, size) diff --git a/tests/realtikvtest/addindextest3/ingest_test.go b/tests/realtikvtest/addindextest3/ingest_test.go index 808b861cd8fb3..71e9a194f6f13 100644 --- a/tests/realtikvtest/addindextest3/ingest_test.go +++ b/tests/realtikvtest/addindextest3/ingest_test.go @@ -28,7 +28,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl/testutil" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/lightning/backend/local" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testfailpoint" diff --git a/tests/realtikvtest/addindextest3/operator_test.go b/tests/realtikvtest/addindextest3/operator_test.go index c8a805b693948..692b181d424d7 100644 --- a/tests/realtikvtest/addindextest3/operator_test.go +++ b/tests/realtikvtest/addindextest3/operator_test.go @@ -31,7 +31,8 @@ import ( "github.com/pingcap/tidb/pkg/disttask/operator" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" @@ -330,7 +331,7 @@ func prepare(t *testing.T, tk *testkit.TestKit, dom *domain.Domain, regionCnt in tk.MustQuery("select count(*) from t;").Check(testkit.Rows(fmt.Sprintf("%d", regionCnt))) var err error - tbl, err = dom.InfoSchema().TableByName(context.Background(), model.NewCIStr("op"), model.NewCIStr("t")) + tbl, err = dom.InfoSchema().TableByName(context.Background(), pmodel.NewCIStr("op"), pmodel.NewCIStr("t")) require.NoError(t, err) start = tbl.RecordPrefix() end = tbl.RecordPrefix().PrefixNext() diff --git a/tests/realtikvtest/addindextest4/BUILD.bazel 
b/tests/realtikvtest/addindextest4/BUILD.bazel index 014dc500e8cfc..7e4e7364ca1c9 100644 --- a/tests/realtikvtest/addindextest4/BUILD.bazel +++ b/tests/realtikvtest/addindextest4/BUILD.bazel @@ -12,7 +12,7 @@ go_test( "//pkg/config", "//pkg/domain", "//pkg/kv", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/testkit", "//tests/realtikvtest", "@com_github_pingcap_failpoint//:failpoint", diff --git a/tests/realtikvtest/addindextest4/failure_test.go b/tests/realtikvtest/addindextest4/failure_test.go index 081eaf4bcbfa7..35e9c8b8f52ec 100644 --- a/tests/realtikvtest/addindextest4/failure_test.go +++ b/tests/realtikvtest/addindextest4/failure_test.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/tests/realtikvtest" "github.com/stretchr/testify/require" diff --git a/tests/realtikvtest/flashbacktest/BUILD.bazel b/tests/realtikvtest/flashbacktest/BUILD.bazel index 71984d9fd78d8..1903fddd4029c 100644 --- a/tests/realtikvtest/flashbacktest/BUILD.bazel +++ b/tests/realtikvtest/flashbacktest/BUILD.bazel @@ -13,7 +13,7 @@ go_test( "//pkg/ddl/util", "//pkg/errno", "//pkg/meta", - "//pkg/parser/model", + "//pkg/meta/model", "//pkg/testkit", "//pkg/testkit/testfailpoint", "//pkg/testkit/testsetup", diff --git a/tests/realtikvtest/flashbacktest/flashback_test.go b/tests/realtikvtest/flashbacktest/flashback_test.go index 871943eb1d0f2..4e1059cf77dce 100644 --- a/tests/realtikvtest/flashbacktest/flashback_test.go +++ b/tests/realtikvtest/flashbacktest/flashback_test.go @@ -25,7 +25,7 @@ import ( ddlutil "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/meta" - "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/testkit" 
"github.com/pingcap/tidb/pkg/testkit/testfailpoint" "github.com/pingcap/tidb/pkg/types"