
Commit f9d4153

Merge pull request #4 from hanfei1991/release-4.0-29178dfa8dbe
refine code
hanfei1991 authored Aug 4, 2020
2 parents 6301f01 + bf49c6f commit f9d4153
Showing 126 changed files with 3,329 additions and 996 deletions.
ddl/column.go (6 changes: 1 addition & 5 deletions)
@@ -125,11 +125,7 @@ func createColumnInfo(tblInfo *model.TableInfo, colInfo *model.ColumnInfo, pos *
 
     // Append the column info to the end of the tblInfo.Columns.
     // It will reorder to the right position in "Columns" when it state change to public.
-    newCols := make([]*model.ColumnInfo, 0, len(cols)+1)
-    newCols = append(newCols, cols...)
-    newCols = append(newCols, colInfo)
-
-    tblInfo.Columns = newCols
+    tblInfo.Columns = append(cols, colInfo)
     return colInfo, position, nil
 }
 
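The comment in this hunk describes an append-then-reorder scheme: a newly added column sits at the end of tblInfo.Columns while it is not yet public, and is only moved to its requested offset once its state becomes public. A minimal standalone sketch of that idea, assuming an illustrative col type and helper names that are not the actual TiDB code:

```go
package main

import "fmt"

// col is an illustrative stand-in for model.ColumnInfo.
type col struct{ Name string }

// appendCol mirrors `tblInfo.Columns = append(cols, colInfo)`:
// while not yet public, the new column simply goes to the end.
func appendCol(cols []*col, c *col) []*col { return append(cols, c) }

// moveToOffset is what "reorder to the right position" amounts to once the
// column turns public: take it off the tail and splice it in at target.
func moveToOffset(cols []*col, target int) []*col {
    last := cols[len(cols)-1]
    rest := append([]*col(nil), cols[:len(cols)-1]...)
    out := append(rest[:target:target], last)
    return append(out, rest[target:]...)
}

func main() {
    cols := []*col{{"a"}, {"c"}}
    cols = appendCol(cols, &col{"b"}) // column "b" added by DDL, not public yet
    cols = moveToOffset(cols, 1)      // state changed to public: "b" belongs after "a"
    for i, c := range cols {
        fmt.Println(i, c.Name) // 0 a, 1 b, 2 c
    }
}
```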
ddl/db_change_test.go (10 changes: 8 additions & 2 deletions)
@@ -830,12 +830,15 @@ func (s *testStateChangeSuite) TestParallelAddPrimaryKey(c *C) {
 func (s *testStateChangeSuite) TestParallelAlterAddPartition(c *C) {
     sql1 := `alter table t_part add partition (
     partition p2 values less than (30)
     );`
+    sql2 := `alter table t_part add partition (
+    partition p3 values less than (30)
+    );`
     f := func(c *C, err1, err2 error) {
         c.Assert(err1, IsNil)
         c.Assert(err2.Error(), Equals, "[ddl:1493]VALUES LESS THAN value must be strictly increasing for each partition")
     }
-    s.testControlParallelExecSQL(c, sql1, sql1, f)
+    s.testControlParallelExecSQL(c, sql1, sql2, f)
 }
 
 func (s *testStateChangeSuite) TestParallelDropColumn(c *C) {
@@ -997,7 +1000,10 @@ func (s *testStateChangeSuiteBase) testControlParallelExecSQL(c *C, sql1, sql2 s
     f(c, err1, err2)
 }
 
-func (s *testStateChangeSuite) TestParallelUpdateTableReplica(c *C) {
+func (s *serialTestStateChangeSuite) TestParallelUpdateTableReplica(c *C) {
+    c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`), IsNil)
+    defer failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+
     ctx := context.Background()
     _, err := s.se.Execute(context.Background(), "use test_db_state")
     c.Assert(err, IsNil)
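TestParallelUpdateTableReplica now runs in the serial suite because it toggles the process-global mockTiFlashStoreCount failpoint. A minimal sketch of the enable/disable pattern these serial tests rely on, using the failpoint path from the diff inside an otherwise illustrative test body:

```go
package ddl_test

import (
    "testing"

    "github.com/pingcap/failpoint"
)

// TestWithMockTiFlashStore shows the toggle pattern used by the serial
// suites in this commit: enable the failpoint for the duration of one test
// and make sure it is disabled again even if the test fails.
func TestWithMockTiFlashStore(t *testing.T) {
    fp := "github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount"
    if err := failpoint.Enable(fp, `return(true)`); err != nil {
        t.Fatal(err)
    }
    defer func() {
        if err := failpoint.Disable(fp); err != nil {
            t.Fatal(err)
        }
    }()

    // ... exercise DDL statements that need a non-zero TiFlash store count ...
}
```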
ddl/db_test.go (28 changes: 24 additions & 4 deletions)
@@ -1239,6 +1239,8 @@ func (s *testDBSuite1) TestCancelAddTableAndDropTablePartition(c *C) {
     }{
         {model.ActionAddTablePartition, model.JobStateNone, model.StateNone, true},
         {model.ActionDropTablePartition, model.JobStateNone, model.StateNone, true},
+        // Add table partition now can be cancelled in ReplicaOnly state.
+        {model.ActionAddTablePartition, model.JobStateRunning, model.StateReplicaOnly, true},
         {model.ActionAddTablePartition, model.JobStateRunning, model.StatePublic, false},
         {model.ActionDropTablePartition, model.JobStateRunning, model.StatePublic, false},
     }
@@ -2055,7 +2057,7 @@ func match(c *C, row []interface{}, expected ...interface{}) {
 }
 
 // TestCreateTableWithLike2 tests create table with like when refer table have non-public column/index.
-func (s *testDBSuite4) TestCreateTableWithLike2(c *C) {
+func (s *testSerialDBSuite) TestCreateTableWithLike2(c *C) {
     s.tk = testkit.NewTestKit(c, s.store)
     s.tk.MustExec("use test_db")
     s.tk.MustExec("drop table if exists t1,t2;")
@@ -2125,6 +2127,9 @@ func (s *testDBSuite4) TestCreateTableWithLike2(c *C) {
     checkTbl2()
 
     // Test for table has tiflash replica.
+    c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`), IsNil)
+    defer failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+
     s.dom.DDL().(ddl.DDLForTest).SetHook(originalHook)
     s.tk.MustExec("drop table if exists t1,t2;")
     s.tk.MustExec("create table t1 (a int) partition by hash(a) partitions 2;")
@@ -2639,7 +2644,7 @@ func (s *testDBSuite3) TestFKOnGeneratedColumns(c *C) {
     s.tk.MustExec("drop table t1,t2,t3,t4,t5;")
 }
 
-func (s *testDBSuite3) TestTruncateTable(c *C) {
+func (s *testSerialDBSuite) TestTruncateTable(c *C) {
     tk := testkit.NewTestKit(c, s.store)
     tk.MustExec("use test")
     tk.MustExec("create table truncate_table (c1 int, c2 int)")
@@ -2686,6 +2691,9 @@ func (s *testDBSuite3) TestTruncateTable(c *C) {
     c.Assert(hasOldTableData, IsFalse)
 
     // Test for truncate table should clear the tiflash available status.
+    c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`), IsNil)
+    defer failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+
     tk.MustExec("drop table if exists t1;")
     tk.MustExec("create table t1 (a int);")
     tk.MustExec("alter table t1 set tiflash replica 3 location labels 'a','b';")
@@ -3875,7 +3883,8 @@ func (s *testDBSuite1) TestModifyColumnCharset(c *C) {
 
 }
 
-func (s *testDBSuite1) TestSetTableFlashReplica(c *C) {
+func (s *testSerialDBSuite) TestSetTableFlashReplica(c *C) {
+    c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`), IsNil)
     s.tk = testkit.NewTestKit(c, s.store)
     s.tk.MustExec("use test_db")
     s.mustExec(c, "drop table if exists t_flash;")
@@ -3965,6 +3974,14 @@ func (s *testDBSuite1) TestSetTableFlashReplica(c *C) {
     t, dbInfo = is.FindTableByPartitionID(t.Meta().ID)
     c.Assert(t, IsNil)
     c.Assert(dbInfo, IsNil)
+    failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+
+    // Test for set replica count more than the tiflash store count.
+    s.mustExec(c, "drop table if exists t_flash;")
+    s.tk.MustExec("create table t_flash(a int, b int)")
+    _, err = s.tk.Exec("alter table t_flash set tiflash replica 2 location labels 'a','b';")
+    c.Assert(err, NotNil)
+    c.Assert(err.Error(), Equals, "the tiflash replica count: 2 should be less than the total tiflash server count: 0")
 }
 
 func (s *testSerialDBSuite) TestAlterShardRowIDBits(c *C) {
@@ -4141,7 +4158,10 @@ func (s *testDBSuite2) TestWriteLocal(c *C) {
     tk2.MustExec("unlock tables")
 }
 
-func (s *testDBSuite2) TestSkipSchemaChecker(c *C) {
+func (s *testSerialDBSuite) TestSkipSchemaChecker(c *C) {
+    c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`), IsNil)
+    defer failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount")
+
     s.tk = testkit.NewTestKit(c, s.store)
     tk := s.tk
     tk.MustExec("use test")
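Read together with the new cancel-job cases in ddl/ddl_worker_test.go, the first hunk above says that an ADD PARTITION job is still cancellable in the none and replica-only states but not once the partition is public. A small sketch that restates that mapping (the variable is illustrative, not part of the patch):

```go
package ddl_test

import "github.com/pingcap/parser/model"

// addPartitionCancellable summarizes the cancellation cases exercised by
// TestCancelAddTableAndDropTablePartition after this change.
var addPartitionCancellable = map[model.SchemaState]bool{
    model.StateNone:        true,
    model.StateReplicaOnly: true, // newly cancellable with this patch
    model.StatePublic:      false,
}
```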
ddl/ddl_api.go (34 changes: 28 additions & 6 deletions)
@@ -1378,10 +1378,13 @@ func buildTableInfoWithCheck(ctx sessionctx.Context, s *ast.CreateTableStmt, dbC
     if err != nil {
         return nil, err
     }
-    if err = checkTableInfoValidExtra(tbInfo); err != nil {
+    // Fix issue 17952 which will cause partition range expr can't be parsed as Int.
+    // checkTableInfoValidWithStmt will do the constant fold the partition expression first,
+    // then checkTableInfoValidExtra will pass the tableInfo check successfully.
+    if err = checkTableInfoValidWithStmt(ctx, tbInfo, s); err != nil {
         return nil, err
     }
-    if err = checkTableInfoValidWithStmt(ctx, tbInfo, s); err != nil {
+    if err = checkTableInfoValidExtra(tbInfo); err != nil {
         return nil, err
     }
     return tbInfo, nil
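The new ordering runs checkTableInfoValidWithStmt first so that partition bounds written as expressions are constant-folded before checkTableInfoValidExtra re-checks the table info. A hypothetical example of the kind of definition that needs the fold first (illustrative only, not quoted from issue 17952):

```go
package ddl_test

// The VALUES LESS THAN bounds below are expressions rather than integer
// literals; they only become plain Ints after constant folding, which is
// why checkTableInfoValidWithStmt has to run before checkTableInfoValidExtra.
const createWithFoldedBounds = `
create table t_range (
    created_at datetime
) partition by range (to_days(created_at)) (
    partition p0 values less than (to_days('2020-01-01')),
    partition p1 values less than maxvalue
)`
```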
@@ -1616,7 +1619,7 @@ func (d *ddl) preSplitAndScatter(ctx sessionctx.Context, tbInfo *model.TableInfo
         scatterRegion = variable.TiDBOptOn(val)
     }
     if pi != nil {
-        preSplit = func() { splitPartitionTableRegion(ctx, sp, pi, scatterRegion) }
+        preSplit = func() { splitPartitionTableRegion(ctx, sp, tbInfo, pi, scatterRegion) }
     } else {
         preSplit = func() { splitTableRegion(ctx, sp, tbInfo, scatterRegion) }
     }
@@ -2561,14 +2564,15 @@ func (d *ddl) TruncateTablePartition(ctx sessionctx.Context, ident ast.Ident, sp
     if err != nil {
         return errors.Trace(err)
     }
+    pids := []int64{pid}
 
     job := &model.Job{
         SchemaID: schema.ID,
         TableID: meta.ID,
         SchemaName: schema.Name.L,
         Type: model.ActionTruncateTablePartition,
         BinlogInfo: &model.HistoryInfo{},
-        Args: []interface{}{pid},
+        Args: []interface{}{pids},
     }
 
     err = d.doDDLJob(ctx, job)
@@ -2600,7 +2604,8 @@ func (d *ddl) DropTablePartition(ctx sessionctx.Context, ident ast.Ident, spec *
     }
 
     partName := spec.PartitionNames[0].L
-    err = checkDropTablePartition(meta, partName)
+    partNames := []string{partName}
+    err = checkDropTablePartition(meta, partNames)
     if err != nil {
         if ErrDropPartitionNonExistent.Equal(err) && spec.IfExists {
             ctx.GetSessionVars().StmtCtx.AppendNote(err)
@@ -2615,7 +2620,7 @@
         SchemaName: schema.Name.L,
         Type: model.ActionDropTablePartition,
         BinlogInfo: &model.HistoryInfo{},
-        Args: []interface{}{partName},
+        Args: []interface{}{partNames},
     }
 
     err = d.doDDLJob(ctx, job)
@@ -3418,6 +3423,11 @@ func (d *ddl) AlterTableSetTiFlashReplica(ctx sessionctx.Context, ident ast.Iden
         }
     }
 
+    err = checkTiFlashReplicaCount(ctx, replicaInfo.Count)
+    if err != nil {
+        return errors.Trace(err)
+    }
+
     job := &model.Job{
         SchemaID: schema.ID,
         TableID: tb.Meta().ID,
@@ -3431,6 +3441,18 @@ func (d *ddl) AlterTableSetTiFlashReplica(ctx sessionctx.Context, ident ast.Iden
     return errors.Trace(err)
 }
 
+func checkTiFlashReplicaCount(ctx sessionctx.Context, replicaCount uint64) error {
+    // Check the tiflash replica count should be less than the total tiflash stores.
+    tiflashStoreCnt, err := infoschema.GetTiFlashStoreCount(ctx)
+    if err != nil {
+        return errors.Trace(err)
+    }
+    if replicaCount > tiflashStoreCnt {
+        return errors.Errorf("the tiflash replica count: %d should be less than the total tiflash server count: %d", replicaCount, tiflashStoreCnt)
+    }
+    return nil
+}
+
 // UpdateTableReplicaInfo updates the table flash replica infos.
 func (d *ddl) UpdateTableReplicaInfo(ctx sessionctx.Context, physicalID int64, available bool) error {
     is := d.infoHandle.Get()
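checkTiFlashReplicaCount is a straightforward guard: the ALTER is rejected when the requested replica count exceeds the number of TiFlash stores reported by infoschema. A standalone sketch of the same rule, detached from sessionctx (function and variable names here are illustrative):

```go
package main

import "fmt"

// validateTiFlashReplicaCount restates the guard added above as a pure
// function: the requested replica count may not exceed the number of
// TiFlash stores. The real check obtains the store count through
// infoschema.GetTiFlashStoreCount(ctx).
func validateTiFlashReplicaCount(replicaCount, storeCount uint64) error {
    if replicaCount > storeCount {
        return fmt.Errorf("the tiflash replica count: %d should be less than the total tiflash server count: %d", replicaCount, storeCount)
    }
    return nil
}

func main() {
    // With zero TiFlash stores, asking for 2 replicas fails, matching the
    // assertion added to TestSetTableFlashReplica in ddl/db_test.go.
    fmt.Println(validateTiFlashReplicaCount(2, 0))
    // With enough stores, the ALTER is allowed to proceed.
    fmt.Println(validateTiFlashReplicaCount(2, 3))
}
```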
ddl/ddl_worker.go (2 changes: 1 addition & 1 deletion)
@@ -662,7 +662,7 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64,
     case model.ActionUnlockTable:
         ver, err = onUnlockTables(t, job)
     case model.ActionSetTiFlashReplica:
-        ver, err = onSetTableFlashReplica(t, job)
+        ver, err = w.onSetTableFlashReplica(t, job)
     case model.ActionUpdateTiFlashReplicaStatus:
         ver, err = onUpdateFlashReplicaStatus(t, job)
     case model.ActionCreateSequence:
ddl/ddl_worker_test.go (33 changes: 32 additions & 1 deletion)
@@ -460,6 +460,10 @@ func buildCancelJobTests(firstID int64) []testCancelJob {
         {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 35}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 35)}, cancelState: model.StatePublic},
         {act: model.ActionDropPrimaryKey, jobIDs: []int64{firstID + 36}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly},
         {act: model.ActionDropPrimaryKey, jobIDs: []int64{firstID + 37}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 37)}, cancelState: model.StateDeleteOnly},
+
+        {act: model.ActionAddTablePartition, jobIDs: []int64{firstID + 42}, cancelRetErrs: noErrs, cancelState: model.StateNone},
+        {act: model.ActionAddTablePartition, jobIDs: []int64{firstID + 43}, cancelRetErrs: noErrs, cancelState: model.StateReplicaOnly},
+        {act: model.ActionAddTablePartition, jobIDs: []int64{firstID + 44}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob}, cancelState: model.StatePublic},
     }
 
     return tests
@@ -792,7 +796,7 @@ func (s *testDDLSuite) TestCancelJob(c *C) {
 
     // test truncate table partition failed caused by canceled.
     test = &tests[24]
-    truncateTblPartitionArgs := []interface{}{partitionTblInfo.Partition.Definitions[0].ID}
+    truncateTblPartitionArgs := []interface{}{[]int64{partitionTblInfo.Partition.Definitions[0].ID}}
     doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, partitionTblInfo.ID, test.act, truncateTblPartitionArgs, &test.cancelState)
     c.Check(checkErr, IsNil)
     changedTable = testGetTable(c, d, dbInfo.ID, partitionTblInfo.ID)
@@ -862,6 +866,33 @@ func (s *testDDLSuite) TestCancelJob(c *C) {
     testDropIndex(c, ctx, d, dbInfo, tblInfo, idxOrigName)
     c.Check(errors.ErrorStack(checkErr), Equals, "")
     s.checkDropIdx(c, d, dbInfo.ID, tblInfo.ID, idxOrigName, true)
+
+    // Cancel add table partition.
+    baseTableInfo := testTableInfoWithPartitionLessThan(c, d, "empty_table", 5, "1000")
+    testCreateTable(c, ctx, d, dbInfo, baseTableInfo)
+
+    cancelState = model.StateNone
+    updateTest(&tests[34])
+    addedPartInfo := testAddedNewTablePartitionInfo(c, d, baseTableInfo, "p1", "maxvalue")
+    addPartitionArgs := []interface{}{addedPartInfo}
+    doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, baseTableInfo.ID, test.act, addPartitionArgs, &cancelState)
+    c.Check(checkErr, IsNil)
+    baseTable := testGetTable(c, d, dbInfo.ID, baseTableInfo.ID)
+    c.Assert(len(baseTable.Meta().Partition.Definitions), Equals, 1)
+
+    updateTest(&tests[35])
+    doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, baseTableInfo.ID, test.act, addPartitionArgs, &cancelState)
+    c.Check(checkErr, IsNil)
+    baseTable = testGetTable(c, d, dbInfo.ID, baseTableInfo.ID)
+    c.Assert(len(baseTable.Meta().Partition.Definitions), Equals, 1)
+
+    updateTest(&tests[36])
+    doDDLJobSuccess(ctx, d, c, dbInfo.ID, baseTableInfo.ID, test.act, addPartitionArgs)
+    c.Check(checkErr, IsNil)
+    baseTable = testGetTable(c, d, dbInfo.ID, baseTableInfo.ID)
+    c.Assert(len(baseTable.Meta().Partition.Definitions), Equals, 2)
+    c.Assert(baseTable.Meta().Partition.Definitions[1].ID, Equals, addedPartInfo.Definitions[0].ID)
+    c.Assert(baseTable.Meta().Partition.Definitions[1].LessThan[0], Equals, addedPartInfo.Definitions[0].LessThan[0])
 }
 
 func (s *testDDLSuite) TestIgnorableSpec(c *C) {
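For reference, the cancel cases added above follow the table-driven shape built by buildCancelJobTests. A sketch of that shape with field types inferred from the literals in the diff (the real testCancelJob definition may differ):

```go
package ddl_test

import "github.com/pingcap/parser/model"

// Inferred shape of the table-driven cancel cases; field types are guesses
// based on the composite literals above, not the actual definition.
type testCancelJob struct {
    act           model.ActionType  // DDL action to cancel
    jobIDs        []int64           // DDL job IDs targeted by the cancel
    cancelRetErrs []error           // expected errors returned by the cancel itself
    cancelState   model.SchemaState // schema state at which the cancel fires
}
```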
ddl/delete_range.go (14 changes: 9 additions & 5 deletions)
@@ -289,13 +289,17 @@ func insertJobIntoDeleteRangeTable(ctx sessionctx.Context, job *model.Job) error
         endKey := tablecodec.EncodeTablePrefix(tableID + 1)
         return doInsert(s, job.ID, tableID, startKey, endKey, now)
     case model.ActionDropTablePartition, model.ActionTruncateTablePartition:
-        var physicalTableID int64
-        if err := job.DecodeArgs(&physicalTableID); err != nil {
+        var physicalTableIDs []int64
+        if err := job.DecodeArgs(&physicalTableIDs); err != nil {
             return errors.Trace(err)
         }
-        startKey := tablecodec.EncodeTablePrefix(physicalTableID)
-        endKey := tablecodec.EncodeTablePrefix(physicalTableID + 1)
-        return doInsert(s, job.ID, physicalTableID, startKey, endKey, now)
+        for _, physicalTableID := range physicalTableIDs {
+            startKey := tablecodec.EncodeTablePrefix(physicalTableID)
+            endKey := tablecodec.EncodeTablePrefix(physicalTableID + 1)
+            if err := doInsert(s, job.ID, physicalTableID, startKey, endKey, now); err != nil {
+                return errors.Trace(err)
+            }
+        }
     // ActionAddIndex, ActionAddPrimaryKey needs do it, because it needs to be rolled back when it's canceled.
     case model.ActionAddIndex, model.ActionAddPrimaryKey:
         tableID := job.TableID
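Because the job args now carry a slice of physical table IDs, the delete-range writer emits one row per dropped or truncated partition, each covering the half-open key range [EncodeTablePrefix(id), EncodeTablePrefix(id+1)). A minimal sketch of that per-partition range construction (the deleteRange record type is illustrative; doInsert and the gc_delete_range table are not modeled here):

```go
package main

import (
    "fmt"

    "github.com/pingcap/tidb/tablecodec"
)

// deleteRange pairs a physical table (partition) ID with its key range.
type deleteRange struct {
    physicalID       int64
    startKey, endKey []byte
}

// rangesForPartitions builds one [start, end) range per partition ID,
// mirroring the loop added to insertJobIntoDeleteRangeTable.
func rangesForPartitions(physicalTableIDs []int64) []deleteRange {
    ranges := make([]deleteRange, 0, len(physicalTableIDs))
    for _, id := range physicalTableIDs {
        ranges = append(ranges, deleteRange{
            physicalID: id,
            startKey:   tablecodec.EncodeTablePrefix(id),
            endKey:     tablecodec.EncodeTablePrefix(id + 1),
        })
    }
    return ranges
}

func main() {
    for _, r := range rangesForPartitions([]int64{101, 102}) {
        fmt.Printf("%d: [%x, %x)\n", r.physicalID, r.startKey, r.endKey)
    }
}
```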
ddl/index.go: file mode changed from 100755 to 100644 (no content changes)
… (diff view truncated; the remaining changed files of the 126 are not shown here)
