diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index 0c2f851da4413..8a49182e92f08 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -1233,7 +1233,7 @@ func (s *testIntegrationSuite5) TestBackwardCompatibility(c *C) { // Split the table. tableStart := tablecodec.GenTableRecordPrefix(tbl.Meta().ID) - s.cluster.SplitKeys(tableStart, tableStart.PrefixNext(), 100) + s.cluster.SplitKeys(tableStart, tableStart.PrefixNext(), 10) unique := false indexName := model.NewCIStr("idx_b") @@ -1275,7 +1275,6 @@ func (s *testIntegrationSuite5) TestBackwardCompatibility(c *C) { historyJob, err := getHistoryDDLJob(s.store, job.ID) c.Assert(err, IsNil) if historyJob == nil { - continue } c.Assert(historyJob.Error, IsNil) diff --git a/ddl/index.go b/ddl/index.go index fdba6c65008f6..622e93213929c 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -1217,10 +1217,9 @@ func (w *worker) updateReorgInfo(t table.PartitionedTable, reorg *reorgInfo) (bo failpoint.Inject("mockUpdateCachedSafePoint", func(val failpoint.Value) { if val.(bool) { - // 18 is for the logical time. - ts := oracle.GetPhysical(time.Now()) << 18 + ts := oracle.GoTimeToTS(time.Now()) s := reorg.d.store.(tikv.Storage) - s.UpdateSPCache(uint64(ts), time.Now()) + s.UpdateSPCache(ts, time.Now()) time.Sleep(time.Millisecond * 3) } }) diff --git a/docs/design/2021-04-26-lock-view.md b/docs/design/2021-04-26-lock-view.md index 56d16e8e86194..3ed0e5902c146 100644 --- a/docs/design/2021-04-26-lock-view.md +++ b/docs/design/2021-04-26-lock-view.md @@ -1,7 +1,7 @@ # TiDB Design Documents - Author(s): [longfangsong](https://github.com/longfangsong), [MyonKeminta](http://github.com/MyonKeminta) -- Last updated: May 6, 2021 +- Last updated: May 18, 2021 - Discussion PR: N/A - Tracking Issue: https://github.com/pingcap/tidb/issues/24199 @@ -35,14 +35,14 @@ Several tables will be provided in `information_schema`. Some tables has both lo | Field | Type | Comment | |------------|------------|---------| -|`TRX_ID` | `unsigned bigint` | The transaction ID (aka. start ts) | -|`TRX_STARTED`|`time`| Human readable start time of the transaction | -|`DIGEST`|`text`| The digest of the current executing SQL statement | -|`SQLS` | `text` | A list of all executed SQL statements' digests | -|`STATE`| `enum('Running', 'Lock waiting', 'Committing', 'RollingBack')`| The state of the transaction | +| `TRX_ID` | `unsigned bigint` | The transaction ID (aka. start ts) | +| `TRX_STARTED`|`time`| Human readable start time of the transaction | +| `DIGEST`|`text`| The digest of the current executing SQL statement | +| `ALL_SQLS` | `text` | A list of all executed SQL statements' digests | +| `STATE`| `enum('Running', 'Lock waiting', 'Committing', 'RollingBack')`| The state of the transaction | | `WAITING_START_TIME` | `time` | The elapsed time since the start of the current lock waiting (if any) | | `SCOPE` | `enum('Global', 'Local')` | The scope of the transaction | -| `ISOLATION_LEVEL` | `enum('RR', 'RC')` | | +| `ISOLATION_LEVEL` | `enum('REPEATABLE-READ', 'READ-COMMITTED')` | | | `AUTOCOMMIT` | `bool` | | | `SESSION_ID` | `unsigned bigint` | | | `USER` | `varchar` | | @@ -79,24 +79,28 @@ Several tables will be provided in `information_schema`. Some tables has both lo * Permission: * `PROCESS` privilege is needed to access this table. 
-### Table `(CLUSTER_)DEAD_LOCK`
+### Table `(CLUSTER_)DEADLOCKS`

 | Field | Type | Comment |
 |------------|------------|---------|
 | `DEADLOCK_ID` | `int` | Multiple rows are needed to represent the information of a single deadlock event. This field is used to distinguish different events. |
 | `OCCUR_TIME` | `time` | The physical time when the deadlock occurs |
+| `RETRYABLE` | `bool` | Whether the deadlock is retryable. TiDB tries to determine if the current statement is (indirectly) waiting for a lock held by the current statement. |
 | `TRY_LOCK_TRX_ID` | `unsigned bigint` | The transaction ID (start ts) of the transaction that's trying to acquire the lock |
 | `CURRENT_SQL_DIGEST` | `text` | The SQL that's being blocked |
 | `KEY` | `varchar` | The key that's being locked, but locked by another transaction in the deadlock event |
-| `SQLS` | `text` | A list of the digest of SQL statements that the transaction has executed |
+| `ALL_SQLS` | `text` | A list of the digests of SQL statements that the transaction has executed |
 | `TRX_HOLDING_LOCK` | `unsigned bigint` | The transaction that's currently holding the lock. There will be another record in the table with the same `DEADLOCK_ID` for that transaction. |

 * Life span of rows:
   * Created after TiDB receives a deadlock error
   * FIFO, clean the oldest after the buffer is full
 * Collecting, storing and querying:
-  * All of these information can be collected on TiDB side. It just need to add the information to the table when receives deadlock error from TiKV. The information of other transactions involved in the deadlock circle needed to be fetched from elsewhere (the `TIDB_TRX` table) when handling the deadlock error.
-  * Currently there are no much information in the deadlock error (it doesn't has the SQLs and keys' information), which needs to be improved.
+  * All of this information can be collected on the TiDB side. TiDB just needs to add the information to the table when it receives a deadlock error from TiKV. The information of other transactions involved in the deadlock cycle needs to be fetched from elsewhere (the `CLUSTER_TIDB_TRX` table) when handling the deadlock error.
+  * TiKV needs to report richer information in the deadlock error for collecting.
+  * There are two types of deadlock errors internally: retryable and non-retryable. The transaction will internally retry on retryable deadlocks and won't report the error to the client. Therefore, users are typically more interested in non-retryable deadlocks.
+  * Retryable deadlock errors are not collected by default; collecting them can be enabled via configuration.
+  * Collecting `CLUSTER_TIDB_TRX` for richer information about retryable deadlocks may hurt performance. Whether it will be collected for retryable deadlocks will be decided after some tests.
 * Permission:
   * `PROCESS` privilege is needed to access this table.

@@ -151,9 +155,25 @@ The locking key and `resource_group_tag` that comes from the `Context` of the pe

 The wait chain will be added to the `Deadlock` error which is returned by the `PessimisticLock` request, so that when a deadlock happens, the full wait chain information can be passed to TiDB.

+### Configurations
+
+#### TiDB Config File `pessimistic-txn.tidb_deadlock_history_capacity`
+
+Specifies how many recent deadlock events each TiDB node should keep.
+Dynamically changeable via HTTP API.
+Value: 0 to 10000
+Default: 10
+
+#### TiDB Config File `pessimistic-txn.tidb_deadlock_history_collect_retryable`
+
+Specifies whether to collect retryable deadlock errors into the `(CLUSTER_)DEADLOCKS` table.
+Dynamically changeable via HTTP API.
+Value: 0 (do not collect) or 1 (collect)
+Default: 0
+
 ## Compatibility

-This feature is not expected to be incompatible with other features. During upgrading, when there are different versions of TiDB nodes exists at the same time, it's possible that the `CLUSTER_` prefixed tables may encounter errors. But since this feature is typically used by user manually, this shouldn't be a severe problem. So we don't need to care much about that.
+This feature is not expected to be incompatible with other features. During upgrading, when TiDB nodes of different versions exist at the same time, it's possible that the `CLUSTER_` prefixed tables may encounter errors. However, since this feature is typically used manually by users, this shouldn't be a severe problem. So we don't need to care much about that.

 ## Test Design

@@ -190,7 +210,7 @@ This feature is not expected to be incompatible with other features. During upgr

 * Since lock waiting on TiKV may time out and retry, it's possible that a single query to the `DATA_LOCK_WAIT` table doesn't show all (logical) lock waits.
 * Information about internal transactions may not be collected in our first version of the implementation.
-* Since TiDB need to query transaction information after it receives the deadlock error, the transactions' status may be changed during that time. As a result the information in `(CLUSTER_)DEAD_LOCK` table can't be promised to be accurate and complete.
+* Since TiDB needs to query transaction information after it receives the deadlock error, the transactions' statuses may change during that time. As a result, the information in the `(CLUSTER_)DEADLOCKS` table can't be guaranteed to be accurate and complete.
 * Statistics about transaction conflicts are still not enough.
 * Historical information of `TIDB_TRX` and `DATA_LOCK_WAITS` is not kept, which possibly makes it still difficult to investigate some kinds of problems.
 * The SQL digest that's holding the lock and blocking the current transaction is hard to retrieve and is not included in the current design.
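The design above describes the deadlock history as a bounded FIFO buffer: a row is created when TiDB receives a deadlock error, `Push` assigns monotonically increasing IDs (a property the tests in this PR rely on), and the oldest record is evicted once the configured capacity is reached. The following is a minimal, self-contained sketch of that behavior; `deadlockRecord`, `deadlockHistory`, and `newDeadlockHistory` are illustrative names, not the actual API of TiDB's `util/deadlockhistory` package.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// deadlockRecord is an illustrative stand-in for one row of the DEADLOCKS table.
type deadlockRecord struct {
	ID        uint64
	OccurTime time.Time
	Retryable bool
}

// deadlockHistory keeps the most recent records in FIFO order, as the design
// describes: rows are created on a deadlock error, and once the buffer is
// full the oldest row is evicted.
type deadlockHistory struct {
	mu      sync.Mutex
	records []deadlockRecord
	cap     int
	nextID  uint64
}

func newDeadlockHistory(capacity int) *deadlockHistory {
	return &deadlockHistory{records: make([]deadlockRecord, 0, capacity), cap: capacity, nextID: 1}
}

// Push assigns a monotonically increasing ID and evicts the oldest record
// when the buffer is at capacity.
func (h *deadlockHistory) Push(r deadlockRecord) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.cap == 0 {
		return // a capacity of 0 effectively disables collection
	}
	r.ID = h.nextID
	h.nextID++
	if len(h.records) >= h.cap {
		h.records = h.records[1:] // FIFO: drop the oldest
	}
	h.records = append(h.records, r)
}

func main() {
	h := newDeadlockHistory(2) // the doc's default is 10; 2 keeps the demo short
	for i := 0; i < 3; i++ {
		h.Push(deadlockRecord{OccurTime: time.Now(), Retryable: i == 0})
	}
	for _, r := range h.records {
		fmt.Println(r.ID, r.Retryable) // prints "2 false" then "3 false"
	}
}
```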
diff --git a/domain/domain_test.go b/domain/domain_test.go index 51e0948d30715..099e3234c8279 100644 --- a/domain/domain_test.go +++ b/domain/domain_test.go @@ -287,7 +287,7 @@ func (*testSuite) TestT(c *C) { c.Assert(dd, NotNil) c.Assert(dd.GetLease(), Equals, 80*time.Millisecond) - snapTS := oracle.EncodeTSO(oracle.GetPhysical(time.Now())) + snapTS := oracle.GoTimeToTS(time.Now()) cs := &ast.CharsetOpt{ Chs: "utf8", Col: "utf8_bin", @@ -317,7 +317,7 @@ func (*testSuite) TestT(c *C) { c.Assert(err, IsNil) // for GetSnapshotInfoSchema - currSnapTS := oracle.EncodeTSO(oracle.GetPhysical(time.Now())) + currSnapTS := oracle.GoTimeToTS(time.Now()) currSnapIs, err := dom.GetSnapshotInfoSchema(currSnapTS) c.Assert(err, IsNil) c.Assert(currSnapIs, NotNil) diff --git a/domain/infosync/info.go b/domain/infosync/info.go index be8d80246e96b..a94a15fb7e212 100644 --- a/domain/infosync/info.go +++ b/domain/infosync/info.go @@ -559,7 +559,7 @@ func (is *InfoSyncer) ReportMinStartTS(store kv.Storage) { logutil.BgLogger().Error("update minStartTS failed", zap.Error(err)) return } - now := time.Unix(0, oracle.ExtractPhysical(currentVer.Ver)*1e6) + now := oracle.GetTimeFromTS(currentVer.Ver) startTSLowerLimit := oracle.GoTimeToLowerLimitStartTS(now, tikv.MaxTxnTimeUse) minStartTS := oracle.GoTimeToTS(now) diff --git a/errno/errcode.go b/errno/errcode.go index 2ed488242dd10..1cb4889eccbc1 100644 --- a/errno/errcode.go +++ b/errno/errcode.go @@ -855,6 +855,11 @@ const ( ErrGrantRole = 3523 ErrRoleNotGranted = 3530 ErrLockAcquireFailAndNoWaitSet = 3572 + ErrCTERecursiveRequiresUnion = 3573 + ErrCTERecursiveRequiresNonRecursiveFirst = 3574 + ErrCTERecursiveForbidsAggregation = 3575 + ErrCTERecursiveForbiddenJoinOrder = 3576 + ErrInvalidRequiresSingleReference = 3577 ErrWindowNoSuchWindow = 3579 ErrWindowCircularityInWindowGraph = 3580 ErrWindowNoChildPartitioning = 3581 @@ -877,6 +882,7 @@ const ( ErrWindowExplainJSON = 3598 ErrWindowFunctionIgnoresFrame = 3599 ErrIllegalPrivilegeLevel = 3619 + ErrCTEMaxRecursionDepth = 3636 ErrNotHintUpdatable = 3637 ErrDataTruncatedFunctionalIndex = 3751 ErrDataOutOfRangeFunctionalIndex = 3752 diff --git a/errno/errname.go b/errno/errname.go index 62662ce5ac934..98cbb17cd6b25 100644 --- a/errno/errname.go +++ b/errno/errname.go @@ -355,7 +355,7 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{ ErrViewSelectClause: mysql.Message("View's SELECT contains a '%s' clause", nil), ErrViewSelectVariable: mysql.Message("View's SELECT contains a variable or parameter", nil), ErrViewSelectTmptable: mysql.Message("View's SELECT refers to a temporary table '%-.192s'", nil), - ErrViewWrongList: mysql.Message("View's SELECT and view's field list have different column counts", nil), + ErrViewWrongList: mysql.Message("In definition of view, derived table or common table expression, SELECT list and column names list have different column counts", nil), ErrWarnViewMerge: mysql.Message("View merge algorithm can't be used here for now (assumed undefined algorithm)", nil), ErrWarnViewWithoutKey: mysql.Message("View being updated does not have complete key of underlying table in it", nil), ErrViewInvalid: mysql.Message("View '%-.192s.%-.192s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them", nil), @@ -902,6 +902,12 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{ ErrUnsupportedConstraintCheck: mysql.Message("%s is not supported", nil), ErrDynamicPrivilegeNotRegistered: mysql.Message("Dynamic privilege '%s' is not registered with the 
server.", nil), ErrIllegalPrivilegeLevel: mysql.Message("Illegal privilege level specified for %s", nil), + ErrCTERecursiveRequiresUnion: mysql.Message("Recursive Common Table Expression '%s' should contain a UNION", nil), + ErrCTERecursiveRequiresNonRecursiveFirst: mysql.Message("Recursive Common Table Expression '%s' should have one or more non-recursive query blocks followed by one or more recursive ones", nil), + ErrCTERecursiveForbidsAggregation: mysql.Message("Recursive Common Table Expression '%s' can contain neither aggregation nor window functions in recursive query block", nil), + ErrCTERecursiveForbiddenJoinOrder: mysql.Message("In recursive query block of Recursive Common Table Expression '%s', the recursive table must neither be in the right argument of a LEFT JOIN, nor be forced to be non-first with join order hints", nil), + ErrInvalidRequiresSingleReference: mysql.Message("In recursive query block of Recursive Common Table Expression '%s', the recursive table must be referenced only once, and not in any subquery", nil), + ErrCTEMaxRecursionDepth: mysql.Message("Recursive query aborted after %d iterations. Try increasing @@cte_max_recursion_depth to a larger value", nil), // MariaDB errors. ErrOnlyOneDefaultPartionAllowed: mysql.Message("Only one DEFAULT partition allowed", nil), ErrWrongPartitionTypeExpectedSystemTime: mysql.Message("Wrong partitioning type, expected type: `SYSTEM_TIME`", nil), diff --git a/errors.toml b/errors.toml index 926823909f96e..3d8b98f2368bb 100644 --- a/errors.toml +++ b/errors.toml @@ -193,7 +193,7 @@ View's SELECT contains a '%s' clause ["ddl:1353"] error = ''' -View's SELECT and view's field list have different column counts +In definition of view, derived table or common table expression, SELECT list and column names list have different column counts ''' ["ddl:1481"] @@ -561,6 +561,11 @@ error = ''' Illegal privilege level specified for %s ''' +["executor:3636"] +error = ''' +Recursive query aborted after %d iterations. Try increasing @@cte_max_recursion_depth to a larger value +''' + ["executor:3929"] error = ''' Dynamic privilege '%s' is not registered with the server. @@ -1016,6 +1021,31 @@ error = ''' Unresolved name '%s' for %s hint ''' +["planner:3573"] +error = ''' +Recursive Common Table Expression '%s' should contain a UNION +''' + +["planner:3574"] +error = ''' +Recursive Common Table Expression '%s' should have one or more non-recursive query blocks followed by one or more recursive ones +''' + +["planner:3575"] +error = ''' +Recursive Common Table Expression '%s' can contain neither aggregation nor window functions in recursive query block +''' + +["planner:3576"] +error = ''' +In recursive query block of Recursive Common Table Expression '%s', the recursive table must neither be in the right argument of a LEFT JOIN, nor be forced to be non-first with join order hints +''' + +["planner:3577"] +error = ''' +In recursive query block of Recursive Common Table Expression '%s', the recursive table must be referenced only once, and not in any subquery +''' + ["planner:3579"] error = ''' Window name '%s' is not defined. 
diff --git a/executor/batch_point_get_test.go b/executor/batch_point_get_test.go index 926834dc9281b..8f8c39d4b0eed 100644 --- a/executor/batch_point_get_test.go +++ b/executor/batch_point_get_test.go @@ -156,6 +156,16 @@ func (s *testBatchPointGetSuite) TestIssue18843(c *C) { tk.MustQuery("select * from t18843 where f is null").Check(testkit.Rows("2 ")) } +func (s *testBatchPointGetSuite) TestIssue24562(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists ttt") + tk.MustExec("create table ttt(a enum(\"a\",\"b\",\"c\",\"d\"), primary key(a));") + tk.MustExec("insert into ttt values(1)") + tk.MustQuery("select * from ttt where ttt.a in (\"1\",\"b\")").Check(testkit.Rows()) + tk.MustQuery("select * from ttt where ttt.a in (1,\"b\")").Check(testkit.Rows("a")) +} + func (s *testBatchPointGetSuite) TestBatchPointGetUnsignedHandleWithSort(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") diff --git a/executor/builder.go b/executor/builder.go index 54e2dfb93012a..b04dfe45f4bab 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -1534,7 +1534,9 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo strings.ToLower(infoschema.TableClientErrorsSummaryByUser), strings.ToLower(infoschema.TableClientErrorsSummaryByHost), strings.ToLower(infoschema.TableTiDBTrx), - strings.ToLower(infoschema.ClusterTableTiDBTrx): + strings.ToLower(infoschema.ClusterTableTiDBTrx), + strings.ToLower(infoschema.TableDeadlocks), + strings.ToLower(infoschema.ClusterTableDeadlocks): return &MemTableReaderExec{ baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, diff --git a/executor/errors.go b/executor/errors.go index ad8104a96e7ee..94237808d1562 100644 --- a/executor/errors.go +++ b/executor/errors.go @@ -50,8 +50,9 @@ var ( ErrIllegalPrivilegeLevel = dbterror.ClassExecutor.NewStd(mysql.ErrIllegalPrivilegeLevel) ErrInvalidSplitRegionRanges = dbterror.ClassExecutor.NewStd(mysql.ErrInvalidSplitRegionRanges) - ErrBRIEBackupFailed = dbterror.ClassExecutor.NewStd(mysql.ErrBRIEBackupFailed) - ErrBRIERestoreFailed = dbterror.ClassExecutor.NewStd(mysql.ErrBRIERestoreFailed) - ErrBRIEImportFailed = dbterror.ClassExecutor.NewStd(mysql.ErrBRIEImportFailed) - ErrBRIEExportFailed = dbterror.ClassExecutor.NewStd(mysql.ErrBRIEExportFailed) + ErrBRIEBackupFailed = dbterror.ClassExecutor.NewStd(mysql.ErrBRIEBackupFailed) + ErrBRIERestoreFailed = dbterror.ClassExecutor.NewStd(mysql.ErrBRIERestoreFailed) + ErrBRIEImportFailed = dbterror.ClassExecutor.NewStd(mysql.ErrBRIEImportFailed) + ErrBRIEExportFailed = dbterror.ClassExecutor.NewStd(mysql.ErrBRIEExportFailed) + ErrCTEMaxRecursionDepth = dbterror.ClassExecutor.NewStd(mysql.ErrCTEMaxRecursionDepth) ) diff --git a/executor/executor.go b/executor/executor.go index 1666f6955bba9..2b9b8f0f52954 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -49,6 +49,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/tikv" + tikverr "github.com/pingcap/tidb/store/tikv/error" tikvstore "github.com/pingcap/tidb/store/tikv/kv" tikvutil "github.com/pingcap/tidb/store/tikv/util" "github.com/pingcap/tidb/table" @@ -58,6 +59,7 @@ import ( "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/admin" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/deadlockhistory" "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/execdetails" 
"github.com/pingcap/tidb/util/logutil" @@ -983,6 +985,13 @@ func newLockCtx(seVars *variable.SessionVars, lockWaitTime int64) *tikvstore.Loc LockKeysCount: &seVars.StmtCtx.LockKeysCount, LockExpired: &seVars.TxnCtx.LockExpire, ResourceGroupTag: resourcegrouptag.EncodeResourceGroupTag(sqlDigest), + OnDeadlock: func(deadlock *tikverr.ErrDeadlock) { + // TODO: Support collecting retryable deadlocks according to the config. + if !deadlock.IsRetryable { + rec := deadlockhistory.ErrDeadlockToDeadlockRecord(deadlock) + deadlockhistory.GlobalDeadlockHistory.Push(rec) + } + }, } } diff --git a/executor/executor_test.go b/executor/executor_test.go index dde9511a8410d..8836b4f59fbd3 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -70,6 +70,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/deadlockhistory" "github.com/pingcap/tidb/util/gcutil" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/memory" @@ -2726,7 +2727,7 @@ func (s *testSuiteP2) TestHistoryRead(c *C) { tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 ", "4 ", "8 8", "9 9")) tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'") tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4")) - tsoStr := strconv.FormatUint(oracle.EncodeTSO(snapshotTime.UnixNano()/int64(time.Millisecond)), 10) + tsoStr := strconv.FormatUint(oracle.GoTimeToTS(snapshotTime), 10) tk.MustExec("set @@tidb_snapshot = '" + tsoStr + "'") tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4")) @@ -8151,7 +8152,71 @@ func (s *testSerialSuite) TestIssue24210(c *C) { c.Assert(err.Error(), Equals, "mock SelectionExec.baseExecutor.Open returned error") err = failpoint.Disable("github.com/pingcap/tidb/executor/mockSelectionExecBaseExecutorOpenReturnedError") c.Assert(err, IsNil) +} + +func (s *testSerialSuite) TestDeadlockTable(c *C) { + deadlockhistory.GlobalDeadlockHistory.Clear() + occurTime := time.Date(2021, 5, 10, 1, 2, 3, 456789000, time.UTC) + rec := &deadlockhistory.DeadlockRecord{ + OccurTime: occurTime, + IsRetryable: false, + WaitChain: []deadlockhistory.WaitChainItem{ + { + TryLockTxn: 101, + SQLDigest: "aabbccdd", + Key: []byte("k1"), + AllSQLs: nil, + TxnHoldingLock: 102, + }, + { + TryLockTxn: 102, + SQLDigest: "ddccbbaa", + Key: []byte("k2"), + AllSQLs: []string{"sql1"}, + TxnHoldingLock: 101, + }, + }, + } + deadlockhistory.GlobalDeadlockHistory.Push(rec) + + occurTime2 := time.Date(2022, 6, 11, 2, 3, 4, 987654000, time.UTC) + rec2 := &deadlockhistory.DeadlockRecord{ + OccurTime: occurTime2, + IsRetryable: true, + WaitChain: []deadlockhistory.WaitChainItem{ + { + TryLockTxn: 201, + AllSQLs: []string{}, + TxnHoldingLock: 202, + }, + { + TryLockTxn: 202, + AllSQLs: []string{"sql1", "sql2, sql3"}, + TxnHoldingLock: 203, + }, + { + TryLockTxn: 203, + TxnHoldingLock: 201, + }, + }, + } + deadlockhistory.GlobalDeadlockHistory.Push(rec2) + + // `Push` sets the record's ID, and ID in a single DeadlockHistory is monotonically increasing. We must get it here + // to know what it is. 
+ id1 := strconv.FormatUint(rec.ID, 10) + id2 := strconv.FormatUint(rec2.ID, 10) + + tk := testkit.NewTestKit(c, s.store) + tk.MustQuery("select * from information_schema.deadlocks").Check( + testutil.RowsWithSep("/", + id1+"/2021-05-10 01:02:03.456789/0/101/aabbccdd/6B31//102", + id1+"/2021-05-10 01:02:03.456789/0/102/ddccbbaa/6B32/[sql1]/101", + id2+"/2022-06-11 02:03:04.987654/1/201///[]/202", + id2+"/2022-06-11 02:03:04.987654/1/202///[sql1, sql2, sql3]/203", + id2+"/2022-06-11 02:03:04.987654/1/203////201", + )) } func (s testSerialSuite) TestExprBlackListForEnum(c *C) { diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go index 4f788a3d7bd1d..491eb3a3fe26f 100644 --- a/executor/infoschema_reader.go +++ b/executor/infoschema_reader.go @@ -52,6 +52,7 @@ import ( "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/collate" + "github.com/pingcap/tidb/util/deadlockhistory" "github.com/pingcap/tidb/util/pdapi" "github.com/pingcap/tidb/util/sem" "github.com/pingcap/tidb/util/set" @@ -153,6 +154,10 @@ func (e *memtableRetriever) retrieve(ctx context.Context, sctx sessionctx.Contex e.setDataForTiDBTrx(sctx) case infoschema.ClusterTableTiDBTrx: err = e.setDataForClusterTiDBTrx(sctx) + case infoschema.TableDeadlocks: + err = e.setDataForDeadlock(sctx) + case infoschema.ClusterTableDeadlocks: + err = e.setDataForClusterDeadlock(sctx) } if err != nil { return nil, err @@ -2048,6 +2053,33 @@ func (e *memtableRetriever) setDataForClusterTiDBTrx(ctx sessionctx.Context) err return nil } +func (e *memtableRetriever) setDataForDeadlock(ctx sessionctx.Context) error { + hasPriv := false + if pm := privilege.GetPrivilegeManager(ctx); pm != nil { + hasPriv = pm.RequestVerification(ctx.GetSessionVars().ActiveRoles, "", "", "", mysql.ProcessPriv) + } + + if !hasPriv { + return plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("PROCESS") + } + + e.rows = deadlockhistory.GlobalDeadlockHistory.GetAllDatum() + return nil +} + +func (e *memtableRetriever) setDataForClusterDeadlock(ctx sessionctx.Context) error { + err := e.setDataForDeadlock(ctx) + if err != nil { + return err + } + rows, err := infoschema.AppendHostInfoToRows(ctx, e.rows) + if err != nil { + return err + } + e.rows = rows + return nil +} + type hugeMemTableRetriever struct { dummyCloser table *model.TableInfo diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go index cc0e4074b39a0..446e689184086 100644 --- a/executor/partition_table_test.go +++ b/executor/partition_table_test.go @@ -240,8 +240,8 @@ func (s *partitionTableSuite) TestOrderByandLimit(c *C) { // range partition table tk.MustExec(`create table trange(a int, b int, index idx_a(a)) partition by range(a) ( - partition p0 values less than(300), - partition p1 values less than (500), + partition p0 values less than(300), + partition p1 values less than (500), partition p2 values less than(1100));`) // hash partition table @@ -1298,6 +1298,146 @@ func (s *partitionTableSuite) TestSplitRegion(c *C) { tk.MustPartition(`select * from thash where a in (1, 10001, 20001)`, "p1").Sort().Check(result) } +func (s *partitionTableSuite) TestParallelApply(c *C) { + if israce.RaceEnabled { + c.Skip("exhaustive types test, skip race test") + } + + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create database test_parallel_apply") + tk.MustExec("use test_parallel_apply") + tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") + tk.MustExec("set tidb_enable_parallel_apply=true") + + 
tk.MustExec(`create table touter (a int, b int)`) + tk.MustExec(`create table tinner (a int, b int, key(a))`) + tk.MustExec(`create table thash (a int, b int, key(a)) partition by hash(a) partitions 4`) + tk.MustExec(`create table trange (a int, b int, key(a)) partition by range(a) ( + partition p0 values less than(10000), + partition p1 values less than(20000), + partition p2 values less than(30000), + partition p3 values less than(40000))`) + + vouter := make([]string, 0, 100) + for i := 0; i < 100; i++ { + vouter = append(vouter, fmt.Sprintf("(%v, %v)", rand.Intn(40000), rand.Intn(40000))) + } + tk.MustExec("insert into touter values " + strings.Join(vouter, ", ")) + + vals := make([]string, 0, 2000) + for i := 0; i < 100; i++ { + vals = append(vals, fmt.Sprintf("(%v, %v)", rand.Intn(40000), rand.Intn(40000))) + } + tk.MustExec("insert into tinner values " + strings.Join(vals, ", ")) + tk.MustExec("insert into thash values " + strings.Join(vals, ", ")) + tk.MustExec("insert into trange values " + strings.Join(vals, ", ")) + + // parallel apply + hash partition + IndexReader as its inner child + tk.MustQuery(`explain format='brief' select * from touter where touter.a > (select sum(thash.a) from thash use index(a) where thash.a>touter.b)`).Check(testkit.Rows( + `Projection 10000.00 root test_parallel_apply.touter.a, test_parallel_apply.touter.b`, + `└─Apply 10000.00 root CARTESIAN inner join, other cond:gt(cast(test_parallel_apply.touter.a, decimal(20,0) BINARY), Column#7)`, + ` ├─TableReader(Build) 10000.00 root data:TableFullScan`, + ` │ └─TableFullScan 10000.00 cop[tikv] table:touter keep order:false, stats:pseudo`, + ` └─StreamAgg(Probe) 1.00 root funcs:sum(Column#9)->Column#7`, + ` └─IndexReader 1.00 root partition:all index:StreamAgg`, // IndexReader is a inner child of Apply + ` └─StreamAgg 1.00 cop[tikv] funcs:sum(test_parallel_apply.thash.a)->Column#9`, + ` └─Selection 8000.00 cop[tikv] gt(test_parallel_apply.thash.a, test_parallel_apply.touter.b)`, + ` └─IndexFullScan 10000.00 cop[tikv] table:thash, index:a(a) keep order:false, stats:pseudo`)) + tk.MustQuery(`select * from touter where touter.a > (select sum(thash.a) from thash use index(a) where thash.a>touter.b)`).Sort().Check( + tk.MustQuery(`select * from touter where touter.a > (select sum(tinner.a) from tinner use index(a) where tinner.a>touter.b)`).Sort().Rows()) + + // parallel apply + hash partition + TableReader as its inner child + tk.MustQuery(`explain format='brief' select * from touter where touter.a > (select sum(thash.b) from thash ignore index(a) where thash.a>touter.b)`).Check(testkit.Rows( + `Projection 10000.00 root test_parallel_apply.touter.a, test_parallel_apply.touter.b`, + `└─Apply 10000.00 root CARTESIAN inner join, other cond:gt(cast(test_parallel_apply.touter.a, decimal(20,0) BINARY), Column#7)`, + ` ├─TableReader(Build) 10000.00 root data:TableFullScan`, + ` │ └─TableFullScan 10000.00 cop[tikv] table:touter keep order:false, stats:pseudo`, + ` └─StreamAgg(Probe) 1.00 root funcs:sum(Column#9)->Column#7`, + ` └─TableReader 1.00 root partition:all data:StreamAgg`, // TableReader is a inner child of Apply + ` └─StreamAgg 1.00 cop[tikv] funcs:sum(test_parallel_apply.thash.b)->Column#9`, + ` └─Selection 8000.00 cop[tikv] gt(test_parallel_apply.thash.a, test_parallel_apply.touter.b)`, + ` └─TableFullScan 10000.00 cop[tikv] table:thash keep order:false, stats:pseudo`)) + tk.MustQuery(`select * from touter where touter.a > (select sum(thash.b) from thash ignore index(a) where thash.a>touter.b)`).Sort().Check( 
+ tk.MustQuery(`select * from touter where touter.a > (select sum(tinner.b) from tinner ignore index(a) where tinner.a>touter.b)`).Sort().Rows()) + + // parallel apply + hash partition + IndexLookUp as its inner child + tk.MustQuery(`explain format='brief' select * from touter where touter.a > (select sum(tinner.b) from tinner use index(a) where tinner.a>touter.b)`).Check(testkit.Rows( + `Projection 10000.00 root test_parallel_apply.touter.a, test_parallel_apply.touter.b`, + `└─Apply 10000.00 root CARTESIAN inner join, other cond:gt(cast(test_parallel_apply.touter.a, decimal(20,0) BINARY), Column#7)`, + ` ├─TableReader(Build) 10000.00 root data:TableFullScan`, + ` │ └─TableFullScan 10000.00 cop[tikv] table:touter keep order:false, stats:pseudo`, + ` └─HashAgg(Probe) 1.00 root funcs:sum(Column#9)->Column#7`, + ` └─IndexLookUp 1.00 root `, // IndexLookUp is a inner child of Apply + ` ├─Selection(Build) 8000.00 cop[tikv] gt(test_parallel_apply.tinner.a, test_parallel_apply.touter.b)`, + ` │ └─IndexFullScan 10000.00 cop[tikv] table:tinner, index:a(a) keep order:false, stats:pseudo`, + ` └─HashAgg(Probe) 1.00 cop[tikv] funcs:sum(test_parallel_apply.tinner.b)->Column#9`, + ` └─TableRowIDScan 8000.00 cop[tikv] table:tinner keep order:false, stats:pseudo`)) + tk.MustQuery(`select * from touter where touter.a > (select sum(thash.b) from thash use index(a) where thash.a>touter.b)`).Sort().Check( + tk.MustQuery(`select * from touter where touter.a > (select sum(tinner.b) from tinner use index(a) where tinner.a>touter.b)`).Sort().Rows()) + + // parallel apply + range partition + IndexReader as its inner child + tk.MustQuery(`explain format='brief' select * from touter where touter.a > (select sum(trange.a) from trange use index(a) where trange.a>touter.b)`).Check(testkit.Rows( + `Projection 10000.00 root test_parallel_apply.touter.a, test_parallel_apply.touter.b`, + `└─Apply 10000.00 root CARTESIAN inner join, other cond:gt(cast(test_parallel_apply.touter.a, decimal(20,0) BINARY), Column#7)`, + ` ├─TableReader(Build) 10000.00 root data:TableFullScan`, + ` │ └─TableFullScan 10000.00 cop[tikv] table:touter keep order:false, stats:pseudo`, + ` └─StreamAgg(Probe) 1.00 root funcs:sum(Column#9)->Column#7`, + ` └─IndexReader 1.00 root partition:all index:StreamAgg`, // IndexReader is a inner child of Apply + ` └─StreamAgg 1.00 cop[tikv] funcs:sum(test_parallel_apply.trange.a)->Column#9`, + ` └─Selection 8000.00 cop[tikv] gt(test_parallel_apply.trange.a, test_parallel_apply.touter.b)`, + ` └─IndexFullScan 10000.00 cop[tikv] table:trange, index:a(a) keep order:false, stats:pseudo`)) + tk.MustQuery(`select * from touter where touter.a > (select sum(trange.a) from trange use index(a) where trange.a>touter.b)`).Sort().Check( + tk.MustQuery(`select * from touter where touter.a > (select sum(tinner.a) from tinner use index(a) where tinner.a>touter.b)`).Sort().Rows()) + + // parallel apply + range partition + TableReader as its inner child + tk.MustQuery(`explain format='brief' select * from touter where touter.a > (select sum(trange.b) from trange ignore index(a) where trange.a>touter.b)`).Check(testkit.Rows( + `Projection 10000.00 root test_parallel_apply.touter.a, test_parallel_apply.touter.b`, + `└─Apply 10000.00 root CARTESIAN inner join, other cond:gt(cast(test_parallel_apply.touter.a, decimal(20,0) BINARY), Column#7)`, + ` ├─TableReader(Build) 10000.00 root data:TableFullScan`, + ` │ └─TableFullScan 10000.00 cop[tikv] table:touter keep order:false, stats:pseudo`, + ` └─StreamAgg(Probe) 1.00 root 
funcs:sum(Column#9)->Column#7`, + ` └─TableReader 1.00 root partition:all data:StreamAgg`, // TableReader is a inner child of Apply + ` └─StreamAgg 1.00 cop[tikv] funcs:sum(test_parallel_apply.trange.b)->Column#9`, + ` └─Selection 8000.00 cop[tikv] gt(test_parallel_apply.trange.a, test_parallel_apply.touter.b)`, + ` └─TableFullScan 10000.00 cop[tikv] table:trange keep order:false, stats:pseudo`)) + tk.MustQuery(`select * from touter where touter.a > (select sum(trange.b) from trange ignore index(a) where trange.a>touter.b)`).Sort().Check( + tk.MustQuery(`select * from touter where touter.a > (select sum(tinner.b) from tinner ignore index(a) where tinner.a>touter.b)`).Sort().Rows()) + + // parallel apply + range partition + IndexLookUp as its inner child + tk.MustQuery(`explain format='brief' select * from touter where touter.a > (select sum(tinner.b) from tinner use index(a) where tinner.a>touter.b)`).Check(testkit.Rows( + `Projection 10000.00 root test_parallel_apply.touter.a, test_parallel_apply.touter.b`, + `└─Apply 10000.00 root CARTESIAN inner join, other cond:gt(cast(test_parallel_apply.touter.a, decimal(20,0) BINARY), Column#7)`, + ` ├─TableReader(Build) 10000.00 root data:TableFullScan`, + ` │ └─TableFullScan 10000.00 cop[tikv] table:touter keep order:false, stats:pseudo`, + ` └─HashAgg(Probe) 1.00 root funcs:sum(Column#9)->Column#7`, + ` └─IndexLookUp 1.00 root `, // IndexLookUp is a inner child of Apply + ` ├─Selection(Build) 8000.00 cop[tikv] gt(test_parallel_apply.tinner.a, test_parallel_apply.touter.b)`, + ` │ └─IndexFullScan 10000.00 cop[tikv] table:tinner, index:a(a) keep order:false, stats:pseudo`, + ` └─HashAgg(Probe) 1.00 cop[tikv] funcs:sum(test_parallel_apply.tinner.b)->Column#9`, + ` └─TableRowIDScan 8000.00 cop[tikv] table:tinner keep order:false, stats:pseudo`)) + tk.MustQuery(`select * from touter where touter.a > (select sum(trange.b) from trange use index(a) where trange.a>touter.b)`).Sort().Check( + tk.MustQuery(`select * from touter where touter.a > (select sum(tinner.b) from tinner use index(a) where tinner.a>touter.b)`).Sort().Rows()) + + // random queries + ops := []string{"!=", ">", "<", ">=", "<="} + aggFuncs := []string{"sum", "count", "max", "min"} + tbls := []string{"tinner", "thash", "trange"} + for i := 0; i < 50; i++ { + var r [][]interface{} + op := ops[rand.Intn(len(ops))] + agg := aggFuncs[rand.Intn(len(aggFuncs))] + x := rand.Intn(10000) + for _, tbl := range tbls { + q := fmt.Sprintf(`select * from touter where touter.a > (select %v(%v.b) from %v where %v.a%vtouter.b-%v)`, agg, tbl, tbl, tbl, op, x) + if r == nil { + r = tk.MustQuery(q).Sort().Rows() + } else { + tk.MustQuery(q).Sort().Check(r) + } + } + } +} + func (s *partitionTableSuite) TestDirectReadingWithAgg(c *C) { if israce.RaceEnabled { c.Skip("exhaustive types test, skip race test") @@ -1567,6 +1707,38 @@ func (s *globalIndexSuite) TestIssue21731(c *C) { tk.MustExec("create table t (a int, b int, unique index idx(a)) partition by list columns(b) (partition p0 values in (1), partition p1 values in (2));") } +type testOutput struct { + SQL string + Plan []string + Res []string +} + +func (s *testSuiteWithData) verifyPartitionResult(tk *testkit.TestKit, input []string, output []testOutput) { + for i, tt := range input { + var isSelect bool = false + if strings.HasPrefix(strings.ToLower(tt), "select ") { + isSelect = true + } + s.testData.OnRecord(func() { + output[i].SQL = tt + if isSelect { + output[i].Plan = s.testData.ConvertRowsToStrings(tk.UsedPartitions(tt).Rows()) + output[i].Res = 
s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows()) + } else { + // Just verify SELECT (also avoid double INSERTs during record) + output[i].Res = nil + output[i].Plan = nil + } + }) + if isSelect { + tk.UsedPartitions(tt).Check(testkit.Rows(output[i].Plan...)) + tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...)) + } else { + tk.MustExec(tt) + } + } +} + func (s *testSuiteWithData) TestRangePartitionBoundariesEq(c *C) { tk := testkit.NewTestKit(c, s.store) @@ -1589,12 +1761,6 @@ PARTITION BY RANGE (a) ( s.verifyPartitionResult(tk, input, output) } -type testOutput struct { - SQL string - Plan []string - Res []string -} - func (s *testSuiteWithData) TestRangePartitionBoundariesNe(c *C) { tk := testkit.NewTestKit(c, s.store) @@ -1620,26 +1786,46 @@ PARTITION BY RANGE (a) ( s.verifyPartitionResult(tk, input, output) } -func (s *testSuiteWithData) verifyPartitionResult(tk *testkit.TestKit, input []string, output []testOutput) { - for i, tt := range input { - var isSelect bool = false - if strings.HasPrefix(strings.ToLower(tt), "select ") { - isSelect = true - } - s.testData.OnRecord(func() { - output[i].SQL = tt - if isSelect { - output[i].Plan = s.testData.ConvertRowsToStrings(tk.UsedPartitions(tt).Rows()) - output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows()) - } else { - // to avoid double execution of INSERT (and INSERT does not return anything) - output[i].Res = nil - output[i].Plan = nil - } - }) - if isSelect { - tk.UsedPartitions(tt).Check(testkit.Rows(output[i].Plan...)) - } - tk.MayQuery(tt).Sort().Check(testkit.Rows(output[i].Res...)) - } +func (s *testSuiteWithData) TestRangePartitionBoundariesBetweenM(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("CREATE DATABASE IF NOT EXISTS TestRangePartitionBoundariesBetweenM") + defer tk.MustExec("DROP DATABASE TestRangePartitionBoundariesBetweenM") + tk.MustExec("USE TestRangePartitionBoundariesBetweenM") + tk.MustExec("DROP TABLE IF EXISTS t") + tk.MustExec(`CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1000000), + PARTITION p1 VALUES LESS THAN (2000000), + PARTITION p2 VALUES LESS THAN (3000000))`) + + var input []string + var output []testOutput + s.testData.GetTestCases(c, &input, &output) + s.verifyPartitionResult(tk, input, output) +} + +func (s *testSuiteWithData) TestRangePartitionBoundariesBetweenS(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("CREATE DATABASE IF NOT EXISTS TestRangePartitionBoundariesBetweenS") + defer tk.MustExec("DROP DATABASE TestRangePartitionBoundariesBetweenS") + tk.MustExec("USE TestRangePartitionBoundariesBetweenS") + tk.MustExec("DROP TABLE IF EXISTS t") + tk.MustExec(`CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1), + PARTITION p1 VALUES LESS THAN (2), + PARTITION p2 VALUES LESS THAN (3), + PARTITION p3 VALUES LESS THAN (4), + PARTITION p4 VALUES LESS THAN (5), + PARTITION p5 VALUES LESS THAN (6), + PARTITION p6 VALUES LESS THAN (7))`) + + var input []string + var output []testOutput + s.testData.GetTestCases(c, &input, &output) + s.verifyPartitionResult(tk, input, output) } diff --git a/executor/point_get.go b/executor/point_get.go index a6c2c7034d38c..76132623e621f 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -435,6 +435,18 @@ func EncodeUniqueIndexValuesForKey(ctx sessionctx.Context, tblInfo *model.TableI var str string str, err = idxVals[i].ToString() idxVals[i].SetString(str, 
colInfo.FieldType.Collate) + } else if colInfo.Tp == mysql.TypeEnum && (idxVals[i].Kind() == types.KindString || idxVals[i].Kind() == types.KindBytes || idxVals[i].Kind() == types.KindBinaryLiteral) { + var str string + var e types.Enum + str, err = idxVals[i].ToString() + if err != nil { + return nil, kv.ErrNotExist + } + e, err = types.ParseEnumName(colInfo.FieldType.Elems, str, colInfo.FieldType.Collate) + if err != nil { + return nil, kv.ErrNotExist + } + idxVals[i].SetMysqlEnum(e, colInfo.FieldType.Collate) } else { // If a truncated error or an overflow error is thrown when converting the type of `idxVal[i]` to // the type of `colInfo`, the `idxVal` does not exist in the `idxInfo` for sure. diff --git a/executor/show_stats.go b/executor/show_stats.go index 3a1cf0cb48adc..b6449863b0c1f 100644 --- a/executor/show_stats.go +++ b/executor/show_stats.go @@ -17,7 +17,6 @@ import ( "fmt" "sort" "strings" - "time" "github.com/pingcap/errors" "github.com/pingcap/parser/ast" @@ -203,7 +202,7 @@ func (e *ShowExec) histogramToRow(dbName, tblName, partitionName, colName string } func (e *ShowExec) versionToTime(version uint64) types.Time { - t := time.Unix(0, oracle.ExtractPhysical(version)*int64(time.Millisecond)) + t := oracle.GetTimeFromTS(version) return types.NewTime(types.FromGoTime(t), mysql.TypeDatetime, 0) } diff --git a/executor/simple.go b/executor/simple.go index 7270f12aecdd0..3c45ee05ada4e 100644 --- a/executor/simple.go +++ b/executor/simple.go @@ -136,6 +136,8 @@ func (e *SimpleExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { err = e.executeAlterUser(x) case *ast.DropUserStmt: err = e.executeDropUser(x) + case *ast.RenameUserStmt: + err = e.executeRenameUser(x) case *ast.SetPwdStmt: err = e.executeSetPwd(x) case *ast.KillStmt: @@ -630,7 +632,7 @@ func (e *SimpleExec) executeStartTransactionReadOnlyWithTimestampBound(ctx conte if err != nil { return err } - startTS := oracle.ComposeTS(gt.Unix()*1000, 0) + startTS := oracle.GoTimeToTS(gt) opt.StartTS = startTS case ast.TimestampBoundExactStaleness: // TODO: support funcCallExpr in future @@ -666,7 +668,7 @@ func (e *SimpleExec) executeStartTransactionReadOnlyWithTimestampBound(ctx conte if err != nil { return err } - startTS := oracle.ComposeTS(gt.Unix()*1000, 0) + startTS := oracle.GoTimeToTS(gt) opt.StartTS = startTS } err := e.ctx.NewTxnWithStalenessOption(ctx, opt) @@ -1026,6 +1028,123 @@ func (e *SimpleExec) executeGrantRole(s *ast.GrantRoleStmt) error { return nil } +// Should cover same internal mysql.* tables as DROP USER, so this function is very similar +func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { + + var failedUser string + sysSession, err := e.getSysSession() + defer e.releaseSysSession(sysSession) + if err != nil { + return err + } + sqlExecutor := sysSession.(sqlexec.SQLExecutor) + + if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + return err + } + + for _, userToUser := range s.UserToUsers { + oldUser, newUser := userToUser.OldUser, userToUser.NewUser + exists, err := userExistsInternal(sqlExecutor, oldUser.Username, oldUser.Hostname) + if err != nil { + return err + } + if !exists { + failedUser = oldUser.String() + " TO " + newUser.String() + " old did not exist" + break + } + + exists, err = userExistsInternal(sqlExecutor, newUser.Username, newUser.Hostname) + if err != nil { + return err + } + if exists { + // MySQL reports the old user, even when the issue is the new user. 
+ failedUser = oldUser.String() + " TO " + newUser.String() + " new did exist" + break + } + + if err = renameUserHostInSystemTable(sqlExecutor, mysql.UserTable, "User", "Host", userToUser); err != nil { + failedUser = oldUser.String() + " TO " + newUser.String() + " " + mysql.UserTable + " error" + break + } + + // rename privileges from mysql.global_priv + if err = renameUserHostInSystemTable(sqlExecutor, mysql.GlobalPrivTable, "User", "Host", userToUser); err != nil { + failedUser = oldUser.String() + " TO " + newUser.String() + " " + mysql.GlobalPrivTable + " error" + break + } + + // rename privileges from mysql.db + if err = renameUserHostInSystemTable(sqlExecutor, mysql.DBTable, "User", "Host", userToUser); err != nil { + failedUser = oldUser.String() + " TO " + newUser.String() + " " + mysql.DBTable + " error" + break + } + + // rename privileges from mysql.tables_priv + if err = renameUserHostInSystemTable(sqlExecutor, mysql.TablePrivTable, "User", "Host", userToUser); err != nil { + failedUser = oldUser.String() + " TO " + newUser.String() + " " + mysql.TablePrivTable + " error" + break + } + + // rename relationship from mysql.role_edges + if err = renameUserHostInSystemTable(sqlExecutor, mysql.RoleEdgeTable, "TO_USER", "TO_HOST", userToUser); err != nil { + failedUser = oldUser.String() + " TO " + newUser.String() + " " + mysql.RoleEdgeTable + " (to) error" + break + } + + if err = renameUserHostInSystemTable(sqlExecutor, mysql.RoleEdgeTable, "FROM_USER", "FROM_HOST", userToUser); err != nil { + failedUser = oldUser.String() + " TO " + newUser.String() + " " + mysql.RoleEdgeTable + " (from) error" + break + } + + // rename relationship from mysql.default_roles + if err = renameUserHostInSystemTable(sqlExecutor, mysql.DefaultRoleTable, "DEFAULT_ROLE_USER", "DEFAULT_ROLE_HOST", userToUser); err != nil { + failedUser = oldUser.String() + " TO " + newUser.String() + " " + mysql.DefaultRoleTable + " (default role user) error" + break + } + + if err = renameUserHostInSystemTable(sqlExecutor, mysql.DefaultRoleTable, "USER", "HOST", userToUser); err != nil { + failedUser = oldUser.String() + " TO " + newUser.String() + " " + mysql.DefaultRoleTable + " error" + break + } + + // rename relationship from mysql.global_grants + // TODO: add global_grants into the parser + if err = renameUserHostInSystemTable(sqlExecutor, "global_grants", "User", "Host", userToUser); err != nil { + failedUser = oldUser.String() + " TO " + newUser.String() + " mysql.global_grants error" + break + } + + //TODO: need update columns_priv once we implement columns_priv functionality. + // When that is added, please refactor both executeRenameUser and executeDropUser to use an array of tables + // to loop over, so it is easier to maintain. + } + + if failedUser == "" { + if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + return err + } + } else { + if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + return err + } + return ErrCannotUser.GenWithStackByArgs("RENAME USER", failedUser) + } + domain.GetDomain(e.ctx).NotifyUpdatePrivilege(e.ctx) + return nil +} + +func renameUserHostInSystemTable(sqlExecutor sqlexec.SQLExecutor, tableName, usernameColumn, hostColumn string, users *ast.UserToUser) error { + sql := new(strings.Builder) + sqlexec.MustFormatSQL(sql, `UPDATE %n.%n SET %n = %?, %n = %? WHERE %n = %? 
and %n = %?;`, + mysql.SystemDB, tableName, + usernameColumn, users.NewUser.Username, hostColumn, users.NewUser.Hostname, + usernameColumn, users.OldUser.Username, hostColumn, users.OldUser.Hostname) + _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()) + return err +} + func (e *SimpleExec) executeDropUser(s *ast.DropUserStmt) error { // Check privileges. // Check `CREATE USER` privilege. @@ -1181,6 +1300,27 @@ func userExists(ctx sessionctx.Context, name string, host string) (bool, error) return len(rows) > 0, nil } +// use the same internal executor to read within the same transaction, otherwise same as userExists +func userExistsInternal(sqlExecutor sqlexec.SQLExecutor, name string, host string) (bool, error) { + sql := new(strings.Builder) + sqlexec.MustFormatSQL(sql, `SELECT * FROM %n.%n WHERE User=%? AND Host=%?;`, mysql.SystemDB, mysql.UserTable, name, host) + recordSet, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()) + if err != nil { + return false, err + } + req := recordSet.NewChunk() + err = recordSet.Next(context.TODO(), req) + var rows int = 0 + if err == nil { + rows = req.NumRows() + } + errClose := recordSet.Close() + if errClose != nil { + return false, errClose + } + return rows > 0, err +} + func (e *SimpleExec) executeSetPwd(s *ast.SetPwdStmt) error { var u, h string if s.User == nil { @@ -1389,7 +1529,7 @@ func (e *SimpleExec) executeDropStats(s *ast.DropStatsStmt) (err error) { func (e *SimpleExec) autoNewTxn() bool { switch e.Statement.(type) { - case *ast.CreateUserStmt, *ast.AlterUserStmt, *ast.DropUserStmt: + case *ast.CreateUserStmt, *ast.AlterUserStmt, *ast.DropUserStmt, *ast.RenameUserStmt: return true } return false diff --git a/executor/stale_txn_test.go b/executor/stale_txn_test.go index 64b334b15bf94..db2b55a9a1637 100644 --- a/executor/stale_txn_test.go +++ b/executor/stale_txn_test.go @@ -196,8 +196,7 @@ func (s *testStaleTxnSerialSuite) TestTimeBoundedStalenessTxn(c *C) { name: "max 20 seconds ago, safeTS 10 secs ago", sql: `START TRANSACTION READ ONLY WITH TIMESTAMP BOUND MAX STALENESS '00:00:20'`, injectSafeTS: func() uint64 { - phy := time.Now().Add(-10*time.Second).Unix() * 1000 - return oracle.ComposeTS(phy, 0) + return oracle.GoTimeToTS(time.Now().Add(-10 * time.Second)) }(), useSafeTS: true, }, @@ -205,8 +204,7 @@ func (s *testStaleTxnSerialSuite) TestTimeBoundedStalenessTxn(c *C) { name: "max 10 seconds ago, safeTS 20 secs ago", sql: `START TRANSACTION READ ONLY WITH TIMESTAMP BOUND MAX STALENESS '00:00:10'`, injectSafeTS: func() uint64 { - phy := time.Now().Add(-20*time.Second).Unix() * 1000 - return oracle.ComposeTS(phy, 0) + return oracle.GoTimeToTS(time.Now().Add(-20 * time.Second)) }(), useSafeTS: false, }, @@ -217,8 +215,7 @@ func (s *testStaleTxnSerialSuite) TestTimeBoundedStalenessTxn(c *C) { time.Now().Add(-20*time.Second).Format("2006-01-02 15:04:05")) }(), injectSafeTS: func() uint64 { - phy := time.Now().Add(-10*time.Second).Unix() * 1000 - return oracle.ComposeTS(phy, 0) + return oracle.GoTimeToTS(time.Now().Add(-10 * time.Second)) }(), useSafeTS: true, }, @@ -229,8 +226,7 @@ func (s *testStaleTxnSerialSuite) TestTimeBoundedStalenessTxn(c *C) { time.Now().Add(-10*time.Second).Format("2006-01-02 15:04:05")) }(), injectSafeTS: func() uint64 { - phy := time.Now().Add(-20*time.Second).Unix() * 1000 - return oracle.ComposeTS(phy, 0) + return oracle.GoTimeToTS(time.Now().Add(-20 * time.Second)) }(), useSafeTS: false, }, diff --git a/executor/testdata/executor_suite_in.json 
b/executor/testdata/executor_suite_in.json index fff3187717f0a..a8db9425a7078 100644 --- a/executor/testdata/executor_suite_in.json +++ b/executor/testdata/executor_suite_in.json @@ -142,5 +142,136 @@ "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6, 7)", "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7" ] + }, + { + "name": "TestRangePartitionBoundariesBetweenM", + "cases": [ + "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", + "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", + "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", + "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", + "ANALYZE TABLE t", + "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649", + "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648", + "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647", + "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646", + "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638", + "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650", + "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649", + "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648", + "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647", + "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646", + "SELECT * FROM t WHERE a BETWEEN 0 AND -1", + "SELECT * FROM t WHERE a BETWEEN 0 AND 0", + "SELECT * FROM t WHERE a BETWEEN 0 AND 1", + "SELECT * FROM t WHERE a BETWEEN 0 AND 2", + "SELECT * FROM t WHERE a BETWEEN 0 AND 10", + "SELECT * FROM t WHERE a BETWEEN 0 AND 999998", + "SELECT * FROM t WHERE a BETWEEN 0 AND 999999", + "SELECT * FROM t WHERE a BETWEEN 0 AND 1000000", + "SELECT * FROM t WHERE a BETWEEN 0 AND 1000001", + "SELECT * FROM t WHERE a BETWEEN 0 AND 1000002", + "SELECT * FROM t WHERE a BETWEEN 999998 AND 999997", + "SELECT * FROM t WHERE a BETWEEN 999998 AND 999998", + "SELECT * FROM t WHERE a BETWEEN 999998 AND 999999", + "SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000", + "SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008", + "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996", + "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997", + "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998", + "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999", + "SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000", + "SELECT * FROM t WHERE a BETWEEN 999999 AND 999998", + "SELECT * FROM t WHERE a BETWEEN 999999 AND 999999", + "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000", + "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001", + "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009", + "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997", + "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998", + "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999", + "SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000", + "SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001", + "SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999", + "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000", + "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001", + "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002", + "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010", + 
"SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998", + "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999", + "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000", + "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001", + "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002", + "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000", + "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001", + "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002", + "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003", + "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011", + "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999", + "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000", + "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001", + "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002", + "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003", + "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001", + "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002", + "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003", + "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004", + "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012", + "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000", + "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001", + "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002", + "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003", + "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004", + "SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999", + "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000", + "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001", + "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002", + "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010", + "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998", + "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999", + "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000", + "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001", + "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002", + "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000", + "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001", + "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002", + "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003", + "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011", + "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999", + "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000", + "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001", + "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002", + "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003" + ] + }, + { + "name": "TestRangePartitionBoundariesBetweenS", + "cases": [ + "INSERT INTO t VALUES (0, '0 Filler...')", + "INSERT INTO t VALUES (1, '1 Filler...')", + "INSERT INTO t VALUES (2, '2 Filler...')", + "INSERT INTO t VALUES (3, '3 Filler...')", + "INSERT INTO t VALUES (4, '4 Filler...')", + "INSERT INTO t VALUES (5, '5 Filler...')", + "INSERT INTO t VALUES (6, '6 Filler...')", + "ANALYZE TABLE t", + "SELECT * FROM t WHERE a BETWEEN 2 AND -1", + "SELECT * FROM t WHERE a BETWEEN -1 AND 4", + "SELECT * FROM t WHERE a BETWEEN 2 AND 0", + "SELECT * FROM t WHERE a BETWEEN 0 AND 4", + "SELECT * FROM t WHERE a BETWEEN 2 AND 1", + "SELECT * FROM t WHERE a BETWEEN 1 AND 4", + "SELECT * FROM t WHERE a BETWEEN 2 AND 2", + "SELECT * FROM t WHERE a BETWEEN 2 AND 4", + "SELECT * FROM t WHERE a BETWEEN 2 AND 3", + "SELECT * FROM t WHERE a BETWEEN 3 AND 4", + "SELECT * FROM t WHERE a BETWEEN 2 AND 4", + "SELECT * FROM t WHERE a BETWEEN 4 AND 4", + "SELECT * FROM t WHERE a BETWEEN 2 
AND 5", + "SELECT * FROM t WHERE a BETWEEN 5 AND 4", + "SELECT * FROM t WHERE a BETWEEN 2 AND 6", + "SELECT * FROM t WHERE a BETWEEN 6 AND 4", + "SELECT * FROM t WHERE a BETWEEN 2 AND 7", + "SELECT * FROM t WHERE a BETWEEN 7 AND 4" + ] } ] diff --git a/executor/testdata/executor_suite_out.json b/executor/testdata/executor_suite_out.json index caa5c4f948966..bd5fbdb486ac0 100644 --- a/executor/testdata/executor_suite_out.json +++ b/executor/testdata/executor_suite_out.json @@ -1395,5 +1395,1172 @@ ] } ] + }, + { + "Name": "TestRangePartitionBoundariesBetweenM", + "Cases": [ + { + "SQL": "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", + "Plan": null, + "Res": null + }, + { + "SQL": "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", + "Plan": null, + "Res": null + }, + { + "SQL": "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", + "Plan": null, + "Res": null + }, + { + "SQL": "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", + "Plan": null, + "Res": null + }, + { + "SQL": "ANALYZE TABLE t", + "Plan": null, + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649", + "Plan": [ + "p0" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648", + "Plan": [ + "p0" + ], + "Res": [ + "-2147483648 MIN_INT filler..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647", + "Plan": [ + "p0" + ], + "Res": [ + "-2147483648 MIN_INT filler..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646", + "Plan": [ + "p0" + ], + "Res": [ + "-2147483648 MIN_INT filler..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638", + "Plan": [ + "p0" + ], + "Res": [ + "-2147483648 MIN_INT filler..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650", + "Plan": [ + "p0" + ], + "Res": [ + "-2147483648 MIN_INT filler..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649", + "Plan": [ + "p0" + ], + "Res": [ + "-2147483648 MIN_INT filler..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648", + "Plan": [ + "p0" + ], + "Res": [ + "-2147483648 MIN_INT filler..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647", + "Plan": [ + "p0" + ], + "Res": [ + "-2147483648 MIN_INT filler..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646", + "Plan": [ + "p0" + ], + "Res": [ + "-2147483648 MIN_INT filler..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND -1", + "Plan": [ + "p0" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 0", + "Plan": [ + "p0" + ], + "Res": [ + "0 0 Filler..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 1", + "Plan": [ + "p0" + ], + "Res": [ + "0 0 Filler..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 2", + "Plan": [ + "p0" + ], + "Res": [ + "0 0 Filler..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 10", + "Plan": [ + "p0" + ], + "Res": [ + "0 0 Filler..." 
+ ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 999998", + "Plan": [ + "p0" + ], + "Res": [ + "0 0 Filler...", + "999998 999998 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 999999", + "Plan": [ + "p0" + ], + "Res": [ + "0 0 Filler...", + "999998 999998 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 1000000", + "Plan": [ + "p0 p1" + ], + "Res": [ + "0 0 Filler...", + "1000000 1000000 Filler ...", + "999998 999998 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 1000001", + "Plan": [ + "p0 p1" + ], + "Res": [ + "0 0 Filler...", + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "999998 999998 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 1000002", + "Plan": [ + "p0 p1" + ], + "Res": [ + "0 0 Filler...", + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "999998 999998 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 999997", + "Plan": [ + "p0" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 999998", + "Plan": [ + "p0" + ], + "Res": [ + "999998 999998 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 999999", + "Plan": [ + "p0" + ], + "Res": [ + "999998 999998 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000", + "Plan": [ + "p0 p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "999998 999998 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008", + "Plan": [ + "p0 p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "999998 999998 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996", + "Plan": [ + "p0 p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "999998 999998 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997", + "Plan": [ + "p0 p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "999998 999998 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998", + "Plan": [ + "p0 p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "999998 999998 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999", + "Plan": [ + "p0 p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "999998 999998 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000", + "Plan": [ + "all" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ...", + "999998 999998 Filler ...", + "999999 999999 Filler ..." 
+ ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 999998", + "Plan": [ + "p0" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 999999", + "Plan": [ + "p0" + ], + "Res": [ + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000", + "Plan": [ + "p0 p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001", + "Plan": [ + "p0 p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009", + "Plan": [ + "p0 p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997", + "Plan": [ + "p0 p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998", + "Plan": [ + "p0 p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999", + "Plan": [ + "p0 p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000", + "Plan": [ + "all" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001", + "Plan": [ + "all" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ...", + "2000001 2000001 Filler ...", + "999999 999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000", + "Plan": [ + "p1" + ], + "Res": [ + "1000000 1000000 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001", + "Plan": [ + "p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002", + "Plan": [ + "p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010", + "Plan": [ + "p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998", + "Plan": [ + "p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ..." 
+ ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999", + "Plan": [ + "p1" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000", + "Plan": [ + "p1 p2" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001", + "Plan": [ + "p1 p2" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ...", + "2000001 2000001 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002", + "Plan": [ + "p1 p2" + ], + "Res": [ + "1000000 1000000 Filler ...", + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ...", + "2000001 2000001 Filler ...", + "2000002 2000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000", + "Plan": [ + "p1" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001", + "Plan": [ + "p1" + ], + "Res": [ + "1000001 1000001 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002", + "Plan": [ + "p1" + ], + "Res": [ + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003", + "Plan": [ + "p1" + ], + "Res": [ + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011", + "Plan": [ + "p1" + ], + "Res": [ + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999", + "Plan": [ + "p1" + ], + "Res": [ + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000", + "Plan": [ + "p1 p2" + ], + "Res": [ + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001", + "Plan": [ + "p1 p2" + ], + "Res": [ + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ...", + "2000001 2000001 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002", + "Plan": [ + "p1 p2" + ], + "Res": [ + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ...", + "2000001 2000001 Filler ...", + "2000002 2000002 Filler ..." 
+ ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003", + "Plan": [ + "p1 p2" + ], + "Res": [ + "1000001 1000001 Filler ...", + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ...", + "2000001 2000001 Filler ...", + "2000002 2000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001", + "Plan": [ + "p1" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002", + "Plan": [ + "p1" + ], + "Res": [ + "1000002 1000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003", + "Plan": [ + "p1" + ], + "Res": [ + "1000002 1000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004", + "Plan": [ + "p1" + ], + "Res": [ + "1000002 1000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012", + "Plan": [ + "p1" + ], + "Res": [ + "1000002 1000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000", + "Plan": [ + "p1 p2" + ], + "Res": [ + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001", + "Plan": [ + "p1 p2" + ], + "Res": [ + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ...", + "2000001 2000001 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002", + "Plan": [ + "p1 p2" + ], + "Res": [ + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ...", + "2000001 2000001 Filler ...", + "2000002 2000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003", + "Plan": [ + "p1 p2" + ], + "Res": [ + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ...", + "2000001 2000001 Filler ...", + "2000002 2000002 Filler ..." + ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004", + "Plan": [ + "p1 p2" + ], + "Res": [ + "1000002 1000002 Filler ...", + "1999998 1999998 Filler ...", + "1999999 1999999 Filler ...", + "2000000 2000000 Filler ...", + "2000001 2000001 Filler ...", + "2000002 2000002 Filler ..." 
+ ] + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003", + "Plan": [ + "dual" + ], + "Res": null + } + ] + }, + { + "Name": "TestRangePartitionBoundariesBetweenS", + "Cases": [ + { + "SQL": "INSERT INTO t VALUES (0, '0 Filler...')", + "Plan": null, + "Res": null + }, + { + "SQL": "INSERT INTO t VALUES (1, '1 Filler...')", + "Plan": null, + "Res": null + }, + { + "SQL": "INSERT INTO t VALUES (2, '2 Filler...')", + "Plan": null, + "Res": null + }, + { + "SQL": "INSERT INTO t VALUES (3, '3 Filler...')", + "Plan": null, + "Res": null + }, + { + "SQL": "INSERT INTO t VALUES (4, '4 Filler...')", + "Plan": null, + "Res": null + }, + { + "SQL": "INSERT INTO t VALUES (5, '5 Filler...')", + "Plan": null, + "Res": null + }, + { + "SQL": "INSERT INTO t VALUES (6, '6 Filler...')", + "Plan": null, + "Res": null + }, + { + "SQL": "ANALYZE TABLE t", + "Plan": null, + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND -1", + "Plan": [ + "dual" + ], + "Res": null + }, + { + "SQL": "SELECT * FROM t WHERE a BETWEEN -1 AND 4", + "Plan": [ + "p0 p1 p2 p3 p4" + ], + "Res": [ + "0 0 Filler...", + "1 1 Filler...", + "2 2 Filler...", + "3 3 Filler...", + "4 4 Filler..." 
+        ]
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 0",
+        "Plan": [
+          "dual"
+        ],
+        "Res": null
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 4",
+        "Plan": [
+          "p0 p1 p2 p3 p4"
+        ],
+        "Res": [
+          "0 0 Filler...",
+          "1 1 Filler...",
+          "2 2 Filler...",
+          "3 3 Filler...",
+          "4 4 Filler..."
+        ]
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 1",
+        "Plan": [
+          "dual"
+        ],
+        "Res": null
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 1 AND 4",
+        "Plan": [
+          "p1 p2 p3 p4"
+        ],
+        "Res": [
+          "1 1 Filler...",
+          "2 2 Filler...",
+          "3 3 Filler...",
+          "4 4 Filler..."
+        ]
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 2",
+        "Plan": [
+          "p2"
+        ],
+        "Res": [
+          "2 2 Filler..."
+        ]
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 4",
+        "Plan": [
+          "p2 p3 p4"
+        ],
+        "Res": [
+          "2 2 Filler...",
+          "3 3 Filler...",
+          "4 4 Filler..."
+        ]
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 3",
+        "Plan": [
+          "p2 p3"
+        ],
+        "Res": [
+          "2 2 Filler...",
+          "3 3 Filler..."
+        ]
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 3 AND 4",
+        "Plan": [
+          "p3 p4"
+        ],
+        "Res": [
+          "3 3 Filler...",
+          "4 4 Filler..."
+        ]
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 4",
+        "Plan": [
+          "p2 p3 p4"
+        ],
+        "Res": [
+          "2 2 Filler...",
+          "3 3 Filler...",
+          "4 4 Filler..."
+        ]
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 4 AND 4",
+        "Plan": [
+          "p4"
+        ],
+        "Res": [
+          "4 4 Filler..."
+        ]
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 5",
+        "Plan": [
+          "p2 p3 p4 p5"
+        ],
+        "Res": [
+          "2 2 Filler...",
+          "3 3 Filler...",
+          "4 4 Filler...",
+          "5 5 Filler..."
+        ]
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 5 AND 4",
+        "Plan": [
+          "dual"
+        ],
+        "Res": null
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 6",
+        "Plan": [
+          "p2 p3 p4 p5 p6"
+        ],
+        "Res": [
+          "2 2 Filler...",
+          "3 3 Filler...",
+          "4 4 Filler...",
+          "5 5 Filler...",
+          "6 6 Filler..."
+        ]
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 6 AND 4",
+        "Plan": [
+          "dual"
+        ],
+        "Res": null
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 7",
+        "Plan": [
+          "p2 p3 p4 p5 p6"
+        ],
+        "Res": [
+          "2 2 Filler...",
+          "3 3 Filler...",
+          "4 4 Filler...",
+          "5 5 Filler...",
+          "6 6 Filler..."
+        ]
+      },
+      {
+        "SQL": "SELECT * FROM t WHERE a BETWEEN 7 AND 4",
+        "Plan": [
+          "dual"
+        ],
+        "Res": null
+      }
+    ]
+  }
 ]
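The two boundary suites above exercise range-partition pruning for `BETWEEN` predicates. The `CREATE TABLE` statements live in the test setup rather than in this diff; judging from the expected plans (`p0` for values below 1000000, `p1` below 2000000, `dual` for ranges past the last partition), the "M" variant behaves roughly like the sketch below. The DSN, driver choice, and DDL are assumptions for illustration, not part of the change:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"strings"

	_ "github.com/go-sql-driver/mysql" // TiDB speaks the MySQL protocol
)

func mustExec(db *sql.DB, q string) {
	if _, err := db.Exec(q); err != nil {
		log.Fatal(err)
	}
}

func main() {
	// DSN is illustrative; point it at any TiDB instance.
	db, err := sql.Open("mysql", "root@tcp(127.0.0.1:4000)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Assumed reconstruction of the "M" table; the real DDL lives in the test setup.
	mustExec(db, "DROP TABLE IF EXISTS t")
	mustExec(db, `CREATE TABLE t (a INT, b VARCHAR(255))
		PARTITION BY RANGE (a) (
			PARTITION p0 VALUES LESS THAN (1000000),
			PARTITION p1 VALUES LESS THAN (2000000),
			PARTITION p2 VALUES LESS THAN (3000000))`)

	// EXPLAIN's access-object column names the partitions that survive pruning.
	rows, err := db.Query("EXPLAIN SELECT * FROM t WHERE a BETWEEN 0 AND 999999")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	cols, err := rows.Columns()
	if err != nil {
		log.Fatal(err)
	}
	vals := make([]sql.RawBytes, len(cols))
	ptrs := make([]interface{}, len(cols))
	for i := range vals {
		ptrs[i] = &vals[i]
	}
	for rows.Next() {
		if err := rows.Scan(ptrs...); err != nil {
			log.Fatal(err)
		}
		fields := make([]string, len(vals))
		for i, v := range vals {
			fields[i] = string(v)
		}
		fmt.Println(strings.Join(fields, " | "))
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```

Running `EXPLAIN` this way surfaces the same pruned partition list that the `"Plan"` fields in the testdata assert against.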
TableTiDBTrx = "TIDB_TRX" + // TableDeadlocks is the string constatnt of deadlock table. + TableDeadlocks = "DEADLOCKS" ) var tableIDMap = map[string]int64{ @@ -239,6 +241,8 @@ var tableIDMap = map[string]int64{ TableClientErrorsSummaryByHost: autoid.InformationSchemaDBID + 69, TableTiDBTrx: autoid.InformationSchemaDBID + 70, ClusterTableTiDBTrx: autoid.InformationSchemaDBID + 71, + TableDeadlocks: autoid.InformationSchemaDBID + 72, + ClusterTableDeadlocks: autoid.InformationSchemaDBID + 73, } type columnInfo struct { @@ -1353,6 +1357,17 @@ var tableTiDBTrxCols = []columnInfo{ {name: "DB", tp: mysql.TypeVarchar, size: 64, comment: "The schema this transaction works on"}, } +var tableDeadlocksCols = []columnInfo{ + {name: "DEADLOCK_ID", tp: mysql.TypeLonglong, size: 21, flag: mysql.NotNullFlag, comment: "The ID to dinstinguish different deadlock events"}, + {name: "OCCUR_TIME", tp: mysql.TypeTimestamp, decimal: 6, size: 26, comment: "The physical time when the deadlock occurs"}, + {name: "RETRYABLE", tp: mysql.TypeTiny, size: 1, flag: mysql.NotNullFlag, comment: "Whether the deadlock is retryable. Retryable deadlocks are usually not reported to the client"}, + {name: "TRY_LOCK_TRX_ID", tp: mysql.TypeLonglong, size: 21, flag: mysql.NotNullFlag, comment: "The transaction ID (start ts) of the transaction that's trying to acquire the lock"}, + {name: "CURRENT_SQL_DIGEST", tp: mysql.TypeVarchar, size: 64, comment: "The digest of the SQL that's being blocked"}, + {name: "KEY", tp: mysql.TypeBlob, size: types.UnspecifiedLength, comment: "The key on which a transaction is waiting for another"}, + {name: "ALL_SQLS", tp: mysql.TypeBlob, size: types.UnspecifiedLength, comment: "A list of the digests of SQL statements that the transaction has executed"}, + {name: "TRX_HOLDING_LOCK", tp: mysql.TypeLonglong, size: 21, flag: mysql.NotNullFlag, comment: "The transaction ID (start ts) of the transaction that's currently holding the lock"}, +} + // GetShardingInfo returns a nil or description string for the sharding information of given TableInfo. // The returned description string may be: // - "NOT_SHARDED": for tables that SHARD_ROW_ID_BITS is not specified. 
@@ -1723,6 +1738,7 @@ var tableNameToColumns = map[string][]columnInfo{ TableClientErrorsSummaryByUser: tableClientErrorsSummaryByUserCols, TableClientErrorsSummaryByHost: tableClientErrorsSummaryByHostCols, TableTiDBTrx: tableTiDBTrxCols, + TableDeadlocks: tableDeadlocksCols, } func createInfoSchemaTable(_ autoid.Allocators, meta *model.TableInfo) (table.Table, error) { diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index 1e5687928f3ad..2d6506b56d5f4 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -1531,3 +1531,24 @@ func (s *testTableSuite) TestTrx(c *C) { testkit.Rows("424768545227014155 2021-05-07 12:56:48 " + digest + " Normal 1 19 2 root test"), ) } + +func (s *testTableSuite) TestInfoschemaDeadlockPrivilege(c *C) { + tk := s.newTestKitWithRoot(c) + tk.MustExec("create user 'testuser'@'localhost'") + c.Assert(tk.Se.Auth(&auth.UserIdentity{ + Username: "testuser", + Hostname: "localhost", + }, nil, nil), IsTrue) + err := tk.QueryToErr("select * from information_schema.deadlocks") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") + + tk = s.newTestKitWithRoot(c) + tk.MustExec("create user 'testuser2'@'localhost'") + tk.MustExec("grant process on *.* to 'testuser2'@'localhost'") + c.Assert(tk.Se.Auth(&auth.UserIdentity{ + Username: "testuser2", + Hostname: "localhost", + }, nil, nil), IsTrue) + _ = tk.MustQuery("select * from information_schema.deadlocks") +} diff --git a/planner/core/errors.go b/planner/core/errors.go index c713aab6367c1..66d7c17e8a7a7 100644 --- a/planner/core/errors.go +++ b/planner/core/errors.go @@ -20,74 +20,79 @@ import ( // error definitions. var ( - ErrUnsupportedType = dbterror.ClassOptimizer.NewStd(mysql.ErrUnsupportedType) - ErrAnalyzeMissIndex = dbterror.ClassOptimizer.NewStd(mysql.ErrAnalyzeMissIndex) - ErrWrongParamCount = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongParamCount) - ErrSchemaChanged = dbterror.ClassOptimizer.NewStd(mysql.ErrSchemaChanged) - ErrTablenameNotAllowedHere = dbterror.ClassOptimizer.NewStd(mysql.ErrTablenameNotAllowedHere) - ErrNotSupportedYet = dbterror.ClassOptimizer.NewStd(mysql.ErrNotSupportedYet) - ErrWrongUsage = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongUsage) - ErrUnknown = dbterror.ClassOptimizer.NewStd(mysql.ErrUnknown) - ErrUnknownTable = dbterror.ClassOptimizer.NewStd(mysql.ErrUnknownTable) - ErrNoSuchTable = dbterror.ClassOptimizer.NewStd(mysql.ErrNoSuchTable) - ErrViewRecursive = dbterror.ClassOptimizer.NewStd(mysql.ErrViewRecursive) - ErrWrongArguments = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongArguments) - ErrWrongNumberOfColumnsInSelect = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongNumberOfColumnsInSelect) - ErrBadGeneratedColumn = dbterror.ClassOptimizer.NewStd(mysql.ErrBadGeneratedColumn) - ErrFieldNotInGroupBy = dbterror.ClassOptimizer.NewStd(mysql.ErrFieldNotInGroupBy) - ErrAggregateOrderNonAggQuery = dbterror.ClassOptimizer.NewStd(mysql.ErrAggregateOrderNonAggQuery) - ErrFieldInOrderNotSelect = dbterror.ClassOptimizer.NewStd(mysql.ErrFieldInOrderNotSelect) - ErrAggregateInOrderNotSelect = dbterror.ClassOptimizer.NewStd(mysql.ErrAggregateInOrderNotSelect) - ErrBadTable = dbterror.ClassOptimizer.NewStd(mysql.ErrBadTable) - ErrKeyDoesNotExist = dbterror.ClassOptimizer.NewStd(mysql.ErrKeyDoesNotExist) - ErrOperandColumns = dbterror.ClassOptimizer.NewStd(mysql.ErrOperandColumns) - ErrInvalidGroupFuncUse = 
dbterror.ClassOptimizer.NewStd(mysql.ErrInvalidGroupFuncUse) - ErrIllegalReference = dbterror.ClassOptimizer.NewStd(mysql.ErrIllegalReference) - ErrNoDB = dbterror.ClassOptimizer.NewStd(mysql.ErrNoDB) - ErrUnknownExplainFormat = dbterror.ClassOptimizer.NewStd(mysql.ErrUnknownExplainFormat) - ErrWrongGroupField = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongGroupField) - ErrDupFieldName = dbterror.ClassOptimizer.NewStd(mysql.ErrDupFieldName) - ErrNonUpdatableTable = dbterror.ClassOptimizer.NewStd(mysql.ErrNonUpdatableTable) - ErrMultiUpdateKeyConflict = dbterror.ClassOptimizer.NewStd(mysql.ErrMultiUpdateKeyConflict) - ErrInternal = dbterror.ClassOptimizer.NewStd(mysql.ErrInternal) - ErrNonUniqTable = dbterror.ClassOptimizer.NewStd(mysql.ErrNonuniqTable) - ErrWindowInvalidWindowFuncUse = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowInvalidWindowFuncUse) - ErrWindowInvalidWindowFuncAliasUse = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowInvalidWindowFuncAliasUse) - ErrWindowNoSuchWindow = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowNoSuchWindow) - ErrWindowCircularityInWindowGraph = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowCircularityInWindowGraph) - ErrWindowNoChildPartitioning = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowNoChildPartitioning) - ErrWindowNoInherentFrame = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowNoInherentFrame) - ErrWindowNoRedefineOrderBy = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowNoRedefineOrderBy) - ErrWindowDuplicateName = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowDuplicateName) - ErrPartitionClauseOnNonpartitioned = dbterror.ClassOptimizer.NewStd(mysql.ErrPartitionClauseOnNonpartitioned) - ErrWindowFrameStartIllegal = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowFrameStartIllegal) - ErrWindowFrameEndIllegal = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowFrameEndIllegal) - ErrWindowFrameIllegal = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowFrameIllegal) - ErrWindowRangeFrameOrderType = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRangeFrameOrderType) - ErrWindowRangeFrameTemporalType = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRangeFrameTemporalType) - ErrWindowRangeFrameNumericType = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRangeFrameNumericType) - ErrWindowRangeBoundNotConstant = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRangeBoundNotConstant) - ErrWindowRowsIntervalUse = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRowsIntervalUse) - ErrWindowFunctionIgnoresFrame = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowFunctionIgnoresFrame) - ErrUnsupportedOnGeneratedColumn = dbterror.ClassOptimizer.NewStd(mysql.ErrUnsupportedOnGeneratedColumn) - ErrPrivilegeCheckFail = dbterror.ClassOptimizer.NewStd(mysql.ErrPrivilegeCheckFail) - ErrInvalidWildCard = dbterror.ClassOptimizer.NewStd(mysql.ErrInvalidWildCard) - ErrMixOfGroupFuncAndFields = dbterror.ClassOptimizer.NewStd(mysql.ErrMixOfGroupFuncAndFieldsIncompatible) - errTooBigPrecision = dbterror.ClassExpression.NewStd(mysql.ErrTooBigPrecision) - ErrDBaccessDenied = dbterror.ClassOptimizer.NewStd(mysql.ErrDBaccessDenied) - ErrTableaccessDenied = dbterror.ClassOptimizer.NewStd(mysql.ErrTableaccessDenied) - ErrSpecificAccessDenied = dbterror.ClassOptimizer.NewStd(mysql.ErrSpecificAccessDenied) - ErrViewNoExplain = dbterror.ClassOptimizer.NewStd(mysql.ErrViewNoExplain) - ErrWrongValueCountOnRow = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongValueCountOnRow) - ErrViewInvalid = dbterror.ClassOptimizer.NewStd(mysql.ErrViewInvalid) - ErrNoSuchThread = 
dbterror.ClassOptimizer.NewStd(mysql.ErrNoSuchThread) - ErrUnknownColumn = dbterror.ClassOptimizer.NewStd(mysql.ErrBadField) - ErrCartesianProductUnsupported = dbterror.ClassOptimizer.NewStd(mysql.ErrCartesianProductUnsupported) - ErrStmtNotFound = dbterror.ClassOptimizer.NewStd(mysql.ErrPreparedStmtNotFound) - ErrAmbiguous = dbterror.ClassOptimizer.NewStd(mysql.ErrNonUniq) - ErrUnresolvedHintName = dbterror.ClassOptimizer.NewStd(mysql.ErrUnresolvedHintName) - ErrNotHintUpdatable = dbterror.ClassOptimizer.NewStd(mysql.ErrNotHintUpdatable) - ErrWarnConflictingHint = dbterror.ClassOptimizer.NewStd(mysql.ErrWarnConflictingHint) + ErrUnsupportedType = dbterror.ClassOptimizer.NewStd(mysql.ErrUnsupportedType) + ErrAnalyzeMissIndex = dbterror.ClassOptimizer.NewStd(mysql.ErrAnalyzeMissIndex) + ErrWrongParamCount = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongParamCount) + ErrSchemaChanged = dbterror.ClassOptimizer.NewStd(mysql.ErrSchemaChanged) + ErrTablenameNotAllowedHere = dbterror.ClassOptimizer.NewStd(mysql.ErrTablenameNotAllowedHere) + ErrNotSupportedYet = dbterror.ClassOptimizer.NewStd(mysql.ErrNotSupportedYet) + ErrWrongUsage = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongUsage) + ErrUnknown = dbterror.ClassOptimizer.NewStd(mysql.ErrUnknown) + ErrUnknownTable = dbterror.ClassOptimizer.NewStd(mysql.ErrUnknownTable) + ErrNoSuchTable = dbterror.ClassOptimizer.NewStd(mysql.ErrNoSuchTable) + ErrViewRecursive = dbterror.ClassOptimizer.NewStd(mysql.ErrViewRecursive) + ErrWrongArguments = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongArguments) + ErrWrongNumberOfColumnsInSelect = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongNumberOfColumnsInSelect) + ErrBadGeneratedColumn = dbterror.ClassOptimizer.NewStd(mysql.ErrBadGeneratedColumn) + ErrFieldNotInGroupBy = dbterror.ClassOptimizer.NewStd(mysql.ErrFieldNotInGroupBy) + ErrAggregateOrderNonAggQuery = dbterror.ClassOptimizer.NewStd(mysql.ErrAggregateOrderNonAggQuery) + ErrFieldInOrderNotSelect = dbterror.ClassOptimizer.NewStd(mysql.ErrFieldInOrderNotSelect) + ErrAggregateInOrderNotSelect = dbterror.ClassOptimizer.NewStd(mysql.ErrAggregateInOrderNotSelect) + ErrBadTable = dbterror.ClassOptimizer.NewStd(mysql.ErrBadTable) + ErrKeyDoesNotExist = dbterror.ClassOptimizer.NewStd(mysql.ErrKeyDoesNotExist) + ErrOperandColumns = dbterror.ClassOptimizer.NewStd(mysql.ErrOperandColumns) + ErrInvalidGroupFuncUse = dbterror.ClassOptimizer.NewStd(mysql.ErrInvalidGroupFuncUse) + ErrIllegalReference = dbterror.ClassOptimizer.NewStd(mysql.ErrIllegalReference) + ErrNoDB = dbterror.ClassOptimizer.NewStd(mysql.ErrNoDB) + ErrUnknownExplainFormat = dbterror.ClassOptimizer.NewStd(mysql.ErrUnknownExplainFormat) + ErrWrongGroupField = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongGroupField) + ErrDupFieldName = dbterror.ClassOptimizer.NewStd(mysql.ErrDupFieldName) + ErrNonUpdatableTable = dbterror.ClassOptimizer.NewStd(mysql.ErrNonUpdatableTable) + ErrMultiUpdateKeyConflict = dbterror.ClassOptimizer.NewStd(mysql.ErrMultiUpdateKeyConflict) + ErrInternal = dbterror.ClassOptimizer.NewStd(mysql.ErrInternal) + ErrNonUniqTable = dbterror.ClassOptimizer.NewStd(mysql.ErrNonuniqTable) + ErrWindowInvalidWindowFuncUse = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowInvalidWindowFuncUse) + ErrWindowInvalidWindowFuncAliasUse = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowInvalidWindowFuncAliasUse) + ErrWindowNoSuchWindow = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowNoSuchWindow) + ErrWindowCircularityInWindowGraph = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowCircularityInWindowGraph) + 
ErrWindowNoChildPartitioning = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowNoChildPartitioning) + ErrWindowNoInherentFrame = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowNoInherentFrame) + ErrWindowNoRedefineOrderBy = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowNoRedefineOrderBy) + ErrWindowDuplicateName = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowDuplicateName) + ErrPartitionClauseOnNonpartitioned = dbterror.ClassOptimizer.NewStd(mysql.ErrPartitionClauseOnNonpartitioned) + ErrWindowFrameStartIllegal = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowFrameStartIllegal) + ErrWindowFrameEndIllegal = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowFrameEndIllegal) + ErrWindowFrameIllegal = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowFrameIllegal) + ErrWindowRangeFrameOrderType = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRangeFrameOrderType) + ErrWindowRangeFrameTemporalType = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRangeFrameTemporalType) + ErrWindowRangeFrameNumericType = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRangeFrameNumericType) + ErrWindowRangeBoundNotConstant = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRangeBoundNotConstant) + ErrWindowRowsIntervalUse = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowRowsIntervalUse) + ErrWindowFunctionIgnoresFrame = dbterror.ClassOptimizer.NewStd(mysql.ErrWindowFunctionIgnoresFrame) + ErrUnsupportedOnGeneratedColumn = dbterror.ClassOptimizer.NewStd(mysql.ErrUnsupportedOnGeneratedColumn) + ErrPrivilegeCheckFail = dbterror.ClassOptimizer.NewStd(mysql.ErrPrivilegeCheckFail) + ErrInvalidWildCard = dbterror.ClassOptimizer.NewStd(mysql.ErrInvalidWildCard) + ErrMixOfGroupFuncAndFields = dbterror.ClassOptimizer.NewStd(mysql.ErrMixOfGroupFuncAndFieldsIncompatible) + errTooBigPrecision = dbterror.ClassExpression.NewStd(mysql.ErrTooBigPrecision) + ErrDBaccessDenied = dbterror.ClassOptimizer.NewStd(mysql.ErrDBaccessDenied) + ErrTableaccessDenied = dbterror.ClassOptimizer.NewStd(mysql.ErrTableaccessDenied) + ErrSpecificAccessDenied = dbterror.ClassOptimizer.NewStd(mysql.ErrSpecificAccessDenied) + ErrViewNoExplain = dbterror.ClassOptimizer.NewStd(mysql.ErrViewNoExplain) + ErrWrongValueCountOnRow = dbterror.ClassOptimizer.NewStd(mysql.ErrWrongValueCountOnRow) + ErrViewInvalid = dbterror.ClassOptimizer.NewStd(mysql.ErrViewInvalid) + ErrNoSuchThread = dbterror.ClassOptimizer.NewStd(mysql.ErrNoSuchThread) + ErrUnknownColumn = dbterror.ClassOptimizer.NewStd(mysql.ErrBadField) + ErrCartesianProductUnsupported = dbterror.ClassOptimizer.NewStd(mysql.ErrCartesianProductUnsupported) + ErrStmtNotFound = dbterror.ClassOptimizer.NewStd(mysql.ErrPreparedStmtNotFound) + ErrAmbiguous = dbterror.ClassOptimizer.NewStd(mysql.ErrNonUniq) + ErrUnresolvedHintName = dbterror.ClassOptimizer.NewStd(mysql.ErrUnresolvedHintName) + ErrNotHintUpdatable = dbterror.ClassOptimizer.NewStd(mysql.ErrNotHintUpdatable) + ErrWarnConflictingHint = dbterror.ClassOptimizer.NewStd(mysql.ErrWarnConflictingHint) + ErrCTERecursiveRequiresUnion = dbterror.ClassOptimizer.NewStd(mysql.ErrCTERecursiveRequiresUnion) + ErrCTERecursiveRequiresNonRecursiveFirst = dbterror.ClassOptimizer.NewStd(mysql.ErrCTERecursiveRequiresNonRecursiveFirst) + ErrCTERecursiveForbidsAggregation = dbterror.ClassOptimizer.NewStd(mysql.ErrCTERecursiveForbidsAggregation) + ErrCTERecursiveForbiddenJoinOrder = dbterror.ClassOptimizer.NewStd(mysql.ErrCTERecursiveForbiddenJoinOrder) + ErrInvalidRequiresSingleReference = dbterror.ClassOptimizer.NewStd(mysql.ErrInvalidRequiresSingleReference) // Since we cannot know if 
user logged in with a password, use message of ErrAccessDeniedNoPassword instead ErrAccessDenied = dbterror.ClassOptimizer.NewStdErr(mysql.ErrAccessDenied, mysql.MySQLErrName[mysql.ErrAccessDeniedNoPassword]) ErrBadNull = dbterror.ClassOptimizer.NewStd(mysql.ErrBadNull) diff --git a/planner/core/logical_plan_test.go b/planner/core/logical_plan_test.go index 921f1c99b34ec..56652983ff8f8 100644 --- a/planner/core/logical_plan_test.go +++ b/planner/core/logical_plan_test.go @@ -1296,6 +1296,12 @@ func (s *testPlanSuite) TestVisitInfo(c *C) { {mysql.ExtendedPriv, "", "", "", ErrSpecificAccessDenied, false, "BACKUP_ADMIN", true}, }, }, + { + sql: "RENAME USER user1 to user1_tmp", + ans: []visitInfo{ + {mysql.CreateUserPriv, "", "", "", ErrSpecificAccessDenied, false, "", false}, + }, + }, } for _, tt := range tests { diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 7dc2459dace33..ccab0a28cc863 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -641,7 +641,8 @@ func (b *PlanBuilder) Build(ctx context.Context, node ast.Node) (Plan, error) { case *ast.BinlogStmt, *ast.FlushStmt, *ast.UseStmt, *ast.BRIEStmt, *ast.BeginStmt, *ast.CommitStmt, *ast.RollbackStmt, *ast.CreateUserStmt, *ast.SetPwdStmt, *ast.AlterInstanceStmt, *ast.GrantStmt, *ast.DropUserStmt, *ast.AlterUserStmt, *ast.RevokeStmt, *ast.KillStmt, *ast.DropStatsStmt, - *ast.GrantRoleStmt, *ast.RevokeRoleStmt, *ast.SetRoleStmt, *ast.SetDefaultRoleStmt, *ast.ShutdownStmt: + *ast.GrantRoleStmt, *ast.RevokeRoleStmt, *ast.SetRoleStmt, *ast.SetDefaultRoleStmt, *ast.ShutdownStmt, + *ast.RenameUserStmt: return b.buildSimple(node.(ast.StmtNode)) case ast.DDLNode: return b.buildDDL(ctx, x) @@ -2268,7 +2269,7 @@ func (b *PlanBuilder) buildSimple(node ast.StmtNode) (Plan, error) { case *ast.AlterInstanceStmt: err := ErrSpecificAccessDenied.GenWithStack("SUPER") b.visitInfo = appendVisitInfo(b.visitInfo, mysql.SuperPriv, "", "", "", err) - case *ast.AlterUserStmt: + case *ast.AlterUserStmt, *ast.RenameUserStmt: err := ErrSpecificAccessDenied.GenWithStackByArgs("CREATE USER") b.visitInfo = appendVisitInfo(b.visitInfo, mysql.CreateUserPriv, "", "", "", err) case *ast.GrantStmt: diff --git a/privilege/privileges/privileges_test.go b/privilege/privileges/privileges_test.go index 2f6cbef8af2cf..3917d822aa403 100644 --- a/privilege/privileges/privileges_test.go +++ b/privilege/privileges/privileges_test.go @@ -1400,6 +1400,54 @@ func (s *testPrivilegeSuite) TestSecurityEnhancedModeStatusVars(c *C) { }, nil, nil) } +func (s *testPrivilegeSuite) TestRenameUser(c *C) { + rootSe := newSession(c, s.store, s.dbName) + mustExec(c, rootSe, "DROP USER IF EXISTS 'ru1'@'localhost'") + mustExec(c, rootSe, "DROP USER IF EXISTS ru3") + mustExec(c, rootSe, "DROP USER IF EXISTS ru6@localhost") + mustExec(c, rootSe, "CREATE USER 'ru1'@'localhost'") + mustExec(c, rootSe, "CREATE USER ru3") + mustExec(c, rootSe, "CREATE USER ru6@localhost") + se1 := newSession(c, s.store, s.dbName) + c.Assert(se1.Auth(&auth.UserIdentity{Username: "ru1", Hostname: "localhost"}, nil, nil), IsTrue) + + // Check privileges (need CREATE USER) + _, err := se1.ExecuteInternal(context.Background(), "RENAME USER ru3 TO ru4") + c.Assert(err, ErrorMatches, ".*Access denied; you need .at least one of. the CREATE USER privilege.s. 
for this operation") + mustExec(c, rootSe, "GRANT UPDATE ON mysql.user TO 'ru1'@'localhost'") + _, err = se1.ExecuteInternal(context.Background(), "RENAME USER ru3 TO ru4") + c.Assert(err, ErrorMatches, ".*Access denied; you need .at least one of. the CREATE USER privilege.s. for this operation") + mustExec(c, rootSe, "GRANT CREATE USER ON *.* TO 'ru1'@'localhost'") + _, err = se1.ExecuteInternal(context.Background(), "RENAME USER ru3 TO ru4") + c.Assert(err, IsNil) + + // Test a few single rename (both Username and Hostname) + _, err = se1.ExecuteInternal(context.Background(), "RENAME USER 'ru4'@'%' TO 'ru3'@'localhost'") + c.Assert(err, IsNil) + _, err = se1.ExecuteInternal(context.Background(), "RENAME USER 'ru3'@'localhost' TO 'ru3'@'%'") + c.Assert(err, IsNil) + // Including negative tests, i.e. non existing from user and existing to user + _, err = rootSe.ExecuteInternal(context.Background(), "RENAME USER ru3 TO ru1@localhost") + c.Assert(err, ErrorMatches, ".*Operation RENAME USER failed for ru3@%.*") + _, err = se1.ExecuteInternal(context.Background(), "RENAME USER ru4 TO ru5@localhost") + c.Assert(err, ErrorMatches, ".*Operation RENAME USER failed for ru4@%.*") + _, err = se1.ExecuteInternal(context.Background(), "RENAME USER ru3 TO ru3") + c.Assert(err, ErrorMatches, ".*Operation RENAME USER failed for ru3@%.*") + _, err = se1.ExecuteInternal(context.Background(), "RENAME USER ru3 TO ru5@localhost, ru4 TO ru7") + c.Assert(err, ErrorMatches, ".*Operation RENAME USER failed for ru4@%.*") + _, err = se1.ExecuteInternal(context.Background(), "RENAME USER ru3 TO ru5@localhost, ru6@localhost TO ru1@localhost") + c.Assert(err, ErrorMatches, ".*Operation RENAME USER failed for ru6@localhost.*") + + // Test multi rename, this is a full swap of ru3 and ru6, i.e. need to read its previous state in the same transaction. + _, err = se1.ExecuteInternal(context.Background(), "RENAME USER 'ru3' TO 'ru3_tmp', ru6@localhost TO ru3, 'ru3_tmp' to ru6@localhost") + c.Assert(err, IsNil) + + // Cleanup + mustExec(c, rootSe, "DROP USER ru6@localhost") + mustExec(c, rootSe, "DROP USER ru3") + mustExec(c, rootSe, "DROP USER 'ru1'@'localhost'") +} + func (s *testPrivilegeSuite) TestSecurityEnhancedModeSysVars(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("CREATE USER svroot1, svroot2") diff --git a/session/pessimistic_test.go b/session/pessimistic_test.go index 2e8c01c75577b..58cf8a624fe54 100644 --- a/session/pessimistic_test.go +++ b/session/pessimistic_test.go @@ -16,6 +16,7 @@ package session_test import ( "context" "fmt" + "strconv" "strings" "sync" "sync/atomic" @@ -24,6 +25,7 @@ import ( . 
"github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/parser" "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/config" @@ -37,6 +39,7 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/deadlockhistory" "github.com/pingcap/tidb/util/testkit" ) @@ -171,27 +174,33 @@ func (s *testPessimisticSuite) TestTxnMode(c *C) { } func (s *testPessimisticSuite) TestDeadlock(c *C) { - tk := testkit.NewTestKitWithInit(c, s.store) - tk.MustExec("drop table if exists deadlock") - tk.MustExec("create table deadlock (k int primary key, v int)") - tk.MustExec("insert into deadlock values (1, 1), (2, 1)") + deadlockhistory.GlobalDeadlockHistory.Clear() + + tk1 := testkit.NewTestKitWithInit(c, s.store) + tk1.MustExec("drop table if exists deadlock") + tk1.MustExec("create table deadlock (k int primary key, v int)") + tk1.MustExec("insert into deadlock values (1, 1), (2, 1)") + tk1.MustExec("begin pessimistic") + tk1.MustExec("update deadlock set v = v + 1 where k = 1") + ts1, err := strconv.ParseUint(tk1.MustQuery("select @@tidb_current_ts").Rows()[0][0].(string), 10, 64) + c.Assert(err, IsNil) + + tk2 := testkit.NewTestKitWithInit(c, s.store) + tk2.MustExec("begin pessimistic") + ts2, err := strconv.ParseUint(tk2.MustQuery("select @@tidb_current_ts").Rows()[0][0].(string), 10, 64) + c.Assert(err, IsNil) syncCh := make(chan error) go func() { - tk1 := testkit.NewTestKitWithInit(c, s.store) - tk1.MustExec("begin pessimistic") - tk1.MustExec("update deadlock set v = v + 1 where k = 2") + tk2.MustExec("update deadlock set v = v + 1 where k = 2") syncCh <- nil - _, err := tk1.Exec("update deadlock set v = v + 1 where k = 1") + _, err := tk2.Exec("update deadlock set v = v + 1 where k = 1") syncCh <- err }() - tk.MustExec("begin pessimistic") - tk.MustExec("update deadlock set v = v + 1 where k = 1") <-syncCh - _, err1 := tk.Exec("update deadlock set v = v + 1 where k = 2") + _, err1 := tk1.Exec("update deadlock set v = v + 1 where k = 2") err2 := <-syncCh // Either err1 or err2 is deadlock error. - var err error if err1 != nil { c.Assert(err2, IsNil) err = err1 @@ -201,6 +210,21 @@ func (s *testPessimisticSuite) TestDeadlock(c *C) { e, ok := errors.Cause(err).(*terror.Error) c.Assert(ok, IsTrue) c.Assert(int(e.Code()), Equals, mysql.ErrLockDeadlock) + + _, digest := parser.NormalizeDigest("update deadlock set v = v + 1 where k = 1") + + expectedDeadlockInfo := []string{ + fmt.Sprintf("%v %v %v", ts1, ts2, digest), + fmt.Sprintf("%v %v %v", ts2, ts1, digest), + } + // The last one is the transaction that encountered the deadlock error. + if err1 != nil { + // Swap the two to match the correct order. 
+		expectedDeadlockInfo[0], expectedDeadlockInfo[1] = expectedDeadlockInfo[1], expectedDeadlockInfo[0]
+	}
+	res := tk1.MustQuery("select deadlock_id, try_lock_trx_id, trx_holding_lock, current_sql_digest from information_schema.deadlocks")
+	res.CheckAt([]int{1, 2, 3}, testkit.Rows(expectedDeadlockInfo...))
+	c.Assert(res.Rows()[0][0], Equals, res.Rows()[1][0])
 }

 func (s *testPessimisticSuite) TestSingleStatementRollback(c *C) {
diff --git a/session/session.go b/session/session.go
index 902f35ca28e79..f116daf96dd04 100644
--- a/session/session.go
+++ b/session/session.go
@@ -2862,7 +2862,8 @@ func logStmt(execStmt *executor.ExecStmt, vars *variable.SessionVars) {
 	switch stmt := execStmt.StmtNode.(type) {
 	case *ast.CreateUserStmt, *ast.DropUserStmt, *ast.AlterUserStmt, *ast.SetPwdStmt, *ast.GrantStmt,
 		*ast.RevokeStmt, *ast.AlterTableStmt, *ast.CreateDatabaseStmt, *ast.CreateIndexStmt, *ast.CreateTableStmt,
-		*ast.DropDatabaseStmt, *ast.DropIndexStmt, *ast.DropTableStmt, *ast.RenameTableStmt, *ast.TruncateTableStmt:
+		*ast.DropDatabaseStmt, *ast.DropIndexStmt, *ast.DropTableStmt, *ast.RenameTableStmt, *ast.TruncateTableStmt,
+		*ast.RenameUserStmt:
 		user := vars.User
 		schemaVersion := vars.GetInfoSchema().SchemaMetaVersion()
 		if ss, ok := execStmt.StmtNode.(ast.SensitiveStmtNode); ok {
diff --git a/session/session_fail_test.go b/session/session_fail_test.go
index 12f49e0ed1abf..3488592051b9f 100644
--- a/session/session_fail_test.go
+++ b/session/session_fail_test.go
@@ -15,11 +15,11 @@ package session_test
 import (
 	"context"
-	"sync/atomic"
+	"strings"

 	. "github.com/pingcap/check"
 	"github.com/pingcap/failpoint"
-	"github.com/pingcap/tidb/store/tikv"
+	"github.com/pingcap/tidb/session"
 	"github.com/pingcap/tidb/util/testkit"
 )

@@ -79,20 +79,19 @@ func (s *testSessionSerialSuite) TestGetTSFailDirtyStateInretry(c *C) {

 func (s *testSessionSerialSuite) TestKillFlagInBackoff(c *C) {
 	// This test checks the `killed` flag is passed down to the backoffer through
-	// session.KVVars. It works by setting the `killed = 3` first, then using
-	// failpoint to run backoff() and check the vars.Killed using the Hook() function.
+	// session.KVVars.
 	tk := testkit.NewTestKitWithInit(c, s.store)
 	tk.MustExec("create table kill_backoff (id int)")
-	var killValue uint32
-	tk.Se.GetSessionVars().KVVars.Hook = func(name string, vars *tikv.Variables) {
-		killValue = atomic.LoadUint32(vars.Killed)
-	}
-	c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/tikvStoreSendReqResult", `return("callBackofferHook")`), IsNil)
+	// Inject a timeout once. If `Killed` is not passed down successfully, the request
+	// will be retried and the query will complete.
+	c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/tikvStoreSendReqResult", `return("timeout")->return("")`), IsNil)
 	defer failpoint.Disable("github.com/pingcap/tidb/store/tikv/tikvStoreSendReqResult")
 	// Set kill flag and check its passed to backoffer.
-	tk.Se.GetSessionVars().Killed = 3
-	tk.MustQuery("select * from kill_backoff")
-	c.Assert(killValue, Equals, uint32(3))
+	tk.Se.GetSessionVars().Killed = 1
+	rs, err := tk.Exec("select * from kill_backoff")
+	c.Assert(err, IsNil)
+	_, err = session.ResultSetToStringSlice(context.TODO(), tk.Se, rs)
+	// `interrupted` is returned when `Killed` is set.
+	c.Assert(strings.Contains(err.Error(), "Query execution was interrupted"), IsTrue)
 }
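The rewritten `TestKillFlagInBackoff` no longer observes the backoffer through a hook; it injects a single timeout and relies on the retry path itself to notice `Killed` and abort with the `interrupted` error. The behaviour it asserts reduces to the following pattern (a minimal sketch; the names and the constant backoff are illustrative, not the real `store/tikv` backoffer API):

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

var errInterrupted = errors.New("Query execution was interrupted")

// retryWithBackoff retries fn until it succeeds, but gives up immediately once
// the shared killed flag becomes non-zero, mirroring how the session's Killed
// flag is consulted between retries.
func retryWithBackoff(fn func() error, killed *uint32) error {
	for {
		if atomic.LoadUint32(killed) != 0 {
			return errInterrupted
		}
		if err := fn(); err == nil {
			return nil
		}
		time.Sleep(10 * time.Millisecond) // constant backoff, simplified
	}
}

func main() {
	var killed uint32
	atomic.StoreUint32(&killed, 1) // plays the role of tk.Se.GetSessionVars().Killed = 1
	timeoutOnce := func() error { return errors.New("timeout") }
	fmt.Println(retryWithBackoff(timeoutOnce, &killed)) // Query execution was interrupted
}
```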

 func (s *testSessionSerialSuite) TestClusterTableSendError(c *C) {
diff --git a/session/tidb.go b/session/tidb.go
index 85732b457f7a6..583c5074e6805 100644
--- a/session/tidb.go
+++ b/session/tidb.go
@@ -239,7 +239,7 @@ func autoCommitAfterStmt(ctx context.Context, se *session, meetsErr error, sql s
 	sessVars := se.sessionVars
 	if meetsErr != nil {
 		if !sessVars.InTxn() {
-			logutil.BgLogger().Info("rollbackTxn for ddl/autocommit failed")
+			logutil.BgLogger().Info("rollbackTxn called due to ddl/autocommit failure")
 			se.RollbackTxn(ctx)
 			recordAbortTxnDuration(sessVars)
 		} else if se.txn.Valid() && se.txn.IsPessimistic() && executor.ErrDeadlock.Equal(meetsErr) {
diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go
index 99ff4a09f7af6..1df9e3c0f582c 100644
--- a/sessionctx/variable/sysvar.go
+++ b/sessionctx/variable/sysvar.go
@@ -491,12 +491,21 @@ func UnregisterSysVar(name string) {
 	sysVarsLock.Unlock()
 }

+// Clone deep copies the sysvar struct to avoid a race
+func (sv *SysVar) Clone() *SysVar {
+	dst := *sv
+	return &dst
+}
+
 // GetSysVar returns sys var info for name as key.
 func GetSysVar(name string) *SysVar {
 	name = strings.ToLower(name)
 	sysVarsLock.RLock()
 	defer sysVarsLock.RUnlock()
-	return sysVars[name]
+	if sysVars[name] == nil {
+		return nil
+	}
+	return sysVars[name].Clone()
 }

 // SetSysVar sets a sysvar. This will not propagate to the cluster, so it should only be
@@ -514,7 +523,7 @@ func GetSysVars() map[string]*SysVar {
 	defer sysVarsLock.RUnlock()
 	copy := make(map[string]*SysVar, len(sysVars))
 	for name, sv := range sysVars {
-		copy[name] = sv
+		copy[name] = sv.Clone()
 	}
 	return copy
 }
diff --git a/sessionctx/variable/sysvar_test.go b/sessionctx/variable/sysvar_test.go
index 71979a57b7eef..d236dc142f1dd 100644
--- a/sessionctx/variable/sysvar_test.go
+++ b/sessionctx/variable/sysvar_test.go
@@ -555,3 +555,30 @@ func (*testSysVarSuite) TestInstanceScopedVars(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(val, Equals, vars.TxnScope.GetVarValue())
 }
+
+// Calling GetSysVars/GetSysVar needs to return a deep copy, otherwise there will be data races.
+// This is a bit unfortunate, since the only time the race occurs is in the testsuite (Enabling/Disabling SEM) and
+// during startup (setting the .Value of ScopeNone variables). In the future it might also be possible
+// to fix this by delaying the LoadSysVarCacheLoop start time until after the server is fully initialized.
+func (*testSysVarSuite) TestDeepCopyGetSysVars(c *C) {
+	// Check GetSysVar
+	sv := SysVar{Scope: ScopeGlobal | ScopeSession, Name: "datarace", Value: On, Type: TypeBool}
+	RegisterSysVar(&sv)
+	svcopy := GetSysVar("datarace")
+	svcopy.Name = "datarace2"
+	c.Assert(sv.Name, Equals, "datarace")
+	c.Assert(GetSysVar("datarace").Name, Equals, "datarace")
+	UnregisterSysVar("datarace")
+
+	// Check GetSysVars
+	sv = SysVar{Scope: ScopeGlobal | ScopeSession, Name: "datarace", Value: On, Type: TypeBool}
+	RegisterSysVar(&sv)
+	for name, svcopy := range GetSysVars() {
+		if name == "datarace" {
+			svcopy.Name = "datarace2"
+		}
+	}
+	c.Assert(sv.Name, Equals, "datarace")
+	c.Assert(GetSysVar("datarace").Name, Equals, "datarace")
+	UnregisterSysVar("datarace")
+}
diff --git a/statistics/handle/update_test.go b/statistics/handle/update_test.go
index a1de28e78eeef..a8924021c5846 100644
--- a/statistics/handle/update_test.go
+++ b/statistics/handle/update_test.go
@@ -1453,7 +1453,7 @@ func (s *testStatsSuite) TestNeedAnalyzeTable(c *C) {
 	}{
 		// table was never analyzed and has reach the limit
 		{
-			tbl:   &statistics.Table{Version: oracle.EncodeTSO(oracle.GetPhysical(time.Now()))},
+			tbl:   &statistics.Table{Version: oracle.GoTimeToTS(time.Now())},
 			limit: 0,
 			ratio: 0,
 			start: "00:00 +0800",
@@ -1464,7 +1464,7 @@
 		},
 		// table was never analyzed but has not reach the limit
 		{
-			tbl:   &statistics.Table{Version: oracle.EncodeTSO(oracle.GetPhysical(time.Now()))},
+			tbl:   &statistics.Table{Version: oracle.GoTimeToTS(time.Now())},
 			limit: time.Hour,
 			ratio: 0,
 			start: "00:00 +0800",
diff --git a/store/gcworker/gc_worker_test.go b/store/gcworker/gc_worker_test.go
index 39abe369f82fb..2beef12da62c0 100644
--- a/store/gcworker/gc_worker_test.go
+++ b/store/gcworker/gc_worker_test.go
@@ -259,10 +259,10 @@ func (s *testGCWorkerSuite) TestMinStartTS(c *C) {
 		strconv.FormatUint(now, 10))
 	c.Assert(err, IsNil)
 	err = spkv.Put(fmt.Sprintf("%s/%s", infosync.ServerMinStartTSPath, "b"),
-		strconv.FormatUint(now-oracle.EncodeTSO(20000), 10))
+		strconv.FormatUint(now-oracle.ComposeTS(20000, 0), 10))
 	c.Assert(err, IsNil)
-	sp = s.gcWorker.calcSafePointByMinStartTS(ctx, now-oracle.EncodeTSO(10000))
-	c.Assert(sp, Equals, now-oracle.EncodeTSO(20000)-1)
+	sp = s.gcWorker.calcSafePointByMinStartTS(ctx, now-oracle.ComposeTS(10000, 0))
+	c.Assert(sp, Equals, now-oracle.ComposeTS(20000, 0)-1)
 }

 func (s *testGCWorkerSuite) TestPrepareGC(c *C) {
diff --git a/store/tikv/2pc.go b/store/tikv/2pc.go
index 14609f5f77400..c01d97981dd09 100644
--- a/store/tikv/2pc.go
+++ b/store/tikv/2pc.go
@@ -1428,7 +1428,7 @@ func (c *twoPhaseCommitter) checkSchemaValid(ctx context.Context, checkTS uint64

 func (c *twoPhaseCommitter) calculateMaxCommitTS(ctx context.Context) error {
 	// Amend txn with current time first, then we can make sure we have another SafeWindow time to commit
-	currentTS := oracle.EncodeTSO(int64(time.Since(c.txn.startTime)/time.Millisecond)) + c.startTS
+	currentTS := oracle.ComposeTS(int64(time.Since(c.txn.startTime)/time.Millisecond), 0) + c.startTS
 	_, _, err := c.checkSchemaValid(ctx, currentTS, c.txn.schemaVer, true)
 	if err != nil {
 		logutil.Logger(ctx).Info("Schema changed for async commit txn",
@@ -1438,7 +1438,7 @@
 	}

 	safeWindow := config.GetGlobalConfig().TiKVClient.AsyncCommit.SafeWindow
-	maxCommitTS := oracle.EncodeTSO(int64(safeWindow/time.Millisecond)) + currentTS
+	maxCommitTS := oracle.ComposeTS(int64(safeWindow/time.Millisecond), 0) + currentTS
 	logutil.BgLogger().Debug("calculate MaxCommitTS",
 		zap.Time("startTime", c.txn.startTime),
 		zap.Duration("safeWindow", safeWindow),
diff --git a/store/tikv/kv/kv.go b/store/tikv/kv/kv.go
index eea6fd796888b..0d900d6facddb 100644
--- a/store/tikv/kv/kv.go
+++ b/store/tikv/kv/kv.go
@@ -4,6 +4,7 @@ import (
 	"sync"
 	"time"

+	tikverr "github.com/pingcap/tidb/store/tikv/error"
 	"github.com/pingcap/tidb/store/tikv/util"
 )

@@ -28,6 +29,7 @@ type LockCtx struct {
 	LockExpired      *uint32
 	Stats            *util.LockKeysDetails
 	ResourceGroupTag []byte
+	OnDeadlock       func(*tikverr.ErrDeadlock)
 }

 // InitReturnValues creates the map to store returned value.
diff --git a/store/tikv/kv/variables.go b/store/tikv/kv/variables.go
index b722023bcae08..5e7a4c83b669a 100644
--- a/store/tikv/kv/variables.go
+++ b/store/tikv/kv/variables.go
@@ -21,9 +21,6 @@ type Variables struct {
 	// BackOffWeight specifies the weight of the max back off time duration.
 	BackOffWeight int

-	// Hook is used for test to verify the variable take effect.
-	Hook func(name string, vars *Variables)
-
 	// Pointer to SessionVars.Killed
 	// Killed is a flag to indicate that this query is killed.
 	Killed *uint32
diff --git a/store/tikv/latch/latch_test.go b/store/tikv/latch/latch_test.go
index 4b10c118883a6..ce53794d44f12 100644
--- a/store/tikv/latch/latch_test.go
+++ b/store/tikv/latch/latch_test.go
@@ -110,7 +110,7 @@ func (s *testLatchSuite) TestFirstAcquireFailedWithStale(c *C) {

 func (s *testLatchSuite) TestRecycle(c *C) {
 	latches := NewLatches(8)
 	now := time.Now()
-	startTS := oracle.ComposeTS(oracle.GetPhysical(now), 0)
+	startTS := oracle.GoTimeToTS(now)
 	lock := latches.genLock(startTS, [][]byte{
 		[]byte("a"), []byte("b"),
 	})
@@ -142,7 +142,7 @@
 	}
 	c.Assert(allEmpty, IsFalse)

-	currentTS := oracle.ComposeTS(oracle.GetPhysical(now.Add(expireDuration)), 3)
+	currentTS := oracle.GoTimeToTS(now.Add(expireDuration)) + 3
 	latches.recycle(currentTS)
 	for i := 0; i < len(latches.slots); i++ {
diff --git a/store/tikv/oracle/oracle.go b/store/tikv/oracle/oracle.go
index 1b08129d412aa..fd95f1b357013 100644
--- a/store/tikv/oracle/oracle.go
+++ b/store/tikv/oracle/oracle.go
@@ -16,10 +16,6 @@ package oracle
 import (
 	"context"
 	"time"
-
-	"github.com/pingcap/failpoint"
-	"github.com/pingcap/tidb/store/tikv/logutil"
-	"go.uber.org/zap"
 )

 // Option represents available options for the oracle.Oracle.
@@ -53,19 +49,6 @@ const (

 // ComposeTS creates a ts from physical and logical parts.
 func ComposeTS(physical, logical int64) uint64 {
-	failpoint.Inject("changeTSFromPD", func(val failpoint.Value) {
-		valInt, ok := val.(int)
-		if ok {
-			origPhyTS := physical
-			logical := logical
-			newPhyTs := origPhyTS + int64(valInt)
-			origTS := uint64((physical << physicalShiftBits) + logical)
-			newTS := uint64((newPhyTs << physicalShiftBits) + logical)
-			logutil.BgLogger().Warn("ComposeTS failpoint", zap.Uint64("origTS", origTS),
-				zap.Int("valInt", valInt), zap.Uint64("ts", newTS))
-			failpoint.Return(newTS)
-		}
-	})
 	return uint64((physical << physicalShiftBits) + logical)
 }

@@ -84,11 +67,6 @@ func GetPhysical(t time.Time) int64 {
 	return t.UnixNano() / int64(time.Millisecond)
 }

-// EncodeTSO encodes a millisecond into tso.
-func EncodeTSO(ts int64) uint64 {
-	return uint64(ts) << physicalShiftBits
-}
-
 // GetTimeFromTS extracts time.Time from a timestamp.
func GetTimeFromTS(ts uint64) time.Time { ms := ExtractPhysical(ts) diff --git a/store/tikv/oracle/oracles/local.go b/store/tikv/oracle/oracles/local.go index 4fcd7cbc51d78..8c7f8a30de645 100644 --- a/store/tikv/oracle/oracles/local.go +++ b/store/tikv/oracle/oracles/local.go @@ -42,7 +42,8 @@ func (l *localOracle) IsExpired(lockTS, TTL uint64, _ *oracle.Option) bool { if l.hook != nil { now = l.hook.currentTime } - return oracle.GetPhysical(now) >= oracle.ExtractPhysical(lockTS)+int64(TTL) + expire := oracle.GetTimeFromTS(lockTS).Add(time.Duration(TTL) * time.Millisecond) + return !now.Before(expire) } func (l *localOracle) GetTimestamp(ctx context.Context, _ *oracle.Option) (uint64, error) { @@ -52,8 +53,7 @@ func (l *localOracle) GetTimestamp(ctx context.Context, _ *oracle.Option) (uint6 if l.hook != nil { now = l.hook.currentTime } - physical := oracle.GetPhysical(now) - ts := oracle.ComposeTS(physical, 0) + ts := oracle.GoTimeToTS(now) if l.lastTimeStampTS == ts { l.n++ return ts + l.n, nil @@ -80,9 +80,7 @@ func (l *localOracle) GetLowResolutionTimestampAsync(ctx context.Context, opt *o // GetStaleTimestamp return physical func (l *localOracle) GetStaleTimestamp(ctx context.Context, txnScope string, prevSecond uint64) (ts uint64, err error) { - physical := oracle.GetPhysical(time.Now().Add(-time.Second * time.Duration(prevSecond))) - ts = oracle.ComposeTS(physical, 0) - return ts, nil + return oracle.GoTimeToTS(time.Now().Add(-time.Second * time.Duration(prevSecond))), nil } type future struct { diff --git a/store/tikv/oracle/oracles/mock.go b/store/tikv/oracle/oracles/mock.go index 2afd962fb5c42..b1eabe57feb37 100644 --- a/store/tikv/oracle/oracles/mock.go +++ b/store/tikv/oracle/oracles/mock.go @@ -62,9 +62,8 @@ func (o *MockOracle) GetTimestamp(ctx context.Context, _ *oracle.Option) (uint64 if o.stop { return 0, errors.Trace(errStopped) } - physical := oracle.GetPhysical(time.Now().Add(o.offset)) - ts := oracle.ComposeTS(physical, 0) - if oracle.ExtractPhysical(o.lastTS) == physical { + ts := oracle.GoTimeToTS(time.Now().Add(o.offset)) + if oracle.ExtractPhysical(o.lastTS) == oracle.ExtractPhysical(ts) { ts = o.lastTS + 1 } o.lastTS = ts @@ -73,9 +72,7 @@ func (o *MockOracle) GetTimestamp(ctx context.Context, _ *oracle.Option) (uint64 // GetStaleTimestamp implements oracle.Oracle interface. func (o *MockOracle) GetStaleTimestamp(ctx context.Context, txnScope string, prevSecond uint64) (ts uint64, err error) { - physical := oracle.GetPhysical(time.Now().Add(-time.Second * time.Duration(prevSecond))) - ts = oracle.ComposeTS(physical, 0) - return ts, nil + return oracle.GoTimeToTS(time.Now().Add(-time.Second * time.Duration(prevSecond))), nil } type mockOracleFuture struct { @@ -106,15 +103,16 @@ func (o *MockOracle) GetLowResolutionTimestampAsync(ctx context.Context, opt *or func (o *MockOracle) IsExpired(lockTimestamp, TTL uint64, _ *oracle.Option) bool { o.RLock() defer o.RUnlock() - - return oracle.GetPhysical(time.Now().Add(o.offset)) >= oracle.ExtractPhysical(lockTimestamp)+int64(TTL) + expire := oracle.GetTimeFromTS(lockTimestamp).Add(time.Duration(TTL) * time.Millisecond) + return !time.Now().Add(o.offset).Before(expire) } // UntilExpired implement oracle.Oracle interface. 
func (o *MockOracle) UntilExpired(lockTimeStamp, TTL uint64, _ *oracle.Option) int64 {
 	o.RLock()
 	defer o.RUnlock()
-	return oracle.ExtractPhysical(lockTimeStamp) + int64(TTL) - oracle.GetPhysical(time.Now().Add(o.offset))
+	expire := oracle.GetTimeFromTS(lockTimeStamp).Add(time.Duration(TTL) * time.Millisecond)
+	return expire.Sub(time.Now().Add(o.offset)).Milliseconds()
 }

 // Close implements oracle.Oracle interface.
diff --git a/store/tikv/oracle/oracles/pd.go b/store/tikv/oracle/oracles/pd.go
index 063f73e343ce0..907dc278d71cb 100644
--- a/store/tikv/oracle/oracles/pd.go
+++ b/store/tikv/oracle/oracles/pd.go
@@ -135,7 +135,7 @@ func (o *pdOracle) getTimestamp(ctx context.Context, txnScope string) (uint64, e
 }

 func (o *pdOracle) getArrivalTimestamp() uint64 {
-	return oracle.ComposeTS(oracle.GetPhysical(time.Now()), 0)
+	return oracle.GoTimeToTS(time.Now())
 }

 func (o *pdOracle) setLastTS(ts uint64, txnScope string) {
@@ -288,7 +288,7 @@ func (o *pdOracle) getStaleTimestamp(txnScope string, prevSecond uint64) (uint64

 	staleTime := physicalTime.Add(-arrivalTime.Sub(time.Now().Add(-time.Duration(prevSecond) * time.Second)))

-	return oracle.ComposeTS(oracle.GetPhysical(staleTime), 0), nil
+	return oracle.GoTimeToTS(staleTime), nil
 }

 // GetStaleTimestamp generate a TSO which represents for the TSO prevSecond secs ago.
diff --git a/store/tikv/oracle/oracles/pd_test.go b/store/tikv/oracle/oracles/pd_test.go
index 4e881a82126b5..2894a782e505f 100644
--- a/store/tikv/oracle/oracles/pd_test.go
+++ b/store/tikv/oracle/oracles/pd_test.go
@@ -36,8 +36,8 @@ func (s *testPDSuite) TestPDOracle_UntilExpired(c *C) {
 	lockAfter, lockExp := 10, 15
 	o := oracles.NewEmptyPDOracle()
 	start := time.Now()
-	oracles.SetEmptyPDOracleLastTs(o, oracle.ComposeTS(oracle.GetPhysical(start), 0))
-	lockTs := oracle.ComposeTS(oracle.GetPhysical(start.Add(time.Duration(lockAfter)*time.Millisecond)), 1)
+	oracles.SetEmptyPDOracleLastTs(o, oracle.GoTimeToTS(start))
+	lockTs := oracle.GoTimeToTS((start.Add(time.Duration(lockAfter) * time.Millisecond))) + 1
 	waitTs := o.UntilExpired(lockTs, uint64(lockExp), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
 	c.Assert(waitTs, Equals, int64(lockAfter+lockExp), Commentf("waitTs shoulb be %d but got %d", int64(lockAfter+lockExp), waitTs))
 }
@@ -45,7 +45,7 @@
 func (s *testPDSuite) TestPdOracle_GetStaleTimestamp(c *C) {
 	o := oracles.NewEmptyPDOracle()
 	start := time.Now()
-	oracles.SetEmptyPDOracleLastTs(o, oracle.ComposeTS(oracle.GetPhysical(start), 0))
+	oracles.SetEmptyPDOracleLastTs(o, oracle.GoTimeToTS(start))

 	ts, err := o.GetStaleTimestamp(context.Background(), oracle.GlobalTxnScope, 10)
 	c.Assert(err, IsNil)
@@ -75,7 +75,7 @@
 	for _, testcase := range testcases {
 		comment := Commentf("%s", testcase.name)
 		start = time.Now()
-		oracles.SetEmptyPDOracleLastTs(o, oracle.ComposeTS(oracle.GetPhysical(start), 0))
+		oracles.SetEmptyPDOracleLastTs(o, oracle.GoTimeToTS(start))
 		ts, err = o.GetStaleTimestamp(context.Background(), oracle.GlobalTxnScope, testcase.preSec)
 		if testcase.expectErr == "" {
 			c.Assert(err, IsNil, comment)
diff --git a/store/tikv/region_request.go b/store/tikv/region_request.go
index b8b61aac05fc8..f42f7add092db 100644
--- a/store/tikv/region_request.go
+++ b/store/tikv/region_request.go
@@ -248,8 +248,6 @@ func (s *RegionRequestSender) SendReqCtx(
 				Resp: &kvrpcpb.GCResponse{RegionError: &errorpb.Error{ServerIsBusy: &errorpb.ServerIsBusy{}}},
 			}, nil, nil)
 		}
-	case "callBackofferHook":
-		bo.SetVarsHook("callBackofferHook", bo.GetVars())
 	case "requestTiDBStoreError":
 		if et == tikvrpc.TiDB {
 			failpoint.Return(nil, nil, tikverr.ErrTiKVServerTimeout)
diff --git a/store/tikv/retry/backoff.go b/store/tikv/retry/backoff.go
index a563ec7359d22..9e7a527c69caa 100644
--- a/store/tikv/retry/backoff.go
+++ b/store/tikv/retry/backoff.go
@@ -135,9 +135,6 @@ const (
 )

 func (t BackoffType) createFn(vars *kv.Variables) func(context.Context, int) int {
-	if vars.Hook != nil {
-		vars.Hook(t.String(), vars)
-	}
 	switch t {
 	case boTiKVRPC, BoTiFlashRPC:
 		return NewBackoffFn(100, 2000, EqualJitter)
@@ -431,10 +428,3 @@ func (b *Backoffer) GetBackoffSleepMS() map[BackoffType]int {
 func (b *Backoffer) ErrorsNum() int {
 	return len(b.errors)
 }
-
-// SetVarsHook sets the vars.Hook is used for test to verify the variable take effect.
-func (b *Backoffer) SetVarsHook(name string, vars *kv.Variables) {
-	if b.vars != nil && b.vars.Hook != nil {
-		b.vars.Hook(name, vars)
-	}
-}
diff --git a/store/tikv/tests/2pc_test.go b/store/tikv/tests/2pc_test.go
index 75e554f42e9ae..8fea337bfd61b 100644
--- a/store/tikv/tests/2pc_test.go
+++ b/store/tikv/tests/2pc_test.go
@@ -723,7 +723,7 @@ func (s *testCommitterSuite) TestPessimisticLockReturnValues(c *C) {

 func (s *testCommitterSuite) TestElapsedTTL(c *C) {
 	key := []byte("key")
 	txn := s.begin(c)
-	txn.SetStartTS(oracle.ComposeTS(oracle.GetPhysical(time.Now().Add(time.Second*10)), 1))
+	txn.SetStartTS(oracle.GoTimeToTS(time.Now().Add(time.Second*10)) + 1)
 	txn.SetPessimistic(true)
 	time.Sleep(time.Millisecond * 100)
 	lockCtx := &kv.LockCtx{
diff --git a/store/tikv/tests/store_test.go b/store/tikv/tests/store_test.go
index 659dc6ea8f226..f8055a96e4fb9 100644
--- a/store/tikv/tests/store_test.go
+++ b/store/tikv/tests/store_test.go
@@ -19,7 +19,6 @@ import (
 	"time"

 	. "github.com/pingcap/check"
-	"github.com/pingcap/failpoint"
 	pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
 	"github.com/pingcap/tidb/store/tikv"
 	"github.com/pingcap/tidb/store/tikv/oracle"
@@ -154,20 +153,3 @@ func (s *testStoreSuite) TestRequestPriority(c *C) {
 	}
 	iter.Close()
 }
-
-func (s *testStoreSerialSuite) TestOracleChangeByFailpoint(c *C) {
-	defer func() {
-		failpoint.Disable("github.com/pingcap/tidb/store/tikv/oracle/changeTSFromPD")
-	}()
-	c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/oracle/changeTSFromPD",
-		"return(10000)"), IsNil)
-	o := &oracles.MockOracle{}
-	s.store.SetOracle(o)
-	ctx := context.Background()
-	t1, err := s.store.GetTimestampWithRetry(tikv.NewBackofferWithVars(ctx, 100, nil), oracle.GlobalTxnScope)
-	c.Assert(err, IsNil)
-	c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/oracle/changeTSFromPD"), IsNil)
-	t2, err := s.store.GetTimestampWithRetry(tikv.NewBackofferWithVars(ctx, 100, nil), oracle.GlobalTxnScope)
-	c.Assert(err, IsNil)
-	c.Assert(t1, Greater, t2)
-}
diff --git a/store/tikv/txn.go b/store/tikv/txn.go
index d228b834e00dc..988f6501be553 100644
--- a/store/tikv/txn.go
+++ b/store/tikv/txn.go
@@ -642,8 +642,13 @@ func (txn *KVTxn) LockKeys(ctx context.Context, lockCtx *tikv.LockCtx, keysInput
 			keyMayBeLocked := !(tikverr.IsErrWriteConflict(err) || tikverr.IsErrKeyExist(err))
 			// If there is only 1 key and lock fails, no need to do pessimistic rollback.
 			if len(keys) > 1 || keyMayBeLocked {
+				dl, ok := errors.Cause(err).(*tikverr.ErrDeadlock)
+				if ok && lockCtx.OnDeadlock != nil {
+					// Call OnDeadlock before pessimistic rollback.
+					lockCtx.OnDeadlock(dl)
+				}
 				wg := txn.asyncPessimisticRollback(ctx, keys)
-				if dl, ok := errors.Cause(err).(*tikverr.ErrDeadlock); ok {
+				if ok {
 					logutil.Logger(ctx).Debug("deadlock error received", zap.Uint64("startTS", txn.startTS), zap.Stringer("deadlockInfo", dl))
 					if hashInKeys(dl.DeadlockKeyHash, keys) {
 						dl.IsRetryable = true
diff --git a/util/deadlockhistory/deadlock_history.go b/util/deadlockhistory/deadlock_history.go
new file mode 100644
index 0000000000000..ddb78067ffe7c
--- /dev/null
+++ b/util/deadlockhistory/deadlock_history.go
@@ -0,0 +1,198 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package deadlockhistory
+
+import (
+	"encoding/hex"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/pingcap/parser/mysql"
+	tikverr "github.com/pingcap/tidb/store/tikv/error"
+	"github.com/pingcap/tidb/types"
+	"github.com/pingcap/tidb/util/logutil"
+	"github.com/pingcap/tidb/util/resourcegrouptag"
+	"go.uber.org/zap"
+)
+
+// WaitChainItem represents an entry in a deadlock's wait chain.
+type WaitChainItem struct {
+	TryLockTxn     uint64
+	SQLDigest      string
+	Key            []byte
+	AllSQLs        []string
+	TxnHoldingLock uint64
+}
+
+// DeadlockRecord represents a deadlock event, and contains multiple transactions' information.
+type DeadlockRecord struct {
+	// The ID doesn't need to be set manually and it's set when it's added into the DeadlockHistory by invoking its Push
+	// method.
+	ID          uint64
+	OccurTime   time.Time
+	IsRetryable bool
+	WaitChain   []WaitChainItem
+}
+
+// DeadlockHistory is a collection for maintaining several recent deadlock events.
+type DeadlockHistory struct {
+	sync.RWMutex
+
+	deadlocks []*DeadlockRecord
+
+	// The `head` and `size` make the `deadlocks` array behave like a deque. The valid elements are
+	// deadlocks[head:head+size], or deadlocks[head:] + deadlocks[:head+size-len] if `head+size` exceeds the array's
+	// length.
+	head int
+	size int
+
+	// currentID is used to allocate IDs for deadlock records pushed to the queue. IDs are unique within this
+	// deadlock history queue instance.
+	currentID uint64
+}
+
+// NewDeadlockHistory creates an instance of DeadlockHistory
+func NewDeadlockHistory(capacity int) *DeadlockHistory {
+	return &DeadlockHistory{
+		deadlocks: make([]*DeadlockRecord, capacity),
+		currentID: 1,
+	}
+}
+
+// GlobalDeadlockHistory is the global instance of DeadlockHistory, which is used to maintain several recent
+// deadlock events globally.
+// TODO: Make the capacity configurable
+var GlobalDeadlockHistory = NewDeadlockHistory(10)
+
+// Push pushes an element into the queue. It will set the `ID` field of the record, and add the pointer directly to
+// the collection. Be aware that the record's content must not be modified after pushing.
+func (d *DeadlockHistory) Push(record *DeadlockRecord) {
+	d.Lock()
+	defer d.Unlock()
+
+	capacity := len(d.deadlocks)
+	if capacity == 0 {
+		return
+	}
+
+	record.ID = d.currentID
+	d.currentID++
+
+	if d.size == capacity {
+		// The current head is popped and its cell becomes the latest pushed item.
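+		// For example, with capacity 3, head = 1 and size = 3, the logical order is
+		// deadlocks[1], deadlocks[2], deadlocks[0]; the assignment below overwrites
+		// the oldest element deadlocks[1], and head advances to 2.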
+		d.deadlocks[d.head] = record
+		d.head = (d.head + 1) % capacity
+	} else if d.size < capacity {
+		d.deadlocks[(d.head+d.size)%capacity] = record
+		d.size++
+	} else {
+		panic("unreachable")
+	}
+}
+
+// GetAll gets all collected deadlock events.
+func (d *DeadlockHistory) GetAll() []*DeadlockRecord {
+	d.RLock()
+	defer d.RUnlock()
+
+	res := make([]*DeadlockRecord, 0, d.size)
+	capacity := len(d.deadlocks)
+	if d.head+d.size <= capacity {
+		res = append(res, d.deadlocks[d.head:d.head+d.size]...)
+	} else {
+		res = append(res, d.deadlocks[d.head:]...)
+		res = append(res, d.deadlocks[:(d.head+d.size)%capacity]...)
+	}
+	return res
+}
+
+// GetAllDatum gets all collected deadlock events, and converts them into datums that match the definition of the
+// table `INFORMATION_SCHEMA.DEADLOCKS`.
+func (d *DeadlockHistory) GetAllDatum() [][]types.Datum {
+	records := d.GetAll()
+	rowsCount := 0
+	for _, rec := range records {
+		rowsCount += len(rec.WaitChain)
+	}
+
+	rows := make([][]types.Datum, 0, rowsCount)
+
+	row := make([]interface{}, 8)
+	for _, rec := range records {
+		row[0] = rec.ID
+		row[1] = types.NewTime(types.FromGoTime(rec.OccurTime), mysql.TypeTimestamp, types.MaxFsp)
+		row[2] = rec.IsRetryable
+
+		for _, item := range rec.WaitChain {
+			row[3] = item.TryLockTxn
+
+			row[4] = nil
+			if len(item.SQLDigest) > 0 {
+				row[4] = item.SQLDigest
+			}
+
+			row[5] = nil
+			if len(item.Key) > 0 {
+				row[5] = strings.ToUpper(hex.EncodeToString(item.Key))
+			}
+
+			row[6] = nil
+			if item.AllSQLs != nil {
+				row[6] = "[" + strings.Join(item.AllSQLs, ", ") + "]"
+			}
+
+			row[7] = item.TxnHoldingLock
+
+			rows = append(rows, types.MakeDatums(row...))
+		}
+	}
+
+	return rows
+}
+
+// Clear clears all collected deadlock events from the history.
+func (d *DeadlockHistory) Clear() {
+	d.Lock()
+	defer d.Unlock()
+	for i := 0; i < len(d.deadlocks); i++ {
+		d.deadlocks[i] = nil
+	}
+	d.head = 0
+	d.size = 0
+}
+
+// ErrDeadlockToDeadlockRecord generates a DeadlockRecord from the information in a `tikverr.ErrDeadlock` error.
+func ErrDeadlockToDeadlockRecord(dl *tikverr.ErrDeadlock) *DeadlockRecord {
+	waitChain := make([]WaitChainItem, 0, len(dl.WaitChain))
+	for _, rawItem := range dl.WaitChain {
+		sqlDigest, err := resourcegrouptag.DecodeResourceGroupTag(rawItem.ResourceGroupTag)
+		if err != nil {
+			logutil.BgLogger().Warn("decoding resource group tag encounters error", zap.Error(err))
+		}
+		waitChain = append(waitChain, WaitChainItem{
+			TryLockTxn:     rawItem.Txn,
+			SQLDigest:      sqlDigest,
+			Key:            rawItem.Key,
+			AllSQLs:        nil,
+			TxnHoldingLock: rawItem.WaitForTxn,
+		})
+	}
+	rec := &DeadlockRecord{
+		OccurTime:   time.Now(),
+		IsRetryable: dl.IsRetryable,
+		WaitChain:   waitChain,
+	}
+	return rec
+}
diff --git a/util/deadlockhistory/deadlock_history_test.go b/util/deadlockhistory/deadlock_history_test.go
new file mode 100644
index 0000000000000..35cbb6c8513cd
--- /dev/null
+++ b/util/deadlockhistory/deadlock_history_test.go
@@ -0,0 +1,277 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package deadlockhistory
+
+import (
+	"testing"
+	"time"
+
+	. "github.com/pingcap/check"
"github.com/pingcap/check" + "github.com/pingcap/kvproto/pkg/deadlock" + "github.com/pingcap/kvproto/pkg/kvrpcpb" + tikverr "github.com/pingcap/tidb/store/tikv/error" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/resourcegrouptag" +) + +type testDeadlockHistorySuite struct{} + +var _ = Suite(&testDeadlockHistorySuite{}) + +func TestT(t *testing.T) { + TestingT(t) +} + +func (s *testDeadlockHistorySuite) TestDeadlockHistoryCollection(c *C) { + h := NewDeadlockHistory(1) + c.Assert(len(h.GetAll()), Equals, 0) + c.Assert(h.head, Equals, 0) + c.Assert(h.size, Equals, 0) + + rec1 := &DeadlockRecord{ + OccurTime: time.Now(), + } + h.Push(rec1) + res := h.GetAll() + c.Assert(len(res), Equals, 1) + c.Assert(res[0], Equals, rec1) // Checking pointer equals is ok. + c.Assert(res[0].ID, Equals, uint64(1)) + c.Assert(h.head, Equals, 0) + c.Assert(h.size, Equals, 1) + + rec2 := &DeadlockRecord{ + OccurTime: time.Now(), + } + h.Push(rec2) + res = h.GetAll() + c.Assert(len(res), Equals, 1) + c.Assert(res[0], Equals, rec2) + c.Assert(res[0].ID, Equals, uint64(2)) + c.Assert(h.head, Equals, 0) + c.Assert(h.size, Equals, 1) + + h.Clear() + c.Assert(len(h.GetAll()), Equals, 0) + + h = NewDeadlockHistory(3) + rec1 = &DeadlockRecord{ + OccurTime: time.Now(), + } + h.Push(rec1) + res = h.GetAll() + c.Assert(len(res), Equals, 1) + c.Assert(res[0], Equals, rec1) // Checking pointer equals is ok. + c.Assert(res[0].ID, Equals, uint64(1)) + c.Assert(h.head, Equals, 0) + c.Assert(h.size, Equals, 1) + + rec2 = &DeadlockRecord{ + OccurTime: time.Now(), + } + h.Push(rec2) + res = h.GetAll() + c.Assert(len(res), Equals, 2) + c.Assert(res[0], Equals, rec1) + c.Assert(res[0].ID, Equals, uint64(1)) + c.Assert(res[1], Equals, rec2) + c.Assert(res[1].ID, Equals, uint64(2)) + c.Assert(h.head, Equals, 0) + c.Assert(h.size, Equals, 2) + + rec3 := &DeadlockRecord{ + OccurTime: time.Now(), + } + h.Push(rec3) + res = h.GetAll() + c.Assert(len(res), Equals, 3) + c.Assert(res[0], Equals, rec1) + c.Assert(res[0].ID, Equals, uint64(1)) + c.Assert(res[1], Equals, rec2) + c.Assert(res[1].ID, Equals, uint64(2)) + c.Assert(res[2], Equals, rec3) + c.Assert(res[2].ID, Equals, uint64(3)) + c.Assert(h.head, Equals, 0) + c.Assert(h.size, Equals, 3) + + // Continuously pushing items to check the correctness of the deque + expectedItems := []*DeadlockRecord{rec1, rec2, rec3} + expectedIDs := []uint64{1, 2, 3} + expectedDequeHead := 0 + for i := 0; i < 6; i++ { + newRec := &DeadlockRecord{ + OccurTime: time.Now(), + } + h.Push(newRec) + + expectedItems = append(expectedItems[1:], newRec) + for idx := range expectedIDs { + expectedIDs[idx]++ + } + expectedDequeHead = (expectedDequeHead + 1) % 3 + + res = h.GetAll() + c.Assert(len(res), Equals, 3) + for idx, item := range res { + c.Assert(item, Equals, expectedItems[idx]) + c.Assert(item.ID, Equals, expectedIDs[idx]) + } + c.Assert(h.head, Equals, expectedDequeHead) + c.Assert(h.size, Equals, 3) + } + + h.Clear() + c.Assert(len(h.GetAll()), Equals, 0) +} + +func (s *testDeadlockHistorySuite) TestGetDatum(c *C) { + time1 := time.Date(2021, 05, 14, 15, 28, 30, 123456000, time.UTC) + time2 := time.Date(2022, 06, 15, 16, 29, 31, 123457000, time.UTC) + + h := NewDeadlockHistory(10) + h.Push(&DeadlockRecord{ + OccurTime: time1, + IsRetryable: false, + WaitChain: []WaitChainItem{ + { + TryLockTxn: 101, + SQLDigest: "sql1", + Key: []byte("k1"), + AllSQLs: []string{"sql1", "sql2"}, + TxnHoldingLock: 102, + }, + // It should work even some information are missing. 
+			{
+				TryLockTxn:     102,
+				TxnHoldingLock: 101,
+			},
+		},
+	})
+	h.Push(&DeadlockRecord{
+		OccurTime:   time2,
+		IsRetryable: true,
+		WaitChain: []WaitChainItem{
+			{
+				TryLockTxn:     201,
+				AllSQLs:        []string{},
+				TxnHoldingLock: 202,
+			},
+			{
+				TryLockTxn:     202,
+				AllSQLs:        []string{"sql1"},
+				TxnHoldingLock: 201,
+			},
+		},
+	})
+	// A deadlock error without a wait chain shows nothing in the query result.
+	h.Push(&DeadlockRecord{
+		OccurTime:   time.Now(),
+		IsRetryable: false,
+		WaitChain:   nil,
+	})
+
+	res := h.GetAllDatum()
+	c.Assert(len(res), Equals, 4)
+	for _, row := range res {
+		c.Assert(len(row), Equals, 8)
+	}
+
+	toGoTime := func(d types.Datum) time.Time {
+		v, ok := d.GetValue().(types.Time)
+		c.Assert(ok, IsTrue)
+		t, err := v.GoTime(time.UTC)
+		c.Assert(err, IsNil)
+		return t
+	}
+
+	c.Assert(res[0][0].GetValue(), Equals, uint64(1))      // ID
+	c.Assert(toGoTime(res[0][1]), Equals, time1)           // OCCUR_TIME
+	c.Assert(res[0][2].GetValue(), Equals, int64(0))       // RETRYABLE
+	c.Assert(res[0][3].GetValue(), Equals, uint64(101))    // TRY_LOCK_TRX_ID
+	c.Assert(res[0][4].GetValue(), Equals, "sql1")         // SQL_DIGEST
+	c.Assert(res[0][5].GetValue(), Equals, "6B31")         // KEY
+	c.Assert(res[0][6].GetValue(), Equals, "[sql1, sql2]") // ALL_SQLS
+	c.Assert(res[0][7].GetValue(), Equals, uint64(102))    // TRX_HOLDING_LOCK
+
+	c.Assert(res[1][0].GetValue(), Equals, uint64(1))   // ID
+	c.Assert(toGoTime(res[1][1]), Equals, time1)        // OCCUR_TIME
+	c.Assert(res[1][2].GetValue(), Equals, int64(0))    // RETRYABLE
+	c.Assert(res[1][3].GetValue(), Equals, uint64(102)) // TRY_LOCK_TRX_ID
+	c.Assert(res[1][4].GetValue(), Equals, nil)         // SQL_DIGEST
+	c.Assert(res[1][5].GetValue(), Equals, nil)         // KEY
+	c.Assert(res[1][6].GetValue(), Equals, nil)         // ALL_SQLS
+	c.Assert(res[1][7].GetValue(), Equals, uint64(101)) // TRX_HOLDING_LOCK
+
+	c.Assert(res[2][0].GetValue(), Equals, uint64(2))   // ID
+	c.Assert(toGoTime(res[2][1]), Equals, time2)        // OCCUR_TIME
+	c.Assert(res[2][2].GetValue(), Equals, int64(1))    // RETRYABLE
+	c.Assert(res[2][3].GetValue(), Equals, uint64(201)) // TRY_LOCK_TRX_ID
+	c.Assert(res[2][6].GetValue(), Equals, "[]")        // ALL_SQLS
+	c.Assert(res[2][7].GetValue(), Equals, uint64(202)) // TRX_HOLDING_LOCK
+
+	c.Assert(res[3][0].GetValue(), Equals, uint64(2))   // ID
+	c.Assert(toGoTime(res[3][1]), Equals, time2)        // OCCUR_TIME
+	c.Assert(res[3][2].GetValue(), Equals, int64(1))    // RETRYABLE
+	c.Assert(res[3][3].GetValue(), Equals, uint64(202)) // TRY_LOCK_TRX_ID
+	c.Assert(res[3][6].GetValue(), Equals, "[sql1]")    // ALL_SQLS
+	c.Assert(res[3][7].GetValue(), Equals, uint64(201)) // TRX_HOLDING_LOCK
+}
+
+func (s *testDeadlockHistorySuite) TestErrDeadlockToDeadlockRecord(c *C) {
+	err := &tikverr.ErrDeadlock{
+		Deadlock: &kvrpcpb.Deadlock{
+			LockTs:          101,
+			LockKey:         []byte("k1"),
+			DeadlockKeyHash: 1234567,
+			WaitChain: []*deadlock.WaitForEntry{
+				{
+					Txn:              100,
+					WaitForTxn:       101,
+					Key:              []byte("k2"),
+					ResourceGroupTag: resourcegrouptag.EncodeResourceGroupTag("aabbccdd"),
+				},
+				{
+					Txn:              101,
+					WaitForTxn:       100,
+					Key:              []byte("k1"),
+					ResourceGroupTag: resourcegrouptag.EncodeResourceGroupTag("ddccbbaa"),
+				},
+			},
+		},
+		IsRetryable: true,
+	}
+
+	expectedRecord := &DeadlockRecord{
+		IsRetryable: true,
+		WaitChain: []WaitChainItem{
+			{
+				TryLockTxn:     100,
+				SQLDigest:      "aabbccdd",
+				Key:            []byte("k2"),
+				TxnHoldingLock: 101,
+			},
+			{
+				TryLockTxn:     101,
+				SQLDigest:      "ddccbbaa",
+				Key:            []byte("k1"),
+				TxnHoldingLock: 100,
+			},
+		},
+	}
+
+	record := ErrDeadlockToDeadlockRecord(err)
+	// The OccurTime is set to time.Now() inside ErrDeadlockToDeadlockRecord,
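+	// so the test can only sanity-check that it is recent, then copy it into the
+	// expected record before the deep comparison.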
+	c.Assert(time.Since(record.OccurTime), Less, time.Millisecond*5)
+	expectedRecord.OccurTime = record.OccurTime
+	c.Assert(record, DeepEquals, expectedRecord)
+}
diff --git a/util/ranger/points.go b/util/ranger/points.go
index 46a4283dd3222..3931a2c42b9fe 100644
--- a/util/ranger/points.go
+++ b/util/ranger/points.go
@@ -460,7 +460,7 @@ func handleEnumFromBinOp(sc *stmtctx.StatementContext, ft *types.FieldType, val
 	for i := range ft.Elems {
 		tmpEnum.Name = ft.Elems[i]
 		tmpEnum.Value = uint64(i) + 1
-		d := types.NewMysqlEnumDatum(tmpEnum)
+		d := types.NewCollateMysqlEnumDatum(tmpEnum, ft.Collate)
 		if v, err := d.CompareDatum(sc, &val); err == nil {
 			switch op {
 			case ast.LT:
diff --git a/util/ranger/ranger_test.go b/util/ranger/ranger_test.go
index ed4722566033a..b0b66e8a469c1 100644
--- a/util/ranger/ranger_test.go
+++ b/util/ranger/ranger_test.go
@@ -347,12 +347,14 @@ create table t(
 	d varchar(10),
 	e binary(10),
 	f varchar(10) collate utf8mb4_general_ci,
+	g enum('A','B','C') collate utf8mb4_general_ci,
 	index idx_ab(a(50), b),
 	index idx_cb(c, a),
 	index idx_d(d(2)),
 	index idx_e(e(2)),
 	index idx_f(f),
-	index idx_de(d(2), e)
+	index idx_de(d(2), e),
+	index idx_g(g)
 )`)

 	tests := []struct {
@@ -628,6 +630,13 @@ create table t(
 			filterConds: "[in(test.t.d, aab, aac)]",
 			resultStr:   "[[\"aa\" 0x61,\"aa\" 0x61]]",
 		},
+		{
+			indexPos:    6,
+			exprStr:     "g = 'a'",
+			accessConds: "[eq(test.t.g, a)]",
+			filterConds: "[]",
+			resultStr:   "[[\"A\",\"A\"]]",
+		},
 	}

 	collate.SetNewCollationEnabledForTest(true)