diff --git a/config/config.go b/config/config.go
index 297b24708cebd..8a12976a6dad3 100644
--- a/config/config.go
+++ b/config/config.go
@@ -289,6 +289,7 @@ var defaultConf = Config{
 		Level:              "info",
 		Format:             "text",
 		File:               logutil.NewFileLogConfig(true, logutil.DefaultLogMaxSize),
+		SlowQueryFile:      "tidb-slow.log",
 		SlowThreshold:      logutil.DefaultSlowThreshold,
 		ExpensiveThreshold: 10000,
 		QueryLogMaxLen:     logutil.DefaultQueryLogMaxLen,
diff --git a/config/config.toml.example b/config/config.toml.example
index 47895d3efa783..e4b5eed642e7a 100644
--- a/config/config.toml.example
+++ b/config/config.toml.example
@@ -59,7 +59,7 @@ format = "text"
 disable-timestamp = false
 
 # Stores slow query log into separated files.
-slow-query-file = ""
+slow-query-file = "tidb-slow.log"
 
 # Queries with execution time greater than this value will be logged. (Milliseconds)
 slow-threshold = 300
diff --git a/executor/adapter.go b/executor/adapter.go
index a1ef38f17e194..0c19f0410a3d7 100644
--- a/executor/adapter.go
+++ b/executor/adapter.go
@@ -379,41 +379,24 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
 	sessVars := a.Ctx.GetSessionVars()
-	sql = QueryReplacer.Replace(sql)
-	connID := sessVars.ConnectionID
-	currentDB := sessVars.CurrentDB
+	sql = QueryReplacer.Replace(sql) + sessVars.GetExecuteArgumentsInfo()
 	var tableIDs, indexIDs string
 	if len(sessVars.StmtCtx.TableIDs) > 0 {
-		tableIDs = strings.Replace(fmt.Sprintf("table_ids:%v ", a.Ctx.GetSessionVars().StmtCtx.TableIDs), " ", ",", -1)
+		tableIDs = strings.Replace(fmt.Sprintf("%v", a.Ctx.GetSessionVars().StmtCtx.TableIDs), " ", ",", -1)
 	}
 	if len(sessVars.StmtCtx.IndexIDs) > 0 {
-		indexIDs = strings.Replace(fmt.Sprintf("index_ids:%v ", a.Ctx.GetSessionVars().StmtCtx.IndexIDs), " ", ",", -1)
-	}
-	user := sessVars.User
-	var internal string
-	if sessVars.InRestrictedSQL {
-		internal = "[INTERNAL] "
+		indexIDs = strings.Replace(fmt.Sprintf("%v", a.Ctx.GetSessionVars().StmtCtx.IndexIDs), " ", ",", -1)
 	}
 	execDetail := sessVars.StmtCtx.GetExecDetails()
 	if costTime < threshold {
-		logutil.SlowQueryLogger.Debugf(
-			"[QUERY] %vcost_time:%vs %s succ:%v con:%v user:%s txn_start_ts:%v database:%v %v%vsql:%v",
-			internal, costTime.Seconds(), execDetail, succ, connID, user, txnTS, currentDB, tableIDs, indexIDs, sql)
+		logutil.SlowQueryLogger.Debugf(sessVars.SlowLogFormat(txnTS, costTime, execDetail, indexIDs, sql))
 	} else {
-		logutil.SlowQueryLogger.Warnf(
-			"[SLOW_QUERY] %vcost_time:%vs %s succ:%v con:%v user:%s txn_start_ts:%v database:%v %v%vsql:%v",
-			internal, costTime.Seconds(), execDetail, succ, connID, user, txnTS, currentDB, tableIDs, indexIDs, sql)
+		logutil.SlowQueryLogger.Warnf(sessVars.SlowLogFormat(txnTS, costTime, execDetail, indexIDs, sql))
 		metrics.TotalQueryProcHistogram.Observe(costTime.Seconds())
 		metrics.TotalCopProcHistogram.Observe(execDetail.ProcessTime.Seconds())
 		metrics.TotalCopWaitHistogram.Observe(execDetail.WaitTime.Seconds())
 		var userString string
-		if user != nil {
-			userString = user.String()
-		}
-		if len(tableIDs) > 10 {
-			tableIDs = tableIDs[10 : len(tableIDs)-1] // Remove "table_ids:" and the last ","
-		}
-		if len(indexIDs) > 10 {
-			indexIDs = indexIDs[10 : len(indexIDs)-1] // Remove "index_ids:" and the last ","
+		if sessVars.User != nil {
+			userString = sessVars.User.String()
 		}
 		domain.GetDomain(a.Ctx).LogSlowQuery(&domain.SlowQueryInfo{
 			SQL:      sql,
@@ -421,10 +404,10 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
 			Duration: costTime,
 			Detail:   sessVars.StmtCtx.GetExecDetails(),
 			Succ:     succ,
-			ConnID:   connID,
+			ConnID:   sessVars.ConnectionID,
 			TxnTS:    txnTS,
 			User:     userString,
-			DB:       currentDB,
+			DB:       sessVars.CurrentDB,
 			TableIDs: tableIDs,
 			IndexIDs: indexIDs,
 			Internal: sessVars.InRestrictedSQL,
diff --git a/infoschema/slow_log.go b/infoschema/slow_log.go
new file mode 100644
index 0000000000000..292a7f02314a4
--- /dev/null
+++ b/infoschema/slow_log.go
@@ -0,0 +1,252 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package infoschema
+
+import (
+	"bufio"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/parser/mysql"
+	"github.com/pingcap/tidb/sessionctx"
+	"github.com/pingcap/tidb/sessionctx/variable"
+	"github.com/pingcap/tidb/types"
+	"github.com/pingcap/tidb/util/execdetails"
+	"github.com/pingcap/tidb/util/hack"
+	"github.com/pingcap/tidb/util/logutil"
+	log "github.com/sirupsen/logrus"
+)
+
+var slowQueryCols = []columnInfo{
+	{variable.SlowLogTimeStr, mysql.TypeTimestamp, 26, 0, nil, nil},
+	{variable.SlowLogTxnStartTSStr, mysql.TypeLonglong, 20, mysql.UnsignedFlag, nil, nil},
+	{variable.SlowLogUserStr, mysql.TypeVarchar, 64, 0, nil, nil},
+	{variable.SlowLogConnIDStr, mysql.TypeLonglong, 20, mysql.UnsignedFlag, nil, nil},
+	{variable.SlowLogQueryTimeStr, mysql.TypeDouble, 22, 0, nil, nil},
+	{execdetails.ProcessTimeStr, mysql.TypeDouble, 22, 0, nil, nil},
+	{execdetails.WaitTimeStr, mysql.TypeDouble, 22, 0, nil, nil},
+	{execdetails.BackoffTimeStr, mysql.TypeDouble, 22, 0, nil, nil},
+	{execdetails.RequestCountStr, mysql.TypeLonglong, 20, mysql.UnsignedFlag, nil, nil},
+	{execdetails.TotalKeysStr, mysql.TypeLonglong, 20, mysql.UnsignedFlag, nil, nil},
+	{execdetails.ProcessKeysStr, mysql.TypeLonglong, 20, mysql.UnsignedFlag, nil, nil},
+	{variable.SlowLogDBStr, mysql.TypeVarchar, 64, 0, nil, nil},
+	{variable.SlowLogIndexIDsStr, mysql.TypeVarchar, 100, 0, nil, nil},
+	{variable.SlowLogIsInternalStr, mysql.TypeTiny, 1, 0, nil, nil},
+	{variable.SlowLogQuerySQLStr, mysql.TypeVarchar, 4096, 0, nil, nil},
+}
+
+func dataForSlowLog(ctx sessionctx.Context) ([][]types.Datum, error) {
+	return parseSlowLogFile(ctx.GetSessionVars().Location(), ctx.GetSessionVars().SlowQueryFile)
+}
+
+// parseSlowLogFile is used to parse the slow log file.
+// TODO: Support parsing multiple log files.
+func parseSlowLogFile(tz *time.Location, filePath string) ([][]types.Datum, error) {
+	file, err := os.Open(filePath)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	defer func() {
+		if err = file.Close(); err != nil {
+			log.Error(err)
+		}
+	}()
+
+	return ParseSlowLog(tz, bufio.NewScanner(file))
+}
+
+// ParseSlowLog is exported for testing.
+// TODO: Optimize the parsing of huge log files.
+func ParseSlowLog(tz *time.Location, scanner *bufio.Scanner) ([][]types.Datum, error) {
+	var rows [][]types.Datum
+	startFlag := false
+	var st *slowQueryTuple
+	var err error
+	for scanner.Scan() {
+		line := scanner.Text()
+		// Check slow log entry start flag.
+		if !startFlag && strings.HasPrefix(line, variable.SlowLogStartPrefixStr) {
+			st = &slowQueryTuple{}
+			err = st.setFieldValue(tz, variable.SlowLogTimeStr, line[len(variable.SlowLogStartPrefixStr):])
+			if err != nil {
+				return rows, err
+			}
+			startFlag = true
+			continue
+		}
+
+		if startFlag {
+			// Parse slow log field.
+			if strings.Contains(line, variable.SlowLogPrefixStr) {
+				line = line[len(variable.SlowLogPrefixStr):]
+				fieldValues := strings.Split(line, " ")
+				for i := 0; i < len(fieldValues)-1; i += 2 {
+					field := fieldValues[i]
+					if strings.HasSuffix(field, ":") {
+						field = field[:len(field)-1]
+					}
+					err = st.setFieldValue(tz, field, fieldValues[i+1])
+					if err != nil {
+						return rows, err
+					}
+				}
+			} else if strings.HasSuffix(line, variable.SlowLogSQLSuffixStr) {
+				// Get the sql string, and mark the start flag to false.
+				err = st.setFieldValue(tz, variable.SlowLogQuerySQLStr, string(hack.Slice(line)))
+				if err != nil {
+					return rows, err
+				}
+				rows = append(rows, st.convertToDatumRow())
+				startFlag = false
+			}
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, errors.AddStack(err)
+	}
+	return rows, nil
+}
+
+type slowQueryTuple struct {
+	time         time.Time
+	txnStartTs   uint64
+	user         string
+	connID       uint64
+	queryTime    float64
+	processTime  float64
+	waitTime     float64
+	backOffTime  float64
+	requestCount uint64
+	totalKeys    uint64
+	processKeys  uint64
+	db           string
+	indexNames   string
+	isInternal   bool
+	sql          string
+}
+
+func (st *slowQueryTuple) setFieldValue(tz *time.Location, field, value string) error {
+	switch field {
+	case variable.SlowLogTimeStr:
+		t, err := ParseTime(value)
+		if err != nil {
+			return err
+		}
+		if t.Location() != tz {
+			t = t.In(tz)
+		}
+		st.time = t
+	case variable.SlowLogTxnStartTSStr:
+		num, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return errors.AddStack(err)
+		}
+		st.txnStartTs = num
+	case variable.SlowLogUserStr:
+		st.user = value
+	case variable.SlowLogConnIDStr:
+		num, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return errors.AddStack(err)
+		}
+		st.connID = num
+	case variable.SlowLogQueryTimeStr:
+		num, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return errors.AddStack(err)
+		}
+		st.queryTime = num
+	case execdetails.ProcessTimeStr:
+		num, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return errors.AddStack(err)
+		}
+		st.processTime = num
+	case execdetails.WaitTimeStr:
+		num, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return errors.AddStack(err)
+		}
+		st.waitTime = num
+	case execdetails.BackoffTimeStr:
+		num, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return errors.AddStack(err)
+		}
+		st.backOffTime = num
+	case execdetails.RequestCountStr:
+		num, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return errors.AddStack(err)
+		}
+		st.requestCount = num
+	case execdetails.TotalKeysStr:
+		num, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return errors.AddStack(err)
+		}
+		st.totalKeys = num
+	case execdetails.ProcessKeysStr:
+		num, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return errors.AddStack(err)
+		}
+		st.processKeys = num
+	case variable.SlowLogDBStr:
+		st.db = value
+	case variable.SlowLogIndexIDsStr:
+		st.indexNames = value
+	case variable.SlowLogIsInternalStr:
+		st.isInternal = value == "true"
+	case variable.SlowLogQuerySQLStr:
+		st.sql = value
+	}
+	return nil
+}
+
+func (st *slowQueryTuple) convertToDatumRow() []types.Datum {
+	record := make([]types.Datum, 0, len(slowQueryCols))
+	record = append(record, types.NewTimeDatum(types.Time{
+		Time: types.FromGoTime(st.time),
+		Type: mysql.TypeDatetime,
+		Fsp:  types.MaxFsp,
+	}))
+	record = append(record, types.NewUintDatum(st.txnStartTs))
+	record = append(record, types.NewStringDatum(st.user))
+	record = append(record, types.NewUintDatum(st.connID))
+	record = append(record, types.NewFloat64Datum(st.queryTime))
+	record = append(record, types.NewFloat64Datum(st.processTime))
+	record = append(record, types.NewFloat64Datum(st.waitTime))
+	record = append(record, types.NewFloat64Datum(st.backOffTime))
+	record = append(record, types.NewUintDatum(st.requestCount))
+	record = append(record, types.NewUintDatum(st.totalKeys))
+	record = append(record, types.NewUintDatum(st.processKeys))
+	record = append(record, types.NewStringDatum(st.db))
+	record = append(record, types.NewStringDatum(st.indexNames))
+	record = append(record, types.NewDatum(st.isInternal))
+	record = append(record, types.NewStringDatum(st.sql))
+	return record
+}
+
+// ParseTime is exported for testing.
+func ParseTime(s string) (time.Time, error) {
+	t, err := time.Parse(logutil.SlowLogTimeFormat, s)
+	if err != nil {
+		err = errors.Errorf("string \"%v\" doesn't have a prefix that matches format \"%v\", err: %v", s, logutil.SlowLogTimeFormat, err)
+	}
+	return t, err
+}
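Reviewer note: the entry layout `ParseSlowLog` consumes is simple enough to exercise outside the repository. Below is a minimal standalone sketch (not part of this diff; all names in it are hypothetical) of the same three-part scheme — a `# Time:` start line, `# Field: value` pairs where several pairs may share one line, and a terminating SQL line ending in `;`:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// entry is a hypothetical stand-in for slowQueryTuple: it keeps raw
// field values instead of typed datums.
type entry struct {
	time   string
	fields map[string]string
	sql    string
}

func parse(scanner *bufio.Scanner) []entry {
	var entries []entry
	var cur *entry
	for scanner.Scan() {
		line := scanner.Text()
		switch {
		case strings.HasPrefix(line, "# Time: "):
			// A "# Time: " row starts a new entry.
			cur = &entry{time: line[len("# Time: "):], fields: map[string]string{}}
		case cur != nil && strings.HasPrefix(line, "# "):
			// "Field: value" pairs, e.g. "# Process_time: 0.021 Request_count: 1".
			parts := strings.Split(line[len("# "):], " ")
			for i := 0; i+1 < len(parts); i += 2 {
				cur.fields[strings.TrimSuffix(parts[i], ":")] = parts[i+1]
			}
		case cur != nil && strings.HasSuffix(line, ";"):
			// The SQL line closes the entry.
			cur.sql = line
			entries = append(entries, *cur)
			cur = nil
		}
	}
	return entries
}

func main() {
	log := `# Time: 2019-01-24-22:32:29.313255 +0800
# Txn_start_ts: 405888132465033227
# Query_time: 0.216905
# Is_internal: true
select * from t;`
	for _, e := range parse(bufio.NewScanner(strings.NewReader(log))) {
		fmt.Println(e.time, e.fields["Query_time"], e.sql)
	}
}
```

The real parser differs mainly in converting each field into a typed `types.Datum` via `slowQueryTuple.setFieldValue`, as added above.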
"github.com/pingcap/check" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/util/logutil" +) + +func (s *testSuite) TestParseSlowLogFile(c *C) { + slowLog := bytes.NewBufferString( + `# Time: 2019-01-24-22:32:29.313255 +0800 +# Txn_start_ts: 405888132465033227 +# Query_time: 0.216905 +# Process_time: 0.021 Request_count: 1 Total_keys: 637 Processed_keys: 436 +# Is_internal: true +select * from t;`) + scanner := bufio.NewScanner(slowLog) + loc, err := time.LoadLocation("Asia/Shanghai") + c.Assert(err, IsNil) + rows, err := infoschema.ParseSlowLog(loc, scanner) + c.Assert(err, IsNil) + c.Assert(len(rows), Equals, 1) + recordString := "" + for i, value := range rows[0] { + str, err := value.ToString() + c.Assert(err, IsNil) + if i > 0 { + recordString += "," + } + recordString += str + } + expectRecordString := "2019-01-24 22:32:29.313255,405888132465033227,,0,0.216905,0.021,0,0,1,637,0,,,1,select * from t;" + c.Assert(expectRecordString, Equals, recordString) +} + +func (s *testSuite) TestSlowLogParseTime(c *C) { + t1Str := "2019-01-24-22:32:29.313255 +0800" + t2Str := "2019-01-24-22:32:29.313255" + t1, err := infoschema.ParseTime(t1Str) + c.Assert(err, IsNil) + loc, err := time.LoadLocation("Asia/Shanghai") + c.Assert(err, IsNil) + t2, err := time.ParseInLocation("2006-01-02-15:04:05.999999999", t2Str, loc) + c.Assert(err, IsNil) + c.Assert(t1.Unix(), Equals, t2.Unix()) + t1Format := t1.In(loc).Format(logutil.SlowLogTimeFormat) + c.Assert(t1Format, Equals, t1Str) +} diff --git a/infoschema/tables.go b/infoschema/tables.go index f147513642724..b402ab47c5d11 100644 --- a/infoschema/tables.go +++ b/infoschema/tables.go @@ -67,6 +67,7 @@ const ( tableCollationCharacterSetApplicability = "COLLATION_CHARACTER_SET_APPLICABILITY" tableProcesslist = "PROCESSLIST" tableTiDBIndexes = "TIDB_INDEXES" + tableSlowLog = "SLOW_QUERY" ) type columnInfo struct { @@ -1469,6 +1470,7 @@ var tableNameToColumns = map[string][]columnInfo{ tableCollationCharacterSetApplicability: tableCollationCharacterSetApplicabilityCols, tableProcesslist: tableProcesslistCols, tableTiDBIndexes: tableTiDBIndexesCols, + tableSlowLog: slowQueryCols, } func createInfoSchemaTable(handle *Handle, meta *model.TableInfo) *infoschemaTable { @@ -1560,6 +1562,8 @@ func (it *infoschemaTable) getRows(ctx sessionctx.Context, cols []*table.Column) fullRows = dataForCollationCharacterSetApplicability() case tableProcesslist: fullRows = dataForProcesslist(ctx) + case tableSlowLog: + fullRows, err = dataForSlowLog(ctx) } if err != nil { return nil, errors.Trace(err) diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index fd450cf134a1f..7a32c9789142d 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -14,6 +14,8 @@ package infoschema_test import ( + "fmt" + "os" "strconv" . 
"github.com/pingcap/check" @@ -24,6 +26,7 @@ import ( "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/tidb/util/testutil" ) func (s *testSuite) TestInfoschemaFieldValue(c *C) { @@ -228,3 +231,40 @@ func (s *testSuite) TestTableIDAndIndexID(c *C) { c.Assert(tblID, Greater, 0) tk.MustQuery("select * from information_schema.tidb_indexes where table_schema = 'test' and table_name = 't'").Check(testkit.Rows("test t 0 PRIMARY 1 a 0", "test t 1 k1 1 b 1")) } + +func (s *testSuite) TestSlowQuery(c *C) { + testleak.BeforeTest() + defer testleak.AfterTest(c)() + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + defer store.Close() + session.SetStatsLease(0) + do, err := session.BootstrapSession(store) + c.Assert(err, IsNil) + defer do.Close() + tk := testkit.NewTestKit(c, store) + // Prepare slow log file. + slowLogFileName := "tidb_slow.log" + f, err := os.OpenFile(slowLogFileName, os.O_CREATE|os.O_WRONLY, 0644) + c.Assert(err, IsNil) + defer os.Remove(slowLogFileName) + _, err = f.Write([]byte(`# Time: 2019-02-12-19:33:56.571953 +0800 +# Txn_start_ts: 406315658548871171 +# User: root@127.0.0.1 +# Conn_ID: 6 +# Query_time: 4.895492 +# Process_time: 0.161 Request_count: 1 Total_keys: 100001 Process_keys: 100000 +# DB: test +# Is_internal: false +select * from t_slim;`)) + c.Assert(f.Close(), IsNil) + c.Assert(err, IsNil) + + tk.MustExec(fmt.Sprintf("set @@tidb_slow_query_file='%v'", slowLogFileName)) + tk.MustExec("set time_zone = '+08:00';") + re := tk.MustQuery("select * from information_schema.slow_query") + re.Check(testutil.RowsWithSep("|", "2019-02-12 19:33:56.571953|406315658548871171|root@127.0.0.1|6|4.895492|0.161|0|0|1|100001|100000|test||0|select * from t_slim;")) + tk.MustExec("set time_zone = '+00:00';") + re = tk.MustQuery("select * from information_schema.slow_query") + re.Check(testutil.RowsWithSep("|", "2019-02-12 11:33:56.571953|406315658548871171|root@127.0.0.1|6|4.895492|0.161|0|0|1|100001|100000|test||0|select * from t_slim;")) +} diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 816b3e327694b..aa322eaf04656 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -14,6 +14,7 @@ package variable import ( + "bytes" "crypto/tls" "fmt" "strconv" @@ -36,6 +37,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/timeutil" ) @@ -332,6 +334,9 @@ type SessionVars struct { // CommandValue indicates which command current session is doing. CommandValue uint32 + + // SlowQueryFile indicates which slow query log file for SLOW_QUERY table to parse. + SlowQueryFile string } // NewSessionVars creates a session vars object. 
@@ -357,6 +362,7 @@ func NewSessionVars() *SessionVars {
 		EnableRadixJoin:       false,
 		L2CacheSize:           cpuid.CPU.Cache.L2,
 		CommandValue:          uint32(mysql.ComSleep),
+		SlowQueryFile:         config.GetGlobalConfig().Log.SlowQueryFile,
 	}
 	vars.Concurrency = Concurrency{
 		IndexLookupConcurrency: DefIndexLookupConcurrency,
@@ -693,6 +699,8 @@ func (s *SessionVars) SetSystemVar(name string, val string) error {
 		s.EnableWindowFunction = TiDBOptOn(val)
 	case TiDBCheckMb4ValueInUtf8:
 		config.GetGlobalConfig().CheckMb4ValueInUtf8 = TiDBOptOn(val)
+	case TiDBSlowQueryFile:
+		s.SlowQueryFile = val
 	}
 	s.systems[name] = val
 	return nil
@@ -805,3 +813,75 @@ type BatchSize struct {
 	// MaxChunkSize defines max row count of a Chunk during query execution.
 	MaxChunkSize int
 }
+
+const (
+	// SlowLogPrefixStr is the prefix of each slow log row.
+	SlowLogPrefixStr = "# "
+	// SlowLogSpaceMarkStr is the mark between a slow log field name and its value.
+	SlowLogSpaceMarkStr = ": "
+	// SlowLogSQLSuffixStr is the suffix of the SQL statement in the slow log.
+	SlowLogSQLSuffixStr = ";"
+	// SlowLogTimeStr is a slow log field name.
+	SlowLogTimeStr = "Time"
+	// SlowLogStartPrefixStr is the start row prefix of a slow log entry.
+	SlowLogStartPrefixStr = SlowLogPrefixStr + SlowLogTimeStr + SlowLogSpaceMarkStr
+	// SlowLogTxnStartTSStr is a slow log field name.
+	SlowLogTxnStartTSStr = "Txn_start_ts"
+	// SlowLogUserStr is a slow log field name.
+	SlowLogUserStr = "User"
+	// SlowLogConnIDStr is a slow log field name.
+	SlowLogConnIDStr = "Conn_ID"
+	// SlowLogQueryTimeStr is a slow log field name.
+	SlowLogQueryTimeStr = "Query_time"
+	// SlowLogDBStr is a slow log field name.
+	SlowLogDBStr = "DB"
+	// SlowLogIsInternalStr is a slow log field name.
+	SlowLogIsInternalStr = "Is_internal"
+	// SlowLogIndexIDsStr is a slow log field name.
+	SlowLogIndexIDsStr = "Index_ids"
+	// SlowLogQuerySQLStr is a slow log field name. It is used by the slow log
+	// table; the slow log itself does not print this field name, it prints the SQL directly.
+	SlowLogQuerySQLStr = "Query"
+)
+
+// SlowLogFormat is used to format the slow log.
+// The slow log output looks like:
+// # Time: 2019-02-12-19:33:56.571953 +0800
+// # Txn_start_ts: 406315658548871171
+// # User: root@127.0.0.1
+// # Conn_ID: 6
+// # Query_time: 4.895492
+// # Process_time: 0.161 Request_count: 1 Total_keys: 100001 Process_keys: 100000
+// # DB: test
+// # Index_ids: [1,2]
+// # Is_internal: false
+// select * from t_slim;
+func (s *SessionVars) SlowLogFormat(txnTS uint64, costTime time.Duration, execDetail execdetails.ExecDetails, indexIDs string, sql string) string {
+	var buf bytes.Buffer
+	execDetailStr := execDetail.String()
+	buf.WriteString(SlowLogPrefixStr + SlowLogTxnStartTSStr + SlowLogSpaceMarkStr + strconv.FormatUint(txnTS, 10) + "\n")
+	if s.User != nil {
+		buf.WriteString(SlowLogPrefixStr + SlowLogUserStr + SlowLogSpaceMarkStr + s.User.String() + "\n")
+	}
+	if s.ConnectionID != 0 {
+		buf.WriteString(SlowLogPrefixStr + SlowLogConnIDStr + SlowLogSpaceMarkStr + strconv.FormatUint(s.ConnectionID, 10) + "\n")
+	}
+	buf.WriteString(SlowLogPrefixStr + SlowLogQueryTimeStr + SlowLogSpaceMarkStr + strconv.FormatFloat(costTime.Seconds(), 'f', -1, 64) + "\n")
+	if len(execDetailStr) > 0 {
+		buf.WriteString(SlowLogPrefixStr + execDetailStr + "\n")
+	}
+	if len(s.CurrentDB) > 0 {
+		buf.WriteString(SlowLogPrefixStr + SlowLogDBStr + SlowLogSpaceMarkStr + s.CurrentDB + "\n")
+	}
+	if len(indexIDs) > 0 {
+		buf.WriteString(SlowLogPrefixStr + SlowLogIndexIDsStr + SlowLogSpaceMarkStr + indexIDs + "\n")
+	}
+	buf.WriteString(SlowLogPrefixStr + SlowLogIsInternalStr + SlowLogSpaceMarkStr + strconv.FormatBool(s.InRestrictedSQL) + "\n")
+	if len(sql) == 0 {
+		sql = ";"
+	}
+	buf.WriteString(sql)
+	if sql[len(sql)-1] != ';' {
+		buf.WriteString(";")
+	}
+	return buf.String()
+}
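Reviewer note: with the formatter and parser agreeing on this layout, the new virtual table reads like any other `INFORMATION_SCHEMA` table. A hedged client-side sketch, assuming a TiDB instance on `127.0.0.1:4000` and the `go-sql-driver/mysql` driver (both are assumptions, not part of this diff):

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // TiDB speaks the MySQL protocol.
)

func main() {
	// The DSN is illustrative; adjust user/host/port as needed.
	db, err := sql.Open("mysql", "root@tcp(127.0.0.1:4000)/information_schema")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ctx := context.Background()
	// tidb_slow_query_file is session-scoped, so pin one connection
	// for both the SET and the SELECT.
	conn, err := db.Conn(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if _, err = conn.ExecContext(ctx, "SET @@tidb_slow_query_file = 'tidb-slow.log'"); err != nil {
		log.Fatal(err)
	}
	rows, err := conn.QueryContext(ctx,
		"SELECT query_time, query FROM slow_query WHERE is_internal = 0 ORDER BY query_time DESC LIMIT 10")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var queryTime float64
		var query string
		if err = rows.Scan(&queryTime, &query); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%9.6fs  %s\n", queryTime, query)
	}
}
```

Since `database/sql` pools connections, running the `SET` and `SELECT` on a plain `*sql.DB` could hit two different sessions; pinning a `sql.Conn` avoids that.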
"github.com/pingcap/check" + "github.com/pingcap/parser/auth" + "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/mock" ) @@ -78,3 +82,36 @@ func (*testSessionSuite) TestSession(c *C) { c.Assert(ss.CopiedRows(), Equals, uint64(0)) c.Assert(ss.WarningCount(), Equals, uint16(0)) } + +func (*testSessionSuite) TestSlowLogFormat(c *C) { + ctx := mock.NewContext() + + seVar := ctx.GetSessionVars() + c.Assert(seVar, NotNil) + + seVar.User = &auth.UserIdentity{Username: "root", Hostname: "192.168.0.1"} + seVar.ConnectionID = 1 + seVar.CurrentDB = "test" + seVar.InRestrictedSQL = true + txnTS := uint64(406649736972468225) + costTime := time.Second + execDetail := execdetails.ExecDetails{ + ProcessTime: time.Second * time.Duration(2), + WaitTime: time.Minute, + BackoffTime: time.Millisecond, + RequestCount: 2, + TotalKeys: 10000, + ProcessedKeys: 20001, + } + resultString := `# Txn_start_ts: 406649736972468225 +# User: root@192.168.0.1 +# Conn_ID: 1 +# Query_time: 1 +# Process_time: 2 Wait_time: 60 Backoff_time: 0.001 Request_count: 2 Total_keys: 10000 Process_keys: 20001 +# DB: test +# Index_ids: [1,2] +# Is_internal: true +select * from t;` + logString := seVar.SlowLogFormat(txnTS, costTime, execDetail, "[1,2]", "select * from t") + c.Assert(logString, Equals, resultString) +} diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index ecbad582c5014..c2248ed24aa92 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -685,6 +685,7 @@ var defaultSysVars = []*SysVar{ {ScopeSession, TiDBForcePriority, mysql.Priority2Str[DefTiDBForcePriority]}, {ScopeSession, TiDBEnableRadixJoin, BoolToIntStr(DefTiDBUseRadixJoin)}, {ScopeSession, TiDBCheckMb4ValueInUtf8, BoolToIntStr(config.GetGlobalConfig().CheckMb4ValueInUtf8)}, + {ScopeSession, TiDBSlowQueryFile, ""}, } // SynonymsSysVariables is synonyms of system variables. diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index b6d98ea282c6c..6ad308e435e7f 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -236,6 +236,9 @@ const ( // tidb_enable_window_function is used to control whether to enable the window function. TiDBEnableWindowFunction = "tidb_enable_window_function" + + // SlowQueryFile indicates which slow query log file for SLOW_QUERY table to parse. + TiDBSlowQueryFile = "tidb_slow_query_file" ) // Default TiDB system variable values. diff --git a/tidb-server/main.go b/tidb-server/main.go index 5a3cb52dddf24..be9d60ccec7d2 100644 --- a/tidb-server/main.go +++ b/tidb-server/main.go @@ -457,6 +457,7 @@ func setGlobalVars() { variable.SysVars[variable.Port].Value = fmt.Sprintf("%d", cfg.Port) variable.SysVars[variable.Socket].Value = cfg.Socket variable.SysVars[variable.DataDir].Value = cfg.Path + variable.SysVars[variable.TiDBSlowQueryFile].Value = cfg.Log.SlowQueryFile // For CI environment we default enable prepare-plan-cache. plannercore.SetPreparedPlanCache(config.CheckTableBeforeDrop || cfg.PreparedPlanCache.Enabled) diff --git a/util/execdetails/execdetails.go b/util/execdetails/execdetails.go index 9b285b1c2863b..07a6e8a9090f5 100644 --- a/util/execdetails/execdetails.go +++ b/util/execdetails/execdetails.go @@ -16,6 +16,7 @@ package execdetails import ( "fmt" "sort" + "strconv" "strings" "sync" "sync/atomic" @@ -53,60 +54,75 @@ type CommitDetails struct { TxnRetry int } +const ( + // ProcessTimeStr represents the sum of process time of all the coprocessor tasks. 
+ ProcessTimeStr = "Process_time" + // WaitTimeStr means the time of all coprocessor wait. + WaitTimeStr = "Wait_time" + // BackoffTimeStr means the time of all back-off. + BackoffTimeStr = "Backoff_time" + // RequestCountStr means the request count. + RequestCountStr = "Request_count" + // TotalKeysStr means the total scan keys. + TotalKeysStr = "Total_keys" + // ProcessKeysStr means the total processed keys. + ProcessKeysStr = "Process_keys" +) + // String implements the fmt.Stringer interface. func (d ExecDetails) String() string { parts := make([]string, 0, 6) if d.ProcessTime > 0 { - parts = append(parts, fmt.Sprintf("process_time:%vs", d.ProcessTime.Seconds())) + parts = append(parts, ProcessTimeStr+": "+strconv.FormatFloat(d.ProcessTime.Seconds(), 'f', -1, 64)) } if d.WaitTime > 0 { - parts = append(parts, fmt.Sprintf("wait_time:%vs", d.WaitTime.Seconds())) + parts = append(parts, WaitTimeStr+": "+strconv.FormatFloat(d.WaitTime.Seconds(), 'f', -1, 64)) } if d.BackoffTime > 0 { - parts = append(parts, fmt.Sprintf("backoff_time:%vs", d.BackoffTime.Seconds())) + parts = append(parts, BackoffTimeStr+": "+strconv.FormatFloat(d.BackoffTime.Seconds(), 'f', -1, 64)) } if d.RequestCount > 0 { - parts = append(parts, fmt.Sprintf("request_count:%d", d.RequestCount)) + parts = append(parts, RequestCountStr+": "+strconv.FormatInt(int64(d.RequestCount), 10)) } if d.TotalKeys > 0 { - parts = append(parts, fmt.Sprintf("total_keys:%d", d.TotalKeys)) + parts = append(parts, TotalKeysStr+": "+strconv.FormatInt(d.TotalKeys, 10)) } if d.ProcessedKeys > 0 { - parts = append(parts, fmt.Sprintf("processed_keys:%d", d.ProcessedKeys)) + parts = append(parts, ProcessKeysStr+": "+strconv.FormatInt(d.ProcessedKeys, 10)) } commitDetails := d.CommitDetail if commitDetails != nil { if commitDetails.PrewriteTime > 0 { - parts = append(parts, fmt.Sprintf("prewrite_time:%vs", commitDetails.PrewriteTime.Seconds())) + parts = append(parts, fmt.Sprintf("Prewrite_time: %v", commitDetails.PrewriteTime.Seconds())) } if commitDetails.CommitTime > 0 { - parts = append(parts, fmt.Sprintf("commit_time:%vs", commitDetails.CommitTime.Seconds())) + parts = append(parts, fmt.Sprintf("Commit_time: %v", commitDetails.CommitTime.Seconds())) } if commitDetails.GetCommitTsTime > 0 { - parts = append(parts, fmt.Sprintf("get_commit_ts_time:%vs", commitDetails.GetCommitTsTime.Seconds())) + parts = append(parts, fmt.Sprintf("Get_commit_ts_time: %v", commitDetails.GetCommitTsTime.Seconds())) } if commitDetails.TotalBackoffTime > 0 { - parts = append(parts, fmt.Sprintf("total_backoff_time:%vs", commitDetails.TotalBackoffTime.Seconds())) + parts = append(parts, fmt.Sprintf("Total_backoff_time: %v", commitDetails.TotalBackoffTime.Seconds())) } resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLockTime) if resolveLockTime > 0 { - parts = append(parts, fmt.Sprintf("resolve_lock_time:%vs", time.Duration(resolveLockTime).Seconds())) + parts = append(parts, fmt.Sprintf("Resolve_lock_time: %v", time.Duration(resolveLockTime).Seconds())) } if commitDetails.LocalLatchTime > 0 { - parts = append(parts, fmt.Sprintf("local_latch_wait_time:%vs", commitDetails.LocalLatchTime.Seconds())) + parts = append(parts, fmt.Sprintf("Local_latch_wait_time: %v", commitDetails.LocalLatchTime.Seconds())) } if commitDetails.WriteKeys > 0 { - parts = append(parts, fmt.Sprintf("write_keys:%d", commitDetails.WriteKeys)) + parts = append(parts, fmt.Sprintf("Write_keys: %d", commitDetails.WriteKeys)) } if commitDetails.WriteSize > 0 { - parts = append(parts, 
fmt.Sprintf("write_size:%d", commitDetails.WriteSize)) + parts = append(parts, fmt.Sprintf("Write_size: %d", commitDetails.WriteSize)) } prewriteRegionNum := atomic.LoadInt32(&commitDetails.PrewriteRegionNum) if prewriteRegionNum > 0 { - parts = append(parts, fmt.Sprintf("prewrite_region:%d", prewriteRegionNum)) + parts = append(parts, fmt.Sprintf("Prewrite_region: %d", prewriteRegionNum)) } if commitDetails.TxnRetry > 0 { - parts = append(parts, fmt.Sprintf("txn_retry:%d", commitDetails.TxnRetry)) + parts = append(parts, fmt.Sprintf("Txn_retry: %d", commitDetails.TxnRetry)) } } return strings.Join(parts, " ") diff --git a/util/execdetails/execdetails_test.go b/util/execdetails/execdetails_test.go index 73935185c5e15..4ab867983163c 100644 --- a/util/execdetails/execdetails_test.go +++ b/util/execdetails/execdetails_test.go @@ -41,7 +41,8 @@ func TestString(t *testing.T) { TxnRetry: 1, }, } - expected := "process_time:2.005s wait_time:1s backoff_time:1s request_count:1 total_keys:100 processed_keys:10 prewrite_time:1s commit_time:1s get_commit_ts_time:1s total_backoff_time:1s resolve_lock_time:1s local_latch_wait_time:1s write_keys:1 write_size:1 prewrite_region:1 txn_retry:1" + expected := "Process_time: 2.005 Wait_time: 1 Backoff_time: 1 Request_count: 1 Total_keys: 100 Process_keys: 10 Prewrite_time: 1 Commit_time: 1 " + + "Get_commit_ts_time: 1 Total_backoff_time: 1 Resolve_lock_time: 1 Local_latch_wait_time: 1 Write_keys: 1 Write_size: 1 Prewrite_region: 1 Txn_retry: 1" if str := detail.String(); str != expected { t.Errorf("got:\n%s\nexpected:\n%s", str, expected) } diff --git a/util/logutil/log.go b/util/logutil/log.go index 2f3b034b61365..192cf294ca6b3 100644 --- a/util/logutil/log.go +++ b/util/logutil/log.go @@ -206,6 +206,26 @@ func (f *textFormatter) Format(entry *log.Entry) ([]byte, error) { return b.Bytes(), nil } +const ( + // SlowLogTimeFormat is the time format for slow log. + SlowLogTimeFormat = "2006-01-02-15:04:05.999999999 -0700" +) + +type slowLogFormatter struct{} + +func (f *slowLogFormatter) Format(entry *log.Entry) ([]byte, error) { + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + fmt.Fprintf(b, "# Time: %s\n", entry.Time.Format(SlowLogTimeFormat)) + fmt.Fprintf(b, "%s\n", entry.Message) + return b.Bytes(), nil +} + func stringToLogFormatter(format string, disableTimestamp bool) log.Formatter { switch strings.ToLower(format) { case "text": @@ -291,15 +311,7 @@ func InitLogger(cfg *LogConfig) error { if err := initFileLog(&tmp, SlowQueryLogger); err != nil { return errors.Trace(err) } - hooks := make(log.LevelHooks) - hooks.Add(&contextHook{}) - SlowQueryLogger.Hooks = hooks - slowQueryFormatter := stringToLogFormatter(cfg.Format, cfg.DisableTimestamp) - ft, ok := slowQueryFormatter.(*textFormatter) - if ok { - ft.EnableEntryOrder = true - } - SlowQueryLogger.Formatter = slowQueryFormatter + SlowQueryLogger.Formatter = &slowLogFormatter{} } return nil diff --git a/util/logutil/log_test.go b/util/logutil/log_test.go index a02d675a7d6a5..5b012715bd8e7 100644 --- a/util/logutil/log_test.go +++ b/util/logutil/log_test.go @@ -111,21 +111,24 @@ func (s *testLogSuite) TestSlowQueryLogger(c *C) { if err != nil { break } - c.Assert(str, Matches, logPattern) + if strings.HasPrefix(str, "# ") { + c.Assert(str, Matches, `# Time: .*?\n`) + } else { + c.Assert(str, Matches, `.*? 
diff --git a/util/logutil/log.go b/util/logutil/log.go
index 2f3b034b61365..192cf294ca6b3 100644
--- a/util/logutil/log.go
+++ b/util/logutil/log.go
@@ -206,6 +206,26 @@ func (f *textFormatter) Format(entry *log.Entry) ([]byte, error) {
 	return b.Bytes(), nil
 }
 
+const (
+	// SlowLogTimeFormat is the time format for the slow log.
+	SlowLogTimeFormat = "2006-01-02-15:04:05.999999999 -0700"
+)
+
+type slowLogFormatter struct{}
+
+func (f *slowLogFormatter) Format(entry *log.Entry) ([]byte, error) {
+	var b *bytes.Buffer
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	fmt.Fprintf(b, "# Time: %s\n", entry.Time.Format(SlowLogTimeFormat))
+	fmt.Fprintf(b, "%s\n", entry.Message)
+	return b.Bytes(), nil
+}
+
 func stringToLogFormatter(format string, disableTimestamp bool) log.Formatter {
 	switch strings.ToLower(format) {
 	case "text":
@@ -291,15 +311,7 @@ func InitLogger(cfg *LogConfig) error {
 		if err := initFileLog(&tmp, SlowQueryLogger); err != nil {
 			return errors.Trace(err)
 		}
-		hooks := make(log.LevelHooks)
-		hooks.Add(&contextHook{})
-		SlowQueryLogger.Hooks = hooks
-		slowQueryFormatter := stringToLogFormatter(cfg.Format, cfg.DisableTimestamp)
-		ft, ok := slowQueryFormatter.(*textFormatter)
-		if ok {
-			ft.EnableEntryOrder = true
-		}
-		SlowQueryLogger.Formatter = slowQueryFormatter
+		SlowQueryLogger.Formatter = &slowLogFormatter{}
 	}
 
 	return nil
diff --git a/util/logutil/log_test.go b/util/logutil/log_test.go
index a02d675a7d6a5..5b012715bd8e7 100644
--- a/util/logutil/log_test.go
+++ b/util/logutil/log_test.go
@@ -111,21 +111,24 @@ func (s *testLogSuite) TestSlowQueryLogger(c *C) {
 		if err != nil {
 			break
 		}
-		c.Assert(str, Matches, logPattern)
+		if strings.HasPrefix(str, "# ") {
+			c.Assert(str, Matches, `# Time: .*?\n`)
+		} else {
+			c.Assert(str, Matches, `.*? message\n`)
+		}
 	}
 	c.Assert(err, Equals, io.EOF)
 }
 
-func (s *testLogSuite) TestSlowQueryLoggerKeepOrder(c *C) {
-	fileName := "slow_query"
-	conf := NewLogConfig("warn", DefaultLogFormat, fileName, EmptyFileLogConfig, true)
+func (s *testLogSuite) TestLoggerKeepOrder(c *C) {
+	conf := NewLogConfig("warn", DefaultLogFormat, "", EmptyFileLogConfig, true)
 	c.Assert(InitLogger(conf), IsNil)
-	defer os.Remove(fileName)
-	ft, ok := SlowQueryLogger.Formatter.(*textFormatter)
+	logger := log.StandardLogger()
+	ft, ok := logger.Formatter.(*textFormatter)
 	c.Assert(ok, IsTrue)
-	c.Assert(ft.EnableEntryOrder, IsTrue)
-	SlowQueryLogger.Out = s.buf
-	logEntry := log.NewEntry(SlowQueryLogger)
+	ft.EnableEntryOrder = true
+	logger.Out = s.buf
+	logEntry := log.NewEntry(logger)
 	logEntry.Data = log.Fields{
 		"connectionId": 1,
 		"costTime":     "1",
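Reviewer note: the reference time behind `SlowLogTimeFormat` is easy to sanity-check, and it also demonstrates the timezone round-trip that `TestSlowQuery` above asserts (19:33:56 +0800 reads back as 11:33:56 UTC). A small sketch using the same layout string:

```go
package main

import (
	"fmt"
	"time"
)

// Mirrors the SlowLogTimeFormat constant added in util/logutil.
const slowLogTimeFormat = "2006-01-02-15:04:05.999999999 -0700"

func main() {
	t, err := time.Parse(slowLogTimeFormat, "2019-02-12-19:33:56.571953 +0800")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC())                     // 2019-02-12 11:33:56.571953 +0000 UTC
	fmt.Println(t.Format(slowLogTimeFormat)) // 2019-02-12-19:33:56.571953 +0800
}
```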