Commit
executor: replace logger with zap logger (#9521)
alivxxx authored Mar 14, 2019
1 parent 4422a23 commit f738ba2
Showing 21 changed files with 99 additions and 75 deletions.
20 changes: 10 additions & 10 deletions executor/adapter.go
@@ -23,6 +23,7 @@ import (

"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
@@ -40,7 +41,8 @@ import (
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/sqlexec"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)

// processinfoSetter is the interface used to set the current running process info.
@@ -286,7 +288,7 @@ func (a *ExecStmt) handleNoDelayExecutor(ctx context.Context, sctx sessionctx.Co
txnTS := uint64(0)
// Don't activate pending txn here.
if txn, err1 := sctx.Txn(false); err1 != nil {
- log.Error(err1)
+ logutil.Logger(ctx).Error("get current transaction failed", zap.Error(err1))
} else {
if txn.Valid() {
txnTS = txn.StartTS()
@@ -313,7 +315,7 @@ func (a *ExecStmt) buildExecutor(ctx sessionctx.Context) (Executor, error) {
return nil, errors.Trace(err)
}
if isPointGet {
log.Debugf("con:%d InitTxnWithStartTS %s", ctx.GetSessionVars().ConnectionID, a.Text)
logutil.Logger(context.Background()).Debug("init txnStartTS with MaxUint64", zap.Uint64("conn", ctx.GetSessionVars().ConnectionID), zap.String("text", a.Text))
err = ctx.InitTxnWithStartTS(math.MaxUint64)
} else if ctx.GetSessionVars().SnapshotTS != 0 {
if _, ok := a.Plan.(*plannercore.CheckTable); ok {
@@ -363,13 +365,13 @@ var QueryReplacer = strings.NewReplacer("\r", " ", "\n", " ", "\t", " ")
// LogSlowQuery is used to print the slow query in the log files.
func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
level := log.GetLevel()
- if level < log.WarnLevel {
+ if level > zapcore.WarnLevel {
return
}
cfg := config.GetGlobalConfig()
costTime := time.Since(a.StartTime)
threshold := time.Duration(atomic.LoadUint64(&cfg.Log.SlowThreshold)) * time.Millisecond
- if costTime < threshold && level < log.DebugLevel {
+ if costTime < threshold && level > zapcore.DebugLevel {
return
}
sql := a.Text
@@ -388,13 +390,11 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
}
execDetail := sessVars.StmtCtx.GetExecDetails()
if costTime < threshold {
- if logutil.SlowQueryLogger.IsLevelEnabled(log.DebugLevel) {
- _, digest := sessVars.StmtCtx.SQLDigest()
- logutil.SlowQueryLogger.Debugf(sessVars.SlowLogFormat(txnTS, costTime, execDetail, indexIDs, digest, sql))
- }
+ _, digest := sessVars.StmtCtx.SQLDigest()
+ logutil.SlowQueryZapLogger.Debug(sessVars.SlowLogFormat(txnTS, costTime, execDetail, indexIDs, digest, sql))
} else {
_, digest := sessVars.StmtCtx.SQLDigest()
- logutil.SlowQueryLogger.Warnf(sessVars.SlowLogFormat(txnTS, costTime, execDetail, indexIDs, digest, sql))
+ logutil.SlowQueryZapLogger.Warn(sessVars.SlowLogFormat(txnTS, costTime, execDetail, indexIDs, digest, sql))
metrics.TotalQueryProcHistogram.Observe(costTime.Seconds())
metrics.TotalCopProcHistogram.Observe(execDetail.ProcessTime.Seconds())
metrics.TotalCopWaitHistogram.Observe(execDetail.WaitTime.Seconds())
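The flipped comparisons in LogSlowQuery are easy to misread: logrus numbers its levels upward in verbosity (PanicLevel=0 through DebugLevel=5), while zapcore numbers them upward in severity (DebugLevel=-1 through FatalLevel=5), so "suppress records below Warn" changes from level < log.WarnLevel to level > zapcore.WarnLevel. A minimal, self-contained sketch of the zap side (illustrative only, not part of this commit):

    package main

    import (
        "fmt"

        "go.uber.org/zap/zapcore"
    )

    func main() {
        // zapcore levels grow with severity: Debug(-1) < Info(0) < Warn(1) < Error(2).
        for _, l := range []zapcore.Level{zapcore.DebugLevel, zapcore.InfoLevel, zapcore.WarnLevel, zapcore.ErrorLevel} {
            fmt.Printf("%-5v = %d\n", l, int8(l))
        }
        // A logger configured at Error drops warnings, hence the early return above.
        configured := zapcore.ErrorLevel
        fmt.Println("skip slow-query warning:", configured > zapcore.WarnLevel) // true
    }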
17 changes: 10 additions & 7 deletions executor/admin.go
@@ -31,10 +31,11 @@ import (
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/timeutil"
"github.com/pingcap/tipb/go-tipb"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

var (
@@ -308,8 +309,9 @@ func (e *RecoverIndexExec) backfillIndex(ctx context.Context) (int64, int64, err
totalScanCnt += result.scanRowCount
if totalScanCnt-lastLogCnt >= 50000 {
lastLogCnt = totalScanCnt
log.Infof("[recover-index] recover table:%v, index:%v, totalAddedCnt:%v, totalScanCnt:%v, nextHandle: %v",
e.table.Meta().Name.O, e.index.Meta().Name.O, totalAddedCnt, totalScanCnt, result.nextHandle)
logutil.Logger(ctx).Info("recover index", zap.String("table", e.table.Meta().Name.O),
zap.String("index", e.index.Meta().Name.O), zap.Int64("totalAddedCnt", totalAddedCnt),
zap.Int64("totalScanCnt", totalScanCnt), zap.Int64("nextHandle", result.nextHandle))
}

// no more rows
@@ -393,8 +395,9 @@ func (e *RecoverIndexExec) batchMarkDup(txn kv.Transaction, rows []recoverRows)
}

if handle != rows[i].handle {
log.Warnf("[recover-index] The constraint of unique index:%v is broken, handle:%v is not equal handle:%v with idxKey:%v.",
e.index.Meta().Name.O, handle, rows[i].handle, key)
logutil.Logger(context.Background()).Warn("recover index: the constraint of unique index is broken, handle in index is not equal to handle in table",
zap.String("index", e.index.Meta().Name.O), zap.ByteString("indexKey", key),
zap.Int64("handleInTable", rows[i].handle), zap.Int64("handleInIndex", handle))
}
}
rows[i].skip = true
@@ -520,8 +523,8 @@ func (e *CleanupIndexExec) deleteDanglingIdx(txn kv.Transaction, values map[stri
}
e.removeCnt++
if e.removeCnt%e.batchSize == 0 {
log.Infof("[cleaning up dangling index] table: %v, index: %v, count: %v.",
e.table.Meta().Name.String(), e.index.Meta().Name.String(), e.removeCnt)
logutil.Logger(context.Background()).Info("clean up dangling index", zap.String("table", e.table.Meta().Name.String()),
zap.String("index", e.index.Meta().Name.String()), zap.Uint64("count", e.removeCnt))
}
}
}
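Every hunk in this file follows the same mechanical translation: a printf-style log.Infof/Warnf call becomes a fixed message plus typed zap fields, which log collectors can filter without parsing message text. A hedged before/after sketch (the zap.NewDevelopment logger and the sample values are illustrative, not TiDB's configuration):

    package main

    import "go.uber.org/zap"

    func main() {
        logger, _ := zap.NewDevelopment() // throwaway logger for the example
        defer logger.Sync()

        table, index, count := "t1", "idx_c2", uint64(50000)

        // Before: everything interpolated into one opaque string.
        //   log.Infof("[cleaning up dangling index] table: %v, index: %v, count: %v.", table, index, count)

        // After: constant message, typed key/value fields.
        logger.Info("clean up dangling index",
            zap.String("table", table),
            zap.String("index", index),
            zap.Uint64("count", count))
    }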
10 changes: 5 additions & 5 deletions executor/aggregate.go
@@ -26,12 +26,12 @@ import (
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/set"
log "github.com/sirupsen/logrus"
"github.com/spaolacci/murmur3"
"go.uber.org/zap"
)

type aggPartialResultMapper map[string][]aggfuncs.PartialResult
@@ -317,9 +317,9 @@ func (w *HashAggPartialWorker) getChildInput() bool {
}

func recoveryHashAgg(output chan *AfFinalResult, r interface{}) {
+ err := errors.Errorf("%v", r)
output <- &AfFinalResult{err: errors.Errorf("%v", r)}
- buf := util.GetStack()
- log.Errorf("panic in the recoverable goroutine: %v, stack trace:\n%s", r, buf)
+ logutil.Logger(context.Background()).Error("parallel hash aggregation panicked", zap.Error(err))
}

func (w *HashAggPartialWorker) run(ctx sessionctx.Context, waitGroup *sync.WaitGroup, finalConcurrency int) {
@@ -473,7 +473,7 @@ func (w *HashAggFinalWorker) getFinalResult(sctx sessionctx.Context) {
partialResults := w.getPartialResult(sctx.GetSessionVars().StmtCtx, []byte(groupKey), w.partialResultMap)
for i, af := range w.aggFuncs {
if err := af.AppendFinalResult2Chunk(sctx, partialResults[i], result); err != nil {
- log.Error(errors.ErrorStack(err))
+ logutil.Logger(context.Background()).Error("HashAggFinalWorker failed to append final result to Chunk", zap.Error(err))
}
}
if len(w.aggFuncs) == 0 {
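recoveryHashAgg above is the commit's standard shape for panic handling in worker goroutines: convert the recovered value to an error, hand it to the consumer over the result channel, and log it once. A self-contained sketch of that pattern with illustrative names (finalResult and recoverToChannel are not TiDB identifiers):

    package main

    import (
        "fmt"

        "go.uber.org/zap"
    )

    type finalResult struct{ err error }

    // recoverToChannel mirrors recoveryHashAgg: report the panic to whoever
    // drains the channel, then log it once with a structured error field.
    func recoverToChannel(output chan<- finalResult, logger *zap.Logger) {
        if r := recover(); r != nil {
            err := fmt.Errorf("%v", r)
            output <- finalResult{err: err}
            logger.Error("worker panicked", zap.Error(err))
        }
    }

    func main() {
        logger, _ := zap.NewDevelopment()
        out := make(chan finalResult, 1)
        go func() {
            defer recoverToChannel(out, logger)
            panic("simulated failure in a partial worker")
        }()
        fmt.Println("main saw:", (<-out).err)
    }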
10 changes: 6 additions & 4 deletions executor/analyze.go
@@ -29,9 +29,10 @@ import (
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tipb/go-tipb"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

var _ Executor = &AnalyzeExec{}
@@ -72,15 +73,16 @@ func (e *AnalyzeExec) Next(ctx context.Context, req *chunk.RecordBatch) error {
err = result.Err
if errors.Trace(err) == errAnalyzeWorkerPanic {
panicCnt++
+ } else {
+ logutil.Logger(ctx).Error("analyze failed", zap.Error(err))
}
- log.Error(errors.ErrorStack(err))
continue
}
for i, hg := range result.Hist {
err1 := statsHandle.SaveStatsToStorage(result.PhysicalTableID, result.Count, result.IsIndex, hg, result.Cms[i], 1)
if err1 != nil {
err = err1
- log.Error(errors.ErrorStack(err))
+ logutil.Logger(ctx).Error("save stats to storage failed", zap.Error(err))
continue
}
}
@@ -122,7 +124,7 @@ func (e *AnalyzeExec) analyzeWorker(taskCh <-chan *analyzeTask, resultCh chan<-
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
log.Errorf("analyzeWorker panic stack is:\n%s", buf)
logutil.Logger(context.Background()).Error("analyze worker panicked", zap.String("stack", string(buf)))
metrics.PanicCounter.WithLabelValues(metrics.LabelAnalyze).Inc()
resultCh <- statistics.AnalyzeResult{
Err: errAnalyzeWorkerPanic,
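analyzeWorker keeps the manual stack capture but attaches the trace as a structured field. runtime.Stack(buf, false) writes only the calling goroutine's trace and returns the number of bytes used, so the buffer is sliced before logging. The same move in isolation (a sketch, not TiDB's exact code):

    package main

    import (
        "runtime"

        "go.uber.org/zap"
    )

    func main() {
        logger, _ := zap.NewDevelopment()
        defer func() {
            if r := recover(); r != nil {
                buf := make([]byte, 4096)
                n := runtime.Stack(buf, false) // false: current goroutine only
                logger.Error("worker panicked",
                    zap.Any("recover", r),
                    zap.String("stack", string(buf[:n])))
            }
        }()
        panic("boom")
    }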
5 changes: 3 additions & 2 deletions executor/checksum.go
@@ -24,9 +24,10 @@ import (
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tipb/go-tipb"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

var _ Executor = &ChecksumTableExec{}
@@ -70,7 +71,7 @@ func (e *ChecksumTableExec) Open(ctx context.Context) error {
result := <-resultCh
if result.Error != nil {
err = result.Error
- log.Error(errors.ErrorStack(err))
+ logutil.Logger(ctx).Error("checksum failed", zap.Error(err))
continue
}
e.handleResult(result)
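Dropping the explicit errors.ErrorStack(err) does not necessarily lose the stack. zap.Error(err) records err.Error() under the error key, and for error values implementing fmt.Formatter — which pingcap/errors values do, capturing a stack at creation — zap's encoder should additionally emit an errorVerbose field with the %+v rendering, stack included. A hedged sketch:

    package main

    import (
        "github.com/pingcap/errors"
        "go.uber.org/zap"
    )

    func main() {
        logger, _ := zap.NewDevelopment()
        err := errors.New("checksum mismatch") // pingcap/errors records the stack here

        // Old style: stack formatted into the message by hand.
        //   log.Error(errors.ErrorStack(err))

        // New style: pass the value; the encoder expands it.
        logger.Error("checksum failed", zap.Error(err))
    }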
7 changes: 4 additions & 3 deletions executor/compiler.go
@@ -26,7 +26,8 @@ import (
"github.com/pingcap/tidb/planner"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
log "github.com/sirupsen/logrus"
"github.com/pingcap/tidb/util/logutil"
"go.uber.org/zap"
)

// Compiler compiles an ast.StmtNode to a physical plan.
@@ -76,7 +77,7 @@ func logExpensiveQuery(stmtNode ast.StmtNode, finalPlan plannercore.Plan) (expen
if len(sql) > logSQLLen {
sql = fmt.Sprintf("%s len(%d)", sql[:logSQLLen], len(sql))
}
log.Warnf("[EXPENSIVE_QUERY] %s", sql)
logutil.Logger(context.Background()).Warn("EXPENSIVE_QUERY", zap.String("SQL", sql))
return
}

@@ -305,7 +306,7 @@ func GetInfoSchema(ctx sessionctx.Context) infoschema.InfoSchema {
var is infoschema.InfoSchema
if snap := sessVar.SnapshotInfoschema; snap != nil {
is = snap.(infoschema.InfoSchema)
log.Infof("con:%d use snapshot schema %d", sessVar.ConnectionID, is.SchemaMetaVersion())
logutil.Logger(context.Background()).Info("use snapshot schema", zap.Uint64("conn", sessVar.ConnectionID), zap.Int64("schemaVersion", is.SchemaMetaVersion()))
} else {
is = sessVar.TxnCtx.InfoSchema.(infoschema.InfoSchema)
}
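Call sites that have a request context use logutil.Logger(ctx), as in GetInfoSchema's snapshot branch; the rest fall back to context.Background(). This diff does not show logutil's internals, so the sketch below is only a guess at the general shape — a logger carried in the context with a process-global fallback — and loggerKey/fromContext are hypothetical names:

    package main

    import (
        "context"

        "go.uber.org/zap"
    )

    type loggerKey struct{} // hypothetical context key

    // fromContext stands in for logutil.Logger: the request-scoped logger if
    // one was attached, otherwise the global logger.
    func fromContext(ctx context.Context) *zap.Logger {
        if l, ok := ctx.Value(loggerKey{}).(*zap.Logger); ok {
            return l
        }
        return zap.L()
    }

    func main() {
        base, _ := zap.NewDevelopment()
        // Attach connection-scoped fields once, e.g. when a session starts...
        ctx := context.WithValue(context.Background(), loggerKey{}, base.With(zap.Uint64("conn", 1)))
        // ...and every later call site inherits them.
        fromContext(ctx).Info("use snapshot schema", zap.Int64("schemaVersion", 27))
    }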
10 changes: 7 additions & 3 deletions executor/ddl.go
@@ -32,8 +32,9 @@ import (
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/gcutil"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/sqlexec"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

// DDLExec represents a DDL executor.
@@ -60,7 +61,7 @@ func (e *DDLExec) toErr(err error) error {
checker := domain.NewSchemaChecker(dom, e.is.SchemaMetaVersion(), nil)
txn, err1 := e.ctx.Txn(true)
if err1 != nil {
- log.Error(err)
+ logutil.Logger(context.Background()).Error("active txn failed", zap.Error(err))
return errors.Trace(err1)
}
schemaInfoErr := checker.Check(txn.StartTS())
@@ -252,7 +253,10 @@ func (e *DDLExec) executeDropTableOrView(s *ast.DropTableStmt) error {
}

if config.CheckTableBeforeDrop {
log.Warnf("admin check table `%s`.`%s` before drop.", fullti.Schema.O, fullti.Name.O)
logutil.Logger(context.Background()).Warn("admin check table before drop",
zap.String("database", fullti.Schema.O),
zap.String("table", fullti.Name.O),
)
sql := fmt.Sprintf("admin check table `%s`.`%s`", fullti.Schema.O, fullti.Name.O)
_, _, err = e.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(e.ctx, sql)
if err != nil {
15 changes: 8 additions & 7 deletions executor/distsql.go
@@ -38,10 +38,11 @@ import (
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tipb/go-tipb"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

var (
@@ -464,7 +465,7 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, kvRanges []k
}
cancel()
if err := result.Close(); err != nil {
log.Error("close Select result failed:", errors.ErrorStack(err))
logutil.Logger(ctx).Error("close Select result failed", zap.Error(err))
}
if e.runtimeStats != nil {
copStats := e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.GetRootStats(e.idxPlans[len(e.idxPlans)-1].ExplainID())
@@ -516,7 +517,7 @@ func (e *IndexLookUpExecutor) buildTableReader(ctx context.Context, handles []in
}
tableReader, err := e.dataReaderBuilder.buildTableReaderFromHandles(ctx, tableReaderExec, handles)
if err != nil {
- log.Error(err)
+ logutil.Logger(ctx).Error("build table reader from handles failed", zap.Error(err))
return nil, errors.Trace(err)
}
return tableReader, nil
@@ -612,7 +613,7 @@ func (w *indexWorker) fetchHandles(ctx context.Context, result distsql.SelectRes
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
log.Errorf("indexWorker panic stack is:\n%s", buf)
logutil.Logger(ctx).Error("indexWorker in IndexLookupExecutor panicked", zap.String("stack", string(buf)))
err4Panic := errors.Errorf("%v", r)
doneCh := make(chan error, 1)
doneCh <- err4Panic
@@ -713,7 +714,7 @@ func (w *tableWorker) pickAndExecTask(ctx context.Context) {
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
log.Errorf("tableWorker panic stack is:\n%s", buf)
logutil.Logger(ctx).Error("tableWorker in IndexLookUpExecutor panicked", zap.String("stack", string(buf)))
task.doneCh <- errors.Errorf("%v", r)
}
}()
@@ -739,7 +740,7 @@ func (w *tableWorker) pickAndExecTask(ctx context.Context) {
func (w *tableWorker) executeTask(ctx context.Context, task *lookupTableTask) error {
tableReader, err := w.buildTblReader(ctx, task.handles)
if err != nil {
- log.Error(err)
+ logutil.Logger(ctx).Error("build table reader failed", zap.Error(err))
return errors.Trace(err)
}
defer terror.Call(tableReader.Close)
@@ -754,7 +755,7 @@ func (w *tableWorker) executeTask(ctx context.Context, task *lookupTableTask) er
chk := tableReader.newFirstChunk()
err = tableReader.Next(ctx, chunk.NewRecordBatch(chk))
if err != nil {
- log.Error(err)
+ logutil.Logger(ctx).Error("table reader fetch next chunk failed", zap.Error(err))
return errors.Trace(err)
}
if chk.NumRows() == 0 {
7 changes: 4 additions & 3 deletions executor/executor.go
@@ -44,8 +44,9 @@ import (
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

var (
@@ -466,7 +467,7 @@ func (e *CheckTableExec) Next(ctx context.Context, req *chunk.RecordBatch) error
err = e.doCheckTable(tb)
}
if err != nil {
log.Warnf("%v error:%v", t.Name, errors.ErrorStack(err))
logutil.Logger(ctx).Warn("check table failed", zap.String("tableName", t.Name.O), zap.Error(err))
if admin.ErrDataInConsistent.Equal(err) {
return ErrAdminCheckTable.GenWithStack("%v err:%v", t.Name, err)
}
@@ -1216,7 +1217,7 @@ func (e *UnionExec) resultPuller(ctx context.Context, childID int) {
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
log.Errorf("resultPuller panic stack is:\n%s", buf)
logutil.Logger(ctx).Error("resultPuller panicked", zap.String("stack", string(buf)))
result.err = errors.Errorf("%v", r)
e.resultPool <- result
e.stopFetchData.Store(true)