executor,metrics: add a metric for observing execution phases #35906

Merged · 20 commits · Jul 21, 2022

Changes from 8 commits
170 changes: 165 additions & 5 deletions executor/adapter.go
@@ -148,7 +148,7 @@ func (a *recordSet) Next(ctx context.Context, req *chunk.Chunk) (err error) {
logutil.Logger(ctx).Error("execute sql panic", zap.String("sql", a.stmt.GetTextToLog()), zap.Stack("stack"))
}()

err = Next(ctx, a.executor, req)
err = a.stmt.next(ctx, a.executor, req)
if err != nil {
a.lastErr = err
return err
@@ -216,6 +216,17 @@ type ExecStmt struct {
retryCount uint
retryStartTime time.Time

// Phase durations are split into two parts: 1. trying to lock keys (but
// failed); 2. the final iteration of the retry loop. Here we use
// [2]time.Duration to record such info for each phase. The first duration
// is increased only within the current iteration. When we meet a
// pessimistic lock error and decide to retry, we add the first duration to
// the second and reset the first to 0 by calling `resetPhaseDurations`.
phaseBuildDurations [2]time.Duration
phaseOpenDurations [2]time.Duration
phaseNextDurations [2]time.Duration
phaseLockDurations [2]time.Duration

// OutputNames will be set if using cached plan
OutputNames []*types.FieldName
PsStmt *plannercore.CachedPrepareStmt
Expand Down Expand Up @@ -425,7 +436,7 @@ func (a *ExecStmt) Exec(ctx context.Context) (_ sqlexec.RecordSet, err error) {
ctx = a.observeStmtBeginForTopSQL(ctx)

breakpoint.Inject(a.Ctx, sessiontxn.BreakPointBeforeExecutorFirstRun)
if err = e.Open(ctx); err != nil {
if err = a.openExecutor(ctx, e); err != nil {
terror.Call(e.Close)
return nil, err
}
@@ -625,7 +636,7 @@ func (a *ExecStmt) runPessimisticSelectForUpdate(ctx context.Context, e Executor
var err error
req := newFirstChunk(e)
for {
err = Next(ctx, e, req)
err = a.next(ctx, e, req)
if err != nil {
// Handle 'write conflict' error.
break
@@ -671,7 +682,7 @@ func (a *ExecStmt) handleNoDelayExecutor(ctx context.Context, e Executor) (sqlex
}
}

err = Next(ctx, e, newFirstChunk(e))
err = a.next(ctx, e, newFirstChunk(e))
if err != nil {
return nil, err
}
@@ -724,6 +735,7 @@ func (a *ExecStmt) handlePessimisticDML(ctx context.Context, e Executor) error {
ctx = context.WithValue(ctx, util.LockKeysDetailCtxKey, &lockKeyStats)
startLocking := time.Now()
err = txn.LockKeys(ctx, lockCtx, keys...)
a.phaseLockDurations[0] += time.Since(startLocking)

Contributor: Point get and batch point get will lock keys internally, so that duration will not be observed here.

Contributor (Author): Yes, here the lock phase is only for pessimistic DMLs that have extra keys to lock. The internal lock-key duration (of point-get, point-update, etc.) is counted in the next phase.

if lockKeyStats != nil {
seVars.StmtCtx.MergeLockKeysExecDetails(lockKeyStats)
}
@@ -789,6 +801,8 @@ func (a *ExecStmt) handlePessimisticLockError(ctx context.Context, lockErr error

breakpoint.Inject(a.Ctx, sessiontxn.BreakPointOnStmtRetryAfterLockError)

a.resetPhaseDurations()

e, err := a.buildExecutor()
if err != nil {
return nil, err
@@ -802,7 +816,7 @@ func (a *ExecStmt) handlePessimisticLockError(ctx context.Context, lockErr error
sessiontxn.RecordAssert(a.Ctx, "assertTxnManagerAfterPessimisticLockErrorRetry", true)
})

if err = e.Open(ctx); err != nil {
if err = a.openExecutor(ctx, e); err != nil {
return nil, err
}
return e, nil
@@ -816,6 +830,7 @@ type pessimisticTxn interface {

// buildExecutor build an executor from plan, prepared statement may need additional procedure.
func (a *ExecStmt) buildExecutor() (Executor, error) {
defer func(start time.Time) { a.phaseBuildDurations[0] += time.Since(start) }(time.Now())
ctx := a.Ctx
stmtCtx := ctx.GetSessionVars().StmtCtx
if _, ok := a.Plan.(*plannercore.Execute); !ok {
@@ -858,6 +873,31 @@ func (a *ExecStmt) buildExecutor() (Executor, error) {
return e, nil
}

func (a *ExecStmt) openExecutor(ctx context.Context, e Executor) error {
start := time.Now()
err := e.Open(ctx)
a.phaseOpenDurations[0] += time.Since(start)
return err
}

func (a *ExecStmt) next(ctx context.Context, e Executor, req *chunk.Chunk) error {
start := time.Now()
err := Next(ctx, e, req)
a.phaseNextDurations[0] += time.Since(start)
return err
}

func (a *ExecStmt) resetPhaseDurations() {
a.phaseBuildDurations[1] += a.phaseBuildDurations[0]
a.phaseBuildDurations[0] = 0
a.phaseOpenDurations[1] += a.phaseOpenDurations[0]
a.phaseOpenDurations[0] = 0
a.phaseNextDurations[1] += a.phaseNextDurations[0]
a.phaseNextDurations[0] = 0
a.phaseLockDurations[1] += a.phaseLockDurations[0]
a.phaseLockDurations[0] = 0
}
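
A minimal stand-alone sketch (not part of the PR) of how the two-slot accumulation above behaves across a pessimistic-lock retry; the field name mirrors the struct above, but the surrounding type and the `next` helper are simplified stand-ins for illustration only.

```go
package main

import (
	"fmt"
	"time"
)

// stmt mimics the [2]time.Duration bookkeeping used by ExecStmt: index 0 holds
// the duration of the current iteration, index 1 accumulates the time spent in
// iterations that ended with a pessimistic lock error.
type stmt struct {
	phaseNextDurations [2]time.Duration
}

func (s *stmt) next(work time.Duration) {
	start := time.Now()
	time.Sleep(work) // stand-in for Next(ctx, e, req)
	s.phaseNextDurations[0] += time.Since(start)
}

// resetPhaseDurations moves the current iteration's time into the "locking"
// slot before a retry, like the method added in this PR.
func (s *stmt) resetPhaseDurations() {
	s.phaseNextDurations[1] += s.phaseNextDurations[0]
	s.phaseNextDurations[0] = 0
}

func main() {
	s := &stmt{}
	s.next(10 * time.Millisecond) // first attempt hits a lock error
	s.resetPhaseDurations()       // retry: the 10ms is now counted as "locking"
	s.next(5 * time.Millisecond)  // final attempt succeeds
	fmt.Println("final:", s.phaseNextDurations[0], "locking:", s.phaseNextDurations[1])
}
```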

// QueryReplacer replaces new line and tab for grep result including query string.
var QueryReplacer = strings.NewReplacer("\r", " ", "\n", " ", "\t", " ")

@@ -900,8 +940,127 @@ var (
sessionExecuteRunDurationInternal = metrics.SessionExecuteRunDuration.WithLabelValues(metrics.LblInternal)
sessionExecuteRunDurationGeneral = metrics.SessionExecuteRunDuration.WithLabelValues(metrics.LblGeneral)
totalTiFlashQuerySuccCounter = metrics.TiFlashQueryTotalCounter.WithLabelValues("", metrics.LblOK)

execBuildLocking = metrics.ExecPhaseDuration.WithLabelValues("build:locking", "0")
execOpenLocking = metrics.ExecPhaseDuration.WithLabelValues("open:locking", "0")
execNextLocking = metrics.ExecPhaseDuration.WithLabelValues("next:locking", "0")
execLockLocking = metrics.ExecPhaseDuration.WithLabelValues("lock:locking", "0")
execBuildFinal = metrics.ExecPhaseDuration.WithLabelValues("build:final", "0")
execOpenFinal = metrics.ExecPhaseDuration.WithLabelValues("open:final", "0")
execNextFinal = metrics.ExecPhaseDuration.WithLabelValues("next:final", "0")
execLockFinal = metrics.ExecPhaseDuration.WithLabelValues("lock:final", "0")

Contributor: For the next operation, maybe we need to integrate more with the kv client metrics in the future.

execCommitPrewrite = metrics.ExecPhaseDuration.WithLabelValues("commit:prewrite", "0")
execCommitCommit = metrics.ExecPhaseDuration.WithLabelValues("commit:commit", "0")
execCommitWaitCommitTS = metrics.ExecPhaseDuration.WithLabelValues("commit:wait:commit-ts", "0")
execCommitWaitLatestTS = metrics.ExecPhaseDuration.WithLabelValues("commit:wait:latest-ts", "0")
execCommitWaitLatch = metrics.ExecPhaseDuration.WithLabelValues("commit:wait:local-latch", "0")
execCommitWaitBinlog = metrics.ExecPhaseDuration.WithLabelValues("commit:wait:prewrite-binlog", "0")
)
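
For context on the var block above: calling WithLabelValues once up front returns a concrete Observer, so the hot path skips the label-to-child lookup, while the internal ("1") variants are resolved lazily in observePhaseDurations below. A hedged sketch of the same pattern with a hypothetical metric (names here are illustrative, not TiDB's):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// demoPhaseDuration is a hypothetical SummaryVec, not the one defined in the
// metrics package; it only illustrates the pre-binding pattern.
var (
	demoPhaseDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Namespace: "demo",
		Subsystem: "executor",
		Name:      "phase_duration_seconds",
		Help:      "Summary of each execution phase duration.",
	}, []string{"phase", "internal"})

	// Resolved once at startup: later Observe calls avoid the label lookup
	// that WithLabelValues performs on every invocation.
	demoBuildFinal = demoPhaseDuration.WithLabelValues("build:final", "0")
)

func observeBuild(d float64, internal bool) {
	if internal {
		// Rarer path: resolve the child lazily, as the PR does for internal SQL.
		demoPhaseDuration.WithLabelValues("build:final", "1").Observe(d)
		return
	}
	demoBuildFinal.Observe(d)
}

func main() {
	prometheus.MustRegister(demoPhaseDuration)
	observeBuild(0.002, false)
}
```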

func (a *ExecStmt) observePhaseDurations(internal bool, commitDetails *util.CommitDetails) {
if d := a.phaseBuildDurations[0]; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("build:final", "1").Observe(d.Seconds())

Contributor: Maybe we could abstract the label constants and pre-define the related metrics?

Contributor (Author): Updated.

} else {
execBuildFinal.Observe(d.Seconds())
}
}
if d := a.phaseBuildDurations[1]; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("build:locking", "1").Observe(d.Seconds())
} else {
execBuildLocking.Observe(d.Seconds())
}
}
if d := a.phaseOpenDurations[0]; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("open:final", "1").Observe(d.Seconds())
} else {
execOpenFinal.Observe(d.Seconds())
}
}
if d := a.phaseOpenDurations[1]; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("open:locking", "1").Observe(d.Seconds())
} else {
execOpenLocking.Observe(d.Seconds())
}
}
if d := a.phaseNextDurations[0]; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("next:final", "1").Observe(d.Seconds())
} else {
execNextFinal.Observe(d.Seconds())
}
}
if d := a.phaseNextDurations[1]; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("next:locking", "1").Observe(d.Seconds())
} else {
execNextLocking.Observe(d.Seconds())
}
}
if d := a.phaseLockDurations[0]; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("lock:final", "1").Observe(d.Seconds())
} else {
execLockFinal.Observe(d.Seconds())
}
}
if d := a.phaseLockDurations[1]; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("lock:locking", "1").Observe(d.Seconds())
} else {
execLockLocking.Observe(d.Seconds())
}
}
if commitDetails != nil {
if d := commitDetails.PrewriteTime; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("commit:prewrite", "1").Observe(d.Seconds())
} else {
execCommitPrewrite.Observe(d.Seconds())
}
}
if d := commitDetails.CommitTime; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("commit:commit", "1").Observe(d.Seconds())
} else {
execCommitCommit.Observe(d.Seconds())
}
}
if d := commitDetails.GetCommitTsTime; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("commit:wait:commit-ts", "1").Observe(d.Seconds())
} else {
execCommitWaitCommitTS.Observe(d.Seconds())
}
}
if d := commitDetails.GetLatestTsTime; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("commit:wait:latest-ts", "1").Observe(d.Seconds())
} else {
execCommitWaitLatestTS.Observe(d.Seconds())
}
}
if d := commitDetails.LocalLatchTime; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("commit:wait:local-latch", "1").Observe(d.Seconds())
} else {
execCommitWaitLatch.Observe(d.Seconds())
}
}
if d := commitDetails.WaitPrewriteBinlogTime; d > 0 {
if internal {
metrics.ExecPhaseDuration.WithLabelValues("commit:wait:prewrite-binlog", "1").Observe(d.Seconds())
} else {
execCommitWaitBinlog.Observe(d.Seconds())
}
}
}
}

// FinishExecuteStmt is used to record some information after `ExecStmt` execution finished:
// 1. record slow log if needed.
// 2. record summary statement.
Expand Down Expand Up @@ -946,6 +1105,7 @@ func (a *ExecStmt) FinishExecuteStmt(txnTS uint64, err error, hasMoreResults boo
}
sessVars.PrevStmt = FormatSQL(a.GetTextToLog())

a.observePhaseDurations(sessVars.InRestrictedSQL, execDetail.CommitDetail)
executeDuration := time.Since(sessVars.StartTime) - sessVars.DurationCompile
if sessVars.InRestrictedSQL {
sessionExecuteRunDurationInternal.Observe(executeDuration.Seconds())
9 changes: 9 additions & 0 deletions metrics/executor.go
@@ -46,4 +46,13 @@ var (
Name: "statement_db_total",
Help: "Counter of StmtNode by Database.",
}, []string{LblDb, LblType})

// ExecPhaseDuration records the duration of each execution phase.
ExecPhaseDuration = prometheus.NewSummaryVec(

Contributor: Can you explain why a summary is used here? I have never seen summary-type metrics used in TiDB before; a histogram is always used.

Contributor (Author): To reduce the size of the metrics data (a histogram is essentially a summary plus buckets). IMO, we won't care too much about something like "what's the p99 latency of the next phase": there are too many kinds of executors (as well as combinations of them), some may be fast and others extremely slow, so a higher or lower p99 latency may not provide much information (we do not know the distribution for each kind of executor). Besides, it is hard to decide on buckets here; some phases (like open) take very little time, but phases like lock may cost a few seconds.

prometheus.SummaryOpts{
Namespace: "tidb",
Subsystem: "executor",
Name: "phase_duration_seconds",
Help: "Summary of each execution phase duration.",
}, []string{LblPhase, LblInternal})
)
1 change: 1 addition & 0 deletions metrics/metrics.go
@@ -146,6 +146,7 @@ func RegisterMetrics() {
prometheus.MustRegister(StatsInaccuracyRate)
prometheus.MustRegister(StmtNodeCounter)
prometheus.MustRegister(DbStmtNodeCounter)
prometheus.MustRegister(ExecPhaseDuration)
prometheus.MustRegister(StoreQueryFeedbackCounter)
prometheus.MustRegister(TimeJumpBackCounter)
prometheus.MustRegister(TransactionDuration)
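
Once registered as above, the new summary is exported alongside TiDB's existing metrics on its status endpoint. A minimal stand-alone sketch of registration plus an HTTP exposition endpoint, assuming a plain promhttp handler rather than TiDB's actual status-server wiring (the address and server setup here are hypothetical):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// execPhaseDuration mirrors the definition in metrics/executor.go.
var execPhaseDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
	Namespace: "tidb",
	Subsystem: "executor",
	Name:      "phase_duration_seconds",
	Help:      "Summary of each execution phase duration.",
}, []string{"phase", "internal"})

func main() {
	// Mirrors RegisterMetrics: a collector must be registered before it
	// shows up on the /metrics endpoint.
	prometheus.MustRegister(execPhaseDuration)
	execPhaseDuration.WithLabelValues("build:final", "0").Observe(0.001)

	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":10080", nil) // hypothetical listen address
}
```
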
7 changes: 4 additions & 3 deletions metrics/session.go
@@ -81,7 +81,7 @@ var (
Name: "transaction_statement_num",
Help: "Bucketed histogram of statements count in each transaction.",
Buckets: prometheus.ExponentialBuckets(1, 2, 16), // 1 ~ 32768
}, []string{LbTxnMode, LblType})
}, []string{LblTxnMode, LblType})

TransactionDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
@@ -90,7 +90,7 @@ var (
Name: "transaction_duration_seconds",
Help: "Bucketed histogram of a transaction execution duration, including retry.",
Buckets: prometheus.ExponentialBuckets(0.001, 2, 28), // 1ms ~ 1.5days
}, []string{LbTxnMode, LblType})
}, []string{LblTxnMode, LblType})

StatementDeadlockDetectDuration = prometheus.NewHistogram(
prometheus.HistogramOpts{
@@ -169,7 +169,7 @@ const (
LblCoprType = "copr_type"
LblGeneral = "general"
LblInternal = "internal"
LbTxnMode = "txn_mode"
LblTxnMode = "txn_mode"
LblPessimistic = "pessimistic"
LblOptimistic = "optimistic"
LblStore = "store"
@@ -187,4 +187,5 @@ const (
LblCommitting = "committing"
LblRollingBack = "rolling_back"
LblHasLock = "has_lock"
LblPhase = "phase"
)