diff --git a/br/pkg/lightning/backend/local/local.go b/br/pkg/lightning/backend/local/local.go
old mode 100644
new mode 100755
index 11b44a6ba19c8..13773dc6d2ee4
--- a/br/pkg/lightning/backend/local/local.go
+++ b/br/pkg/lightning/backend/local/local.go
@@ -232,8 +232,9 @@ type local struct {
 	errorMgr            *errormanager.ErrorManager
 	importClientFactory ImportClientFactory
 
-	bufferPool *membuf.Pool
-	metrics    *metric.Metrics
+	bufferPool   *membuf.Pool
+	metrics      *metric.Metrics
+	writeLimiter StoreWriteLimiter
 }
 
 func openDuplicateDB(storeDir string) (*pebble.DB, error) {
@@ -308,6 +309,12 @@ func NewLocalBackend(
 	if duplicateDetection {
 		keyAdapter = dupDetectKeyAdapter{}
 	}
+	var writeLimiter StoreWriteLimiter
+	if cfg.TikvImporter.StoreWriteBWLimit > 0 {
+		writeLimiter = newStoreWriteLimiter(int(cfg.TikvImporter.StoreWriteBWLimit))
+	} else {
+		writeLimiter = noopStoreWriteLimiter{}
+	}
 	local := &local{
 		engines: sync.Map{},
 		pdCtl:   pdCtl,
@@ -334,6 +341,7 @@ func NewLocalBackend(
 		errorMgr:            errorMgr,
 		importClientFactory: importClientFactory,
 		bufferPool:          membuf.NewPool(membuf.WithAllocator(manual.Allocator{})),
+		writeLimiter:        writeLimiter,
 	}
 	if m, ok := metric.FromContext(ctx); ok {
 		local.metrics = m
@@ -784,6 +792,7 @@ func (local *local) WriteToTiKV(
 
 	leaderID := region.Leader.GetId()
 	clients := make([]sst.ImportSST_WriteClient, 0, len(region.Region.GetPeers()))
+	storeIDs := make([]uint64, 0, len(region.Region.GetPeers()))
 	requests := make([]*sst.WriteRequest, 0, len(region.Region.GetPeers()))
 	for _, peer := range region.Region.GetPeers() {
 		cli, err := local.getImportClient(ctx, peer.StoreId)
@@ -812,6 +821,7 @@ func (local *local) WriteToTiKV(
 		}
 		clients = append(clients, wstream)
 		requests = append(requests, req)
+		storeIDs = append(storeIDs, peer.StoreId)
 	}
 
 	bytesBuf := local.bufferPool.NewBuffer()
@@ -819,43 +829,57 @@
 	pairs := make([]*sst.Pair, 0, local.batchWriteKVPairs)
 	count := 0
 	size := int64(0)
+	totalSize := int64(0)
 	totalCount := int64(0)
-	firstLoop := true
 	// if region-split-size <= 96MiB, we bump the threshold a bit to avoid too many retry split
 	// because the range-properties is not 100% accurate
 	regionMaxSize := regionSplitSize
 	if regionSplitSize <= int64(config.SplitRegionSize) {
 		regionMaxSize = regionSplitSize * 4 / 3
 	}
+	// Set a lower flush limit to make the write speed smoother.
+	flushLimit := int64(local.writeLimiter.Limit() / 10)
+
+	flushKVs := func() error {
+		for i := range clients {
+			if err := local.writeLimiter.WaitN(ctx, storeIDs[i], int(size)); err != nil {
+				return errors.Trace(err)
+			}
+			requests[i].Chunk.(*sst.WriteRequest_Batch).Batch.Pairs = pairs[:count]
+			if err := clients[i].Send(requests[i]); err != nil {
+				return errors.Trace(err)
+			}
+		}
+		return nil
+	}
 
 	for iter.First(); iter.Valid(); iter.Next() {
-		size += int64(len(iter.Key()) + len(iter.Value()))
+		kvSize := int64(len(iter.Key()) + len(iter.Value()))
 		// here we reuse the `*sst.Pair`s to optimize object allocation
-		if firstLoop {
+		if count < len(pairs) {
+			pairs[count].Key = bytesBuf.AddBytes(iter.Key())
+			pairs[count].Value = bytesBuf.AddBytes(iter.Value())
+		} else {
 			pair := &sst.Pair{
 				Key:   bytesBuf.AddBytes(iter.Key()),
 				Value: bytesBuf.AddBytes(iter.Value()),
 			}
 			pairs = append(pairs, pair)
-		} else {
-			pairs[count].Key = bytesBuf.AddBytes(iter.Key())
-			pairs[count].Value = bytesBuf.AddBytes(iter.Value())
 		}
 		count++
 		totalCount++
+		size += kvSize
+		totalSize += kvSize
 
-		if count >= local.batchWriteKVPairs {
-			for i := range clients {
-				requests[i].Chunk.(*sst.WriteRequest_Batch).Batch.Pairs = pairs[:count]
-				if err := clients[i].Send(requests[i]); err != nil {
-					return nil, Range{}, stats, errors.Trace(err)
-				}
+		if count >= local.batchWriteKVPairs || size >= flushLimit {
+			if err := flushKVs(); err != nil {
+				return nil, Range{}, stats, err
 			}
 			count = 0
+			size = 0
 			bytesBuf.Reset()
-			firstLoop = false
 		}
-		if size >= regionMaxSize || totalCount >= regionSplitKeys {
+		if totalSize >= regionMaxSize || totalCount >= regionSplitKeys {
 			break
 		}
 	}
@@ -865,12 +889,12 @@
 	}
 
 	if count > 0 {
-		for i := range clients {
-			requests[i].Chunk.(*sst.WriteRequest_Batch).Batch.Pairs = pairs[:count]
-			if err := clients[i].Send(requests[i]); err != nil {
-				return nil, Range{}, stats, errors.Trace(err)
-			}
+		if err := flushKVs(); err != nil {
+			return nil, Range{}, stats, err
 		}
+		count = 0
+		size = 0
+		bytesBuf.Reset()
 	}
 
 	var leaderPeerMetas []*sst.SSTMeta
@@ -913,7 +937,7 @@
 			logutil.Region(region.Region), logutil.Leader(region.Leader))
 	}
 	stats.count = totalCount
-	stats.totalBytes = size
+	stats.totalBytes = totalSize
 
 	return leaderPeerMetas, finishedRange, stats, nil
 }
diff --git a/br/pkg/lightning/backend/local/localhelper.go b/br/pkg/lightning/backend/local/localhelper.go
index 98413b20e71e0..c4aaae30db37b 100644
--- a/br/pkg/lightning/backend/local/localhelper.go
+++ b/br/pkg/lightning/backend/local/localhelper.go
@@ -18,6 +18,7 @@ import (
 	"bytes"
 	"context"
 	"database/sql"
+	"math"
 	"regexp"
 	"runtime"
 	"sort"
@@ -40,6 +41,7 @@ import (
 	"go.uber.org/multierr"
 	"go.uber.org/zap"
 	"golang.org/x/sync/errgroup"
+	"golang.org/x/time/rate"
 )
 
 const (
@@ -592,3 +594,75 @@ func intersectRange(region *metapb.Region, rg Range) Range {
 
 	return Range{start: startKey, end: endKey}
 }
+
+type StoreWriteLimiter interface {
+	WaitN(ctx context.Context, storeID uint64, n int) error
+	Limit() int
+}
+
+type storeWriteLimiter struct {
+	rwm      sync.RWMutex
+	limiters map[uint64]*rate.Limiter
+	limit    int
+	burst    int
+}
+
+func newStoreWriteLimiter(limit int) *storeWriteLimiter {
+	var burst int
+	// Allow a burst of up to 20% above the limit.
+	if limit <= math.MaxInt-limit/5 {
+		burst = limit + limit/5
+	} else {
+		// If the addition overflows, set burst to math.MaxInt.
+		burst = math.MaxInt
+	}
+	return &storeWriteLimiter{
+		limiters: make(map[uint64]*rate.Limiter),
+		limit:    limit,
+		burst:    burst,
+	}
+}
+
+func (s *storeWriteLimiter) WaitN(ctx context.Context, storeID uint64, n int) error {
+	limiter := s.getLimiter(storeID)
+	// The underlying WaitN doesn't allow n > burst,
+	// so we call WaitN with at most burst tokens at a time.
+	for n > limiter.Burst() {
+		if err := limiter.WaitN(ctx, limiter.Burst()); err != nil {
+			return err
+		}
+		n -= limiter.Burst()
+	}
+	return limiter.WaitN(ctx, n)
+}
+
+func (s *storeWriteLimiter) Limit() int {
+	return s.limit
+}
+
+func (s *storeWriteLimiter) getLimiter(storeID uint64) *rate.Limiter {
+	s.rwm.RLock()
+	limiter, ok := s.limiters[storeID]
+	s.rwm.RUnlock()
+	if ok {
+		return limiter
+	}
+	s.rwm.Lock()
+	defer s.rwm.Unlock()
+	limiter, ok = s.limiters[storeID]
+	if !ok {
+		limiter = rate.NewLimiter(rate.Limit(s.limit), s.burst)
+		s.limiters[storeID] = limiter
+	}
+	return limiter
+}
+
+type noopStoreWriteLimiter struct{}
+
+func (noopStoreWriteLimiter) WaitN(ctx context.Context, storeID uint64, n int) error {
+	return nil
+}
+
+func (noopStoreWriteLimiter) Limit() int {
+	return math.MaxInt
+}
diff --git a/br/pkg/lightning/backend/local/localhelper_test.go b/br/pkg/lightning/backend/local/localhelper_test.go
index 48ce64da5e3b6..767829e9c857f 100644
--- a/br/pkg/lightning/backend/local/localhelper_test.go
+++ b/br/pkg/lightning/backend/local/localhelper_test.go
@@ -770,3 +770,46 @@ func TestNeedSplit(t *testing.T) {
 		}
 	}
 }
+
+func TestStoreWriteLimiter(t *testing.T) {
+	// Test creating a store write limiter with limit math.MaxInt.
+	limiter := newStoreWriteLimiter(math.MaxInt)
+	err := limiter.WaitN(context.Background(), 1, 1024)
+	require.NoError(t, err)
+
+	// Test WaitN with n exceeding the burst.
+	limiter = newStoreWriteLimiter(100)
+	start := time.Now()
+	// 120 is the initial burst; refilling the remaining 120 tokens at 100 tokens/s takes over a second.
+	err = limiter.WaitN(context.Background(), 1, 120+120)
+	require.NoError(t, err)
+	require.Greater(t, time.Since(start), time.Second)
+
+	// Test WaitN with different store IDs.
+	limiter = newStoreWriteLimiter(100)
+	var wg sync.WaitGroup
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
+	defer cancel()
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func(storeID uint64) {
+			defer wg.Done()
+			start := time.Now()
+			var gotTokens int
+			for {
+				n := rand.Intn(50)
+				if limiter.WaitN(ctx, storeID, n) != nil {
+					break
+				}
+				gotTokens += n
+			}
+			elapsed := time.Since(start)
+			maxTokens := 120 + int(float64(elapsed)/float64(time.Second)*100)
+			// In theory, gotTokens should be less than or equal to maxTokens.
+			// But we allow a small error to avoid making the test flaky.
+			require.LessOrEqual(t, gotTokens, maxTokens+1)
+
+		}(uint64(i))
+	}
+	wg.Wait()
+}
diff --git a/br/pkg/lightning/config/config.go b/br/pkg/lightning/config/config.go
index fee2aaf29deb2..b0ffe32fa3cd5 100644
--- a/br/pkg/lightning/config/config.go
+++ b/br/pkg/lightning/config/config.go
@@ -532,6 +532,7 @@ type TikvImporter struct {
 
 	EngineMemCacheSize      ByteSize `toml:"engine-mem-cache-size" json:"engine-mem-cache-size"`
 	LocalWriterMemCacheSize ByteSize `toml:"local-writer-mem-cache-size" json:"local-writer-mem-cache-size"`
+	StoreWriteBWLimit       ByteSize `toml:"store-write-bwlimit" json:"store-write-bwlimit"`
 }
 
 type Checkpoint struct {
diff --git a/br/tests/lightning_write_limit/config.toml b/br/tests/lightning_write_limit/config.toml
new file mode 100644
index 0000000000000..e45e694126964
--- /dev/null
+++ b/br/tests/lightning_write_limit/config.toml
@@ -0,0 +1,5 @@
+[tikv-importer]
+store-write-bwlimit = "1Mi"
+
+[mydumper.csv]
+header = false
diff --git a/br/tests/lightning_write_limit/run.sh b/br/tests/lightning_write_limit/run.sh
new file mode 100644
index 0000000000000..b48d34e79a58d
--- /dev/null
+++ b/br/tests/lightning_write_limit/run.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+# Copyright 2022 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eux
+
+mkdir -p "$TEST_DIR/data"
+
+cat <<EOF >"$TEST_DIR/data/test-schema-create.sql"
+CREATE DATABASE test;
+EOF
+cat <<EOF >"$TEST_DIR/data/test.t-schema.sql"
+CREATE TABLE test.t (
+  id int,
+  a int,
+  b int,
+  c int
+);
+EOF
+
+# Generate 200k rows. Total size is about 5MiB.
+set +x
+for i in {1..200000}; do
+  echo "$i,$i,$i,$i" >>"$TEST_DIR/data/test.t.0.csv"
+done
+set -x
+
+start=$(date +%s)
+run_lightning --backend local -d "$TEST_DIR/data" --config "tests/$TEST_NAME/config.toml"
+end=$(date +%s)
+take=$((end - start))
+
+# The encoded KV size is about 10MiB. With the 1MiB/s write limit it should take more than 10s.
+if [ $take -lt 10 ]; then
+  echo "Lightning runs too fast. The write limiter doesn't work."
+  exit 1
+fi
diff --git a/br/tidb-lightning.toml b/br/tidb-lightning.toml
index 8840eba06bb1d..a33eb46500104 100644
--- a/br/tidb-lightning.toml
+++ b/br/tidb-lightning.toml
@@ -136,6 +136,8 @@ addr = "127.0.0.1:8287"
 # The memory cache used in for local sorting during the encode-KV phase before flushing into the engines. The memory
 # usage is bound by region-concurrency * local-writer-mem-cache-size.
 #local-writer-mem-cache-size = '128MiB'
+# Limit the write bandwidth to each TiKV store. The unit is bytes per second. 0 means no limit.
+#store-write-bwlimit = 0
 
 [mydumper]
 # block size of file reading
diff --git a/errno/errcode.go b/errno/errcode.go
index 0d80810516897..a26a1a1eaea6e 100644
--- a/errno/errcode.go
+++ b/errno/errcode.go
@@ -1023,6 +1023,8 @@ const (
 	ErrAssertionFailed            = 8141
 	ErrInstanceScope              = 8142
 	ErrNonTransactionalJobFailure = 8143
+	ErrSettingNoopVariable        = 8144
+	ErrGettingNoopVariable        = 8145
 
 	// Error codes used by TiDB ddl package
 	ErrUnsupportedDDLOperation = 8200
diff --git a/errno/errname.go b/errno/errname.go
index e14ddbe22ee2f..58866b7564cd0 100644
--- a/errno/errname.go
+++ b/errno/errname.go
@@ -1018,6 +1018,8 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{
 	ErrAssertionFailed:            mysql.Message("assertion failed: key: %s, assertion: %s, start_ts: %v, existing start ts: %v, existing commit ts: %v", []int{0}),
 	ErrInstanceScope:              mysql.Message("modifying %s will require SET GLOBAL in a future version of TiDB", nil),
 	ErrNonTransactionalJobFailure: mysql.Message("non-transactional job failed, job id: %d, total jobs: %d. job range: [%s, %s], job sql: %s, err: %v", []int{2, 3, 4}),
+	ErrSettingNoopVariable:        mysql.Message("setting %s has no effect in TiDB", nil),
+	ErrGettingNoopVariable:        mysql.Message("variable %s has no effect in TiDB", nil),
 
 	ErrWarnOptimizerHintInvalidInteger:  mysql.Message("integer value is out of range in '%s'", nil),
 	ErrWarnOptimizerHintUnsupportedHint: mysql.Message("Optimizer hint %s is not supported by TiDB and is ignored", nil),
diff --git a/errors.toml b/errors.toml
index 6ccebe76a9b46..11518f664cda6 100755
--- a/errors.toml
+++ b/errors.toml
@@ -1456,6 +1456,11 @@ error = '''
 modifying %s will require SET GLOBAL in a future version of TiDB
 '''
 
+["executor:8144"]
+error = '''
+setting %s has no effect in TiDB
+'''
+
 ["executor:8212"]
 error = '''
 Failed to split region ranges: %s
 '''
@@ -2076,6 +2081,11 @@ error = '''
 Column '%s' in ANALYZE column option does not exist in table '%s'
 '''
 
+["planner:8145"]
+error = '''
+variable %s has no effect in TiDB
+'''
+
 ["planner:8242"]
 error = '''
 '%s' is unsupported on cache tables.
 '''
diff --git a/executor/errors.go b/executor/errors.go
index 7551430e6901f..c65962f490f9c 100644
--- a/executor/errors.go
+++ b/executor/errors.go
@@ -55,6 +55,7 @@ var (
 	ErrInvalidSplitRegionRanges = dbterror.ClassExecutor.NewStd(mysql.ErrInvalidSplitRegionRanges)
 	ErrViewInvalid              = dbterror.ClassExecutor.NewStd(mysql.ErrViewInvalid)
 	ErrInstanceScope            = dbterror.ClassExecutor.NewStd(mysql.ErrInstanceScope)
+	ErrSettingNoopVariable      = dbterror.ClassExecutor.NewStd(mysql.ErrSettingNoopVariable)
 
 	ErrBRIEBackupFailed  = dbterror.ClassExecutor.NewStd(mysql.ErrBRIEBackupFailed)
 	ErrBRIERestoreFailed = dbterror.ClassExecutor.NewStd(mysql.ErrBRIERestoreFailed)
diff --git a/executor/set.go b/executor/set.go
index a79055abb5dbe..df0868e45f875 100644
--- a/executor/set.go
+++ b/executor/set.go
@@ -115,7 +115,12 @@ func (e *SetExecutor) setSysVariable(ctx context.Context, name string, v *expres
 		}
 		return variable.ErrUnknownSystemVar.GenWithStackByArgs(name)
 	}
-
+	if sysVar.IsNoop && !variable.EnableNoopVariables.Load() {
+		// The variable is a noop. For compatibility we still allow it to be
+		// changed, but we append a warning since users might be expecting
+		// something that's not going to happen.
+		sessionVars.StmtCtx.AppendWarning(ErrSettingNoopVariable.GenWithStackByArgs(sysVar.Name))
+	}
 	if sysVar.HasInstanceScope() && !v.IsGlobal && sessionVars.EnableLegacyInstanceScope {
 		// For backward compatibility we will change the v.IsGlobal to true,
 		// and append a warning saying this will not be supported in future.
diff --git a/executor/set_test.go b/executor/set_test.go
index 8408af7ce75e4..eb171e872d8c4 100644
--- a/executor/set_test.go
+++ b/executor/set_test.go
@@ -736,6 +736,31 @@ func TestSetVar(t *testing.T) {
 	tk.MustQuery("select @@tidb_cost_model_version").Check(testkit.Rows("2"))
 }
 
+func TestGetSetNoopVars(t *testing.T) {
+	store, clean := testkit.CreateMockStore(t)
+	defer clean()
+	tk := testkit.NewTestKit(t, store)
+
+	// By default you can get/set noop sysvars without issue.
+	tk.MustQuery("SELECT @@query_cache_type").Check(testkit.Rows("OFF"))
+	tk.MustQuery("SHOW VARIABLES LIKE 'query_cache_type'").Check(testkit.Rows("query_cache_type OFF"))
+	tk.MustExec("SET query_cache_type=2")
+	tk.MustQuery("SELECT @@query_cache_type").Check(testkit.Rows("DEMAND"))
+	// When tidb_enable_noop_variables is OFF, you can still GET in the @@ context
+	// and always SET, but the variable is hidden from SHOW VARIABLES.
+	// Warnings are also returned.
+	tk.MustExec("SET GLOBAL tidb_enable_noop_variables = OFF")
+	defer tk.MustExec("SET GLOBAL tidb_enable_noop_variables = ON")
+	tk.MustQuery("SELECT @@global.tidb_enable_noop_variables").Check(testkit.Rows("OFF"))
+	tk.MustQuery("SELECT @@query_cache_type").Check(testkit.Rows("DEMAND"))
+	tk.MustQuery("SHOW WARNINGS").Check(testkit.Rows("Warning 8145 variable query_cache_type has no effect in TiDB"))
+	tk.MustQuery("SHOW VARIABLES LIKE 'query_cache_type'").Check(testkit.Rows())
+	tk.MustExec("SET query_cache_type = OFF")
+	tk.MustQuery("SHOW WARNINGS").Check(testkit.Rows("Warning 8144 setting query_cache_type has no effect in TiDB"))
+	// But the change is still effective.
+	tk.MustQuery("SELECT @@query_cache_type").Check(testkit.Rows("OFF"))
+}
+
 func TestTruncateIncorrectIntSessionVar(t *testing.T) {
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
diff --git a/executor/show.go b/executor/show.go
index 4b6d35d8ef187..9075444fd53f4 100644
--- a/executor/show.go
+++ b/executor/show.go
@@ -820,6 +820,9 @@ func (e *ShowExec) fetchShowVariables() (err error) {
 		// otherwise, fetch the value from table `mysql.Global_Variables`.
 		for _, v := range variable.GetSysVars() {
 			if v.Scope != variable.ScopeSession {
+				if v.IsNoop && !variable.EnableNoopVariables.Load() {
+					continue
+				}
 				if fieldFilter != "" && v.Name != fieldFilter {
 					continue
 				} else if fieldPatternsLike != nil && !fieldPatternsLike.DoMatch(v.Name) {
@@ -842,6 +845,9 @@
 	// If it is a session only variable, use the default value defined in code,
 	// otherwise, fetch the value from table `mysql.Global_Variables`.
 	for _, v := range variable.GetSysVars() {
+		if v.IsNoop && !variable.EnableNoopVariables.Load() {
+			continue
+		}
 		if fieldFilter != "" && v.Name != fieldFilter {
 			continue
 		} else if fieldPatternsLike != nil && !fieldPatternsLike.DoMatch(v.Name) {
diff --git a/planner/core/errors.go b/planner/core/errors.go
index 7182702e9d06a..84b92e39ef014 100644
--- a/planner/core/errors.go
+++ b/planner/core/errors.go
@@ -109,4 +109,5 @@ var (
 	ErrViewSelectTemporaryTable = dbterror.ClassOptimizer.NewStd(mysql.ErrViewSelectTmptable)
 	ErrSubqueryMoreThan1Row     = dbterror.ClassOptimizer.NewStd(mysql.ErrSubqueryNo1Row)
 	ErrKeyPart0                 = dbterror.ClassOptimizer.NewStd(mysql.ErrKeyPart0)
+	ErrGettingNoopVariable      = dbterror.ClassOptimizer.NewStd(mysql.ErrGettingNoopVariable)
 )
diff --git a/planner/core/expression_rewriter.go b/planner/core/expression_rewriter.go
index 253fb98dd2a9d..f80ea20ad07c9 100644
--- a/planner/core/expression_rewriter.go
+++ b/planner/core/expression_rewriter.go
@@ -1297,6 +1297,10 @@ func (er *expressionRewriter) rewriteVariable(v *ast.VariableExpr) {
 		}
 		return
 	}
+	if sysVar.IsNoop && !variable.EnableNoopVariables.Load() {
+		// The variable does nothing; append a warning to the statement output.
+		sessionVars.StmtCtx.AppendWarning(ErrGettingNoopVariable.GenWithStackByArgs(sysVar.Name))
+	}
 	if sem.IsEnabled() && sem.IsInvisibleSysVar(sysVar.Name) {
 		err := ErrSpecificAccessDenied.GenWithStackByArgs("RESTRICTED_VARIABLES_ADMIN")
 		er.b.visitInfo = appendDynamicVisitInfo(er.b.visitInfo, "RESTRICTED_VARIABLES_ADMIN", false, err)
diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go
index 816757ea46539..e2a36568740eb 100644
--- a/sessionctx/variable/sysvar.go
+++ b/sessionctx/variable/sysvar.go
@@ -800,6 +800,12 @@ var defaultSysVars = []*SysVar{
 	}, GetGlobal: func(s *SessionVars) (string, error) {
 		return BoolToOnOff(EnableConcurrentDDL.Load()), nil
 	}},
+	{Scope: ScopeGlobal, Name: TiDBEnableNoopVariables, Value: BoolToOnOff(DefTiDBEnableNoopVariables), Type: TypeEnum, PossibleValues: []string{Off, On, Warn}, SetGlobal: func(s *SessionVars, val string) error {
+		EnableNoopVariables.Store(TiDBOptOn(val))
+		return nil
+	}, GetGlobal: func(s *SessionVars) (string, error) {
+		return BoolToOnOff(EnableNoopVariables.Load()), nil
+	}},
 
 	/* The system variables below have GLOBAL and SESSION scope */
 	{Scope: ScopeGlobal | ScopeSession, Name: SQLSelectLimit, Value: "18446744073709551615", Type: TypeUnsigned, MinValue: 0, MaxValue: math.MaxUint64, SetSession: func(s *SessionVars, val string) error {
diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go
index 64c1916292c73..03eca96b0e20c 100644
--- a/sessionctx/variable/tidb_vars.go
+++ b/sessionctx/variable/tidb_vars.go
@@ -670,6 +670,9 @@ const (
 	// TiDBQueryLogMaxLen is used to set the max length of the query in the log.
 	TiDBQueryLogMaxLen = "tidb_query_log_max_len"
 
+	// TiDBEnableNoopVariables is used to indicate whether noop variables appear in SHOW [GLOBAL] VARIABLES.
+	TiDBEnableNoopVariables = "tidb_enable_noop_variables"
+
 	// TiDBNonTransactionalIgnoreError is used to ignore error in non-transactional DMLs.
 	// When set to false, a non-transactional DML returns when it meets the first error.
 	// When set to true, a non-transactional DML finishes all batches even if errors are met in some batches.
@@ -855,6 +858,7 @@ const (
 	DefTiDBWaitSplitRegionFinish = true
 	DefWaitSplitRegionTimeout    = 300 // 300s
 	DefTiDBEnableNoopFuncs       = Off
+	DefTiDBEnableNoopVariables   = true
 	DefTiDBAllowRemoveAutoInc    = false
 	DefTiDBUsePlanBaselines      = true
 	DefTiDBEvolvePlanBaselines   = false
@@ -985,6 +989,7 @@ var (
 	PreparedPlanCacheSize             = atomic.NewUint64(DefTiDBPrepPlanCacheSize)
 	PreparedPlanCacheMemoryGuardRatio = atomic.NewFloat64(DefTiDBPrepPlanCacheMemoryGuardRatio)
 	EnableConcurrentDDL               = atomic.NewBool(DefTiDBEnableConcurrentDDL)
+	EnableNoopVariables               = atomic.NewBool(DefTiDBEnableNoopVariables)
 )
 
 var (
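For readers unfamiliar with golang.org/x/time/rate: the chunked loop in the new storeWriteLimiter.WaitN above exists because rate.Limiter.WaitN rejects any single request larger than the limiter's burst. The following standalone sketch is not part of the patch; the waitChunked helper is illustrative only, but it uses the same 100-per-second limit and 120-token burst shape that newStoreWriteLimiter(100) produces.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// waitChunked consumes n tokens from l, never asking for more than the
// burst in a single call, mirroring the loop in storeWriteLimiter.WaitN.
func waitChunked(ctx context.Context, l *rate.Limiter, n int) error {
	for n > l.Burst() {
		if err := l.WaitN(ctx, l.Burst()); err != nil {
			return err
		}
		n -= l.Burst()
	}
	return l.WaitN(ctx, n)
}

func main() {
	// 100 tokens/s with a burst of 120, matching newStoreWriteLimiter(100).
	l := rate.NewLimiter(rate.Limit(100), 120)
	ctx := context.Background()

	// A single WaitN larger than the burst fails immediately.
	fmt.Println(l.WaitN(ctx, 240)) // error: n exceeds the limiter's burst

	// Chunked waiting succeeds: the first 120 tokens come from the initial
	// burst, and the remaining 120 are refilled at 100 tokens/s, so this
	// takes a bit over one second.
	start := time.Now()
	if err := waitChunked(ctx, l, 240); err != nil {
		panic(err)
	}
	fmt.Println("elapsed:", time.Since(start))
}
```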