From 8946074cf3a791242bc58795935cc62fb186f8da Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Thu, 16 Jun 2022 11:30:45 +0800 Subject: [PATCH 01/21] config, sysvar: map config `run-ddl` to sysvar `enable_ddl` --- br/cmd/br/backup.go | 4 ++-- cmd/ddltest/ddl_test.go | 4 ++-- config/config.go | 11 +++++++---- config/config.toml.example | 6 +++--- config/config_test.go | 22 +++++++++++++++------- ddl/ddl.go | 8 ++++---- ddl/ddl_worker.go | 2 -- ddl/restart_test.go | 3 ++- sessionctx/variable/sysvar.go | 11 +++++++++++ tidb-server/main.go | 5 +++-- 10 files changed, 49 insertions(+), 27 deletions(-) diff --git a/br/cmd/br/backup.go b/br/cmd/br/backup.go index 942fd8d4a46db..0b44b26f78e90 100644 --- a/br/cmd/br/backup.go +++ b/br/cmd/br/backup.go @@ -11,7 +11,7 @@ import ( "github.com/pingcap/tidb/br/pkg/trace" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/br/pkg/version/build" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/session" "github.com/spf13/cobra" "go.uber.org/zap" @@ -78,7 +78,7 @@ func NewBackupCommand() *cobra.Command { task.LogArguments(c) // Do not run ddl worker in BR. - ddl.RunWorker = false + config.GetGlobalConfig().Instance.EnableDDL.Store(false) summary.SetUnit(summary.BackupUnit) return nil diff --git a/cmd/ddltest/ddl_test.go b/cmd/ddltest/ddl_test.go index bbc16fbc983a6..202b640d58c1e 100644 --- a/cmd/ddltest/ddl_test.go +++ b/cmd/ddltest/ddl_test.go @@ -33,7 +33,7 @@ import ( _ "github.com/go-sql-driver/mysql" "github.com/pingcap/errors" "github.com/pingcap/log" - "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" @@ -116,7 +116,7 @@ func createDDLSuite(t *testing.T) (s *ddlSuite) { // Stop current DDL worker, so that we can't be the owner now. err = domain.GetDomain(s.ctx).DDL().Stop() require.NoError(t, err) - ddl.RunWorker = false + config.GetGlobalConfig().Instance.EnableDDL.Store(false) session.ResetStoreForWithTiKVTest(s.store) s.dom.Close() require.NoError(t, s.store.Close()) diff --git a/config/config.go b/config/config.go index a701b1501bf15..bfbcb4a89d174 100644 --- a/config/config.go +++ b/config/config.go @@ -117,6 +117,7 @@ var ( map[string]string{ "check-mb4-value-in-utf8": "tidb_check_mb4_value_in_utf8", "enable-collect-execution-info": "tidb_enable_collect_execution_info", + "run-ddl": "enable_ddl", }, }, { @@ -470,9 +471,10 @@ type Instance struct { ForcePriority string `toml:"tidb_force_priority" json:"tidb_force_priority"` MemoryUsageAlarmRatio float64 `toml:"tidb_memory_usage_alarm_ratio" json:"tidb_memory_usage_alarm_ratio"` // EnableCollectExecutionInfo enables the TiDB to collect execution info. 
- EnableCollectExecutionInfo bool `toml:"tidb_enable_collect_execution_info" json:"tidb_enable_collect_execution_info"` - PluginDir string `toml:"plugin_dir" json:"plugin_dir"` - PluginLoad string `toml:"plugin_load" json:"plugin_load"` + EnableCollectExecutionInfo bool `toml:"tidb_enable_collect_execution_info" json:"tidb_enable_collect_execution_info"` + PluginDir string `toml:"plugin_dir" json:"plugin_dir"` + PluginLoad string `toml:"plugin_load" json:"plugin_load"` + EnableDDL AtomicBool `toml:"enable_ddl" json:"enable_ddl"` } func (l *Log) getDisableTimestamp() bool { @@ -824,6 +826,7 @@ var defaultConf = Config{ EnableCollectExecutionInfo: true, PluginDir: "/data/deploy/plugin", PluginLoad: "", + EnableDDL: *NewAtomicBool(true), }, Status: Status{ ReportStatus: true, @@ -1158,7 +1161,7 @@ func (c *Config) Valid() error { } return fmt.Errorf("invalid store=%s, valid storages=%v", c.Store, nameList) } - if c.Store == "mocktikv" && !c.RunDDL { + if c.Store == "mocktikv" && !c.Instance.EnableDDL.Load() { return fmt.Errorf("can't disable DDL on mocktikv") } if c.MaxIndexLength < DefMaxIndexLength || c.MaxIndexLength > DefMaxOfMaxIndexLength { diff --git a/config/config.toml.example b/config/config.toml.example index e23f390e23efb..c1ebb82dc2f4c 100644 --- a/config/config.toml.example +++ b/config/config.toml.example @@ -18,9 +18,6 @@ path = "/tmp/tidb" # The socket file to use for connection. socket = "/tmp/tidb-{Port}.sock" -# Run ddl worker on this tidb-server. -run-ddl = true - # Schema lease duration, very dangerous to change only if you know what you do. lease = "45s" @@ -465,3 +462,6 @@ tidb_slow_log_threshold = 300 # tidb_record_plan_in_slow_log is used to enable record query plan in slow log. # 0 is disable. 1 is enable. tidb_record_plan_in_slow_log = 1 + +# Run ddl worker on this tidb-server. +enable_ddl = true diff --git a/config/config_test.go b/config/config_test.go index 391bd874d3942..598ecc04f6b88 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -196,9 +196,6 @@ path = "/tmp/tidb" # The socket file to use for connection. socket = "/tmp/tidb-{Port}.sock" -# Run ddl worker on this tidb-server. -run-ddl = true - # Schema lease duration, very dangerous to change only if you know what you do. lease = "45s" @@ -309,6 +306,11 @@ deprecate-integer-display-length = false # See https://dev.mysql.com/doc/refman/8.0/en/string-type-syntax.html for more details. enable-enum-length-limit = true +[instance] + +# Run ddl worker on this tidb-server. +enable_ddl = true + [log] # Log level: debug, info, warn, error, fatal. level = "info" @@ -1020,7 +1022,10 @@ func TestConflictInstanceConfig(t *testing.T) { // Just receive a warning and keep their respective values. 
expectedConflictOptions := map[string]InstanceConfigSection{ "": { - "", map[string]string{"check-mb4-value-in-utf8": "tidb_check_mb4_value_in_utf8"}, + "", map[string]string{ + "check-mb4-value-in-utf8": "tidb_check_mb4_value_in_utf8", + "run-ddl": "enable_ddl", + }, }, "log": { "log", map[string]string{"enable-slow-log": "tidb_enable_slow_log"}, @@ -1029,10 +1034,10 @@ func TestConflictInstanceConfig(t *testing.T) { "performance", map[string]string{"force-priority": "tidb_force_priority"}, }, } - _, err = f.WriteString("check-mb4-value-in-utf8 = true \n" + + _, err = f.WriteString("check-mb4-value-in-utf8 = true \nrun-ddl = true \n" + "[log] \nenable-slow-log = true \n" + "[performance] \nforce-priority = \"NO_PRIORITY\"\n" + - "[instance] \ntidb_check_mb4_value_in_utf8 = false \ntidb_enable_slow_log = false \ntidb_force_priority = \"LOW_PRIORITY\"") + "[instance] \ntidb_check_mb4_value_in_utf8 = false \ntidb_enable_slow_log = false \ntidb_force_priority = \"LOW_PRIORITY\"\nenable_ddl = false") require.NoError(t, err) require.NoError(t, f.Sync()) err = conf.Load(configFile) @@ -1044,6 +1049,8 @@ func TestConflictInstanceConfig(t *testing.T) { require.Equal(t, false, conf.Instance.EnableSlowLog.Load()) require.Equal(t, "NO_PRIORITY", conf.Performance.ForcePriority) require.Equal(t, "LOW_PRIORITY", conf.Instance.ForcePriority) + require.Equal(t, true, conf.RunDDL) + require.Equal(t, false, conf.Instance.EnableDDL.Load()) require.Equal(t, 0, len(DeprecatedOptions)) for _, conflictOption := range ConflictOptions { expectedConflictOption, ok := expectedConflictOptions[conflictOption.SectionName] @@ -1073,6 +1080,7 @@ func TestDeprecatedConfig(t *testing.T) { "": { "", map[string]string{ "enable-collect-execution-info": "tidb_enable_collect_execution_info", + "run-ddl": "enable_ddl", }, }, "log": { @@ -1088,7 +1096,7 @@ func TestDeprecatedConfig(t *testing.T) { }, }, } - _, err = f.WriteString("enable-collect-execution-info = false \n" + + _, err = f.WriteString("enable-collect-execution-info = false \nrun-ddl = false \n" + "[plugin] \ndir=\"/plugin-path\" \nload=\"audit-1,whitelist-1\" \n" + "[log] \nslow-threshold = 100 \n" + "[performance] \nmemory-usage-alarm-ratio = 0.5") diff --git a/ddl/ddl.go b/ddl/ddl.go index 0893dc4126881..516fd78e5a5f3 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -468,14 +468,14 @@ func (d *ddl) newDeleteRangeManager(mock bool) delRangeManager { // Start implements DDL.Start interface. func (d *ddl) Start(ctxPool *pools.ResourcePool) error { - logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", RunWorker)) + logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", config.GetGlobalConfig().Instance.EnableDDL.Load())) d.wg.Run(d.limitDDLJobs) d.sessPool = newSessionPool(ctxPool, d.store) - // If RunWorker is true, we need campaign owner and do DDL job. + // If enable_ddl is true, we need campaign owner and do DDL job. // Otherwise, we needn't do that. - if RunWorker { + if config.GetGlobalConfig().Instance.EnableDDL.Load() { err := d.ownerManager.CampaignOwner() if err != nil { return errors.Trace(err) @@ -661,7 +661,7 @@ func getJobCheckInterval(job *model.Job, i int) (time.Duration, bool) { func (d *ddl) asyncNotifyWorker(job *model.Job) { // If the workers don't run, we needn't notify workers. 
- if !RunWorker { + if !config.GetGlobalConfig().Instance.EnableDDL.Load() { return } var worker *worker diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index b7d5341fac9f1..9a860e1f0252d 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -48,8 +48,6 @@ import ( ) var ( - // RunWorker indicates if this TiDB server starts DDL worker and can run DDL job. - RunWorker = true // ddlWorkerID is used for generating the next DDL worker ID. ddlWorkerID = int32(0) // WaitTimeWhenErrorOccurred is waiting interval when processing DDL jobs encounter errors. diff --git a/ddl/restart_test.go b/ddl/restart_test.go index fc4334a2e1b85..84842ee45d253 100644 --- a/ddl/restart_test.go +++ b/ddl/restart_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" @@ -48,7 +49,7 @@ func (d *ddl) restartWorkers(ctx context.Context) { d.ctx, d.cancel = context.WithCancel(ctx) d.wg.Run(d.limitDDLJobs) - if !RunWorker { + if !config.GetGlobalConfig().Instance.EnableDDL.Load() { return } diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 06e206c98ab8a..388d842b1ae8e 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -415,6 +415,15 @@ var defaultSysVars = []*SysVar{ {Scope: ScopeInstance, Name: PluginDir, Value: "/data/deploy/plugin", ReadOnly: true, GetGlobal: func(s *SessionVars) (string, error) { return config.GetGlobalConfig().Instance.PluginDir, nil }}, + {Scope: ScopeInstance, Name: EnableDDL, Value: BoolToOnOff(config.GetGlobalConfig().Instance.EnableDDL.Load()), Type: TypeBool, + SetGlobal: func(s *SessionVars, val string) error { + config.GetGlobalConfig().Instance.EnableDDL.Store(TiDBOptOn(val)) + return nil + }, + GetGlobal: func(s *SessionVars) (string, error) { + return BoolToOnOff(config.GetGlobalConfig().Instance.EnableDDL.Load()), nil + }, + }, /* The system variables below have GLOBAL scope */ {Scope: ScopeGlobal, Name: MaxPreparedStmtCount, Value: strconv.FormatInt(DefMaxPreparedStmtCount, 10), Type: TypeInt, MinValue: -1, MaxValue: 1048576}, @@ -1769,6 +1778,8 @@ const ( PluginDir = "plugin_dir" // PluginLoad is the name of 'plugin_load' system variable. PluginLoad = "plugin_load" + // EnableDDL indicates whether the tidb-server runs DDL statements, + EnableDDL = "enable_ddl" // Port is the name for 'port' system variable. Port = "port" // DataDir is the name for 'datadir' system variable. 
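
The sysvar hunk above is the heart of this patch: the new `enable_ddl` variable holds no state of its own; both its getter and setter delegate to the AtomicBool kept in the global config, so the config file, the command-line flag, and SET GLOBAL all read and write a single source of truth. Below is a minimal, self-contained Go sketch of that delegation pattern; atomicBool and sysVar here are simplified stand-ins for TiDB's config.AtomicBool and variable.SysVar, not the actual definitions.

package main

import (
	"fmt"
	"sync/atomic"
)

// atomicBool is a simplified stand-in for config.AtomicBool.
type atomicBool struct{ v int32 }

func (b *atomicBool) Load() bool { return atomic.LoadInt32(&b.v) == 1 }

func (b *atomicBool) Store(x bool) {
	var v int32
	if x {
		v = 1
	}
	atomic.StoreInt32(&b.v, v)
}

// sysVar is a simplified stand-in for variable.SysVar: the variable is just a
// name plus a getter/setter pair that close over the config field.
type sysVar struct {
	name      string
	setGlobal func(val string) error
	getGlobal func() (string, error)
}

func main() {
	enableDDL := &atomicBool{v: 1} // config default: DDL worker enabled

	v := sysVar{
		name: "enable_ddl",
		setGlobal: func(val string) error {
			enableDDL.Store(val == "ON") // SET GLOBAL writes through to the config
			return nil
		},
		getGlobal: func() (string, error) {
			if enableDDL.Load() { // reads always reflect the live config value
				return "ON", nil
			}
			return "OFF", nil
		},
	}

	_ = v.setGlobal("OFF")
	val, _ := v.getGlobal()
	fmt.Println(v.name, "=", val) // enable_ddl = OFF
}
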
diff --git a/tidb-server/main.go b/tidb-server/main.go index 40dd3cacfd5a1..d61b1475aba29 100644 --- a/tidb-server/main.go +++ b/tidb-server/main.go @@ -446,7 +446,7 @@ func overrideConfig(cfg *config.Config) { cfg.Binlog.Enable = *enableBinlog } if actualFlags[nmRunDDL] { - cfg.RunDDL = *runDDL + cfg.Instance.EnableDDL.Store(*runDDL) } if actualFlags[nmDdlLease] { cfg.Lease = *ddlLease @@ -558,6 +558,8 @@ func setGlobalVars() { cfg.Instance.CheckMb4ValueInUTF8.Store(cfg.CheckMb4ValueInUTF8.Load()) case "enable-collect-execution-info": cfg.Instance.EnableCollectExecutionInfo = cfg.EnableCollectExecutionInfo + case "run-ddl": + cfg.Instance.EnableDDL.Store(cfg.RunDDL) } case "log": switch oldName { @@ -610,7 +612,6 @@ func setGlobalVars() { statistics.FeedbackProbability.Store(cfg.Performance.FeedbackProbability) statistics.MaxQueryFeedbackCount.Store(int64(cfg.Performance.QueryFeedbackLimit)) statistics.RatioOfPseudoEstimate.Store(cfg.Performance.PseudoEstimateRatio) - ddl.RunWorker = cfg.RunDDL if cfg.SplitTable { atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) } From cd2a367c6f668b0cf969a5e3a3e6c8af888aaa05 Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Thu, 16 Jun 2022 23:06:37 +0800 Subject: [PATCH 02/21] Rename: add prefix TiDBxxx and tidb_xxx. --- br/cmd/br/backup.go | 2 +- cmd/ddltest/ddl_test.go | 2 +- config/config.go | 8 ++++---- config/config.toml.example | 2 +- config/config_test.go | 10 +++++----- ddl/ddl.go | 8 ++++---- ddl/restart_test.go | 2 +- sessionctx/variable/sysvar.go | 10 +++++----- tidb-server/main.go | 4 ++-- 9 files changed, 24 insertions(+), 24 deletions(-) diff --git a/br/cmd/br/backup.go b/br/cmd/br/backup.go index 0b44b26f78e90..a6f973d26829c 100644 --- a/br/cmd/br/backup.go +++ b/br/cmd/br/backup.go @@ -78,7 +78,7 @@ func NewBackupCommand() *cobra.Command { task.LogArguments(c) // Do not run ddl worker in BR. - config.GetGlobalConfig().Instance.EnableDDL.Store(false) + config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(false) summary.SetUnit(summary.BackupUnit) return nil diff --git a/cmd/ddltest/ddl_test.go b/cmd/ddltest/ddl_test.go index 202b640d58c1e..63f4e96b44d89 100644 --- a/cmd/ddltest/ddl_test.go +++ b/cmd/ddltest/ddl_test.go @@ -116,7 +116,7 @@ func createDDLSuite(t *testing.T) (s *ddlSuite) { // Stop current DDL worker, so that we can't be the owner now. 
err = domain.GetDomain(s.ctx).DDL().Stop() require.NoError(t, err) - config.GetGlobalConfig().Instance.EnableDDL.Store(false) + config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(false) session.ResetStoreForWithTiKVTest(s.store) s.dom.Close() require.NoError(t, s.store.Close()) diff --git a/config/config.go b/config/config.go index bfbcb4a89d174..5835f7b9338e1 100644 --- a/config/config.go +++ b/config/config.go @@ -117,7 +117,7 @@ var ( map[string]string{ "check-mb4-value-in-utf8": "tidb_check_mb4_value_in_utf8", "enable-collect-execution-info": "tidb_enable_collect_execution_info", - "run-ddl": "enable_ddl", + "run-ddl": "tidb_enable_ddl", }, }, { @@ -474,7 +474,7 @@ type Instance struct { EnableCollectExecutionInfo bool `toml:"tidb_enable_collect_execution_info" json:"tidb_enable_collect_execution_info"` PluginDir string `toml:"plugin_dir" json:"plugin_dir"` PluginLoad string `toml:"plugin_load" json:"plugin_load"` - EnableDDL AtomicBool `toml:"enable_ddl" json:"enable_ddl"` + TiDBEnableDDL AtomicBool `toml:"tidb_enable_ddl" json:"tidb_enable_ddl"` } func (l *Log) getDisableTimestamp() bool { @@ -826,7 +826,7 @@ var defaultConf = Config{ EnableCollectExecutionInfo: true, PluginDir: "/data/deploy/plugin", PluginLoad: "", - EnableDDL: *NewAtomicBool(true), + TiDBEnableDDL: *NewAtomicBool(true), }, Status: Status{ ReportStatus: true, @@ -1161,7 +1161,7 @@ func (c *Config) Valid() error { } return fmt.Errorf("invalid store=%s, valid storages=%v", c.Store, nameList) } - if c.Store == "mocktikv" && !c.Instance.EnableDDL.Load() { + if c.Store == "mocktikv" && !c.Instance.TiDBEnableDDL.Load() { return fmt.Errorf("can't disable DDL on mocktikv") } if c.MaxIndexLength < DefMaxIndexLength || c.MaxIndexLength > DefMaxOfMaxIndexLength { diff --git a/config/config.toml.example b/config/config.toml.example index c1ebb82dc2f4c..6d52b67f0bb06 100644 --- a/config/config.toml.example +++ b/config/config.toml.example @@ -464,4 +464,4 @@ tidb_slow_log_threshold = 300 tidb_record_plan_in_slow_log = 1 # Run ddl worker on this tidb-server. -enable_ddl = true +tidb_enable_ddl = true diff --git a/config/config_test.go b/config/config_test.go index 598ecc04f6b88..8e62926ac0d4e 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -309,7 +309,7 @@ enable-enum-length-limit = true [instance] # Run ddl worker on this tidb-server. -enable_ddl = true +tidb_enable_ddl = true [log] # Log level: debug, info, warn, error, fatal. 
@@ -1024,7 +1024,7 @@ func TestConflictInstanceConfig(t *testing.T) { "": { "", map[string]string{ "check-mb4-value-in-utf8": "tidb_check_mb4_value_in_utf8", - "run-ddl": "enable_ddl", + "run-ddl": "tidb_enable_ddl", }, }, "log": { @@ -1037,7 +1037,7 @@ func TestConflictInstanceConfig(t *testing.T) { _, err = f.WriteString("check-mb4-value-in-utf8 = true \nrun-ddl = true \n" + "[log] \nenable-slow-log = true \n" + "[performance] \nforce-priority = \"NO_PRIORITY\"\n" + - "[instance] \ntidb_check_mb4_value_in_utf8 = false \ntidb_enable_slow_log = false \ntidb_force_priority = \"LOW_PRIORITY\"\nenable_ddl = false") + "[instance] \ntidb_check_mb4_value_in_utf8 = false \ntidb_enable_slow_log = false \ntidb_force_priority = \"LOW_PRIORITY\"\ntidb_enable_ddl = false") require.NoError(t, err) require.NoError(t, f.Sync()) err = conf.Load(configFile) @@ -1050,7 +1050,7 @@ func TestConflictInstanceConfig(t *testing.T) { require.Equal(t, "NO_PRIORITY", conf.Performance.ForcePriority) require.Equal(t, "LOW_PRIORITY", conf.Instance.ForcePriority) require.Equal(t, true, conf.RunDDL) - require.Equal(t, false, conf.Instance.EnableDDL.Load()) + require.Equal(t, false, conf.Instance.TiDBEnableDDL.Load()) require.Equal(t, 0, len(DeprecatedOptions)) for _, conflictOption := range ConflictOptions { expectedConflictOption, ok := expectedConflictOptions[conflictOption.SectionName] @@ -1080,7 +1080,7 @@ func TestDeprecatedConfig(t *testing.T) { "": { "", map[string]string{ "enable-collect-execution-info": "tidb_enable_collect_execution_info", - "run-ddl": "enable_ddl", + "run-ddl": "tidb_enable_ddl", }, }, "log": { diff --git a/ddl/ddl.go b/ddl/ddl.go index 516fd78e5a5f3..af91516930c1d 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -468,14 +468,14 @@ func (d *ddl) newDeleteRangeManager(mock bool) delRangeManager { // Start implements DDL.Start interface. func (d *ddl) Start(ctxPool *pools.ResourcePool) error { - logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", config.GetGlobalConfig().Instance.EnableDDL.Load())) + logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", config.GetGlobalConfig().Instance.TiDBEnableDDL.Load())) d.wg.Run(d.limitDDLJobs) d.sessPool = newSessionPool(ctxPool, d.store) - // If enable_ddl is true, we need campaign owner and do DDL job. + // If tidb_enable_ddl is true, we need campaign owner and do DDL job. // Otherwise, we needn't do that. - if config.GetGlobalConfig().Instance.EnableDDL.Load() { + if config.GetGlobalConfig().Instance.TiDBEnableDDL.Load() { err := d.ownerManager.CampaignOwner() if err != nil { return errors.Trace(err) @@ -661,7 +661,7 @@ func getJobCheckInterval(job *model.Job, i int) (time.Duration, bool) { func (d *ddl) asyncNotifyWorker(job *model.Job) { // If the workers don't run, we needn't notify workers. 
- if !config.GetGlobalConfig().Instance.EnableDDL.Load() { + if !config.GetGlobalConfig().Instance.TiDBEnableDDL.Load() { return } var worker *worker diff --git a/ddl/restart_test.go b/ddl/restart_test.go index 84842ee45d253..a4095184fa895 100644 --- a/ddl/restart_test.go +++ b/ddl/restart_test.go @@ -49,7 +49,7 @@ func (d *ddl) restartWorkers(ctx context.Context) { d.ctx, d.cancel = context.WithCancel(ctx) d.wg.Run(d.limitDDLJobs) - if !config.GetGlobalConfig().Instance.EnableDDL.Load() { + if !config.GetGlobalConfig().Instance.TiDBEnableDDL.Load() { return } diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 388d842b1ae8e..c8582beb282a7 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -415,13 +415,13 @@ var defaultSysVars = []*SysVar{ {Scope: ScopeInstance, Name: PluginDir, Value: "/data/deploy/plugin", ReadOnly: true, GetGlobal: func(s *SessionVars) (string, error) { return config.GetGlobalConfig().Instance.PluginDir, nil }}, - {Scope: ScopeInstance, Name: EnableDDL, Value: BoolToOnOff(config.GetGlobalConfig().Instance.EnableDDL.Load()), Type: TypeBool, + {Scope: ScopeInstance, Name: TiDBEnableDDL, Value: BoolToOnOff(config.GetGlobalConfig().Instance.TiDBEnableDDL.Load()), Type: TypeBool, SetGlobal: func(s *SessionVars, val string) error { - config.GetGlobalConfig().Instance.EnableDDL.Store(TiDBOptOn(val)) + config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(TiDBOptOn(val)) return nil }, GetGlobal: func(s *SessionVars) (string, error) { - return BoolToOnOff(config.GetGlobalConfig().Instance.EnableDDL.Load()), nil + return BoolToOnOff(config.GetGlobalConfig().Instance.TiDBEnableDDL.Load()), nil }, }, @@ -1778,8 +1778,8 @@ const ( PluginDir = "plugin_dir" // PluginLoad is the name of 'plugin_load' system variable. PluginLoad = "plugin_load" - // EnableDDL indicates whether the tidb-server runs DDL statements, - EnableDDL = "enable_ddl" + // TiDBEnableDDL indicates whether the tidb-server runs DDL statements, + TiDBEnableDDL = "tidb_enable_ddl" // Port is the name for 'port' system variable. Port = "port" // DataDir is the name for 'datadir' system variable. 
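
After this rename the variable is operated like any other instance-scoped system variable. The following hedged usage sketch toggles it over a MySQL-protocol connection; the DSN and address are placeholders, and it assumes a tidb-server built with this series (note that as of this patch the setter only flips the stored flag — actually starting/stopping the DDL worker on toggle arrives in the later commits).

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Placeholder DSN; adjust user, host, and port for your deployment.
	db, err := sql.Open("mysql", "root@tcp(127.0.0.1:4000)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Instance scope: this affects only the tidb-server we are connected to.
	if _, err := db.Exec("SET GLOBAL tidb_enable_ddl = 'OFF'"); err != nil {
		log.Fatal(err)
	}

	var name, value string
	row := db.QueryRow("SHOW VARIABLES LIKE 'tidb_enable_ddl'")
	if err := row.Scan(&name, &value); err != nil {
		log.Fatal(err)
	}
	log.Printf("%s = %s", name, value) // tidb_enable_ddl = OFF
}
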
diff --git a/tidb-server/main.go b/tidb-server/main.go index d61b1475aba29..33bddc7086c0d 100644 --- a/tidb-server/main.go +++ b/tidb-server/main.go @@ -446,7 +446,7 @@ func overrideConfig(cfg *config.Config) { cfg.Binlog.Enable = *enableBinlog } if actualFlags[nmRunDDL] { - cfg.Instance.EnableDDL.Store(*runDDL) + cfg.Instance.TiDBEnableDDL.Store(*runDDL) } if actualFlags[nmDdlLease] { cfg.Lease = *ddlLease @@ -559,7 +559,7 @@ func setGlobalVars() { case "enable-collect-execution-info": cfg.Instance.EnableCollectExecutionInfo = cfg.EnableCollectExecutionInfo case "run-ddl": - cfg.Instance.EnableDDL.Store(cfg.RunDDL) + cfg.Instance.TiDBEnableDDL.Store(cfg.RunDDL) } case "log": switch oldName { From d2ed88e672e616bc074277c5fdf7030e28228e12 Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Thu, 23 Jun 2022 14:13:02 +0800 Subject: [PATCH 03/21] Support pause a campaign in owner --- ddl/ddl.go | 93 +++++++++++++++++++++++--------- errno/errcode.go | 2 + errno/errname.go | 1 + errors.toml | 5 ++ owner/manager.go | 36 ++++++++----- owner/mock.go | 5 ++ sessionctx/variable/sysvar.go | 12 ++++- sessionctx/variable/tidb_vars.go | 4 ++ util/dbterror/ddl_terror.go | 2 + 9 files changed, 119 insertions(+), 41 deletions(-) diff --git a/ddl/ddl.go b/ddl/ddl.go index 2b53901bf8074..05aeae4bc18af 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -33,6 +33,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/util" + "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" @@ -439,6 +440,9 @@ func newDDL(ctx context.Context, options ...Option) *ddl { limitJobCh: make(chan *limitJobTask, batchAddingJobs), enableTiFlashPoll: atomicutil.NewBool(true), } + // Register functions for enable/disable ddl when changing system variable `tidb_enable_ddl`. + variable.EnableDDL = d.EnableDDL + variable.DisableDDL = d.DisableDDL return d } @@ -468,54 +472,91 @@ func (d *ddl) newDeleteRangeManager(mock bool) delRangeManager { // Start implements DDL.Start interface. func (d *ddl) Start(ctxPool *pools.ResourcePool) error { + var err error logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", config.GetGlobalConfig().Instance.TiDBEnableDDL.Load())) d.wg.Run(d.limitDDLJobs) d.sessPool = newSessionPool(ctxPool, d.store) + d.workers = make(map[workerType]*worker, 2) + d.delRangeMgr = d.newDeleteRangeManager(ctxPool == nil) + d.workers[generalWorker] = newWorker(d.ctx, generalWorker, d.sessPool, d.delRangeMgr, d.ddlCtx) + d.workers[addIdxWorker] = newWorker(d.ctx, addIdxWorker, d.sessPool, d.delRangeMgr, d.ddlCtx) + for _, worker := range d.workers { + worker.wg.Add(1) + w := worker + go w.start(d.ddlCtx) + + metrics.DDLCounter.WithLabelValues(fmt.Sprintf("%s_%s", metrics.CreateDDL, worker.String())).Inc() + + // When the start function is called, we will send a fake job to let worker + // checks owner firstly and try to find whether a job exists and run. + asyncNotify(worker.ddlJobCh) + } + + d.ddlSeqNumMu.seqNum, err = d.GetNextDDLSeqNum() + if err != nil { + return err + } + + go d.schemaSyncer.StartCleanWork() + if config.TableLockEnabled() { + d.wg.Add(1) + go d.startCleanDeadTableLock() + } + metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc() // If tidb_enable_ddl is true, we need campaign owner and do DDL job. // Otherwise, we needn't do that. 
 	if config.GetGlobalConfig().Instance.TiDBEnableDDL.Load() {
-		err := d.ownerManager.CampaignOwner()
+		err = d.ownerManager.CampaignOwner()
 		if err != nil {
 			return errors.Trace(err)
 		}
+	}

-		d.workers = make(map[workerType]*worker, 2)
-		d.delRangeMgr = d.newDeleteRangeManager(ctxPool == nil)
-		d.workers[generalWorker] = newWorker(d.ctx, generalWorker, d.sessPool, d.delRangeMgr, d.ddlCtx)
-		d.workers[addIdxWorker] = newWorker(d.ctx, addIdxWorker, d.sessPool, d.delRangeMgr, d.ddlCtx)
-		for _, worker := range d.workers {
-			worker.wg.Add(1)
-			w := worker
-			go w.start(d.ddlCtx)
+	variable.RegisterStatistics(d)

-			metrics.DDLCounter.WithLabelValues(fmt.Sprintf("%s_%s", metrics.CreateDDL, worker.String())).Inc()
+	metrics.DDLCounter.WithLabelValues(metrics.CreateDDLInstance).Inc()

-			// When the start function is called, we will send a fake job to let worker
-			// checks owner firstly and try to find whether a job exists and run.
-			asyncNotify(worker.ddlJobCh)
-		}
+	// Start some background routine to manage TiFlash replica.
+	d.wg.Run(d.PollTiFlashRoutine)

-		d.ddlSeqNumMu.seqNum, err = d.GetNextDDLSeqNum()
+	return nil
+}
+
+// EnableDDL enable this node to execute ddl.
+// Since ownerManager.CampaignOwner will start a new goroutine to run ownerManager.campaignLoop,
+// we should make sure that before invoking EnableDDL(), ddl is DISABLE.
+func (d *ddl) EnableDDL() error {
+	err := d.ownerManager.CampaignOwner()
+	return errors.Trace(err)
+}
+
+// DisableDDL disable this node to execute ddl.
+// We should make sure that before invoking DisableDDL(), ddl is ENABLE.
+func (d *ddl) DisableDDL() error {
+	if d.ownerManager.IsOwner() {
+		// If there is only one node, we should NOT disable ddl.
+		serverInfo, err := infosync.GetAllServerInfo(d.ctx)
 		if err != nil {
 			return err
 		}
-
-		go d.schemaSyncer.StartCleanWork()
-		if config.TableLockEnabled() {
-			d.wg.Add(1)
-			go d.startCleanDeadTableLock()
+		if len(serverInfo) <= 1 {
+			return dbterror.ErrDDLSetting
 		}
-		metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc()
-	}

-	variable.RegisterStatistics(d)
+		// FIXME: if possible, when this node is the only node with DDL, the setting of DisableDDL should fail.

-	metrics.DDLCounter.WithLabelValues(metrics.CreateDDLInstance).Inc()
+		// let the owner start a new election.
+		err = d.ownerManager.ResignOwner(d.ctx)

-	// Start some background routine to manage TiFlash replica.
-	d.wg.Run(d.PollTiFlashRoutine)
+		if err != nil {
+			return err
+		}
+	}
+
+	// disable campaign by interrupting campaignLoop
+	d.ownerManager.CampaignCancel()

 	return nil
 }
diff --git a/errno/errcode.go b/errno/errcode.go
index 83284118d103a..75bb8650d9830 100644
--- a/errno/errcode.go
+++ b/errno/errcode.go
@@ -1069,6 +1069,8 @@ const (
 	ErrOptOnCacheTable             = 8242
 	ErrHTTPServiceError            = 8243
 	ErrPartitionColumnStatsMissing = 8244
+	ErrDDLSetting                  = 8245
+
 	// TiKV/PD/TiFlash errors.
ErrPDServerTimeout = 9001 ErrTiKVServerTimeout = 9002 diff --git a/errno/errname.go b/errno/errname.go index aaebb8ed668e1..04fb6b55dd479 100644 --- a/errno/errname.go +++ b/errno/errname.go @@ -1063,6 +1063,7 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{ ErrJSONObjectKeyTooLong: mysql.Message("TiDB does not yet support JSON objects with the key length >= 65536", nil), ErrPartitionStatsMissing: mysql.Message("Build table: %s global-level stats failed due to missing partition-level stats", nil), ErrPartitionColumnStatsMissing: mysql.Message("Build table: %s global-level stats failed due to missing partition-level column stats, please run analyze table to refresh columns of all partitions", nil), + ErrDDLSetting: mysql.Message("Error happened when enable/disable DDL", nil), ErrNotSupportedWithSem: mysql.Message("Feature '%s' is not supported when security enhanced mode is enabled", nil), ErrPlacementPolicyCheck: mysql.Message("Placement policy didn't meet the constraint, reason: %s", nil), diff --git a/errors.toml b/errors.toml index 8e657911f045b..fb426e65f738f 100755 --- a/errors.toml +++ b/errors.toml @@ -1231,6 +1231,11 @@ error = ''' '%s' is unsupported on cache tables. ''' +["ddl:8245"] +error = ''' +Error happened when enable/disable DDL +''' + ["domain:8027"] error = ''' Information schema is out of date: schema failed to update in 1 lease, please make sure TiDB can connect to TiKV diff --git a/owner/manager.go b/owner/manager.go index f90dd4cebdd2d..b06ace59429a9 100644 --- a/owner/manager.go +++ b/owner/manager.go @@ -50,10 +50,12 @@ type Manager interface { CampaignOwner() error // ResignOwner lets the owner start a new election. ResignOwner(ctx context.Context) error - // Cancel cancels this etcd ownerManager campaign. + // Cancel cancels this etcd ownerManager. Cancel() // RequireOwner requires the ownerManager is owner. RequireOwner(ctx context.Context) error + // CampaignCancel cancels one etcd campaign + CampaignCancel() } const ( @@ -68,16 +70,17 @@ type DDLOwnerChecker interface { // ownerManager represents the structure which is used for electing owner. type ownerManager struct { - id string // id is the ID of the manager. - key string - ctx context.Context - prompt string - logPrefix string - logCtx context.Context - etcdCli *clientv3.Client - cancel context.CancelFunc - elec unsafe.Pointer - wg sync.WaitGroup + id string // id is the ID of the manager. + key string + ctx context.Context + prompt string + logPrefix string + logCtx context.Context + etcdCli *clientv3.Client + cancel context.CancelFunc + elec unsafe.Pointer + wg sync.WaitGroup + campaignCancel context.CancelFunc } // NewOwnerManager creates a new Manager. @@ -174,11 +177,16 @@ func (m *ownerManager) RetireOwner() { atomic.StorePointer(&m.elec, nil) } +// CampaignCancel implements Manager.CampaignCancel interface. 
+func (m *ownerManager) CampaignCancel() { + m.campaignCancel() +} + func (m *ownerManager) campaignLoop(etcdSession *concurrency.Session) { - var cancel context.CancelFunc - ctx, cancel := context.WithCancel(m.ctx) + var ctx context.Context + ctx, m.campaignCancel = context.WithCancel(m.ctx) defer func() { - cancel() + m.campaignCancel() if r := recover(); r != nil { logutil.BgLogger().Error("recover panic", zap.String("prompt", m.prompt), zap.Any("error", r), zap.Stack("buffer")) metrics.PanicCounter.WithLabelValues(metrics.LabelDDLOwner).Inc() diff --git a/owner/mock.go b/owner/mock.go index c13ff88f3fdf6..0bc90982e64bb 100644 --- a/owner/mock.go +++ b/owner/mock.go @@ -91,3 +91,8 @@ func (m *mockManager) ResignOwner(ctx context.Context) error { func (m *mockManager) RequireOwner(context.Context) error { return nil } + +// CampaignCancel implements Manager.CampaignCancel interface +func (m *mockManager) CampaignCancel() { + // do nothing +} diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index cb06ac38fcd59..5685b9d66a5da 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -417,7 +417,17 @@ var defaultSysVars = []*SysVar{ }}, {Scope: ScopeInstance, Name: TiDBEnableDDL, Value: BoolToOnOff(config.GetGlobalConfig().Instance.TiDBEnableDDL.Load()), Type: TypeBool, SetGlobal: func(s *SessionVars, val string) error { - config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(TiDBOptOn(val)) + oldVal, newVal := config.GetGlobalConfig().Instance.TiDBEnableDDL.Load(), TiDBOptOn(val) + if oldVal != newVal { + var err error + if newVal && EnableDDL != nil { + err = EnableDDL() + } else if !newVal && DisableDDL != nil { + err = DisableDDL() + } + config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(newVal) + return err + } return nil }, GetGlobal: func(s *SessionVars) (string, error) { diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index 64c1916292c73..0afcc63fa09d8 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -994,4 +994,8 @@ var ( GetMemQuotaAnalyze func() int64 = nil // SetStatsCacheCapacity is the func registered by domain to set statsCache memory quota. SetStatsCacheCapacity atomic.Value + // EnableDDL is the func registered by ddl to enable running ddl in this instance. + EnableDDL func() error = nil + // DisableDDL is the func registered by ddl to disable running ddl in this instance. + DisableDDL func() error = nil ) diff --git a/util/dbterror/ddl_terror.go b/util/dbterror/ddl_terror.go index 31ec5c309f327..646d8eec14923 100644 --- a/util/dbterror/ddl_terror.go +++ b/util/dbterror/ddl_terror.go @@ -388,4 +388,6 @@ var ( ErrCancelFinishedDDLJob = ClassDDL.NewStd(mysql.ErrCancelFinishedDDLJob) // ErrCannotCancelDDLJob returns when cancel a almost finished ddl job, because cancel in now may cause data inconsistency. 
ErrCannotCancelDDLJob = ClassDDL.NewStd(mysql.ErrCannotCancelDDLJob) + // ErrDDLSetting returns when failing to enable/disable DDL + ErrDDLSetting = ClassDDL.NewStd(mysql.ErrDDLSetting) ) From 54d85fb28ba93856f4e04c9024bbd039b696a0a8 Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Sat, 2 Jul 2022 17:04:52 +0800 Subject: [PATCH 04/21] make *ddl having a real Start and Stop --- ddl/ddl.go | 78 +++++++++++++++++++++++++++--------------------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/ddl/ddl.go b/ddl/ddl.go index cde498318d8b3..99d4e5718a2e8 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -485,7 +485,6 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { var err error logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", config.GetGlobalConfig().Instance.TiDBEnableDDL.Load())) - d.wg.Run(d.limitDDLJobs) d.sessPool = newSessionPool(ctxPool, d.store) d.ownerManager.SetBeOwnerHook(func() { var err error @@ -494,23 +493,6 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { logutil.BgLogger().Error("error when getting the ddl history count", zap.Error(err)) } }) - d.workers = make(map[workerType]*worker, 2) - d.delRangeMgr = d.newDeleteRangeManager(ctxPool == nil) - d.workers[generalWorker] = newWorker(d.ctx, generalWorker, d.sessPool, d.delRangeMgr, d.ddlCtx) - d.workers[addIdxWorker] = newWorker(d.ctx, addIdxWorker, d.sessPool, d.delRangeMgr, d.ddlCtx) - for _, worker := range d.workers { - worker.wg.Add(1) - w := worker - go w.start(d.ddlCtx) - - metrics.DDLCounter.WithLabelValues(fmt.Sprintf("%s_%s", metrics.CreateDDL, worker.String())).Inc() - - // When the start function is called, we will send a fake job to let worker - // checks owner firstly and try to find whether a job exists and run. - asyncNotify(worker.ddlJobCh) - } - - go d.schemaSyncer.StartCleanWork() if config.TableLockEnabled() { d.wg.Add(1) go d.startCleanDeadTableLock() @@ -520,9 +502,8 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { // If tidb_enable_ddl is true, we need campaign owner and do DDL job. // Otherwise, we needn't do that. if config.GetGlobalConfig().Instance.TiDBEnableDDL.Load() { - err = d.ownerManager.CampaignOwner() - if err != nil { - return errors.Trace(err) + if err = d.EnableDDL(); err != nil { + return err } } @@ -530,9 +511,6 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { metrics.DDLCounter.WithLabelValues(metrics.CreateDDLInstance).Inc() - // Start some background routine to manage TiFlash replica. - d.wg.Run(d.PollTiFlashRoutine) - return nil } @@ -540,6 +518,27 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { // Since ownerManager.CampaignOwner will start a new goroutine to run ownerManager.campaignLoop, // we should make sure that before invoking EnableDDL(), ddl is DISABLE. 
func (d *ddl) EnableDDL() error { + d.wg.Run(d.limitDDLJobs) + d.workers = make(map[workerType]*worker, 2) + d.delRangeMgr = d.newDeleteRangeManager(d.sessPool.resPool == nil) + d.workers[generalWorker] = newWorker(d.ctx, generalWorker, d.sessPool, d.delRangeMgr, d.ddlCtx) + d.workers[addIdxWorker] = newWorker(d.ctx, addIdxWorker, d.sessPool, d.delRangeMgr, d.ddlCtx) + for _, worker := range d.workers { + worker.wg.Add(1) + w := worker + go w.start(d.ddlCtx) + + metrics.DDLCounter.WithLabelValues(fmt.Sprintf("%s_%s", metrics.CreateDDL, worker.String())).Inc() + + // When the start function is called, we will send a fake job to let worker + // checks owner firstly and try to find whether a job exists and run. + asyncNotify(worker.ddlJobCh) + } + + go d.schemaSyncer.StartCleanWork() + // Start some background routine to manage TiFlash replica. + d.wg.Run(d.PollTiFlashRoutine) + err := d.ownerManager.CampaignOwner() return errors.Trace(err) } @@ -567,8 +566,21 @@ func (d *ddl) DisableDDL() error { } } + // d.delRangeMgr using sessions from d.sessPool. + // Put it before d.sessPool.close to reduce the time spent by d.sessPool.close. + if d.delRangeMgr != nil { + d.delRangeMgr.clear() + } + for k, worker := range d.workers { + worker.Close() + delete(d.workers, k) + } + d.schemaSyncer.Close() + // disable campaign by interrupting campaignLoop d.ownerManager.CampaignCancel() + d.cancel() + d.wg.Wait() return nil } @@ -592,22 +604,10 @@ func (d *ddl) close() { } startTime := time.Now() - d.cancel() - d.wg.Wait() - d.ownerManager.Cancel() - d.schemaSyncer.Close() - - for _, worker := range d.workers { - worker.Close() - } - // d.delRangeMgr using sessions from d.sessPool. - // Put it before d.sessPool.close to reduce the time spent by d.sessPool.close. - if d.delRangeMgr != nil { - d.delRangeMgr.clear() - } - if d.sessPool != nil { - d.sessPool.close() + if err := d.DisableDDL(); err != nil { + logutil.BgLogger().Error("[ddl] error when closing DDL", zap.Error(err)) } + d.ownerManager.Cancel() variable.UnregisterStatistics(d) From a022b50ddae4ac8a093b70e8ecf64a989d38e594 Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Sat, 2 Jul 2022 17:23:48 +0800 Subject: [PATCH 05/21] Fix --- ddl/ddl.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ddl/ddl.go b/ddl/ddl.go index 99d4e5718a2e8..e208c167c71fd 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -485,6 +485,7 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { var err error logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", config.GetGlobalConfig().Instance.TiDBEnableDDL.Load())) + d.wg.Run(d.limitDDLJobs) d.sessPool = newSessionPool(ctxPool, d.store) d.ownerManager.SetBeOwnerHook(func() { var err error @@ -518,7 +519,6 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { // Since ownerManager.CampaignOwner will start a new goroutine to run ownerManager.campaignLoop, // we should make sure that before invoking EnableDDL(), ddl is DISABLE. 
func (d *ddl) EnableDDL() error { - d.wg.Run(d.limitDDLJobs) d.workers = make(map[workerType]*worker, 2) d.delRangeMgr = d.newDeleteRangeManager(d.sessPool.resPool == nil) d.workers[generalWorker] = newWorker(d.ctx, generalWorker, d.sessPool, d.delRangeMgr, d.ddlCtx) @@ -608,6 +608,9 @@ func (d *ddl) close() { logutil.BgLogger().Error("[ddl] error when closing DDL", zap.Error(err)) } d.ownerManager.Cancel() + if d.sessPool != nil { + d.sessPool.close() + } variable.UnregisterStatistics(d) From 64f9066befcdf6eef796379fe9805a343b7a4105 Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Sat, 2 Jul 2022 19:37:46 +0800 Subject: [PATCH 06/21] Fix --- ddl/ddl.go | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/ddl/ddl.go b/ddl/ddl.go index e208c167c71fd..486f6f6d19413 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -494,11 +494,6 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { logutil.BgLogger().Error("error when getting the ddl history count", zap.Error(err)) } }) - if config.TableLockEnabled() { - d.wg.Add(1) - go d.startCleanDeadTableLock() - } - metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc() // If tidb_enable_ddl is true, we need campaign owner and do DDL job. // Otherwise, we needn't do that. @@ -508,10 +503,19 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { } } + if config.TableLockEnabled() { + d.wg.Add(1) + go d.startCleanDeadTableLock() + } + metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc() + variable.RegisterStatistics(d) metrics.DDLCounter.WithLabelValues(metrics.CreateDDLInstance).Inc() + // Start some background routine to manage TiFlash replica. + d.wg.Run(d.PollTiFlashRoutine) + return nil } @@ -519,6 +523,8 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { // Since ownerManager.CampaignOwner will start a new goroutine to run ownerManager.campaignLoop, // we should make sure that before invoking EnableDDL(), ddl is DISABLE. func (d *ddl) EnableDDL() error { + + err := d.ownerManager.CampaignOwner() d.workers = make(map[workerType]*worker, 2) d.delRangeMgr = d.newDeleteRangeManager(d.sessPool.resPool == nil) d.workers[generalWorker] = newWorker(d.ctx, generalWorker, d.sessPool, d.delRangeMgr, d.ddlCtx) @@ -536,10 +542,6 @@ func (d *ddl) EnableDDL() error { } go d.schemaSyncer.StartCleanWork() - // Start some background routine to manage TiFlash replica. - d.wg.Run(d.PollTiFlashRoutine) - - err := d.ownerManager.CampaignOwner() return errors.Trace(err) } @@ -566,21 +568,20 @@ func (d *ddl) DisableDDL() error { } } + d.cancel() + d.wg.Wait() + d.schemaSyncer.Close() + for k, worker := range d.workers { + worker.Close() + delete(d.workers, k) + } // d.delRangeMgr using sessions from d.sessPool. // Put it before d.sessPool.close to reduce the time spent by d.sessPool.close. 
if d.delRangeMgr != nil { d.delRangeMgr.clear() } - for k, worker := range d.workers { - worker.Close() - delete(d.workers, k) - } - d.schemaSyncer.Close() - // disable campaign by interrupting campaignLoop d.ownerManager.CampaignCancel() - d.cancel() - d.wg.Wait() return nil } From 21adce64e646bd1e519f31f522a5e9e9e59f3640 Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Mon, 18 Jul 2022 14:15:20 +0800 Subject: [PATCH 07/21] Fix --- ddl/restart_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/ddl/restart_test.go b/ddl/restart_test.go index 8d31b8214df5d..130e988a9367f 100644 --- a/ddl/restart_test.go +++ b/ddl/restart_test.go @@ -21,7 +21,6 @@ import ( "time" "github.com/ngaut/pools" - "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" From 28ea9dd31e38eb1d06d954e19ce6a8609247b6ea Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Wed, 20 Jul 2022 14:59:55 +0800 Subject: [PATCH 08/21] Fix goleak --- ddl/ddl.go | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/ddl/ddl.go b/ddl/ddl.go index 05a8c2fddcc59..cd8cc2183cb01 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -489,13 +489,6 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { d.wg.Run(d.limitDDLJobs) d.sessPool = newSessionPool(ctxPool, d.store) - d.ownerManager.SetBeOwnerHook(func() { - var err error - d.ddlSeqNumMu.seqNum, err = d.GetNextDDLSeqNum() - if err != nil { - logutil.BgLogger().Error("error when getting the ddl history count", zap.Error(err)) - } - }) // If tidb_enable_ddl is true, we need campaign owner and do DDL job. // Otherwise, we needn't do that. @@ -505,12 +498,6 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { } } - if config.TableLockEnabled() { - d.wg.Add(1) - go d.startCleanDeadTableLock() - } - metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc() - variable.RegisterStatistics(d) metrics.DDLCounter.WithLabelValues(metrics.CreateDDLInstance).Inc() @@ -526,7 +513,18 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { // we should make sure that before invoking EnableDDL(), ddl is DISABLE. 
func (d *ddl) EnableDDL() error { + d.ownerManager.SetBeOwnerHook(func() { + var err error + d.ddlSeqNumMu.seqNum, err = d.GetNextDDLSeqNum() + if err != nil { + logutil.BgLogger().Error("error when getting the ddl history count", zap.Error(err)) + } + }) + err := d.ownerManager.CampaignOwner() + if err != nil { + return errors.Trace(err) + } d.workers = make(map[workerType]*worker, 2) d.delRangeMgr = d.newDeleteRangeManager(d.sessPool.resPool == nil) d.workers[generalWorker] = newWorker(d.ctx, generalWorker, d.sessPool, d.delRangeMgr, d.ddlCtx) @@ -544,6 +542,11 @@ func (d *ddl) EnableDDL() error { } go d.schemaSyncer.StartCleanWork() + if config.TableLockEnabled() { + d.wg.Add(1) + go d.startCleanDeadTableLock() + } + metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc() return errors.Trace(err) } @@ -570,8 +573,6 @@ func (d *ddl) DisableDDL() error { } } - d.cancel() - d.wg.Wait() d.schemaSyncer.Close() for k, worker := range d.workers { worker.Close() @@ -614,6 +615,8 @@ func (d *ddl) close() { if d.sessPool != nil { d.sessPool.close() } + d.cancel() + d.wg.Wait() variable.UnregisterStatistics(d) From 7025f05cc8f04e0c535a59bd56800edcb66d4842 Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Wed, 20 Jul 2022 15:54:42 +0800 Subject: [PATCH 09/21] Fix goleak --- ddl/ddl.go | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/ddl/ddl.go b/ddl/ddl.go index cd8cc2183cb01..750c4ab00d83a 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -557,6 +557,7 @@ func (d *ddl) DisableDDL() error { // If there is only one node, we should NOT disable ddl. serverInfo, err := infosync.GetAllServerInfo(d.ctx) if err != nil { + logutil.BgLogger().Error("[ddl] error when GetAllServerInfo", zap.Error(err)) return err } if len(serverInfo) <= 1 { @@ -569,6 +570,7 @@ func (d *ddl) DisableDDL() error { err = d.ownerManager.ResignOwner(d.ctx) if err != nil { + logutil.BgLogger().Error("[ddl] error when ResignOwner", zap.Error(err)) return err } } @@ -582,6 +584,8 @@ func (d *ddl) DisableDDL() error { // Put it before d.sessPool.close to reduce the time spent by d.sessPool.close. if d.delRangeMgr != nil { d.delRangeMgr.clear() + } else { + logutil.BgLogger().Warn("[ddl] d.delRangeMgr is ") } // disable campaign by interrupting campaignLoop d.ownerManager.CampaignCancel() @@ -608,15 +612,22 @@ func (d *ddl) close() { } startTime := time.Now() - if err := d.DisableDDL(); err != nil { - logutil.BgLogger().Error("[ddl] error when closing DDL", zap.Error(err)) - } + d.cancel() + d.wg.Wait() d.ownerManager.Cancel() + d.schemaSyncer.Close() + + for _, worker := range d.workers { + worker.Close() + } + // d.delRangeMgr using sessions from d.sessPool. + // Put it before d.sessPool.close to reduce the time spent by d.sessPool.close. 
+ if d.delRangeMgr != nil { + d.delRangeMgr.clear() + } if d.sessPool != nil { d.sessPool.close() } - d.cancel() - d.wg.Wait() variable.UnregisterStatistics(d) From e598dae4e299096466afffdcf126dc064d44793a Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Thu, 21 Jul 2022 14:56:37 +0800 Subject: [PATCH 10/21] Remove an empty line --- ddl/ddl.go | 1 - 1 file changed, 1 deletion(-) diff --git a/ddl/ddl.go b/ddl/ddl.go index a34fc9b91a713..2f5f3b5604685 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -674,7 +674,6 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { // Since ownerManager.CampaignOwner will start a new goroutine to run ownerManager.campaignLoop, // we should make sure that before invoking EnableDDL(), ddl is DISABLE. func (d *ddl) EnableDDL() error { - d.ownerManager.SetBeOwnerHook(func() { var err error d.ddlSeqNumMu.seqNum, err = d.GetNextDDLSeqNum() From 34d79e0d384599aa3d845b640e88c662b5f33931 Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Fri, 22 Jul 2022 17:10:46 +0800 Subject: [PATCH 11/21] Fix --- owner/manager.go | 1 + 1 file changed, 1 insertion(+) diff --git a/owner/manager.go b/owner/manager.go index 927d8b2965ee5..f9f7a457e10ec 100644 --- a/owner/manager.go +++ b/owner/manager.go @@ -191,6 +191,7 @@ func (m *ownerManager) RetireOwner() { // CampaignCancel implements Manager.CampaignCancel interface. func (m *ownerManager) CampaignCancel() { m.campaignCancel() + m.wg.Wait() } func (m *ownerManager) campaignLoop(etcdSession *concurrency.Session) { From aeb9291ea0d04fae3b87e5eb3e761efec4bd5595 Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Tue, 26 Jul 2022 15:16:45 +0800 Subject: [PATCH 12/21] fmt --- sessionctx/variable/sysvar.go | 7 +------ sessionctx/variable/tidb_vars.go | 10 ++++++++++ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 7a250acff4ca0..215fc57500b9e 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -438,12 +438,7 @@ var defaultSysVars = []*SysVar{ SetGlobal: func(s *SessionVars, val string) error { oldVal, newVal := config.GetGlobalConfig().Instance.TiDBEnableDDL.Load(), TiDBOptOn(val) if oldVal != newVal { - var err error - if newVal && EnableDDL != nil { - err = EnableDDL() - } else if !newVal && DisableDDL != nil { - err = DisableDDL() - } + err := switchDDL(newVal) config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(newVal) return err } diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index f4cf39b1cf7a7..fa65f76df9c62 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -1060,3 +1060,13 @@ var ( // DisableDDL is the func registered by ddl to disable running ddl in this instance. DisableDDL func() error = nil ) + +// switchDDL turns on/off DDL in an instance. 
+func switchDDL(on bool) error {
+	if on && EnableDDL != nil {
+		return EnableDDL()
+	} else if !on && DisableDDL != nil {
+		return DisableDDL()
+	}
+	return nil
+}

From d0cc647ec71d1d24238d043d6fad0316e1528955 Mon Sep 17 00:00:00 2001
From: cbcwestwolf <1004626265@qq.com>
Date: Tue, 26 Jul 2022 16:16:58 +0800
Subject: [PATCH 13/21] error message

---
 ddl/ddl.go       | 2 +-
 errno/errname.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/ddl/ddl.go b/ddl/ddl.go
index 7641f54d35a3a..3255d8267685f 100644
--- a/ddl/ddl.go
+++ b/ddl/ddl.go
@@ -712,7 +712,7 @@ func (d *ddl) DisableDDL() error {
 			return err
 		}
 		if len(serverInfo) <= 1 {
-			return dbterror.ErrDDLSetting
+			return dbterror.ErrDDLSetting.GenWithStackByArgs("can not disable ddl when there is only one instance")
 		}

 		// FIXME: if possible, when this node is the only node with DDL, the setting of DisableDDL should fail.
diff --git a/errno/errname.go b/errno/errname.go
index 914704cede603..94e7b9f3a1887 100644
--- a/errno/errname.go
+++ b/errno/errname.go
@@ -1066,7 +1066,7 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{
 	ErrJSONObjectKeyTooLong:        mysql.Message("TiDB does not yet support JSON objects with the key length >= 65536", nil),
 	ErrPartitionStatsMissing:       mysql.Message("Build table: %s global-level stats failed due to missing partition-level stats", nil),
 	ErrPartitionColumnStatsMissing: mysql.Message("Build table: %s global-level stats failed due to missing partition-level column stats, please run analyze table to refresh columns of all partitions", nil),
-	ErrDDLSetting:                  mysql.Message("Error happened when enable/disable DDL", nil),
+	ErrDDLSetting:                  mysql.Message("Error happened when enable/disable DDL: %s", nil),

 	ErrNotSupportedWithSem:   mysql.Message("Feature '%s' is not supported when security enhanced mode is enabled", nil),
 	ErrPlacementPolicyCheck:  mysql.Message("Placement policy didn't meet the constraint, reason: %s", nil),

From 0c8894222ecc299a3f297d32f13e9e77cb0d7f6f Mon Sep 17 00:00:00 2001
From: cbcwestwolf <1004626265@qq.com>
Date: Tue, 26 Jul 2022 16:35:29 +0800
Subject: [PATCH 14/21] Fix

---
 errors.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/errors.toml b/errors.toml
index e15c7cdb342b5..d8f9ef387c33c 100755
--- a/errors.toml
+++ b/errors.toml
@@ -1258,7 +1258,7 @@ column %s id %d does not exist, this column may have been updated by other DDL r

["ddl:8246"]
error = '''
-Error happened when enable/disable DDL
+Error happened when enable/disable DDL: %s
'''

["domain:8027"]

From 78733d75b3ddda1fca535347d1185ed402c5e676 Mon Sep 17 00:00:00 2001
From: cbcwestwolf <1004626265@qq.com>
Date: Wed, 27 Jul 2022 23:17:00 +0800
Subject: [PATCH 15/21] Revert: DisableDDL only breaks campaign

---
 ddl/ddl.go       | 68 ++++++++++++++++--------------------------------
 owner/manager.go | 14 +++++-----
 2 files changed, 30 insertions(+), 52 deletions(-)

diff --git a/ddl/ddl.go b/ddl/ddl.go
index 3486a97223508..67d0d87d556be 100644
--- a/ddl/ddl.go
+++ b/ddl/ddl.go
@@ -642,7 +642,6 @@ func (d *ddl) prepareWorkers4legacyDDL() {

 // Start implements DDL.Start interface.
 func (d *ddl) Start(ctxPool *pools.ResourcePool) error {
-	var err error
 	logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", config.GetGlobalConfig().Instance.TiDBEnableDDL.Load()))

 	d.wg.Run(d.limitDDLJobs)
@@ -651,9 +650,30 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error {

 	// If tidb_enable_ddl is true, we need campaign owner and do DDL job.
 	// Otherwise, we needn't do that.
if config.GetGlobalConfig().Instance.TiDBEnableDDL.Load() { - if err = d.EnableDDL(); err != nil { - return err + d.ownerManager.SetBeOwnerHook(func() { + var err error + d.ddlSeqNumMu.seqNum, err = d.GetNextDDLSeqNum() + if err != nil { + logutil.BgLogger().Error("error when getting the ddl history count", zap.Error(err)) + } + }) + + err := d.ownerManager.CampaignOwner() + if err != nil { + return errors.Trace(err) + } + + d.delRangeMgr = d.newDeleteRangeManager(ctxPool == nil) + + d.prepareWorkers4ConcurrencyDDL() + d.prepareWorkers4legacyDDL() + + go d.schemaSyncer.StartCleanWork() + if config.TableLockEnabled() { + d.wg.Add(1) + go d.startCleanDeadTableLock() } + metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc() } variable.RegisterStatistics(d) @@ -670,30 +690,7 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { // Since ownerManager.CampaignOwner will start a new goroutine to run ownerManager.campaignLoop, // we should make sure that before invoking EnableDDL(), ddl is DISABLE. func (d *ddl) EnableDDL() error { - d.ownerManager.SetBeOwnerHook(func() { - var err error - d.ddlSeqNumMu.seqNum, err = d.GetNextDDLSeqNum() - if err != nil { - logutil.BgLogger().Error("error when getting the ddl history count", zap.Error(err)) - } - }) - err := d.ownerManager.CampaignOwner() - if err != nil { - return errors.Trace(err) - } - - d.delRangeMgr = d.newDeleteRangeManager(d.sessPool.resPool == nil) - - d.prepareWorkers4ConcurrencyDDL() - d.prepareWorkers4legacyDDL() - - go d.schemaSyncer.StartCleanWork() - if config.TableLockEnabled() { - d.wg.Add(1) - go d.startCleanDeadTableLock() - } - metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc() return errors.Trace(err) } @@ -722,27 +719,8 @@ func (d *ddl) DisableDDL() error { } } - d.cancel() - d.wg.Wait() - d.schemaSyncer.Close() - if d.reorgWorkerPool != nil { - d.reorgWorkerPool.close() - } - if d.generalDDLWorkerPool != nil { - d.generalDDLWorkerPool.close() - } - - for _, worker := range d.workers { - worker.Close() - } - // d.delRangeMgr using sessions from d.sessPool. - // Put it before d.sessPool.close to reduce the time spent by d.sessPool.close. 
- if d.delRangeMgr != nil { - d.delRangeMgr.clear() - } // disable campaign by interrupting campaignLoop d.ownerManager.CampaignCancel() - return nil } diff --git a/owner/manager.go b/owner/manager.go index f9f7a457e10ec..4223a433b8b55 100644 --- a/owner/manager.go +++ b/owner/manager.go @@ -195,8 +195,8 @@ func (m *ownerManager) CampaignCancel() { } func (m *ownerManager) campaignLoop(etcdSession *concurrency.Session) { - var ctx context.Context - ctx, m.campaignCancel = context.WithCancel(m.ctx) + var campaignContext context.Context + campaignContext, m.campaignCancel = context.WithCancel(m.ctx) defer func() { m.campaignCancel() if r := recover(); r != nil { @@ -218,13 +218,13 @@ func (m *ownerManager) campaignLoop(etcdSession *concurrency.Session) { case <-etcdSession.Done(): logutil.Logger(logCtx).Info("etcd session is done, creates a new one") leaseID := etcdSession.Lease() - etcdSession, err = util2.NewSession(ctx, logPrefix, m.etcdCli, util2.NewSessionRetryUnlimited, ManagerSessionTTL) + etcdSession, err = util2.NewSession(campaignContext, logPrefix, m.etcdCli, util2.NewSessionRetryUnlimited, ManagerSessionTTL) if err != nil { logutil.Logger(logCtx).Info("break campaign loop, NewSession failed", zap.Error(err)) m.revokeSession(logPrefix, leaseID) return } - case <-ctx.Done(): + case <-campaignContext.Done(): logutil.Logger(logCtx).Info("break campaign loop, context is done") m.revokeSession(logPrefix, etcdSession.Lease()) return @@ -242,19 +242,19 @@ func (m *ownerManager) campaignLoop(etcdSession *concurrency.Session) { } elec := concurrency.NewElection(etcdSession, m.key) - err = elec.Campaign(ctx, m.id) + err = elec.Campaign(campaignContext, m.id) if err != nil { logutil.Logger(logCtx).Info("failed to campaign", zap.Error(err)) continue } - ownerKey, err := GetOwnerInfo(ctx, logCtx, elec, m.id) + ownerKey, err := GetOwnerInfo(campaignContext, logCtx, elec, m.id) if err != nil { continue } m.toBeOwner(elec) - m.watchOwner(ctx, etcdSession, ownerKey) + m.watchOwner(campaignContext, etcdSession, ownerKey) m.RetireOwner() metrics.CampaignOwnerCounter.WithLabelValues(m.prompt, metrics.NoLongerOwner).Inc() From 1bb0c46242e9e9d6ebc92261f2e7c9a67d978759 Mon Sep 17 00:00:00 2001 From: cbcwestwolf <1004626265@qq.com> Date: Thu, 28 Jul 2022 16:32:56 +0800 Subject: [PATCH 16/21] Fix --- ddl/ddl.go | 42 ++++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/ddl/ddl.go b/ddl/ddl.go index 67d0d87d556be..d500fd75be6f6 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -646,34 +646,32 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { d.wg.Run(d.limitDDLJobs) d.sessPool = newSessionPool(ctxPool, d.store) - - // If tidb_enable_ddl is true, we need campaign owner and do DDL job. - // Otherwise, we needn't do that. 
From 1bb0c46242e9e9d6ebc92261f2e7c9a67d978759 Mon Sep 17 00:00:00 2001
From: cbcwestwolf <1004626265@qq.com>
Date: Thu, 28 Jul 2022 16:32:56 +0800
Subject: [PATCH 16/21] Fix: always prepare DDL workers, gate only owner
 campaigning

---
 ddl/ddl.go | 42 ++++++++++++++++++++----------------------
 1 file changed, 20 insertions(+), 22 deletions(-)

diff --git a/ddl/ddl.go b/ddl/ddl.go
index 67d0d87d556be..d500fd75be6f6 100644
--- a/ddl/ddl.go
+++ b/ddl/ddl.go
@@ -646,34 +646,32 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error {
 	d.wg.Run(d.limitDDLJobs)
 	d.sessPool = newSessionPool(ctxPool, d.store)
-
-	// If tidb_enable_ddl is true, we need campaign owner and do DDL job.
-	// Otherwise, we needn't do that.
-	if config.GetGlobalConfig().Instance.TiDBEnableDDL.Load() {
-		d.ownerManager.SetBeOwnerHook(func() {
-			var err error
-			d.ddlSeqNumMu.seqNum, err = d.GetNextDDLSeqNum()
-			if err != nil {
-				logutil.BgLogger().Error("error when getting the ddl history count", zap.Error(err))
-			}
-		})
-
-		err := d.ownerManager.CampaignOwner()
+	d.ownerManager.SetBeOwnerHook(func() {
+		var err error
+		d.ddlSeqNumMu.seqNum, err = d.GetNextDDLSeqNum()
 		if err != nil {
-			return errors.Trace(err)
+			logutil.BgLogger().Error("error when getting the ddl history count", zap.Error(err))
 		}
+	})
 
-		d.delRangeMgr = d.newDeleteRangeManager(ctxPool == nil)
+	d.delRangeMgr = d.newDeleteRangeManager(ctxPool == nil)
 
-		d.prepareWorkers4ConcurrencyDDL()
-		d.prepareWorkers4legacyDDL()
+	d.prepareWorkers4ConcurrencyDDL()
+	d.prepareWorkers4legacyDDL()
+
+	go d.schemaSyncer.StartCleanWork()
+	if config.TableLockEnabled() {
+		d.wg.Add(1)
+		go d.startCleanDeadTableLock()
+	}
+	metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc()
 
-		go d.schemaSyncer.StartCleanWork()
-		if config.TableLockEnabled() {
-			d.wg.Add(1)
-			go d.startCleanDeadTableLock()
+	// If tidb_enable_ddl is true, we need campaign owner and do DDL job.
+	// Otherwise, we needn't do that.
+	if config.GetGlobalConfig().Instance.TiDBEnableDDL.Load() {
+		if err := d.EnableDDL(); err != nil {
+			return err
 		}
-		metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc()
 	}
 
 	variable.RegisterStatistics(d)
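With this reordering, the be-owner hook is registered and the workers are prepared unconditionally; only the owner campaign itself is gated on tidb_enable_ddl. The hook matters because per-term state such as the DDL sequence number must be loaded at the moment ownership is won, not at registration time. A minimal sketch of that hook-on-transition pattern follows; ownerState, toBeOwner, and the seqNum stand-in are hypothetical, not the TiDB types.

```go
package main

import (
	"fmt"
	"sync"
)

// ownerState is a stripped-down stand-in for the owner manager's
// be-owner hook: a callback registered before campaigning starts and
// invoked once per transition to owner.
type ownerState struct {
	mu          sync.Mutex
	isOwner     bool
	beOwnerHook func()
}

func (s *ownerState) SetBeOwnerHook(f func()) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.beOwnerHook = f
}

// toBeOwner is what a campaign loop would call after winning the
// election; state that must be fresh per term (like the DDL seq
// number) is loaded inside the hook, not at registration time.
func (s *ownerState) toBeOwner() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.isOwner {
		s.isOwner = true
		if s.beOwnerHook != nil {
			s.beOwnerHook()
		}
	}
}

func main() {
	var seqNum uint64
	s := &ownerState{}
	s.SetBeOwnerHook(func() {
		seqNum = 42 // stand-in for GetNextDDLSeqNum()
		fmt.Println("became owner, seqNum =", seqNum)
	})
	s.toBeOwner()
}
```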
From d2ab14ce9f171ace84c3f5cd77ec26c1873eae84 Mon Sep 17 00:00:00 2001
From: cbcwestwolf <1004626265@qq.com>
Date: Thu, 28 Jul 2022 17:01:04 +0800
Subject: [PATCH 17/21] Fix bazel build failure

---
 br/cmd/br/BUILD.bazel       | 1 -
 cmd/ddltest/BUILD.bazel     | 2 +-
 dumpling/export/BUILD.bazel | 1 +
 util/plancodec/BUILD.bazel  | 2 ++
 4 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/br/cmd/br/BUILD.bazel b/br/cmd/br/BUILD.bazel
index 7254e0b4c3fdb..a8605aae2f3ea 100644
--- a/br/cmd/br/BUILD.bazel
+++ b/br/cmd/br/BUILD.bazel
@@ -30,7 +30,6 @@ go_library(
         "//br/pkg/utils",
         "//br/pkg/version/build",
         "//config",
-        "//ddl",
         "//parser/model",
         "//session",
         "//util",
diff --git a/cmd/ddltest/BUILD.bazel b/cmd/ddltest/BUILD.bazel
index 6870dddb5fd04..220fc51cbaa25 100644
--- a/cmd/ddltest/BUILD.bazel
+++ b/cmd/ddltest/BUILD.bazel
@@ -11,7 +11,7 @@ go_test(
     ],
     flaky = True,
     deps = [
-        "//ddl",
+        "//config",
         "//domain",
         "//kv",
         "//parser/model",
diff --git a/dumpling/export/BUILD.bazel b/dumpling/export/BUILD.bazel
index d6679b4e3e68e..6bd4a0e0e86ca 100644
--- a/dumpling/export/BUILD.bazel
+++ b/dumpling/export/BUILD.bazel
@@ -95,6 +95,7 @@ go_test(
         "//config",
         "//dumpling/context",
         "//dumpling/log",
+        "//errno",
         "//parser",
         "//util/filter",
         "//util/promutil",
diff --git a/util/plancodec/BUILD.bazel b/util/plancodec/BUILD.bazel
index 776c53d63a816..e0e05e6562df4 100644
--- a/util/plancodec/BUILD.bazel
+++ b/util/plancodec/BUILD.bazel
@@ -12,11 +12,13 @@ go_library(
     deps = [
         "//kv",
         "//util/hack",
+        "//util/logutil",
         "//util/memory",
         "//util/texttree",
         "@com_github_golang_snappy//:snappy",
         "@com_github_pingcap_errors//:errors",
         "@com_github_pingcap_tipb//go-tipb",
+        "@org_uber_go_zap//:zap",
     ],
 )

From 27bd71f627e14e93bb031b12f223855d15c97dbd Mon Sep 17 00:00:00 2001
From: cbcwestwolf <1004626265@qq.com>
Date: Thu, 28 Jul 2022 19:37:18 +0800
Subject: [PATCH 18/21] Introduce Manager.Ctx()

---
 ddl/ddl.go       |  2 +-
 owner/manager.go | 11 +++++++++--
 owner/mock.go    |  6 ++++++
 3 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/ddl/ddl.go b/ddl/ddl.go
index d500fd75be6f6..ac3d46f20cb4b 100644
--- a/ddl/ddl.go
+++ b/ddl/ddl.go
@@ -709,7 +709,7 @@ func (d *ddl) DisableDDL() error {
 	// FIXME: if possible, when this node is the only node with DDL, this setting of DisableDDL should fail.
 
 	// lets the owner start a new election.
-	err = d.ownerManager.ResignOwner(d.ctx)
+	err = d.ownerManager.ResignOwner(d.ownerManager.Ctx())
 	if err != nil {
 		logutil.BgLogger().Error("[ddl] error when ResignOwner", zap.Error(err))
diff --git a/owner/manager.go b/owner/manager.go
index 4223a433b8b55..51497866ed387 100644
--- a/owner/manager.go
+++ b/owner/manager.go
@@ -50,6 +50,8 @@ type Manager interface {
 	CampaignOwner() error
 	// ResignOwner lets the owner start a new election.
 	ResignOwner(ctx context.Context) error
+	// Ctx returns the context of the manager.
+	Ctx() context.Context
 	// Cancel cancels this etcd ownerManager.
 	Cancel()
 	// RequireOwner requires the ownerManager is owner.
@@ -113,6 +115,11 @@ func (m *ownerManager) IsOwner() bool {
 	return atomic.LoadPointer(&m.elec) != unsafe.Pointer(nil)
 }
 
+// Ctx implements Manager.Ctx interface.
+func (m *ownerManager) Ctx() context.Context {
+	return m.ctx
+}
+
 // Cancel implements Manager.Cancel interface.
 func (m *ownerManager) Cancel() {
 	m.cancel()
@@ -149,12 +156,12 @@ func setManagerSessionTTL() error {
 func (m *ownerManager) CampaignOwner() error {
 	logPrefix := fmt.Sprintf("[%s] %s", m.prompt, m.key)
 	logutil.BgLogger().Info("start campaign owner", zap.String("ownerInfo", logPrefix))
-	session, err := util2.NewSession(m.ctx, logPrefix, m.etcdCli, util2.NewSessionDefaultRetryCnt, ManagerSessionTTL)
+	etcdSession, err := util2.NewSession(m.ctx, logPrefix, m.etcdCli, util2.NewSessionDefaultRetryCnt, ManagerSessionTTL)
 	if err != nil {
 		return errors.Trace(err)
 	}
 	m.wg.Add(1)
-	go m.campaignLoop(session)
+	go m.campaignLoop(etcdSession)
 	return nil
 }
diff --git a/owner/mock.go b/owner/mock.go
index 546f955c47268..630d74d47b3b5 100644
--- a/owner/mock.go
+++ b/owner/mock.go
@@ -29,6 +29,7 @@ var _ Manager = &mockManager{}
 type mockManager struct {
 	owner       int32
 	id          string // id is the ID of manager.
+	ctx         context.Context
 	cancel      context.CancelFunc
 	beOwnerHook func()
 }
@@ -42,6 +43,11 @@ func NewMockManager(ctx context.Context, id string) Manager {
 	}
 }
 
+// Ctx implements Manager.Ctx interface
+func (m *mockManager) Ctx() context.Context {
+	return m.ctx
+}
+
 // ID implements Manager.ID interface.
 func (m *mockManager) ID() string {
 	return m.id

From 70111dea95436623a826352a6ab1325351af3023 Mon Sep 17 00:00:00 2001
From: cbcwestwolf <1004626265@qq.com>
Date: Fri, 29 Jul 2022 11:50:13 +0800
Subject: [PATCH 19/21] Remove unnecessary logic

---
 ddl/ddl.go | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/ddl/ddl.go b/ddl/ddl.go
index ac3d46f20cb4b..30c1d363971e2 100644
--- a/ddl/ddl.go
+++ b/ddl/ddl.go
@@ -707,14 +707,6 @@ func (d *ddl) DisableDDL() error {
 	}
 
 	// FIXME: if possible, when this node is the only node with DDL, this setting of DisableDDL should fail.
-
-	// lets the owner start a new election.
-	err = d.ownerManager.ResignOwner(d.ownerManager.Ctx())
-
-	if err != nil {
-		logutil.BgLogger().Error("[ddl] error when ResignOwner", zap.Error(err))
-		return err
-	}
 	}
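Manager.Ctx() existed so that DisableDDL could resign ownership using the owner manager's own lifecycle context instead of d.ctx, which may already be cancelled while DDL is being shut off. Once the ResignOwner call is removed in the patch above, the accessor has no callers left, which is why the next patch reverts it. A minimal sketch of the accessor pattern itself, where manager and newManager are illustrative names:

```go
package main

import (
	"context"
	"fmt"
)

// manager owns its own lifecycle context, derived once at construction;
// callers that need to issue calls "on behalf of" the manager can use
// Ctx() instead of threading a possibly already-cancelled context of
// their own.
type manager struct {
	ctx    context.Context
	cancel context.CancelFunc
}

func newManager(parent context.Context) *manager {
	ctx, cancel := context.WithCancel(parent)
	return &manager{ctx: ctx, cancel: cancel}
}

// Ctx returns the manager's lifecycle context.
func (m *manager) Ctx() context.Context { return m.ctx }

func main() {
	m := newManager(context.Background())
	defer m.cancel()
	// A call tied to the manager's lifetime rather than the caller's:
	fmt.Println("manager context err:", m.Ctx().Err()) // <nil> until cancel
}
```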
---
 owner/manager.go | 11 ++---------
 owner/mock.go    |  6 ------
 2 files changed, 2 insertions(+), 15 deletions(-)

diff --git a/owner/manager.go b/owner/manager.go
index 51497866ed387..4223a433b8b55 100644
--- a/owner/manager.go
+++ b/owner/manager.go
@@ -50,8 +50,6 @@ type Manager interface {
 	CampaignOwner() error
 	// ResignOwner lets the owner start a new election.
 	ResignOwner(ctx context.Context) error
-	// Ctx returns the context of the manager.
-	Ctx() context.Context
 	// Cancel cancels this etcd ownerManager.
 	Cancel()
 	// RequireOwner requires the ownerManager is owner.
@@ -115,11 +113,6 @@ func (m *ownerManager) IsOwner() bool {
 	return atomic.LoadPointer(&m.elec) != unsafe.Pointer(nil)
 }
 
-// Ctx implements Manager.Ctx interface.
-func (m *ownerManager) Ctx() context.Context {
-	return m.ctx
-}
-
 // Cancel implements Manager.Cancel interface.
 func (m *ownerManager) Cancel() {
 	m.cancel()
@@ -156,12 +149,12 @@ func setManagerSessionTTL() error {
 func (m *ownerManager) CampaignOwner() error {
 	logPrefix := fmt.Sprintf("[%s] %s", m.prompt, m.key)
 	logutil.BgLogger().Info("start campaign owner", zap.String("ownerInfo", logPrefix))
-	etcdSession, err := util2.NewSession(m.ctx, logPrefix, m.etcdCli, util2.NewSessionDefaultRetryCnt, ManagerSessionTTL)
+	session, err := util2.NewSession(m.ctx, logPrefix, m.etcdCli, util2.NewSessionDefaultRetryCnt, ManagerSessionTTL)
 	if err != nil {
 		return errors.Trace(err)
 	}
 	m.wg.Add(1)
-	go m.campaignLoop(etcdSession)
+	go m.campaignLoop(session)
 	return nil
 }
diff --git a/owner/mock.go b/owner/mock.go
index 630d74d47b3b5..546f955c47268 100644
--- a/owner/mock.go
+++ b/owner/mock.go
@@ -29,7 +29,6 @@ var _ Manager = &mockManager{}
 type mockManager struct {
 	owner       int32
 	id          string // id is the ID of manager.
-	ctx         context.Context
 	cancel      context.CancelFunc
 	beOwnerHook func()
 }
@@ -43,11 +42,6 @@ func NewMockManager(ctx context.Context, id string) Manager {
 	}
 }
 
-// Ctx implements Manager.Ctx interface
-func (m *mockManager) Ctx() context.Context {
-	return m.ctx
-}
-
 // ID implements Manager.ID interface.
 func (m *mockManager) ID() string {
 	return m.id

From af356ccca1af73d3d44eba855ca0cfd193906fec Mon Sep 17 00:00:00 2001
From: cbcwestwolf <1004626265@qq.com>
Date: Mon, 1 Aug 2022 14:22:26 +0800
Subject: [PATCH 21/21] Fix: drop stale FIXME in DisableDDL

---
 ddl/ddl.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ddl/ddl.go b/ddl/ddl.go
index 30c1d363971e2..bad7c9b7d088b 100644
--- a/ddl/ddl.go
+++ b/ddl/ddl.go
@@ -705,7 +705,6 @@ func (d *ddl) DisableDDL() error {
 		if len(serverInfo) <= 1 {
 			return dbterror.ErrDDLSetting.GenWithStackByArgs("can not disable ddl when there is only one instance")
 		}
-		// FIXME: if possible, when this node is the only node with DDL, this setting of DisableDDL should fail.
 	}
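What survives all of these revisions is the guard at the top of DisableDDL: a cluster whose only instance turns DDL off would be left with no possible owner, so the request must fail. A minimal sketch of that guard, where fetchServerInfo is a hypothetical stand-in for a topology query such as infosync.GetAllServerInfo:

```go
package main

import (
	"errors"
	"fmt"
)

// fetchServerInfo is a hypothetical stand-in for querying cluster
// topology; it returns one entry per live tidb-server instance.
func fetchServerInfo() map[string]struct{} {
	return map[string]struct{}{"tidb-0": {}}
}

// disableDDL refuses to turn DDL off when this is the only instance,
// mirroring the ErrDDLSetting guard kept in DisableDDL above.
func disableDDL() error {
	serverInfo := fetchServerInfo()
	if len(serverInfo) <= 1 {
		return errors.New("can not disable ddl when there is only one instance")
	}
	// ... interrupt the owner campaign here ...
	return nil
}

func main() {
	fmt.Println(disableDDL())
}
```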