From 727d0274527db85ec98cb6437f309dba61496f9d Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Fri, 14 Jul 2023 17:51:44 +0800
Subject: [PATCH] *: decouple the scheduler and checker interfaces (#6776)

ref tikv/pd#5839

Signed-off-by: Ryan Leung
Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com>
---
 pkg/mock/mockcluster/mockcluster.go | 18 +++-
 pkg/schedule/checker/checker_controller.go | 8 +-
 pkg/schedule/checker/joint_state_checker.go | 4 +-
 pkg/schedule/checker/learner_checker.go | 4 +-
 pkg/schedule/checker/merge_checker.go | 16 ++--
 pkg/schedule/checker/merge_checker_test.go | 8 +-
 pkg/schedule/checker/priority_inspector.go | 6 +-
 .../checker/priority_inspector_test.go | 4 +-
 pkg/schedule/checker/replica_checker.go | 6 +-
 pkg/schedule/checker/replica_checker_test.go | 20 ++--
 pkg/schedule/checker/replica_strategy.go | 12 +--
 pkg/schedule/checker/rule_checker.go | 22 ++---
 pkg/schedule/checker/split_checker.go | 6 +-
 pkg/schedule/config/config.go | 90 ++++++++++--------
 pkg/schedule/coordinator.go | 37 ++++----
 pkg/schedule/core/cluster_informer.go | 41 +++++++--
 pkg/schedule/diagnostic/diagnostic_manager.go | 4 +-
 pkg/schedule/filter/candidates.go | 4 +-
 pkg/schedule/filter/candidates_test.go | 4 +-
 pkg/schedule/filter/comparer.go | 2 +-
 pkg/schedule/filter/filters.go | 92 +++++++++----------
 pkg/schedule/filter/filters_test.go | 22 ++---
 pkg/schedule/filter/healthy.go | 10 +-
 pkg/schedule/filter/region_filters.go | 12 +--
 pkg/schedule/operator/builder.go | 18 ++--
 pkg/schedule/operator/create_operator.go | 32 +++----
 pkg/schedule/operator/operator_controller.go | 4 +-
 .../operator/operator_controller_test.go | 28 +++---
 pkg/schedule/operator/step.go | 28 +++---
 pkg/schedule/operator/step_test.go | 2 +-
 pkg/schedule/placement/rule_manager.go | 4 +-
 pkg/schedule/scatter/region_scatterer.go | 12 +--
 pkg/schedule/scatter/region_scatterer_test.go | 26 +++---
 .../schedulers/balance_benchmark_test.go | 4 +-
 pkg/schedule/schedulers/balance_leader.go | 18 ++--
 pkg/schedule/schedulers/balance_region.go | 21 +++--
 pkg/schedule/schedulers/balance_test.go | 2 +-
 pkg/schedule/schedulers/balance_witness.go | 14 +--
 .../schedulers/balance_witness_test.go | 2 +-
 pkg/schedule/schedulers/base_scheduler.go | 4 +-
 .../schedulers/diagnostic_recorder.go | 4 +-
 pkg/schedule/schedulers/evict_leader.go | 16 ++--
 pkg/schedule/schedulers/evict_slow_store.go | 16 ++--
 pkg/schedule/schedulers/evict_slow_trend.go | 30 +++---
 pkg/schedule/schedulers/grant_hot_region.go | 19 ++--
 pkg/schedule/schedulers/grant_leader.go | 10 +-
 pkg/schedule/schedulers/hot_region.go | 52 +++++------
 pkg/schedule/schedulers/hot_region_config.go | 7 +-
 pkg/schedule/schedulers/label.go | 10 +-
 pkg/schedule/schedulers/random_merge.go | 12 +--
 pkg/schedule/schedulers/range_cluster.go | 20 ++--
 pkg/schedule/schedulers/scatter_range.go | 12 +--
 pkg/schedule/schedulers/scheduler.go | 8 +-
 .../schedulers/scheduler_controller.go | 32 +++----
 pkg/schedule/schedulers/scheduler_test.go | 4 +-
 pkg/schedule/schedulers/shuffle_hot_region.go | 15 +--
 pkg/schedule/schedulers/shuffle_leader.go | 8 +-
 pkg/schedule/schedulers/shuffle_region.go | 16 ++--
 pkg/schedule/schedulers/split_bucket.go | 8 +-
 .../schedulers/transfer_witness_leader.go | 10 +-
 pkg/schedule/schedulers/utils.go | 20 ++--
 pkg/statistics/region_collection.go | 4 +-
 plugin/scheduler_example/evict_leader.go | 12 +--
 server/cluster/cluster.go | 15 +++
 server/cluster/cluster_test.go | 4 +-
 65 files changed, 550 insertions(+), 485 deletions(-)

diff --git
a/pkg/mock/mockcluster/mockcluster.go b/pkg/mock/mockcluster/mockcluster.go index ef9c9a97db2f..391d31a4036f 100644 --- a/pkg/mock/mockcluster/mockcluster.go +++ b/pkg/mock/mockcluster/mockcluster.go @@ -88,9 +88,19 @@ func (mc *Cluster) GetStoreConfig() sc.StoreConfig { return mc.StoreConfigManager.GetStoreConfig() } -// GetOpts returns the cluster configuration. -func (mc *Cluster) GetOpts() sc.Config { - return mc.PersistOptions +// GetCheckerConfig returns the checker config. +func (mc *Cluster) GetCheckerConfig() sc.CheckerConfig { + return mc +} + +// GetSchedulerConfig returns the scheduler config. +func (mc *Cluster) GetSchedulerConfig() sc.SchedulerConfig { + return mc +} + +// GetSharedConfig returns the shared config. +func (mc *Cluster) GetSharedConfig() sc.SharedConfig { + return mc } // GetStorage returns the storage. @@ -198,7 +208,7 @@ func (mc *Cluster) AllocPeer(storeID uint64) (*metapb.Peer, error) { func (mc *Cluster) initRuleManager() { if mc.RuleManager == nil { - mc.RuleManager = placement.NewRuleManager(mc.GetStorage(), mc, mc.GetOpts()) + mc.RuleManager = placement.NewRuleManager(mc.GetStorage(), mc, mc.GetSharedConfig()) mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels) } } diff --git a/pkg/schedule/checker/checker_controller.go b/pkg/schedule/checker/checker_controller.go index 5cc7e85eeb66..8ec144367eba 100644 --- a/pkg/schedule/checker/checker_controller.go +++ b/pkg/schedule/checker/checker_controller.go @@ -37,8 +37,8 @@ var denyCheckersByLabelerCounter = labeler.LabelerEventCounter.WithLabelValues(" // Controller is used to manage all checkers. type Controller struct { - cluster sche.ClusterInformer - conf config.Config + cluster sche.CheckerCluster + conf config.CheckerConfig opController *operator.Controller learnerChecker *LearnerChecker replicaChecker *ReplicaChecker @@ -53,7 +53,7 @@ type Controller struct { } // NewController create a new Controller. -func NewController(ctx context.Context, cluster sche.ClusterInformer, conf config.Config, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler, opController *operator.Controller) *Controller { +func NewController(ctx context.Context, cluster sche.CheckerCluster, conf config.CheckerConfig, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler, opController *operator.Controller) *Controller { regionWaitingList := cache.NewDefaultCache(DefaultCacheSize) return &Controller{ cluster: cluster, @@ -87,7 +87,7 @@ func (c *Controller) CheckRegion(region *core.RegionInfo) []*operator.Operator { } if c.conf.IsPlacementRulesEnabled() { - skipRuleCheck := c.cluster.GetOpts().IsPlacementRulesCacheEnabled() && + skipRuleCheck := c.cluster.GetCheckerConfig().IsPlacementRulesCacheEnabled() && c.cluster.GetRuleManager().IsRegionFitCached(c.cluster, region) if skipRuleCheck { // If the fit is fetched from cache, it seems that the region doesn't need check diff --git a/pkg/schedule/checker/joint_state_checker.go b/pkg/schedule/checker/joint_state_checker.go index fdd24a5f3cdc..872c957f0a71 100644 --- a/pkg/schedule/checker/joint_state_checker.go +++ b/pkg/schedule/checker/joint_state_checker.go @@ -26,7 +26,7 @@ import ( // JointStateChecker ensures region is in joint state will leave. type JointStateChecker struct { PauseController - cluster sche.ClusterInformer + cluster sche.CheckerCluster } const jointStateCheckerName = "joint_state_checker" @@ -41,7 +41,7 @@ var ( ) // NewJointStateChecker creates a joint state checker. 
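The mockcluster hunk above shows the pattern the whole patch relies on: one concrete cluster type hands itself out as each of the narrower config views. A minimal, self-contained sketch of that shape follows; the interface names and methods in it are illustrative stand-ins, not the real pd definitions.

package main

import "fmt"

// Narrow views over one configuration object, mirroring the
// SharedConfig/CheckerConfig/SchedulerConfig split in this patch.
type sharedConf interface{ MaxReplicas() int }

type checkerConf interface {
    sharedConf
    PatrolInterval() string
}

type schedulerConf interface {
    sharedConf
    LeaderScheduleLimit() uint64
}

// mockCluster plays the role of mockcluster.Cluster: one concrete type
// that can be handed out as any of the narrow config views.
type mockCluster struct{}

func (mc *mockCluster) MaxReplicas() int            { return 3 }
func (mc *mockCluster) PatrolInterval() string      { return "10ms" }
func (mc *mockCluster) LeaderScheduleLimit() uint64 { return 4 }

// The getters just return the receiver, the same move as
// GetCheckerConfig/GetSchedulerConfig/GetSharedConfig above.
func (mc *mockCluster) GetCheckerConfig() checkerConf     { return mc }
func (mc *mockCluster) GetSchedulerConfig() schedulerConf { return mc }
func (mc *mockCluster) GetSharedConfig() sharedConf       { return mc }

func main() {
    mc := &mockCluster{}
    // A checker-style consumer only sees the checker view.
    var cc checkerConf = mc.GetCheckerConfig()
    fmt.Println(cc.MaxReplicas(), cc.PatrolInterval())
}

Because the getters only narrow the static type, no adapter struct is needed, which is why the mock cluster and the real cluster can both adopt the new interfaces without behavioural change.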
-func NewJointStateChecker(cluster sche.ClusterInformer) *JointStateChecker { +func NewJointStateChecker(cluster sche.CheckerCluster) *JointStateChecker { return &JointStateChecker{ cluster: cluster, } diff --git a/pkg/schedule/checker/learner_checker.go b/pkg/schedule/checker/learner_checker.go index 52132ca7018d..c6f7d671ac35 100644 --- a/pkg/schedule/checker/learner_checker.go +++ b/pkg/schedule/checker/learner_checker.go @@ -25,7 +25,7 @@ import ( // LearnerChecker ensures region has a learner will be promoted. type LearnerChecker struct { PauseController - cluster sche.ClusterInformer + cluster sche.CheckerCluster } var ( @@ -34,7 +34,7 @@ var ( ) // NewLearnerChecker creates a learner checker. -func NewLearnerChecker(cluster sche.ClusterInformer) *LearnerChecker { +func NewLearnerChecker(cluster sche.CheckerCluster) *LearnerChecker { return &LearnerChecker{ cluster: cluster, } diff --git a/pkg/schedule/checker/merge_checker.go b/pkg/schedule/checker/merge_checker.go index 728583fc8451..771afbdb9fa5 100644 --- a/pkg/schedule/checker/merge_checker.go +++ b/pkg/schedule/checker/merge_checker.go @@ -76,14 +76,14 @@ var ( // MergeChecker ensures region to merge with adjacent region when size is small type MergeChecker struct { PauseController - cluster sche.ScheduleCluster - conf config.Config + cluster sche.CheckerCluster + conf config.CheckerConfig splitCache *cache.TTLUint64 startTime time.Time // it's used to judge whether server recently start. } // NewMergeChecker creates a merge checker. -func NewMergeChecker(ctx context.Context, cluster sche.ScheduleCluster, conf config.Config) *MergeChecker { +func NewMergeChecker(ctx context.Context, cluster sche.CheckerCluster, conf config.CheckerConfig) *MergeChecker { splitCache := cache.NewIDTTL(ctx, time.Minute, conf.GetSplitMergeInterval()) return &MergeChecker{ cluster: cluster, @@ -250,7 +250,7 @@ func (m *MergeChecker) checkTarget(region, adjacent *core.RegionInfo) bool { } // AllowMerge returns true if two regions can be merged according to the key type. -func AllowMerge(cluster sche.ScheduleCluster, region, adjacent *core.RegionInfo) bool { +func AllowMerge(cluster sche.SharedCluster, region, adjacent *core.RegionInfo) bool { var start, end []byte if bytes.Equal(region.GetEndKey(), adjacent.GetStartKey()) && len(region.GetEndKey()) != 0 { start, end = region.GetStartKey(), adjacent.GetEndKey() @@ -266,7 +266,7 @@ func AllowMerge(cluster sche.ScheduleCluster, region, adjacent *core.RegionInfo) // We can consider using dependency injection techniques to optimize in // the future. - if cluster.GetOpts().IsPlacementRulesEnabled() { + if cluster.GetSharedConfig().IsPlacementRulesEnabled() { cl, ok := cluster.(interface{ GetRuleManager() *placement.RuleManager }) if !ok || len(cl.GetRuleManager().GetSplitKeys(start, end)) > 0 { return false @@ -283,10 +283,10 @@ func AllowMerge(cluster sche.ScheduleCluster, region, adjacent *core.RegionInfo) } } - policy := cluster.GetOpts().GetKeyType() + policy := cluster.GetSharedConfig().GetKeyType() switch policy { case constant.Table: - if cluster.GetOpts().IsCrossTableMergeEnabled() { + if cluster.GetSharedConfig().IsCrossTableMergeEnabled() { return true } return isTableIDSame(region, adjacent) @@ -306,7 +306,7 @@ func isTableIDSame(region, adjacent *core.RegionInfo) bool { // Check whether there is a peer of the adjacent region on an offline store, // while the source region has no peer on it. This is to prevent from bringing // any other peer into an offline store to slow down the offline process. 
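AllowMerge above now depends only on sche.SharedCluster and reads options through GetSharedConfig(); the rule manager is reached through an anonymous interface assertion so callers that cannot provide one still compile. A small sketch of that optional-capability probe, using made-up stand-in types:

package main

import "fmt"

// sharedCluster is a stand-in for sche.SharedCluster: the narrow
// interface AllowMerge actually depends on.
type sharedCluster interface {
    PlacementRulesEnabled() bool
}

// ruleManager is a stand-in for *placement.RuleManager.
type ruleManager struct{ splitKeys int }

// fullCluster additionally exposes a rule manager, like the real
// scheduling cluster does.
type fullCluster struct{ rm *ruleManager }

func (c *fullCluster) PlacementRulesEnabled() bool  { return true }
func (c *fullCluster) GetRuleManager() *ruleManager { return c.rm }

// allowMerge mirrors the shape of checker.AllowMerge: it takes the
// narrow interface and only uses the rule manager if the concrete
// value happens to provide one.
func allowMerge(cluster sharedCluster) bool {
    if cluster.PlacementRulesEnabled() {
        // Probe for the optional capability without widening the
        // function signature.
        cl, ok := cluster.(interface{ GetRuleManager() *ruleManager })
        if !ok || cl.GetRuleManager().splitKeys > 0 {
            return false
        }
    }
    return true
}

func main() {
    fmt.Println(allowMerge(&fullCluster{rm: &ruleManager{splitKeys: 0}})) // true
    fmt.Println(allowMerge(&fullCluster{rm: &ruleManager{splitKeys: 2}})) // false
}

The assertion keeps the exported signature narrow while still letting richer clusters opt in to the rule-based split check.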
-func checkPeerStore(cluster sche.ScheduleCluster, region, adjacent *core.RegionInfo) bool { +func checkPeerStore(cluster sche.SharedCluster, region, adjacent *core.RegionInfo) bool { regionStoreIDs := region.GetStoreIDs() for _, peer := range adjacent.GetPeers() { storeID := peer.GetStoreId() diff --git a/pkg/schedule/checker/merge_checker_test.go b/pkg/schedule/checker/merge_checker_test.go index 9c38d677619f..6478eb0b2c45 100644 --- a/pkg/schedule/checker/merge_checker_test.go +++ b/pkg/schedule/checker/merge_checker_test.go @@ -80,7 +80,7 @@ func (suite *mergeCheckerTestSuite) SetupTest() { for _, region := range suite.regions { suite.cluster.PutRegion(region) } - suite.mc = NewMergeChecker(suite.ctx, suite.cluster, suite.cluster.GetOpts()) + suite.mc = NewMergeChecker(suite.ctx, suite.cluster, suite.cluster.GetCheckerConfig()) } func (suite *mergeCheckerTestSuite) TearDownTest() { @@ -461,9 +461,9 @@ func (suite *mergeCheckerTestSuite) TestStoreLimitWithMerge() { tc.PutRegion(region) } - mc := NewMergeChecker(suite.ctx, tc, tc.GetOpts()) + mc := NewMergeChecker(suite.ctx, tc, tc.GetCheckerConfig()) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) - oc := operator.NewController(suite.ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) regions[2] = regions[2].Clone( core.SetPeers([]*metapb.Peer{ @@ -530,7 +530,7 @@ func (suite *mergeCheckerTestSuite) TestCache() { suite.cluster.PutRegion(region) } - suite.mc = NewMergeChecker(suite.ctx, suite.cluster, suite.cluster.GetOpts()) + suite.mc = NewMergeChecker(suite.ctx, suite.cluster, suite.cluster.GetCheckerConfig()) ops := suite.mc.Check(suite.regions[1]) suite.Nil(ops) diff --git a/pkg/schedule/checker/priority_inspector.go b/pkg/schedule/checker/priority_inspector.go index adb94707033f..7e0716010236 100644 --- a/pkg/schedule/checker/priority_inspector.go +++ b/pkg/schedule/checker/priority_inspector.go @@ -29,13 +29,13 @@ const defaultPriorityQueueSize = 1280 // PriorityInspector ensures high priority region should run first type PriorityInspector struct { - cluster sche.ClusterInformer - conf config.Config + cluster sche.CheckerCluster + conf config.CheckerConfig queue *cache.PriorityQueue } // NewPriorityInspector creates a priority inspector. 
-func NewPriorityInspector(cluster sche.ClusterInformer, conf config.Config) *PriorityInspector { +func NewPriorityInspector(cluster sche.CheckerCluster, conf config.CheckerConfig) *PriorityInspector { return &PriorityInspector{ cluster: cluster, conf: conf, diff --git a/pkg/schedule/checker/priority_inspector_test.go b/pkg/schedule/checker/priority_inspector_test.go index fc88b3e428f3..7988adf6ab6d 100644 --- a/pkg/schedule/checker/priority_inspector_test.go +++ b/pkg/schedule/checker/priority_inspector_test.go @@ -37,7 +37,7 @@ func TestCheckPriorityRegions(t *testing.T) { tc.AddLeaderRegion(2, 2, 3) tc.AddLeaderRegion(3, 2) - pc := NewPriorityInspector(tc, tc.GetOpts()) + pc := NewPriorityInspector(tc, tc.GetCheckerConfig()) checkPriorityRegionTest(re, pc, tc) opt.SetPlacementRuleEnabled(true) re.True(opt.IsPlacementRulesEnabled()) @@ -47,7 +47,7 @@ func TestCheckPriorityRegions(t *testing.T) { func checkPriorityRegionTest(re *require.Assertions, pc *PriorityInspector, tc *mockcluster.Cluster) { // case1: inspect region 1, it doesn't lack replica region := tc.GetRegion(1) - opt := tc.GetOpts() + opt := tc.GetCheckerConfig() pc.Inspect(region) re.Equal(0, pc.queue.Len()) diff --git a/pkg/schedule/checker/replica_checker.go b/pkg/schedule/checker/replica_checker.go index f944993940f8..4397d8f425e9 100644 --- a/pkg/schedule/checker/replica_checker.go +++ b/pkg/schedule/checker/replica_checker.go @@ -61,13 +61,13 @@ var ( // Location management, mainly used for cross data center deployment. type ReplicaChecker struct { PauseController - cluster sche.ClusterInformer - conf config.Config + cluster sche.CheckerCluster + conf config.CheckerConfig regionWaitingList cache.Cache } // NewReplicaChecker creates a replica checker. -func NewReplicaChecker(cluster sche.ClusterInformer, conf config.Config, regionWaitingList cache.Cache) *ReplicaChecker { +func NewReplicaChecker(cluster sche.CheckerCluster, conf config.CheckerConfig, regionWaitingList cache.Cache) *ReplicaChecker { return &ReplicaChecker{ cluster: cluster, conf: conf, diff --git a/pkg/schedule/checker/replica_checker_test.go b/pkg/schedule/checker/replica_checker_test.go index 2d0961543e53..a326d39d451c 100644 --- a/pkg/schedule/checker/replica_checker_test.go +++ b/pkg/schedule/checker/replica_checker_test.go @@ -50,7 +50,7 @@ func (suite *replicaCheckerTestSuite) SetupTest() { suite.ctx, suite.cancel = context.WithCancel(context.Background()) suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - suite.rc = NewReplicaChecker(suite.cluster, suite.cluster.GetOpts(), cache.NewDefaultCache(10)) + suite.rc = NewReplicaChecker(suite.cluster, suite.cluster.GetCheckerConfig(), cache.NewDefaultCache(10)) stats := &pdpb.StoreStats{ Capacity: 100, Available: 100, @@ -207,7 +207,7 @@ func (suite *replicaCheckerTestSuite) TestBasic() { tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetMaxSnapshotCount(2) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetCheckerConfig(), cache.NewDefaultCache(10)) // Add stores 1,2,3,4. 
tc.AddRegionStore(1, 4) @@ -283,7 +283,7 @@ func (suite *replicaCheckerTestSuite) TestLostStore() { tc.AddRegionStore(1, 1) tc.AddRegionStore(2, 1) - rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetCheckerConfig(), cache.NewDefaultCache(10)) // now region peer in store 1,2,3.but we just have store 1,2 // This happens only in recovering the PD tc @@ -301,7 +301,7 @@ func (suite *replicaCheckerTestSuite) TestOffline() { tc.SetMaxReplicas(3) tc.SetLocationLabels([]string{"zone", "rack", "host"}) - rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetCheckerConfig(), cache.NewDefaultCache(10)) tc.AddLabelsStore(1, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) tc.AddLabelsStore(2, 2, map[string]string{"zone": "z2", "rack": "r1", "host": "h1"}) tc.AddLabelsStore(3, 3, map[string]string{"zone": "z3", "rack": "r1", "host": "h1"}) @@ -352,7 +352,7 @@ func (suite *replicaCheckerTestSuite) TestDistinctScore() { tc.SetMaxReplicas(3) tc.SetLocationLabels([]string{"zone", "rack", "host"}) - rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetCheckerConfig(), cache.NewDefaultCache(10)) tc.AddLabelsStore(1, 9, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) tc.AddLabelsStore(2, 8, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) @@ -431,7 +431,7 @@ func (suite *replicaCheckerTestSuite) TestDistinctScore2() { tc.SetMaxReplicas(5) tc.SetLocationLabels([]string{"zone", "host"}) - rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetCheckerConfig(), cache.NewDefaultCache(10)) tc.AddLabelsStore(1, 1, map[string]string{"zone": "z1", "host": "h1"}) tc.AddLabelsStore(2, 1, map[string]string{"zone": "z1", "host": "h2"}) @@ -459,7 +459,7 @@ func (suite *replicaCheckerTestSuite) TestStorageThreshold() { tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetLocationLabels([]string{"zone"}) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetCheckerConfig(), cache.NewDefaultCache(10)) tc.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) tc.UpdateStorageRatio(1, 0.5, 0.5) @@ -494,7 +494,7 @@ func (suite *replicaCheckerTestSuite) TestOpts() { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetCheckerConfig(), cache.NewDefaultCache(10)) tc.AddRegionStore(1, 100) tc.AddRegionStore(2, 100) @@ -526,7 +526,7 @@ func (suite *replicaCheckerTestSuite) TestFixDownPeer() { tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetLocationLabels([]string{"zone"}) - rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetCheckerConfig(), cache.NewDefaultCache(10)) tc.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) tc.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) @@ -557,7 +557,7 @@ func (suite *replicaCheckerTestSuite) TestFixOfflinePeer() { tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetLocationLabels([]string{"zone"}) - rc 
:= NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetCheckerConfig(), cache.NewDefaultCache(10)) tc.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) tc.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) diff --git a/pkg/schedule/checker/replica_strategy.go b/pkg/schedule/checker/replica_strategy.go index 797d8bb28536..fdf05a0c4795 100644 --- a/pkg/schedule/checker/replica_strategy.go +++ b/pkg/schedule/checker/replica_strategy.go @@ -27,7 +27,7 @@ import ( // exists to allow replica_checker and rule_checker to reuse common logics. type ReplicaStrategy struct { checkerName string // replica-checker / rule-checker - cluster sche.ClusterInformer + cluster sche.CheckerCluster locationLabels []string isolationLevel string region *core.RegionInfo @@ -77,13 +77,13 @@ func (s *ReplicaStrategy) SelectStoreToAdd(coLocationStores []*core.StoreInfo, e isolationComparer := filter.IsolationComparer(s.locationLabels, coLocationStores) strictStateFilter := &filter.StoreStateFilter{ActionScope: s.checkerName, MoveRegion: true, AllowFastFailover: s.fastFailover, OperatorLevel: level} targetCandidate := filter.NewCandidates(s.cluster.GetStores()). - FilterTarget(s.cluster.GetOpts(), nil, nil, filters...). + FilterTarget(s.cluster.GetCheckerConfig(), nil, nil, filters...). KeepTheTopStores(isolationComparer, false) // greater isolation score is better if targetCandidate.Len() == 0 { return 0, false } - target := targetCandidate.FilterTarget(s.cluster.GetOpts(), nil, nil, strictStateFilter). - PickTheTopStore(filter.RegionScoreComparer(s.cluster.GetOpts()), true) // less region score is better + target := targetCandidate.FilterTarget(s.cluster.GetCheckerConfig(), nil, nil, strictStateFilter). + PickTheTopStore(filter.RegionScoreComparer(s.cluster.GetCheckerConfig()), true) // less region score is better if target == nil { return 0, true // filter by temporary states } @@ -139,9 +139,9 @@ func (s *ReplicaStrategy) SelectStoreToRemove(coLocationStores []*core.StoreInfo level = constant.Urgent } source := filter.NewCandidates(coLocationStores). - FilterSource(s.cluster.GetOpts(), nil, nil, &filter.StoreStateFilter{ActionScope: s.checkerName, MoveRegion: true, OperatorLevel: level}). + FilterSource(s.cluster.GetCheckerConfig(), nil, nil, &filter.StoreStateFilter{ActionScope: s.checkerName, MoveRegion: true, OperatorLevel: level}). KeepTheTopStores(isolationComparer, true). - PickTheTopStore(filter.RegionScoreComparer(s.cluster.GetOpts()), false) + PickTheTopStore(filter.RegionScoreComparer(s.cluster.GetCheckerConfig()), false) if source == nil { log.Debug("no removable store", zap.Uint64("region-id", s.region.GetID())) return 0 diff --git a/pkg/schedule/checker/rule_checker.go b/pkg/schedule/checker/rule_checker.go index 95e634149668..a7f9ba63b11b 100644 --- a/pkg/schedule/checker/rule_checker.go +++ b/pkg/schedule/checker/rule_checker.go @@ -81,7 +81,7 @@ var ( // RuleChecker fix/improve region by placement rules. type RuleChecker struct { PauseController - cluster sche.ClusterInformer + cluster sche.CheckerCluster ruleManager *placement.RuleManager name string regionWaitingList cache.Cache @@ -91,14 +91,14 @@ type RuleChecker struct { } // NewRuleChecker creates a checker instance. 
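ReplicaStrategy.SelectStoreToAdd and SelectStoreToRemove above chain FilterTarget/FilterSource, KeepTheTopStores, and PickTheTopStore over a candidate list, with every step now reading GetCheckerConfig(). A compact sketch of that filter-then-pick pipeline over plain integers; the real code operates on *core.StoreInfo and config.CheckerConfig:

package main

import (
    "fmt"
    "sort"
)

// candidates mimics filter.StoreCandidates: it wraps a slice and
// returns itself so calls can be chained.
type candidates struct{ stores []int }

// filter keeps only the stores accepted by the predicate, like
// FilterTarget does with its Filter list.
func (c *candidates) filter(keep func(int) bool) *candidates {
    out := c.stores[:0]
    for _, s := range c.stores {
        if keep(s) {
            out = append(out, s)
        }
    }
    c.stores = out
    return c
}

// pickTop returns the best store under the comparer, like
// PickTheTopStore with RegionScoreComparer.
func (c *candidates) pickTop(less func(a, b int) bool) (int, bool) {
    if len(c.stores) == 0 {
        return 0, false
    }
    sort.Slice(c.stores, func(i, j int) bool { return less(c.stores[i], c.stores[j]) })
    return c.stores[0], true
}

func main() {
    // Stores are just IDs here; pretend even IDs fail a state filter.
    target, ok := (&candidates{stores: []int{5, 2, 9, 4, 7}}).
        filter(func(s int) bool { return s%2 == 1 }).
        pickTop(func(a, b int) bool { return a < b }) // less score is better
    fmt.Println(target, ok) // 5 true
}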
-func NewRuleChecker(ctx context.Context, cluster sche.ClusterInformer, ruleManager *placement.RuleManager, regionWaitingList cache.Cache) *RuleChecker { +func NewRuleChecker(ctx context.Context, cluster sche.CheckerCluster, ruleManager *placement.RuleManager, regionWaitingList cache.Cache) *RuleChecker { return &RuleChecker{ cluster: cluster, ruleManager: ruleManager, name: ruleCheckerName, regionWaitingList: regionWaitingList, pendingList: cache.NewDefaultCache(maxPendingListLen), - switchWitnessCache: cache.NewIDTTL(ctx, time.Minute, cluster.GetOpts().GetSwitchWitnessInterval()), + switchWitnessCache: cache.NewIDTTL(ctx, time.Minute, cluster.GetCheckerConfig().GetSwitchWitnessInterval()), record: newRecord(), } } @@ -160,7 +160,7 @@ func (c *RuleChecker) CheckWithFit(region *core.RegionInfo, fit *placement.Regio return op } } - if c.cluster.GetOpts().IsPlacementRulesCacheEnabled() { + if c.cluster.GetCheckerConfig().IsPlacementRulesCacheEnabled() { if placement.ValidateFit(fit) && placement.ValidateRegion(region) && placement.ValidateStores(fit.GetRegionStores()) { // If there is no need to fix, we will cache the fit c.ruleManager.SetRegionFitCache(region, fit) @@ -173,12 +173,12 @@ func (c *RuleChecker) CheckWithFit(region *core.RegionInfo, fit *placement.Regio // RecordRegionPromoteToNonWitness put the recently switch non-witness region into cache. RuleChecker // will skip switch it back to witness for a while. func (c *RuleChecker) RecordRegionPromoteToNonWitness(regionID uint64) { - c.switchWitnessCache.PutWithTTL(regionID, nil, c.cluster.GetOpts().GetSwitchWitnessInterval()) + c.switchWitnessCache.PutWithTTL(regionID, nil, c.cluster.GetCheckerConfig().GetSwitchWitnessInterval()) } func (c *RuleChecker) isWitnessEnabled() bool { - return versioninfo.IsFeatureSupported(c.cluster.GetOpts().GetClusterVersion(), versioninfo.SwitchWitness) && - c.cluster.GetOpts().IsWitnessAllowed() + return versioninfo.IsFeatureSupported(c.cluster.GetCheckerConfig().GetClusterVersion(), versioninfo.SwitchWitness) && + c.cluster.GetCheckerConfig().IsWitnessAllowed() } func (c *RuleChecker) fixRulePeer(region *core.RegionInfo, fit *placement.RegionFit, rf *placement.RuleFit) (*operator.Operator, error) { @@ -343,7 +343,7 @@ func (c *RuleChecker) fixLooseMatchPeer(region *core.RegionInfo, fit *placement. 
return nil, errPeerCannotBeWitness } if !core.IsWitness(peer) && rf.Rule.IsWitness && c.isWitnessEnabled() { - c.switchWitnessCache.UpdateTTL(c.cluster.GetOpts().GetSwitchWitnessInterval()) + c.switchWitnessCache.UpdateTTL(c.cluster.GetCheckerConfig().GetSwitchWitnessInterval()) if c.switchWitnessCache.Exists(region.GetID()) { ruleCheckerRecentlyPromoteToNonWitnessCounter.Inc() return nil, nil @@ -378,7 +378,7 @@ func (c *RuleChecker) allowLeader(fit *placement.RegionFit, peer *metapb.Peer) b return false } stateFilter := &filter.StoreStateFilter{ActionScope: "rule-checker", TransferLeader: true} - if !stateFilter.Target(c.cluster.GetOpts(), s).IsOK() { + if !stateFilter.Target(c.cluster.GetCheckerConfig(), s).IsOK() { return false } for _, rf := range fit.RuleFits { @@ -498,7 +498,7 @@ func (c *RuleChecker) isDownPeer(region *core.RegionInfo, peer *metapb.Peer) boo func (c *RuleChecker) isStoreDownTimeHitMaxDownTime(storeID uint64) bool { store := c.cluster.GetStore(storeID) - return store.DownTime() >= c.cluster.GetOpts().GetMaxStoreDownTime() + return store.DownTime() >= c.cluster.GetCheckerConfig().GetMaxStoreDownTime() } func (c *RuleChecker) isOfflinePeer(peer *metapb.Peer) bool { @@ -587,7 +587,7 @@ func (o *recorder) incOfflineLeaderCount(storeID uint64) { // Offline is triggered manually and only appears when the node makes some adjustments. here is an operator timeout / 2. var offlineCounterTTL = 5 * time.Minute -func (o *recorder) refresh(cluster sche.ClusterInformer) { +func (o *recorder) refresh(cluster sche.CheckerCluster) { // re-count the offlineLeaderCounter if the store is already tombstone or store is gone. if len(o.offlineLeaderCounter) > 0 && time.Since(o.lastUpdateTime) > offlineCounterTTL { needClean := false diff --git a/pkg/schedule/checker/split_checker.go b/pkg/schedule/checker/split_checker.go index f3d0422e8754..072bdcf7a2e7 100644 --- a/pkg/schedule/checker/split_checker.go +++ b/pkg/schedule/checker/split_checker.go @@ -28,7 +28,7 @@ import ( // SplitChecker splits regions when the key range spans across rule/label boundary. type SplitChecker struct { PauseController - cluster sche.ClusterInformer + cluster sche.CheckerCluster ruleManager *placement.RuleManager labeler *labeler.RegionLabeler } @@ -42,7 +42,7 @@ var ( ) // NewSplitChecker creates a new SplitChecker. -func NewSplitChecker(cluster sche.ClusterInformer, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler) *SplitChecker { +func NewSplitChecker(cluster sche.CheckerCluster, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler) *SplitChecker { return &SplitChecker{ cluster: cluster, ruleManager: ruleManager, @@ -71,7 +71,7 @@ func (c *SplitChecker) Check(region *core.RegionInfo) *operator.Operator { desc := "labeler-split-region" keys := c.labeler.GetSplitKeys(start, end) - if len(keys) == 0 && c.cluster.GetOpts().IsPlacementRulesEnabled() { + if len(keys) == 0 && c.cluster.GetCheckerConfig().IsPlacementRulesEnabled() { desc = "rule-split-region" keys = c.ruleManager.GetSplitKeys(start, end) } diff --git a/pkg/schedule/config/config.go b/pkg/schedule/config/config.go index 0f75d1e5cad5..b4a5c3fb1a14 100644 --- a/pkg/schedule/config/config.go +++ b/pkg/schedule/config/config.go @@ -28,17 +28,18 @@ func IsSchedulerRegistered(name string) bool { return ok } -// Config is the interface that wraps the Config related methods. -type Config interface { +// SchedulerConfig is the interface for scheduler configurations. 
+type SchedulerConfig interface { + SharedConfig + IsSchedulingHalted() bool + IsSchedulerDisabled(string) bool AddSchedulerCfg(string, []string) RemoveSchedulerCfg(string) Persist(endpoint.ConfigStorage) error - GetReplicaScheduleLimit() uint64 GetRegionScheduleLimit() uint64 - GetMergeScheduleLimit() uint64 GetLeaderScheduleLimit() uint64 GetHotRegionScheduleLimit() uint64 GetWitnessScheduleLimit() uint64 @@ -47,54 +48,69 @@ type Config interface { GetMaxMovableHotPeerSize() int64 IsTraceRegionFlow() bool - GetSplitMergeInterval() time.Duration - GetMaxMergeRegionSize() uint64 - GetMaxMergeRegionKeys() uint64 - GetKeyType() constant.KeyType - IsOneWayMergeEnabled() bool - IsCrossTableMergeEnabled() bool + GetTolerantSizeRatio() float64 + GetLeaderSchedulePolicy() constant.SchedulePolicy - IsPlacementRulesEnabled() bool - IsPlacementRulesCacheEnabled() bool + IsDebugMetricsEnabled() bool + IsDiagnosticAllowed() bool + GetSlowStoreEvictingAffectedStoreRatioThreshold() float64 +} - GetMaxReplicas() int - GetPatrolRegionInterval() time.Duration - GetMaxStoreDownTime() time.Duration - GetLocationLabels() []string - GetIsolationLevel() string +// CheckerConfig is the interface for checker configurations. +type CheckerConfig interface { + SharedConfig + + GetSwitchWitnessInterval() time.Duration + IsRemoveExtraReplicaEnabled() bool + IsRemoveDownReplicaEnabled() bool IsReplaceOfflineReplicaEnabled() bool IsMakeUpReplicaEnabled() bool - IsRemoveExtraReplicaEnabled() bool IsLocationReplacementEnabled() bool - IsRemoveDownReplicaEnabled() bool - - GetSwitchWitnessInterval() time.Duration - IsWitnessAllowed() bool + GetIsolationLevel() string + GetSplitMergeInterval() time.Duration + GetPatrolRegionInterval() time.Duration + GetMaxMergeRegionSize() uint64 + GetMaxMergeRegionKeys() uint64 + GetReplicaScheduleLimit() uint64 +} +// SharedConfig is the interface for shared configurations. +type SharedConfig interface { + GetMaxReplicas() int + IsPlacementRulesEnabled() bool + GetMaxSnapshotCount() uint64 + GetMaxPendingPeerCount() uint64 GetLowSpaceRatio() float64 GetHighSpaceRatio() float64 - GetTolerantSizeRatio() float64 - GetLeaderSchedulePolicy() constant.SchedulePolicy + GetMaxStoreDownTime() time.Duration + GetLocationLabels() []string + CheckLabelProperty(string, []*metapb.StoreLabel) bool + GetClusterVersion() *semver.Version + IsUseJointConsensus() bool + GetKeyType() constant.KeyType + IsCrossTableMergeEnabled() bool + IsOneWayMergeEnabled() bool + GetMergeScheduleLimit() uint64 GetRegionScoreFormulaVersion() string - - GetMaxSnapshotCount() uint64 - GetMaxPendingPeerCount() uint64 GetSchedulerMaxWaitingOperator() uint64 GetStoreLimitByType(uint64, storelimit.Type) float64 - SetAllStoresLimit(storelimit.Type, float64) - GetSlowStoreEvictingAffectedStoreRatioThreshold() float64 - IsUseJointConsensus() bool - CheckLabelProperty(string, []*metapb.StoreLabel) bool - IsDebugMetricsEnabled() bool - GetClusterVersion() *semver.Version - GetStoreLimitVersion() string - IsDiagnosticAllowed() bool + IsWitnessAllowed() bool + IsPlacementRulesCacheEnabled() bool + + // for test purpose + SetPlacementRulesCacheEnabled(bool) + SetEnableWitness(bool) +} + +// Config is the interface that wraps the Config related methods. 
+type Config interface { + SchedulerConfig + CheckerConfig // for test purpose SetPlacementRuleEnabled(bool) SetSplitMergeInterval(time.Duration) SetMaxReplicas(int) - SetPlacementRulesCacheEnabled(bool) - SetEnableWitness(bool) + SetAllStoresLimit(typ storelimit.Type, ratePerMin float64) // only for store configuration UseRaftV2() } diff --git a/pkg/schedule/coordinator.go b/pkg/schedule/coordinator.go index cb36a3671653..244f7e5d79ff 100644 --- a/pkg/schedule/coordinator.go +++ b/pkg/schedule/coordinator.go @@ -86,23 +86,22 @@ type Coordinator struct { // NewCoordinator creates a new Coordinator. func NewCoordinator(ctx context.Context, cluster sche.ClusterInformer, hbStreams *hbstream.HeartbeatStreams) *Coordinator { ctx, cancel := context.WithCancel(ctx) - opController := operator.NewController(ctx, cluster.GetBasicCluster(), cluster.GetOpts(), hbStreams) + opController := operator.NewController(ctx, cluster.GetBasicCluster(), cluster.GetSharedConfig(), hbStreams) schedulers := schedulers.NewController(ctx, cluster, cluster.GetStorage(), opController) - c := &Coordinator{ - ctx: ctx, - cancel: cancel, - cluster: cluster, - prepareChecker: newPrepareChecker(), - checkers: checker.NewController(ctx, cluster, cluster.GetOpts(), cluster.GetRuleManager(), cluster.GetRegionLabeler(), opController), - regionScatterer: scatter.NewRegionScatterer(ctx, cluster, opController), - regionSplitter: splitter.NewRegionSplitter(cluster, splitter.NewSplitRegionsHandler(cluster, opController)), - schedulers: schedulers, - opController: opController, - hbStreams: hbStreams, - pluginInterface: NewPluginInterface(), - } - c.diagnosticManager = diagnostic.NewManager(schedulers, cluster.GetOpts()) - return c + return &Coordinator{ + ctx: ctx, + cancel: cancel, + cluster: cluster, + prepareChecker: newPrepareChecker(), + checkers: checker.NewController(ctx, cluster, cluster.GetCheckerConfig(), cluster.GetRuleManager(), cluster.GetRegionLabeler(), opController), + regionScatterer: scatter.NewRegionScatterer(ctx, cluster, opController), + regionSplitter: splitter.NewRegionSplitter(cluster, splitter.NewSplitRegionsHandler(cluster, opController)), + schedulers: schedulers, + opController: opController, + hbStreams: hbStreams, + pluginInterface: NewPluginInterface(), + diagnosticManager: diagnostic.NewManager(schedulers, cluster.GetSchedulerConfig()), + } } // GetWaitingRegions returns the regions in the waiting list. @@ -122,7 +121,7 @@ func (c *Coordinator) PatrolRegions() { defer logutil.LogPanic() defer c.wg.Done() - ticker := time.NewTicker(c.cluster.GetOpts().GetPatrolRegionInterval()) + ticker := time.NewTicker(c.cluster.GetCheckerConfig().GetPatrolRegionInterval()) defer ticker.Stop() log.Info("Coordinator starts patrol regions") @@ -135,7 +134,7 @@ func (c *Coordinator) PatrolRegions() { select { case <-ticker.C: // Note: we reset the ticker here to support updating configuration dynamically. - ticker.Reset(c.cluster.GetOpts().GetPatrolRegionInterval()) + ticker.Reset(c.cluster.GetCheckerConfig().GetPatrolRegionInterval()) case <-c.ctx.Done(): log.Info("patrol regions has been stopped") return @@ -505,7 +504,7 @@ func (c *Coordinator) Stop() { // GetHotRegionsByType gets hot regions' statistics by RWType. 
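The config.go hunk above replaces the monolithic Config with a SharedConfig core that both SchedulerConfig and CheckerConfig embed, keeping Config as the union for callers that still need everything. A reduced sketch of how a single options type keeps satisfying every view; the method set here is a placeholder, far smaller than the real interfaces:

package main

import (
    "fmt"
    "time"
)

// sharedConfig holds the methods both schedulers and checkers use.
type sharedConfig interface {
    GetMaxReplicas() int
    IsPlacementRulesEnabled() bool
}

// schedulerConfig and checkerConfig extend the shared core with the
// methods only their side needs, mirroring the new interfaces above.
type schedulerConfig interface {
    sharedConfig
    GetLeaderScheduleLimit() uint64
}

type checkerConfig interface {
    sharedConfig
    GetPatrolRegionInterval() time.Duration
}

// configUnion plays the role of the remaining Config interface: the
// union of both views, still useful for the server-side options type.
type configUnion interface {
    schedulerConfig
    checkerConfig
}

// persistOptions stands in for the concrete options object; it keeps
// satisfying every interface, so call sites only change the parameter
// types they declare.
type persistOptions struct{}

func (o *persistOptions) GetMaxReplicas() int                    { return 3 }
func (o *persistOptions) IsPlacementRulesEnabled() bool          { return true }
func (o *persistOptions) GetLeaderScheduleLimit() uint64         { return 4 }
func (o *persistOptions) GetPatrolRegionInterval() time.Duration { return 10 * time.Millisecond }

func main() {
    var full configUnion = &persistOptions{}
    var forChecker checkerConfig = full     // a checker only sees this view
    var forScheduler schedulerConfig = full // a scheduler only sees this one
    fmt.Println(forChecker.GetPatrolRegionInterval(), forScheduler.GetLeaderScheduleLimit())
}

Interface embedding means the concrete options type needs no code change at all; only the parameter types declared by schedulers and checkers narrow.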
func (c *Coordinator) GetHotRegionsByType(typ statistics.RWType) *statistics.StoreHotPeersInfos { - isTraceFlow := c.cluster.GetOpts().IsTraceRegionFlow() + isTraceFlow := c.cluster.GetSchedulerConfig().IsTraceRegionFlow() storeLoads := c.cluster.GetStoresLoads() stores := c.cluster.GetStores() var infos *statistics.StoreHotPeersInfos diff --git a/pkg/schedule/core/cluster_informer.go b/pkg/schedule/core/cluster_informer.go index c7eb0d7581c4..fb0647a1b050 100644 --- a/pkg/schedule/core/cluster_informer.go +++ b/pkg/schedule/core/cluster_informer.go @@ -27,27 +27,50 @@ import ( // ClusterInformer provides the necessary information of a cluster. type ClusterInformer interface { - ScheduleCluster + SchedulerCluster + CheckerCluster + ScatterCluster GetStorage() storage.Storage UpdateRegionsLabelLevelStats(regions []*core.RegionInfo) - AddSuspectRegions(ids ...uint64) GetPersistOptions() *config.PersistOptions } -// ScheduleCluster is an aggregate interface that wraps multiple interfaces for schedulers use -type ScheduleCluster interface { - BasicCluster +// SchedulerCluster is an aggregate interface that wraps multiple interfaces +type SchedulerCluster interface { + SharedCluster statistics.StoreStatInformer - statistics.RegionStatInformer buckets.BucketStatInformer - GetOpts() sc.Config - GetRuleManager() *placement.RuleManager + GetSchedulerConfig() sc.SchedulerConfig GetRegionLabeler() *labeler.RegionLabeler - GetBasicCluster() *core.BasicCluster GetStoreConfig() sc.StoreConfig +} + +// CheckerCluster is an aggregate interface that wraps multiple interfaces +type CheckerCluster interface { + SharedCluster + + GetCheckerConfig() sc.CheckerConfig + GetStoreConfig() sc.StoreConfig +} + +// ScatterCluster is an aggregate interface that wraps multiple interfaces +type ScatterCluster interface { + SharedCluster + + AddSuspectRegions(ids ...uint64) +} + +// SharedCluster is an aggregate interface that wraps multiple interfaces +type SharedCluster interface { + BasicCluster + statistics.RegionStatInformer + + GetBasicCluster() *core.BasicCluster + GetSharedConfig() sc.SharedConfig + GetRuleManager() *placement.RuleManager AllocID() (uint64, error) } diff --git a/pkg/schedule/diagnostic/diagnostic_manager.go b/pkg/schedule/diagnostic/diagnostic_manager.go index 8e9546aa2900..7d5a83062c02 100644 --- a/pkg/schedule/diagnostic/diagnostic_manager.go +++ b/pkg/schedule/diagnostic/diagnostic_manager.go @@ -24,12 +24,12 @@ import ( // Manager is used to manage the diagnostic result of schedulers for now. type Manager struct { - config config.Config + config config.SchedulerConfig schedulerController *schedulers.Controller } // NewManager creates a new Manager. -func NewManager(schedulerController *schedulers.Controller, config config.Config) *Manager { +func NewManager(schedulerController *schedulers.Controller, config config.SchedulerConfig) *Manager { return &Manager{ config: config, schedulerController: schedulerController, diff --git a/pkg/schedule/filter/candidates.go b/pkg/schedule/filter/candidates.go index b3524da47ec8..6393192c8af9 100644 --- a/pkg/schedule/filter/candidates.go +++ b/pkg/schedule/filter/candidates.go @@ -37,13 +37,13 @@ func NewCandidates(stores []*core.StoreInfo) *StoreCandidates { } // FilterSource keeps stores that can pass all source filters. 
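cluster_informer.go above applies the same split on the cluster side: SharedCluster is the common core, SchedulerCluster/CheckerCluster/ScatterCluster layer on top of it, and ClusterInformer remains the union. A sketch of the consumer side, assuming simplified placeholder interfaces rather than the real sche package:

package main

import "fmt"

// sharedCluster is the common core, like sche.SharedCluster above.
type sharedCluster interface {
    AllocID() (uint64, error)
}

// checkerCluster and schedulerCluster are the per-component views
// from cluster_informer.go; clusterInformer is still the union.
type checkerCluster interface {
    sharedCluster
    GetCheckerConfig() string // placeholder payload type
}

type schedulerCluster interface {
    sharedCluster
    GetSchedulerConfig() string
}

type clusterInformer interface {
    checkerCluster
    schedulerCluster
}

// raftCluster stands in for the real cluster implementation.
type raftCluster struct{ nextID uint64 }

func (c *raftCluster) AllocID() (uint64, error)   { c.nextID++; return c.nextID, nil }
func (c *raftCluster) GetCheckerConfig() string   { return "checker-config" }
func (c *raftCluster) GetSchedulerConfig() string { return "scheduler-config" }

// newMergeChecker only asks for the checker view, so a test can hand
// in anything that implements these few methods.
func newMergeChecker(cluster checkerCluster) string { return cluster.GetCheckerConfig() }

// newBalanceScheduler likewise only asks for the scheduler view.
func newBalanceScheduler(cluster schedulerCluster) string { return cluster.GetSchedulerConfig() }

func main() {
    var c clusterInformer = &raftCluster{}
    fmt.Println(newMergeChecker(c), newBalanceScheduler(c))
}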
-func (c *StoreCandidates) FilterSource(conf config.Config, collector *plan.Collector, counter *Counter, filters ...Filter) *StoreCandidates { +func (c *StoreCandidates) FilterSource(conf config.SharedConfig, collector *plan.Collector, counter *Counter, filters ...Filter) *StoreCandidates { c.Stores = SelectSourceStores(c.Stores, filters, conf, collector, counter) return c } // FilterTarget keeps stores that can pass all target filters. -func (c *StoreCandidates) FilterTarget(conf config.Config, collector *plan.Collector, counter *Counter, filters ...Filter) *StoreCandidates { +func (c *StoreCandidates) FilterTarget(conf config.SharedConfig, collector *plan.Collector, counter *Counter, filters ...Filter) *StoreCandidates { c.Stores = SelectTargetStores(c.Stores, filters, conf, collector, counter) return c } diff --git a/pkg/schedule/filter/candidates_test.go b/pkg/schedule/filter/candidates_test.go index be30a8744689..f7b803bb4a74 100644 --- a/pkg/schedule/filter/candidates_test.go +++ b/pkg/schedule/filter/candidates_test.go @@ -50,7 +50,7 @@ type idFilter func(uint64) bool func (f idFilter) Scope() string { return "idFilter" } func (f idFilter) Type() filterType { return filterType(0) } -func (f idFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f idFilter) Source(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if f(store.GetID()) { return statusOK } @@ -58,7 +58,7 @@ func (f idFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status return statusStoreScoreDisallowed } -func (f idFilter) Target(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f idFilter) Target(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if f(store.GetID()) { return statusOK } diff --git a/pkg/schedule/filter/comparer.go b/pkg/schedule/filter/comparer.go index eb319105f874..24ea9af8024b 100644 --- a/pkg/schedule/filter/comparer.go +++ b/pkg/schedule/filter/comparer.go @@ -25,7 +25,7 @@ type StoreComparer func(a, b *core.StoreInfo) int // RegionScoreComparer creates a StoreComparer to sort store by region // score. -func RegionScoreComparer(conf config.Config) StoreComparer { +func RegionScoreComparer(conf config.SharedConfig) StoreComparer { return func(a, b *core.StoreInfo) int { sa := a.RegionScore(conf.GetRegionScoreFormulaVersion(), conf.GetHighSpaceRatio(), conf.GetLowSpaceRatio(), 0) sb := b.RegionScore(conf.GetRegionScoreFormulaVersion(), conf.GetHighSpaceRatio(), conf.GetLowSpaceRatio(), 0) diff --git a/pkg/schedule/filter/filters.go b/pkg/schedule/filter/filters.go index c197ba4d2a8a..2e4c9787e307 100644 --- a/pkg/schedule/filter/filters.go +++ b/pkg/schedule/filter/filters.go @@ -31,7 +31,7 @@ import ( ) // SelectSourceStores selects stores that be selected as source store from the list. -func SelectSourceStores(stores []*core.StoreInfo, filters []Filter, conf config.Config, collector *plan.Collector, +func SelectSourceStores(stores []*core.StoreInfo, filters []Filter, conf config.SharedConfig, collector *plan.Collector, counter *Counter) []*core.StoreInfo { return filterStoresBy(stores, func(s *core.StoreInfo) bool { return slice.AllOf(filters, func(i int) bool { @@ -55,7 +55,7 @@ func SelectSourceStores(stores []*core.StoreInfo, filters []Filter, conf config. } // SelectUnavailableTargetStores selects unavailable stores that can't be selected as target store from the list. 
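The filters.go hunks switch every Filter from config.Config to config.SharedConfig; SelectSourceStores above already shows the narrowed parameter, and the Select*Stores helpers apply every filter in turn. A self-contained sketch of that shape, with a single low-space filter standing in for the real ones and plan.Status reduced to a tiny struct:

package main

import "fmt"

// status is a stand-in for plan.Status.
type status struct {
    ok     bool
    reason string
}

// sharedConfig is the narrowed parameter the filters now receive
// instead of the full config.Config.
type sharedConfig interface{ GetLowSpaceRatio() float64 }

type store struct {
    id        uint64
    spaceUsed float64
}

// storeFilter mirrors the shape of filter.Filter: both directions
// take only the shared view of the configuration.
type storeFilter interface {
    Source(conf sharedConfig, s *store) status
    Target(conf sharedConfig, s *store) status
}

// lowSpaceFilter rejects targets above the low-space ratio, roughly
// what storageThresholdFilter does above.
type lowSpaceFilter struct{}

func (lowSpaceFilter) Source(sharedConfig, *store) status { return status{ok: true} }
func (lowSpaceFilter) Target(conf sharedConfig, s *store) status {
    if s.spaceUsed >= conf.GetLowSpaceRatio() {
        return status{reason: "low-space"}
    }
    return status{ok: true}
}

// selectTargets keeps the stores that pass every filter, like
// SelectTargetStores does.
func selectTargets(stores []*store, filters []storeFilter, conf sharedConfig) []*store {
    var out []*store
    for _, s := range stores {
        passed := true
        for _, f := range filters {
            if st := f.Target(conf, s); !st.ok {
                passed = false
                break
            }
        }
        if passed {
            out = append(out, s)
        }
    }
    return out
}

type opts struct{}

func (opts) GetLowSpaceRatio() float64 { return 0.8 }

func main() {
    stores := []*store{{id: 1, spaceUsed: 0.5}, {id: 2, spaceUsed: 0.9}}
    for _, s := range selectTargets(stores, []storeFilter{lowSpaceFilter{}}, opts{}) {
        fmt.Println("candidate store:", s.id)
    }
}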
-func SelectUnavailableTargetStores(stores []*core.StoreInfo, filters []Filter, conf config.Config, +func SelectUnavailableTargetStores(stores []*core.StoreInfo, filters []Filter, conf config.SharedConfig, collector *plan.Collector, counter *Counter) []*core.StoreInfo { return filterStoresBy(stores, func(s *core.StoreInfo) bool { targetID := strconv.FormatUint(s.GetID(), 10) @@ -85,7 +85,7 @@ func SelectUnavailableTargetStores(stores []*core.StoreInfo, filters []Filter, c } // SelectTargetStores selects stores that be selected as target store from the list. -func SelectTargetStores(stores []*core.StoreInfo, filters []Filter, conf config.Config, collector *plan.Collector, +func SelectTargetStores(stores []*core.StoreInfo, filters []Filter, conf config.SharedConfig, collector *plan.Collector, counter *Counter) []*core.StoreInfo { if len(filters) == 0 { return stores @@ -133,9 +133,9 @@ type Filter interface { Scope() string Type() filterType // Source Return plan.Status to show whether be filtered as source - Source(conf config.Config, store *core.StoreInfo) *plan.Status + Source(conf config.SharedConfig, store *core.StoreInfo) *plan.Status // Target Return plan.Status to show whether be filtered as target - Target(conf config.Config, store *core.StoreInfo) *plan.Status + Target(conf config.SharedConfig, store *core.StoreInfo) *plan.Status } // comparingFilter is an interface to filter target store by comparing source and target stores @@ -146,7 +146,7 @@ type comparingFilter interface { } // Target checks if store can pass all Filters as target store. -func Target(conf config.Config, store *core.StoreInfo, filters []Filter) bool { +func Target(conf config.SharedConfig, store *core.StoreInfo, filters []Filter) bool { storeID := strconv.FormatUint(store.GetID(), 10) for _, filter := range filters { status := filter.Target(conf, store) @@ -189,14 +189,14 @@ func (f *excludedFilter) Type() filterType { return excluded } -func (f *excludedFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *excludedFilter) Source(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if _, ok := f.sources[store.GetID()]; ok { return statusStoreAlreadyHasPeer } return statusOK } -func (f *excludedFilter) Target(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *excludedFilter) Target(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if _, ok := f.targets[store.GetID()]; ok { return statusStoreAlreadyHasPeer } @@ -219,11 +219,11 @@ func (f *storageThresholdFilter) Type() filterType { return storageThreshold } -func (f *storageThresholdFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *storageThresholdFilter) Source(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { return statusOK } -func (f *storageThresholdFilter) Target(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *storageThresholdFilter) Target(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if !store.IsLowSpace(conf.GetLowSpaceRatio()) { return statusOK } @@ -287,11 +287,11 @@ func (f *distinctScoreFilter) Type() filterType { return distinctScore } -func (f *distinctScoreFilter) Source(_ config.Config, _ *core.StoreInfo) *plan.Status { +func (f *distinctScoreFilter) Source(_ config.SharedConfig, _ *core.StoreInfo) *plan.Status { return statusOK } -func (f *distinctScoreFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *distinctScoreFilter) Target(_ config.SharedConfig, store 
*core.StoreInfo) *plan.Status { score := core.DistinctScore(f.labels, f.stores, store) switch f.policy { case locationSafeguard: @@ -348,9 +348,9 @@ func (f *StoreStateFilter) Type() filterType { // conditionFunc defines condition to determine a store should be selected. // It should consider if the filter allows temporary states. -type conditionFunc func(config.Config, *core.StoreInfo) *plan.Status +type conditionFunc func(config.SharedConfig, *core.StoreInfo) *plan.Status -func (f *StoreStateFilter) isRemoved(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) isRemoved(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { if store.IsRemoved() { f.Reason = storeStateTombstone return statusStoreRemoved @@ -359,7 +359,7 @@ func (f *StoreStateFilter) isRemoved(_ config.Config, store *core.StoreInfo) *pl return statusOK } -func (f *StoreStateFilter) isDown(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) isDown(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if store.DownTime() > conf.GetMaxStoreDownTime() { f.Reason = storeStateDown return statusStoreDown @@ -369,7 +369,7 @@ func (f *StoreStateFilter) isDown(conf config.Config, store *core.StoreInfo) *pl return statusOK } -func (f *StoreStateFilter) isRemoving(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) isRemoving(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { if store.IsRemoving() { f.Reason = storeStateOffline return statusStoresRemoving @@ -378,7 +378,7 @@ func (f *StoreStateFilter) isRemoving(_ config.Config, store *core.StoreInfo) *p return statusOK } -func (f *StoreStateFilter) pauseLeaderTransfer(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) pauseLeaderTransfer(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { if !store.AllowLeaderTransfer() { f.Reason = storeStatePauseLeader return statusStoreRejectLeader @@ -387,7 +387,7 @@ func (f *StoreStateFilter) pauseLeaderTransfer(_ config.Config, store *core.Stor return statusOK } -func (f *StoreStateFilter) slowStoreEvicted(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) slowStoreEvicted(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if store.EvictedAsSlowStore() { f.Reason = storeStateSlow return statusStoreRejectLeader @@ -396,7 +396,7 @@ func (f *StoreStateFilter) slowStoreEvicted(conf config.Config, store *core.Stor return statusOK } -func (f *StoreStateFilter) slowTrendEvicted(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) slowTrendEvicted(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { if store.IsEvictedAsSlowTrend() { f.Reason = storeStateSlowTrend return statusStoreRejectLeader @@ -405,7 +405,7 @@ func (f *StoreStateFilter) slowTrendEvicted(_ config.Config, store *core.StoreIn return statusOK } -func (f *StoreStateFilter) isDisconnected(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) isDisconnected(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { if !f.AllowTemporaryStates && store.IsDisconnected() { f.Reason = storeStateDisconnected return statusStoreDisconnected @@ -414,7 +414,7 @@ func (f *StoreStateFilter) isDisconnected(_ config.Config, store *core.StoreInfo return statusOK } -func (f *StoreStateFilter) isBusy(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) isBusy(_ config.SharedConfig, store 
*core.StoreInfo) *plan.Status { if !f.AllowTemporaryStates && store.IsBusy() { f.Reason = storeStateBusy return statusStoreBusy @@ -423,7 +423,7 @@ func (f *StoreStateFilter) isBusy(_ config.Config, store *core.StoreInfo) *plan. return statusOK } -func (f *StoreStateFilter) exceedRemoveLimit(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) exceedRemoveLimit(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { if !f.AllowTemporaryStates && !store.IsAvailable(storelimit.RemovePeer, f.OperatorLevel) { f.Reason = storeStateExceedRemoveLimit return statusStoreRemoveLimit @@ -432,7 +432,7 @@ func (f *StoreStateFilter) exceedRemoveLimit(_ config.Config, store *core.StoreI return statusOK } -func (f *StoreStateFilter) exceedAddLimit(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) exceedAddLimit(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { if !f.AllowTemporaryStates && !store.IsAvailable(storelimit.AddPeer, f.OperatorLevel) { f.Reason = storeStateExceedAddLimit return statusStoreAddLimit @@ -441,7 +441,7 @@ func (f *StoreStateFilter) exceedAddLimit(_ config.Config, store *core.StoreInfo return statusOK } -func (f *StoreStateFilter) tooManySnapshots(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) tooManySnapshots(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if !f.AllowTemporaryStates && (uint64(store.GetSendingSnapCount()) > conf.GetMaxSnapshotCount() || uint64(store.GetReceivingSnapCount()) > conf.GetMaxSnapshotCount()) { f.Reason = storeStateTooManySnapshot @@ -451,7 +451,7 @@ func (f *StoreStateFilter) tooManySnapshots(conf config.Config, store *core.Stor return statusOK } -func (f *StoreStateFilter) tooManyPendingPeers(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) tooManyPendingPeers(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if !f.AllowTemporaryStates && conf.GetMaxPendingPeerCount() > 0 && store.GetPendingPeerCount() > int(conf.GetMaxPendingPeerCount()) { @@ -462,7 +462,7 @@ func (f *StoreStateFilter) tooManyPendingPeers(conf config.Config, store *core.S return statusOK } -func (f *StoreStateFilter) hasRejectLeaderProperty(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) hasRejectLeaderProperty(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if conf.CheckLabelProperty(config.RejectLeader, store.GetLabels()) { f.Reason = storeStateRejectLeader return statusStoreRejectLeader @@ -495,7 +495,7 @@ const ( fastFailoverTarget ) -func (f *StoreStateFilter) anyConditionMatch(typ int, conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) anyConditionMatch(typ int, conf config.SharedConfig, store *core.StoreInfo) *plan.Status { var funcs []conditionFunc switch typ { case leaderSource: @@ -527,7 +527,7 @@ func (f *StoreStateFilter) anyConditionMatch(typ int, conf config.Config, store // Source returns true when the store can be selected as the schedule // source. 
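StoreStateFilter above keeps a table of conditionFunc checks and anyConditionMatch returns the first failing status; the only change in this patch is that each check now takes config.SharedConfig. A stripped-down sketch of that condition-table pattern, with statuses as plain strings and the config reduced to a single value:

package main

import "fmt"

type store struct {
    downSeconds int
    busy        bool
}

// conditionFunc matches the shape used by StoreStateFilter: each
// check receives the (shared) configuration and a store and reports
// a status ("OK" means the store passes).
type conditionFunc func(maxDownSeconds int, s *store) string

func isDown(maxDownSeconds int, s *store) string {
    if s.downSeconds > maxDownSeconds {
        return "store-state-down"
    }
    return "OK"
}

func isBusy(_ int, s *store) string {
    if s.busy {
        return "store-state-busy"
    }
    return "OK"
}

// anyConditionMatch walks the table and returns the first non-OK
// status, mirroring StoreStateFilter.anyConditionMatch above.
func anyConditionMatch(conds []conditionFunc, maxDownSeconds int, s *store) string {
    for _, cond := range conds {
        if st := cond(maxDownSeconds, s); st != "OK" {
            return st
        }
    }
    return "OK"
}

func main() {
    checks := []conditionFunc{isDown, isBusy}
    fmt.Println(anyConditionMatch(checks, 1800, &store{busy: true}))        // store-state-busy
    fmt.Println(anyConditionMatch(checks, 1800, &store{downSeconds: 3600})) // store-state-down
}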
-func (f *StoreStateFilter) Source(conf config.Config, store *core.StoreInfo) (status *plan.Status) { +func (f *StoreStateFilter) Source(conf config.SharedConfig, store *core.StoreInfo) (status *plan.Status) { if f.TransferLeader { if status = f.anyConditionMatch(leaderSource, conf, store); !status.IsOK() { return @@ -544,7 +544,7 @@ func (f *StoreStateFilter) Source(conf config.Config, store *core.StoreInfo) (st // Target returns true when the store can be selected as the schedule // target. -func (f *StoreStateFilter) Target(conf config.Config, store *core.StoreInfo) (status *plan.Status) { +func (f *StoreStateFilter) Target(conf config.SharedConfig, store *core.StoreInfo) (status *plan.Status) { if f.TransferLeader { if status = f.anyConditionMatch(leaderTarget, conf, store); !status.IsOK() { return @@ -588,7 +588,7 @@ func (f labelConstraintFilter) Type() filterType { } // Source filters stores when select them as schedule source. -func (f labelConstraintFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f labelConstraintFilter) Source(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if placement.MatchLabelConstraints(store, f.constraints) { return statusOK } @@ -596,7 +596,7 @@ func (f labelConstraintFilter) Source(conf config.Config, store *core.StoreInfo) } // Target filters stores when select them as schedule target. -func (f labelConstraintFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f labelConstraintFilter) Target(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { if placement.MatchLabelConstraints(store, f.constraints) { return statusOK } @@ -638,7 +638,7 @@ func (f *ruleFitFilter) Type() filterType { return ruleFit } -func (f *ruleFitFilter) Source(_ config.Config, _ *core.StoreInfo) *plan.Status { +func (f *ruleFitFilter) Source(_ config.SharedConfig, _ *core.StoreInfo) *plan.Status { return statusOK } @@ -647,7 +647,7 @@ func (f *ruleFitFilter) Source(_ config.Config, _ *core.StoreInfo) *plan.Status // the replaced store can match the source rule. // RegionA:[1,2,3], move peer1 --> peer2 will not allow, because it's count not match the rule. // but transfer role peer1 --> peer2, it will support. 
-func (f *ruleFitFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *ruleFitFilter) Target(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { if f.oldFit.Replace(f.srcStore, store) { return statusOK } @@ -691,11 +691,11 @@ func (f *ruleLeaderFitFilter) Type() filterType { return ruleLeader } -func (f *ruleLeaderFitFilter) Source(_ config.Config, _ *core.StoreInfo) *plan.Status { +func (f *ruleLeaderFitFilter) Source(_ config.SharedConfig, _ *core.StoreInfo) *plan.Status { return statusOK } -func (f *ruleLeaderFitFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *ruleLeaderFitFilter) Target(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { targetStoreID := store.GetID() targetPeer := f.region.GetStorePeer(targetStoreID) if targetPeer == nil && !f.allowMoveLeader { @@ -747,11 +747,11 @@ func (f *ruleWitnessFitFilter) Type() filterType { return ruleFit } -func (f *ruleWitnessFitFilter) Source(_ config.Config, _ *core.StoreInfo) *plan.Status { +func (f *ruleWitnessFitFilter) Source(_ config.SharedConfig, _ *core.StoreInfo) *plan.Status { return statusOK } -func (f *ruleWitnessFitFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *ruleWitnessFitFilter) Target(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { targetStoreID := store.GetID() targetPeer := f.region.GetStorePeer(targetStoreID) if targetPeer == nil { @@ -769,7 +769,7 @@ func (f *ruleWitnessFitFilter) Target(_ config.Config, store *core.StoreInfo) *p // NewPlacementSafeguard creates a filter that ensures after replace a peer with new // peer, the placement restriction will not become worse. -func NewPlacementSafeguard(scope string, conf config.Config, cluster *core.BasicCluster, ruleManager *placement.RuleManager, +func NewPlacementSafeguard(scope string, conf config.SharedConfig, cluster *core.BasicCluster, ruleManager *placement.RuleManager, region *core.RegionInfo, sourceStore *core.StoreInfo, oldFit *placement.RegionFit) Filter { if conf.IsPlacementRulesEnabled() { return newRuleFitFilter(scope, cluster, ruleManager, region, oldFit, sourceStore.GetID()) @@ -780,7 +780,7 @@ func NewPlacementSafeguard(scope string, conf config.Config, cluster *core.Basic // NewPlacementLeaderSafeguard creates a filter that ensures after transfer a leader with // existed peer, the placement restriction will not become worse. // Note that it only worked when PlacementRules enabled otherwise it will always permit the sourceStore. -func NewPlacementLeaderSafeguard(scope string, conf config.Config, cluster *core.BasicCluster, ruleManager *placement.RuleManager, region *core.RegionInfo, sourceStore *core.StoreInfo, allowMoveLeader bool) Filter { +func NewPlacementLeaderSafeguard(scope string, conf config.SharedConfig, cluster *core.BasicCluster, ruleManager *placement.RuleManager, region *core.RegionInfo, sourceStore *core.StoreInfo, allowMoveLeader bool) Filter { if conf.IsPlacementRulesEnabled() { return newRuleLeaderFitFilter(scope, cluster, ruleManager, region, sourceStore.GetID(), allowMoveLeader) } @@ -790,7 +790,7 @@ func NewPlacementLeaderSafeguard(scope string, conf config.Config, cluster *core // NewPlacementWitnessSafeguard creates a filter that ensures after transfer a witness with // existed peer, the placement restriction will not become worse. // Note that it only worked when PlacementRules enabled otherwise it will always permit the sourceStore. 
-func NewPlacementWitnessSafeguard(scope string, conf config.Config, cluster *core.BasicCluster, ruleManager *placement.RuleManager, +func NewPlacementWitnessSafeguard(scope string, conf config.SharedConfig, cluster *core.BasicCluster, ruleManager *placement.RuleManager, region *core.RegionInfo, sourceStore *core.StoreInfo, oldFit *placement.RegionFit) Filter { if conf.IsPlacementRulesEnabled() { return newRuleWitnessFitFilter(scope, cluster, ruleManager, region, oldFit, sourceStore.GetID()) @@ -819,14 +819,14 @@ func (f *engineFilter) Type() filterType { return engine } -func (f *engineFilter) Source(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *engineFilter) Source(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { if f.constraint.MatchStore(store) { return statusOK } return statusStoreNotMatchRule } -func (f *engineFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *engineFilter) Target(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { if f.constraint.MatchStore(store) { return statusOK } @@ -862,14 +862,14 @@ func (f *specialUseFilter) Type() filterType { return specialUse } -func (f *specialUseFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *specialUseFilter) Source(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if store.IsLowSpace(conf.GetLowSpaceRatio()) || !f.constraint.MatchStore(store) { return statusOK } return statusStoreNotMatchRule } -func (f *specialUseFilter) Target(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *specialUseFilter) Target(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { if !f.constraint.MatchStore(store) { return statusOK } @@ -936,11 +936,11 @@ func (f *isolationFilter) Type() filterType { return isolation } -func (f *isolationFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status { +func (f *isolationFilter) Source(conf config.SharedConfig, store *core.StoreInfo) *plan.Status { return statusOK } -func (f *isolationFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { +func (f *isolationFilter) Target(_ config.SharedConfig, store *core.StoreInfo) *plan.Status { // No isolation constraint to fit if len(f.constraintSet) == 0 { return statusStoreNotMatchIsolation diff --git a/pkg/schedule/filter/filters_test.go b/pkg/schedule/filter/filters_test.go index bd727f5b4632..d56004e6cf5a 100644 --- a/pkg/schedule/filter/filters_test.go +++ b/pkg/schedule/filter/filters_test.go @@ -93,7 +93,7 @@ func TestLabelConstraintsFilter(t *testing.T) { } for _, testCase := range testCases { filter := NewLabelConstraintFilter("", []placement.LabelConstraint{{Key: testCase.key, Op: placement.LabelConstraintOp(testCase.op), Values: testCase.values}}) - re.Equal(testCase.res, filter.Source(testCluster.GetOpts(), store).StatusCode) + re.Equal(testCase.res, filter.Source(testCluster.GetSharedConfig(), store).StatusCode) } } @@ -139,15 +139,15 @@ func TestRuleFitFilter(t *testing.T) { } for _, testCase := range testCases { filter := newRuleFitFilter("", testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, nil, 1) - re.Equal(testCase.sourceRes, filter.Source(testCluster.GetOpts(), testCluster.GetStore(testCase.storeID)).StatusCode) - re.Equal(testCase.targetRes, filter.Target(testCluster.GetOpts(), testCluster.GetStore(testCase.storeID)).StatusCode) + re.Equal(testCase.sourceRes, filter.Source(testCluster.GetSharedConfig(), testCluster.GetStore(testCase.storeID)).StatusCode) + 
re.Equal(testCase.targetRes, filter.Target(testCluster.GetSharedConfig(), testCluster.GetStore(testCase.storeID)).StatusCode) leaderFilter := newRuleLeaderFitFilter("", testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, 1, true) - re.Equal(testCase.targetRes, leaderFilter.Target(testCluster.GetOpts(), testCluster.GetStore(testCase.storeID)).StatusCode) + re.Equal(testCase.targetRes, leaderFilter.Target(testCluster.GetSharedConfig(), testCluster.GetStore(testCase.storeID)).StatusCode) } // store-6 is not exist in the peers, so it will not allow transferring leader to store 6. leaderFilter := newRuleLeaderFitFilter("", testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, 1, false) - re.False(leaderFilter.Target(testCluster.GetOpts(), testCluster.GetStore(6)).IsOK()) + re.False(leaderFilter.Target(testCluster.GetSharedConfig(), testCluster.GetStore(6)).IsOK()) } func TestSendStateFilter(t *testing.T) { @@ -334,8 +334,8 @@ func TestIsolationFilter(t *testing.T) { for _, testCase := range testCases { filter := NewIsolationFilter("", testCase.isolationLevel, testCluster.GetLocationLabels(), testCluster.GetRegionStores(testCase.region)) for idx, store := range allStores { - re.Equal(testCase.sourceRes[idx], filter.Source(testCluster.GetOpts(), testCluster.GetStore(store.storeID)).StatusCode) - re.Equal(testCase.targetRes[idx], filter.Target(testCluster.GetOpts(), testCluster.GetStore(store.storeID)).StatusCode) + re.Equal(testCase.sourceRes[idx], filter.Source(testCluster.GetSharedConfig(), testCluster.GetStore(store.storeID)).StatusCode) + re.Equal(testCase.targetRes[idx], filter.Target(testCluster.GetSharedConfig(), testCluster.GetStore(store.storeID)).StatusCode) } } } @@ -362,10 +362,10 @@ func TestPlacementGuard(t *testing.T) { store := testCluster.GetStore(1) re.IsType(NewLocationSafeguard("", []string{"zone"}, testCluster.GetRegionStores(region), store), - NewPlacementSafeguard("", testCluster.GetOpts(), testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, store, nil)) + NewPlacementSafeguard("", testCluster.GetSharedConfig(), testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, store, nil)) testCluster.SetEnablePlacementRules(true) re.IsType(newRuleFitFilter("", testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, nil, 1), - NewPlacementSafeguard("", testCluster.GetOpts(), testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, store, nil)) + NewPlacementSafeguard("", testCluster.GetSharedConfig(), testCluster.GetBasicCluster(), testCluster.GetRuleManager(), region, store, nil)) } func TestSpecialUseFilter(t *testing.T) { @@ -393,8 +393,8 @@ func TestSpecialUseFilter(t *testing.T) { store := core.NewStoreInfoWithLabel(1, testCase.label) store = store.Clone(core.SetStoreStats(&pdpb.StoreStats{StoreId: 1, Capacity: 100 * units.GiB, Available: 100 * units.GiB})) filter := NewSpecialUseFilter("", testCase.allowUse...) 
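In these tests the only change is the accessor: testCluster.GetOpts() becomes testCluster.GetSharedConfig(), while the object behind the call stays the same. A minimal sketch of that wiring, with every name hypothetical: one options value backs several accessors, each returning a different interface view, so a call site can ask for exactly the surface it needs.

    package main

    import "fmt"

    // Hypothetical narrow views over the same options.
    type SharedConfig interface{ GetMaxReplicas() int }

    type SchedulerConfig interface {
        SharedConfig
        GetLeaderScheduleLimit() uint64
    }

    // options is the single concrete configuration value.
    type options struct {
        maxReplicas         int
        leaderScheduleLimit uint64
    }

    func (o *options) GetMaxReplicas() int            { return o.maxReplicas }
    func (o *options) GetLeaderScheduleLimit() uint64 { return o.leaderScheduleLimit }

    // testCluster hands out interface views of the one options value, mirroring
    // the GetSharedConfig/GetSchedulerConfig accessors used by the tests.
    type testCluster struct{ opts *options }

    func (c *testCluster) GetSharedConfig() SharedConfig       { return c.opts }
    func (c *testCluster) GetSchedulerConfig() SchedulerConfig { return c.opts }

    func main() {
        c := &testCluster{opts: &options{maxReplicas: 3, leaderScheduleLimit: 4}}
        fmt.Println(c.GetSharedConfig().GetMaxReplicas())            // 3
        fmt.Println(c.GetSchedulerConfig().GetLeaderScheduleLimit()) // 4
    }

Because the views are plain interfaces, widening one consumer later does not force every other call site to see the extra methods.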
- re.Equal(testCase.sourceRes, filter.Source(testCluster.GetOpts(), store).StatusCode) - re.Equal(testCase.targetRes, filter.Target(testCluster.GetOpts(), store).StatusCode) + re.Equal(testCase.sourceRes, filter.Source(testCluster.GetSharedConfig(), store).StatusCode) + re.Equal(testCase.targetRes, filter.Target(testCluster.GetSharedConfig(), store).StatusCode) } } diff --git a/pkg/schedule/filter/healthy.go b/pkg/schedule/filter/healthy.go index dd017b974ec8..adc4085048b5 100644 --- a/pkg/schedule/filter/healthy.go +++ b/pkg/schedule/filter/healthy.go @@ -42,17 +42,17 @@ func hasDownPeers(region *core.RegionInfo) bool { // IsRegionReplicated checks if a region is fully replicated. When placement // rules is enabled, its peers should fit corresponding rules. When placement // rules is disabled, it should have enough replicas and no any learner peer. -func IsRegionReplicated(cluster sche.ScheduleCluster, region *core.RegionInfo) bool { - if cluster.GetOpts().IsPlacementRulesEnabled() { +func IsRegionReplicated(cluster sche.SharedCluster, region *core.RegionInfo) bool { + if cluster.GetSharedConfig().IsPlacementRulesEnabled() { return isRegionPlacementRuleSatisfied(cluster, region) } return isRegionReplicasSatisfied(cluster, region) } -func isRegionPlacementRuleSatisfied(cluster sche.ScheduleCluster, region *core.RegionInfo) bool { +func isRegionPlacementRuleSatisfied(cluster sche.SharedCluster, region *core.RegionInfo) bool { return cluster.GetRuleManager().FitRegion(cluster, region).IsSatisfied() } -func isRegionReplicasSatisfied(cluster sche.ScheduleCluster, region *core.RegionInfo) bool { - return len(region.GetLearners()) == 0 && len(region.GetPeers()) == cluster.GetOpts().GetMaxReplicas() +func isRegionReplicasSatisfied(cluster sche.SharedCluster, region *core.RegionInfo) bool { + return len(region.GetLearners()) == 0 && len(region.GetPeers()) == cluster.GetSharedConfig().GetMaxReplicas() } diff --git a/pkg/schedule/filter/region_filters.go b/pkg/schedule/filter/region_filters.go index bd3569768179..799cee7d90c8 100644 --- a/pkg/schedule/filter/region_filters.go +++ b/pkg/schedule/filter/region_filters.go @@ -100,12 +100,12 @@ func (f *regionDownFilter) Select(region *core.RegionInfo) *plan.Status { // RegionReplicatedFilter filters all unreplicated regions. type RegionReplicatedFilter struct { - cluster sche.ScheduleCluster + cluster sche.SharedCluster fit *placement.RegionFit } // NewRegionReplicatedFilter creates a RegionFilter that filters all unreplicated regions. -func NewRegionReplicatedFilter(cluster sche.ScheduleCluster) RegionFilter { +func NewRegionReplicatedFilter(cluster sche.SharedCluster) RegionFilter { return &RegionReplicatedFilter{cluster: cluster} } @@ -117,7 +117,7 @@ func (f *RegionReplicatedFilter) GetFit() *placement.RegionFit { // Select returns Ok if the given region satisfy the replication. // it will cache the lasted region fit if the region satisfy the replication. func (f *RegionReplicatedFilter) Select(region *core.RegionInfo) *plan.Status { - if f.cluster.GetOpts().IsPlacementRulesEnabled() { + if f.cluster.GetSharedConfig().IsPlacementRulesEnabled() { fit := f.cluster.GetRuleManager().FitRegion(f.cluster, region) if !fit.IsSatisfied() { return statusRegionNotMatchRule @@ -132,11 +132,11 @@ func (f *RegionReplicatedFilter) Select(region *core.RegionInfo) *plan.Status { } type regionEmptyFilter struct { - cluster sche.ScheduleCluster + cluster sche.SharedCluster } // NewRegionEmptyFilter returns creates a RegionFilter that filters all empty regions. 
-func NewRegionEmptyFilter(cluster sche.ScheduleCluster) RegionFilter { +func NewRegionEmptyFilter(cluster sche.SharedCluster) RegionFilter { return ®ionEmptyFilter{cluster: cluster} } @@ -148,7 +148,7 @@ func (f *regionEmptyFilter) Select(region *core.RegionInfo) *plan.Status { } // isEmptyRegionAllowBalance returns true if the region is not empty or the number of regions is too small. -func isEmptyRegionAllowBalance(cluster sche.ScheduleCluster, region *core.RegionInfo) bool { +func isEmptyRegionAllowBalance(cluster sche.SharedCluster, region *core.RegionInfo) bool { return region.GetApproximateSize() > core.EmptyRegionApproximateSize || cluster.GetTotalRegionCount() < core.InitClusterRegionThreshold } diff --git a/pkg/schedule/operator/builder.go b/pkg/schedule/operator/builder.go index 06e8628cc6a7..d197ce3b3aad 100644 --- a/pkg/schedule/operator/builder.go +++ b/pkg/schedule/operator/builder.go @@ -40,7 +40,7 @@ import ( // according to various constraints. type Builder struct { // basic info - sche.ScheduleCluster + sche.SharedCluster desc string regionID uint64 regionEpoch *metapb.RegionEpoch @@ -92,10 +92,10 @@ func SkipPlacementRulesCheck(b *Builder) { } // NewBuilder creates a Builder. -func NewBuilder(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, opts ...BuilderOption) *Builder { +func NewBuilder(desc string, ci sche.SharedCluster, region *core.RegionInfo, opts ...BuilderOption) *Builder { b := &Builder{ desc: desc, - ScheduleCluster: ci, + SharedCluster: ci, regionID: region.GetID(), regionEpoch: region.GetRegionEpoch(), approximateSize: region.GetApproximateSize(), @@ -135,7 +135,7 @@ func NewBuilder(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, o // placement rules var rules []*placement.Rule - if err == nil && !b.skipPlacementRulesCheck && b.GetOpts().IsPlacementRulesEnabled() { + if err == nil && !b.skipPlacementRulesCheck && b.GetSharedConfig().IsPlacementRulesEnabled() { fit := b.GetRuleManager().FitRegion(b.GetBasicCluster(), region) for _, rf := range fit.RuleFits { rules = append(rules, rf.Rule) @@ -151,14 +151,14 @@ func NewBuilder(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, o } // build flags - supportConfChangeV2 := versioninfo.IsFeatureSupported(b.GetOpts().GetClusterVersion(), versioninfo.ConfChangeV2) + supportConfChangeV2 := versioninfo.IsFeatureSupported(b.GetSharedConfig().GetClusterVersion(), versioninfo.ConfChangeV2) b.rules = rules b.originPeers = originPeers b.unhealthyPeers = unhealthyPeers b.originLeaderStoreID = originLeaderStoreID b.targetPeers = originPeers.Copy() - b.useJointConsensus = supportConfChangeV2 && b.GetOpts().IsUseJointConsensus() + b.useJointConsensus = supportConfChangeV2 && b.GetSharedConfig().IsUseJointConsensus() b.err = err return b } @@ -790,7 +790,7 @@ func (b *Builder) execRemovePeer(peer *metapb.Peer) { var isDownStore bool store := b.GetBasicCluster().GetStore(removeStoreID) if store != nil { - isDownStore = store.DownTime() > b.GetOpts().GetMaxStoreDownTime() + isDownStore = store.DownTime() > b.GetSharedConfig().GetMaxStoreDownTime() } b.steps = append(b.steps, RemovePeer{FromStore: removeStoreID, PeerID: peer.GetId(), IsDownStore: isDownStore}) delete(b.currentPeers, removeStoreID) @@ -906,7 +906,7 @@ func (b *Builder) allowLeader(peer *metapb.Peer, ignoreClusterLimit bool) bool { stateFilter := &filter.StoreStateFilter{ActionScope: "operator-builder", TransferLeader: true} // store state filter - if !stateFilter.Target(b.GetOpts(), store).IsOK() { + if 
!stateFilter.Target(b.GetSharedConfig(), store).IsOK() { return false } @@ -1177,7 +1177,7 @@ func (b *Builder) labelMatch(x, y uint64) int { if sx == nil || sy == nil { return 0 } - labels := b.GetOpts().GetLocationLabels() + labels := b.GetSharedConfig().GetLocationLabels() for i, l := range labels { if sx.GetLabelValue(l) != sy.GetLabelValue(l) { return i diff --git a/pkg/schedule/operator/create_operator.go b/pkg/schedule/operator/create_operator.go index 260547b4d2a0..f6de48691fc7 100644 --- a/pkg/schedule/operator/create_operator.go +++ b/pkg/schedule/operator/create_operator.go @@ -31,35 +31,35 @@ import ( ) // CreateAddPeerOperator creates an operator that adds a new peer. -func CreateAddPeerOperator(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, peer *metapb.Peer, kind OpKind) (*Operator, error) { +func CreateAddPeerOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, peer *metapb.Peer, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region). AddPeer(peer). Build(kind) } // CreateDemoteVoterOperator creates an operator that demotes a voter -func CreateDemoteVoterOperator(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { +func CreateDemoteVoterOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). DemoteVoter(peer.GetStoreId()). Build(0) } // CreatePromoteLearnerOperator creates an operator that promotes a learner. -func CreatePromoteLearnerOperator(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { +func CreatePromoteLearnerOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). PromoteLearner(peer.GetStoreId()). Build(0) } // CreateRemovePeerOperator creates an operator that removes a peer from region. -func CreateRemovePeerOperator(desc string, ci sche.ScheduleCluster, kind OpKind, region *core.RegionInfo, storeID uint64) (*Operator, error) { +func CreateRemovePeerOperator(desc string, ci sche.SharedCluster, kind OpKind, region *core.RegionInfo, storeID uint64) (*Operator, error) { return NewBuilder(desc, ci, region). RemovePeer(storeID). Build(kind) } // CreateTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store. -func CreateTransferLeaderOperator(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, targetStoreIDs []uint64, kind OpKind) (*Operator, error) { +func CreateTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, targetStoreIDs []uint64, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region, SkipOriginJointStateCheck). SetLeader(targetStoreID). SetLeaders(targetStoreIDs). @@ -67,7 +67,7 @@ func CreateTransferLeaderOperator(desc string, ci sche.ScheduleCluster, region * } // CreateForceTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store forcible. 
-func CreateForceTransferLeaderOperator(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, kind OpKind) (*Operator, error) { +func CreateForceTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region, SkipOriginJointStateCheck, SkipPlacementRulesCheck). SetLeader(targetStoreID). EnableForceTargetLeader(). @@ -75,7 +75,7 @@ func CreateForceTransferLeaderOperator(desc string, ci sche.ScheduleCluster, reg } // CreateMoveRegionOperator creates an operator that moves a region to specified stores. -func CreateMoveRegionOperator(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, kind OpKind, roles map[uint64]placement.PeerRoleType) (*Operator, error) { +func CreateMoveRegionOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, kind OpKind, roles map[uint64]placement.PeerRoleType) (*Operator, error) { // construct the peers from roles oldPeers := region.GetPeers() peers := make(map[uint64]*metapb.Peer) @@ -97,7 +97,7 @@ func CreateMoveRegionOperator(desc string, ci sche.ScheduleCluster, region *core } // CreateMovePeerOperator creates an operator that replaces an old peer with a new peer. -func CreateMovePeerOperator(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer) (*Operator, error) { +func CreateMovePeerOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). RemovePeer(oldStore). AddPeer(peer). @@ -105,7 +105,7 @@ func CreateMovePeerOperator(desc string, ci sche.ScheduleCluster, region *core.R } // CreateMoveWitnessOperator creates an operator that replaces an old witness with a new witness. -func CreateMoveWitnessOperator(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64) (*Operator, error) { +func CreateMoveWitnessOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64) (*Operator, error) { return NewBuilder(desc, ci, region). BecomeNonWitness(sourceStoreID). BecomeWitness(targetStoreID). @@ -113,7 +113,7 @@ func CreateMoveWitnessOperator(desc string, ci sche.ScheduleCluster, region *cor } // CreateReplaceLeaderPeerOperator creates an operator that replaces an old peer with a new peer, and move leader from old store firstly. -func CreateReplaceLeaderPeerOperator(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer, leader *metapb.Peer) (*Operator, error) { +func CreateReplaceLeaderPeerOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer, leader *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). RemovePeer(oldStore). AddPeer(peer). @@ -122,7 +122,7 @@ func CreateReplaceLeaderPeerOperator(desc string, ci sche.ScheduleCluster, regio } // CreateMoveLeaderOperator creates an operator that replaces an old leader with a new leader. 
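Every Create*Operator constructor in this file now accepts sche.SharedCluster, the smallest cluster view the operator builder needs. Below is a rough, self-contained model of that dependency; SharedCluster, SharedConfig, movePlan, and createMovePeer are illustrative stand-ins, not the real builder API.

    package main

    import "fmt"

    // Hypothetical trimmed-down stand-ins for config.SharedConfig and
    // sche.SharedCluster.
    type SharedConfig interface{ IsUseJointConsensus() bool }

    type SharedCluster interface {
        GetSharedConfig() SharedConfig
    }

    type staticCluster struct {
        useJointConsensus bool
    }

    func (c staticCluster) GetSharedConfig() SharedConfig { return c }
    func (c staticCluster) IsUseJointConsensus() bool     { return c.useJointConsensus }

    // movePlan is a toy result type standing in for a built operator.
    type movePlan struct {
        removeStore, addStore uint64
        joint                 bool
    }

    // createMovePeer sketches the constructor shape: the shared cluster view is
    // enough to decide how the peer movement should be carried out.
    func createMovePeer(ci SharedCluster, oldStore, newStore uint64) movePlan {
        return movePlan{
            removeStore: oldStore,
            addStore:    newStore,
            joint:       ci.GetSharedConfig().IsUseJointConsensus(),
        }
    }

    func main() {
        c := staticCluster{useJointConsensus: true}
        fmt.Printf("%+v\n", createMovePeer(c, 1, 3)) // {removeStore:1 addStore:3 joint:true}
    }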
-func CreateMoveLeaderOperator(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer) (*Operator, error) { +func CreateMoveLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). RemovePeer(oldStore). AddPeer(peer). @@ -157,7 +157,7 @@ func CreateSplitRegionOperator(desc string, region *core.RegionInfo, kind OpKind } // CreateMergeRegionOperator creates an operator that merge two region into one. -func CreateMergeRegionOperator(desc string, ci sche.ScheduleCluster, source *core.RegionInfo, target *core.RegionInfo, kind OpKind) ([]*Operator, error) { +func CreateMergeRegionOperator(desc string, ci sche.SharedCluster, source *core.RegionInfo, target *core.RegionInfo, kind OpKind) ([]*Operator, error) { if core.IsInJointState(source.GetPeers()...) || core.IsInJointState(target.GetPeers()...) { return nil, errors.Errorf("cannot merge regions which are in joint state") } @@ -215,7 +215,7 @@ func isRegionMatch(a, b *core.RegionInfo) bool { } // CreateScatterRegionOperator creates an operator that scatters the specified region. -func CreateScatterRegionOperator(desc string, ci sche.ScheduleCluster, origin *core.RegionInfo, targetPeers map[uint64]*metapb.Peer, targetLeader uint64) (*Operator, error) { +func CreateScatterRegionOperator(desc string, ci sche.SharedCluster, origin *core.RegionInfo, targetPeers map[uint64]*metapb.Peer, targetLeader uint64) (*Operator, error) { // randomly pick a leader. var ids []uint64 for id, peer := range targetPeers { @@ -243,7 +243,7 @@ func CreateScatterRegionOperator(desc string, ci sche.ScheduleCluster, origin *c const OpDescLeaveJointState = "leave-joint-state" // CreateLeaveJointStateOperator creates an operator that let region leave joint state. -func CreateLeaveJointStateOperator(desc string, ci sche.ScheduleCluster, origin *core.RegionInfo) (*Operator, error) { +func CreateLeaveJointStateOperator(desc string, ci sche.SharedCluster, origin *core.RegionInfo) (*Operator, error) { b := NewBuilder(desc, ci, origin, SkipOriginJointStateCheck, SkipPlacementRulesCheck) if b.err == nil && !core.IsInJointState(origin.GetPeers()...) { @@ -303,14 +303,14 @@ func CreateLeaveJointStateOperator(desc string, ci sche.ScheduleCluster, origin } // CreateWitnessPeerOperator creates an operator that set a follower or learner peer with witness -func CreateWitnessPeerOperator(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { +func CreateWitnessPeerOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). BecomeWitness(peer.GetStoreId()). Build(OpWitness) } // CreateNonWitnessPeerOperator creates an operator that set a peer with non-witness -func CreateNonWitnessPeerOperator(desc string, ci sche.ScheduleCluster, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { +func CreateNonWitnessPeerOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). BecomeNonWitness(peer.GetStoreId()). 
Build(OpWitness) diff --git a/pkg/schedule/operator/operator_controller.go b/pkg/schedule/operator/operator_controller.go index c82b2228c9dd..1b09a9939f23 100644 --- a/pkg/schedule/operator/operator_controller.go +++ b/pkg/schedule/operator/operator_controller.go @@ -56,7 +56,7 @@ var ( type Controller struct { syncutil.RWMutex ctx context.Context - config config.Config + config config.SharedConfig cluster *core.BasicCluster operators map[uint64]*Operator hbStreams *hbstream.HeartbeatStreams @@ -69,7 +69,7 @@ type Controller struct { } // NewController creates a Controller. -func NewController(ctx context.Context, cluster *core.BasicCluster, config config.Config, hbStreams *hbstream.HeartbeatStreams) *Controller { +func NewController(ctx context.Context, cluster *core.BasicCluster, config config.SharedConfig, hbStreams *hbstream.HeartbeatStreams) *Controller { return &Controller{ ctx: ctx, cluster: cluster, diff --git a/pkg/schedule/operator/operator_controller_test.go b/pkg/schedule/operator/operator_controller_test.go index 112e5a11f9c7..6d5f835ca68f 100644 --- a/pkg/schedule/operator/operator_controller_test.go +++ b/pkg/schedule/operator/operator_controller_test.go @@ -59,7 +59,7 @@ func (suite *operatorControllerTestSuite) TestCacheInfluence() { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) bc := tc.GetBasicCluster() - oc := NewController(suite.ctx, bc, tc.GetOpts(), nil) + oc := NewController(suite.ctx, bc, tc.GetSharedConfig(), nil) tc.AddLeaderStore(2, 1) region := tc.AddLeaderRegion(1, 1, 2) @@ -91,7 +91,7 @@ func (suite *operatorControllerTestSuite) TestCacheInfluence() { func (suite *operatorControllerTestSuite) TestGetOpInfluence() { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) - oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetOpts(), nil) + oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), nil) tc.AddLeaderStore(2, 1) tc.AddLeaderRegion(1, 1, 2) tc.AddLeaderRegion(2, 1, 2) @@ -134,7 +134,7 @@ func (suite *operatorControllerTestSuite) TestOperatorStatus() { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) - oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 2) tc.AddLeaderStore(2, 0) tc.AddLeaderRegion(1, 1, 2) @@ -169,7 +169,7 @@ func (suite *operatorControllerTestSuite) TestFastFailOperator() { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) - oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 2) tc.AddLeaderStore(2, 0) tc.AddLeaderStore(3, 0) @@ -203,7 +203,7 @@ func (suite *operatorControllerTestSuite) TestFastFailWithUnhealthyStore() { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) - oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 2) tc.AddLeaderStore(2, 0) tc.AddLeaderStore(3, 0) @@ -223,7 +223,7 @@ func (suite 
*operatorControllerTestSuite) TestCheckAddUnexpectedStatus() { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) - oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 0) tc.AddLeaderStore(2, 1) tc.AddLeaderRegion(1, 2, 1) @@ -288,7 +288,7 @@ func (suite *operatorControllerTestSuite) TestConcurrentRemoveOperator() { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) - oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 0) tc.AddLeaderStore(2, 1) tc.AddLeaderRegion(1, 2, 1) @@ -329,7 +329,7 @@ func (suite *operatorControllerTestSuite) TestPollDispatchRegion() { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) - oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 2) tc.AddLeaderStore(2, 1) tc.AddLeaderRegion(1, 1, 2) @@ -402,7 +402,7 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) - oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 0) tc.UpdateLeaderCount(1, 1000) tc.AddLeaderStore(2, 0) @@ -469,7 +469,7 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { func (suite *operatorControllerTestSuite) TestDispatchOutdatedRegion() { cluster := mockcluster.NewCluster(suite.ctx, mockconfig.NewTestOptions()) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) - controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetOpts(), stream) + controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetSharedConfig(), stream) cluster.AddLeaderStore(1, 2) cluster.AddLeaderStore(2, 0) @@ -519,7 +519,7 @@ func (suite *operatorControllerTestSuite) TestDispatchOutdatedRegion() { func (suite *operatorControllerTestSuite) TestCalcInfluence() { cluster := mockcluster.NewCluster(suite.ctx, mockconfig.NewTestOptions()) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) - controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetOpts(), stream) + controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetSharedConfig(), stream) epoch := &metapb.RegionEpoch{ConfVer: 0, Version: 0} region := cluster.MockRegionInfo(1, 1, []uint64{2}, []uint64{}, epoch) @@ -596,7 +596,7 @@ func (suite *operatorControllerTestSuite) TestCalcInfluence() { func (suite *operatorControllerTestSuite) TestDispatchUnfinishedStep() { cluster := mockcluster.NewCluster(suite.ctx, mockconfig.NewTestOptions()) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) - 
controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetOpts(), stream) + controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetSharedConfig(), stream) // Create a new region with epoch(0, 0) // the region has two peers with its peer id allocated incrementally. @@ -733,7 +733,7 @@ func (suite *operatorControllerTestSuite) TestAddWaitingOperator() { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(suite.ctx, opts) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) - controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetOpts(), stream) + controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetSharedConfig(), stream) cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) cluster.AddLabelsStore(2, 1, map[string]string{"host": "host2"}) cluster.AddLabelsStore(3, 1, map[string]string{"host": "host3"}) @@ -802,7 +802,7 @@ func (suite *operatorControllerTestSuite) TestInvalidStoreId() { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) - oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // If PD and store 3 are gone, PD will not have info of store 3 after recreating it. tc.AddRegionStore(1, 1) tc.AddRegionStore(2, 1) diff --git a/pkg/schedule/operator/step.go b/pkg/schedule/operator/step.go index 97f3fc104ce4..10a21d8c86b1 100644 --- a/pkg/schedule/operator/step.go +++ b/pkg/schedule/operator/step.go @@ -54,7 +54,7 @@ type OpStep interface { fmt.Stringer ConfVerChanged(region *core.RegionInfo) uint64 IsFinish(region *core.RegionInfo) bool - CheckInProgress(ci *core.BasicCluster, config config.Config, region *core.RegionInfo) error + CheckInProgress(ci *core.BasicCluster, config config.SharedConfig, region *core.RegionInfo) error Influence(opInfluence OpInfluence, region *core.RegionInfo) Timeout(regionSize int64) time.Duration GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *pdpb.RegionHeartbeatResponse @@ -88,7 +88,7 @@ func (tl TransferLeader) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (tl TransferLeader) CheckInProgress(ci *core.BasicCluster, config config.Config, region *core.RegionInfo) error { +func (tl TransferLeader) CheckInProgress(ci *core.BasicCluster, config config.SharedConfig, region *core.RegionInfo) error { errList := make([]error, 0, len(tl.ToStores)+1) for _, storeID := range append(tl.ToStores, tl.ToStore) { peer := region.GetStorePeer(tl.ToStore) @@ -193,7 +193,7 @@ func (ap AddPeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) { } // CheckInProgress checks if the step is in the progress of advancing. -func (ap AddPeer) CheckInProgress(ci *core.BasicCluster, config config.Config, region *core.RegionInfo) error { +func (ap AddPeer) CheckInProgress(ci *core.BasicCluster, config config.SharedConfig, region *core.RegionInfo) error { if err := validateStore(ci, config, ap.ToStore); err != nil { return err } @@ -247,7 +247,7 @@ func (bw BecomeWitness) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. 
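The step changes that follow narrow every CheckInProgress signature to config.SharedConfig, since step validation only needs shared options such as the maximum store down time. A minimal sketch of that kind of check, assuming hypothetical storeState and checkTargetStore types:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // SharedConfig models the only knob this check needs (hypothetical subset).
    type SharedConfig interface{ GetMaxStoreDownTime() time.Duration }

    type fixedConfig struct{ maxDown time.Duration }

    func (c fixedConfig) GetMaxStoreDownTime() time.Duration { return c.maxDown }

    // storeState is a toy stand-in for the store metadata a step consults.
    type storeState struct {
        id       uint64
        removed  bool
        downTime time.Duration
    }

    // checkTargetStore mirrors the shape of a step's validation: it checks a
    // target store using only the shared configuration.
    func checkTargetStore(conf SharedConfig, store *storeState) error {
        switch {
        case store == nil || store.removed:
            return errors.New("target store does not exist")
        case store.downTime > conf.GetMaxStoreDownTime():
            return errors.New("target store has been down for too long")
        default:
            return nil
        }
    }

    func main() {
        conf := fixedConfig{maxDown: 30 * time.Minute}
        fmt.Println(checkTargetStore(conf, &storeState{id: 2, downTime: time.Hour}))   // rejected
        fmt.Println(checkTargetStore(conf, &storeState{id: 3, downTime: time.Minute})) // <nil>
    }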
-func (bw BecomeWitness) CheckInProgress(ci *core.BasicCluster, config config.Config, region *core.RegionInfo) error { +func (bw BecomeWitness) CheckInProgress(ci *core.BasicCluster, config config.SharedConfig, region *core.RegionInfo) error { if err := validateStore(ci, config, bw.StoreID); err != nil { return err } @@ -309,7 +309,7 @@ func (bn BecomeNonWitness) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (bn BecomeNonWitness) CheckInProgress(ci *core.BasicCluster, config config.Config, region *core.RegionInfo) error { +func (bn BecomeNonWitness) CheckInProgress(ci *core.BasicCluster, config config.SharedConfig, region *core.RegionInfo) error { if err := validateStore(ci, config, bn.StoreID); err != nil { return err } @@ -395,7 +395,7 @@ func (bsw BatchSwitchWitness) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (bsw BatchSwitchWitness) CheckInProgress(ci *core.BasicCluster, config config.Config, region *core.RegionInfo) error { +func (bsw BatchSwitchWitness) CheckInProgress(ci *core.BasicCluster, config config.SharedConfig, region *core.RegionInfo) error { for _, w := range bsw.ToWitnesses { if err := w.CheckInProgress(ci, config, region); err != nil { return err @@ -478,7 +478,7 @@ func (al AddLearner) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (al AddLearner) CheckInProgress(ci *core.BasicCluster, config config.Config, region *core.RegionInfo) error { +func (al AddLearner) CheckInProgress(ci *core.BasicCluster, config config.SharedConfig, region *core.RegionInfo) error { if err := validateStore(ci, config, al.ToStore); err != nil { return err } @@ -564,7 +564,7 @@ func (pl PromoteLearner) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (pl PromoteLearner) CheckInProgress(_ *core.BasicCluster, config config.Config, region *core.RegionInfo) error { +func (pl PromoteLearner) CheckInProgress(_ *core.BasicCluster, config config.SharedConfig, region *core.RegionInfo) error { peer := region.GetStorePeer(pl.ToStore) if peer.GetId() != pl.PeerID { return errors.New("peer does not exist") @@ -615,7 +615,7 @@ func (rp RemovePeer) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (rp RemovePeer) CheckInProgress(_ *core.BasicCluster, config config.Config, region *core.RegionInfo) error { +func (rp RemovePeer) CheckInProgress(_ *core.BasicCluster, config config.SharedConfig, region *core.RegionInfo) error { if rp.FromStore == region.GetLeader().GetStoreId() { return errors.New("cannot remove leader peer") } @@ -685,7 +685,7 @@ func (mr MergeRegion) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (mr MergeRegion) CheckInProgress(_ *core.BasicCluster, config config.Config, _ *core.RegionInfo) error { +func (mr MergeRegion) CheckInProgress(_ *core.BasicCluster, config config.SharedConfig, _ *core.RegionInfo) error { return nil } @@ -753,7 +753,7 @@ func (sr SplitRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo } // CheckInProgress checks if the step is in the progress of advancing. 
-func (sr SplitRegion) CheckInProgress(_ *core.BasicCluster, config config.Config, _ *core.RegionInfo) error { +func (sr SplitRegion) CheckInProgress(_ *core.BasicCluster, config config.SharedConfig, _ *core.RegionInfo) error { return nil } @@ -878,7 +878,7 @@ func (cpe ChangePeerV2Enter) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (cpe ChangePeerV2Enter) CheckInProgress(_ *core.BasicCluster, config config.Config, region *core.RegionInfo) error { +func (cpe ChangePeerV2Enter) CheckInProgress(_ *core.BasicCluster, config config.SharedConfig, region *core.RegionInfo) error { inJointState, notInJointState := false, false for _, pl := range cpe.PromoteLearners { peer := region.GetStorePeer(pl.ToStore) @@ -1007,7 +1007,7 @@ func (cpl ChangePeerV2Leave) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (cpl ChangePeerV2Leave) CheckInProgress(_ *core.BasicCluster, config config.Config, region *core.RegionInfo) error { +func (cpl ChangePeerV2Leave) CheckInProgress(_ *core.BasicCluster, config config.SharedConfig, region *core.RegionInfo) error { inJointState, notInJointState, demoteLeader := false, false, false leaderStoreID := region.GetLeader().GetStoreId() @@ -1085,7 +1085,7 @@ func (cpl ChangePeerV2Leave) GetCmd(region *core.RegionInfo, useConfChangeV2 boo } } -func validateStore(ci *core.BasicCluster, config config.Config, id uint64) error { +func validateStore(ci *core.BasicCluster, config config.SharedConfig, id uint64) error { store := ci.GetStore(id) if store == nil { return errors.New("target store does not exist") diff --git a/pkg/schedule/operator/step_test.go b/pkg/schedule/operator/step_test.go index 41c18384da45..4865180a8bbf 100644 --- a/pkg/schedule/operator/step_test.go +++ b/pkg/schedule/operator/step_test.go @@ -566,7 +566,7 @@ func (suite *operatorStepTestSuite) check(step OpStep, desc string, testCases [] region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: testCase.Peers}, testCase.Peers[0]) suite.Equal(testCase.ConfVerChanged, step.ConfVerChanged(region)) suite.Equal(testCase.IsFinish, step.IsFinish(region)) - err := step.CheckInProgress(suite.cluster.GetBasicCluster(), suite.cluster.GetOpts(), region) + err := step.CheckInProgress(suite.cluster.GetBasicCluster(), suite.cluster.GetSharedConfig(), region) testCase.CheckInProgress(err) _ = step.GetCmd(region, true) diff --git a/pkg/schedule/placement/rule_manager.go b/pkg/schedule/placement/rule_manager.go index d3f6bda066be..ae6508f66e85 100644 --- a/pkg/schedule/placement/rule_manager.go +++ b/pkg/schedule/placement/rule_manager.go @@ -50,11 +50,11 @@ type RuleManager struct { keyType string storeSetInformer core.StoreSetInformer cache *RegionRuleFitCacheManager - conf config.Config + conf config.SharedConfig } // NewRuleManager creates a RuleManager instance. 
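NewRuleManager, shown next, now receives config.SharedConfig and keeps it in a field instead of the whole configuration. The sketch below models that constructor injection of a narrow interface; SharedConfig, ruleManager, and defaultRuleFromConfig are hypothetical simplifications rather than the placement package's API.

    package main

    import "fmt"

    // Hypothetical subset of config.SharedConfig read by a rule-manager-like
    // component.
    type SharedConfig interface {
        IsPlacementRulesEnabled() bool
        GetMaxReplicas() int
        GetLocationLabels() []string
    }

    type opts struct {
        rulesEnabled bool
        maxReplicas  int
        labels       []string
    }

    func (o *opts) IsPlacementRulesEnabled() bool { return o.rulesEnabled }
    func (o *opts) GetMaxReplicas() int           { return o.maxReplicas }
    func (o *opts) GetLocationLabels() []string   { return o.labels }

    // ruleManager stores only the narrow interface, mirroring how the manager
    // now keeps a shared-config field rather than the full configuration.
    type ruleManager struct {
        conf SharedConfig
    }

    func newRuleManager(conf SharedConfig) *ruleManager { return &ruleManager{conf: conf} }

    // defaultRule is a toy default placement rule derived from shared options.
    type defaultRule struct {
        replicas       int
        locationLabels []string
    }

    // defaultRuleFromConfig seeds a default rule only when placement rules are
    // enabled in the shared options.
    func (m *ruleManager) defaultRuleFromConfig() (defaultRule, bool) {
        if !m.conf.IsPlacementRulesEnabled() {
            return defaultRule{}, false
        }
        return defaultRule{
            replicas:       m.conf.GetMaxReplicas(),
            locationLabels: m.conf.GetLocationLabels(),
        }, true
    }

    func main() {
        m := newRuleManager(&opts{rulesEnabled: true, maxReplicas: 3, labels: []string{"zone", "host"}})
        fmt.Println(m.defaultRuleFromConfig()) // {3 [zone host]} true
    }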
-func NewRuleManager(storage endpoint.RuleStorage, storeSetInformer core.StoreSetInformer, conf config.Config) *RuleManager { +func NewRuleManager(storage endpoint.RuleStorage, storeSetInformer core.StoreSetInformer, conf config.SharedConfig) *RuleManager { return &RuleManager{ storage: storage, storeSetInformer: storeSetInformer, diff --git a/pkg/schedule/scatter/region_scatterer.go b/pkg/schedule/scatter/region_scatterer.go index de90228f7f6a..380ee4104d62 100644 --- a/pkg/schedule/scatter/region_scatterer.go +++ b/pkg/schedule/scatter/region_scatterer.go @@ -138,7 +138,7 @@ func (s *selectedStores) getDistributionByGroupLocked(group string) (map[uint64] type RegionScatterer struct { ctx context.Context name string - cluster sche.ClusterInformer + cluster sche.ScatterCluster ordinaryEngine engineContext specialEngines sync.Map opController *operator.Controller @@ -146,7 +146,7 @@ type RegionScatterer struct { // NewRegionScatterer creates a region scatterer. // RegionScatter is used for the `Lightning`, it will scatter the specified regions before import data. -func NewRegionScatterer(ctx context.Context, cluster sche.ClusterInformer, opController *operator.Controller) *RegionScatterer { +func NewRegionScatterer(ctx context.Context, cluster sche.ScatterCluster, opController *operator.Controller) *RegionScatterer { return &RegionScatterer{ ctx: ctx, name: regionScatterName, @@ -315,7 +315,7 @@ func (r *RegionScatterer) scatterRegion(region *core.RegionInfo, group string) * if store == nil { return nil } - if engineFilter.Target(r.cluster.GetOpts(), store).IsOK() { + if engineFilter.Target(r.cluster.GetSharedConfig(), store).IsOK() { ordinaryPeers[peer.GetStoreId()] = peer } else { engine := store.GetLabelValue(core.EngineKey) @@ -439,7 +439,7 @@ func (r *RegionScatterer) selectCandidates(region *core.RegionInfo, oldFit *plac filters := []filter.Filter{ filter.NewExcludedFilter(r.name, nil, selectedStores), } - scoreGuard := filter.NewPlacementSafeguard(r.name, r.cluster.GetOpts(), r.cluster.GetBasicCluster(), r.cluster.GetRuleManager(), region, sourceStore, oldFit) + scoreGuard := filter.NewPlacementSafeguard(r.name, r.cluster.GetSharedConfig(), r.cluster.GetBasicCluster(), r.cluster.GetRuleManager(), region, sourceStore, oldFit) for _, filterFunc := range context.filterFuncs { filters = append(filters, filterFunc()) } @@ -463,7 +463,7 @@ func (r *RegionScatterer) selectCandidates(region *core.RegionInfo, oldFit *plac // If the storeCount are all the same for the whole cluster(maxStoreTotalCount == minStoreTotalCount), any store // could be selected as candidate. 
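The candidate-selection path above now reaches configuration through r.cluster.GetSharedConfig(), and the scatterer itself is typed against sche.ScatterCluster; the hunk continues below with the pick-count comparison. As a rough sketch of that selection idea (a store must pass a shared-config-driven filter and its pick count must not already be the maximum, unless all counts are equal), with selectCandidates and its types being illustrative only:

    package main

    import (
        "fmt"
        "math"
    )

    // Hypothetical shared-config subset consulted while scattering.
    type SharedConfig interface{ GetLowSpaceRatio() float64 }

    type cfg struct{ lowSpace float64 }

    func (c cfg) GetLowSpaceRatio() float64 { return c.lowSpace }

    type store struct {
        id        uint64
        usedRatio float64
    }

    // selectCandidates sketches the scatterer's candidate pass: a store is kept
    // when it passes a shared-config-driven space check and its historical pick
    // count is below the current maximum (or all counts are equal).
    func selectCandidates(conf SharedConfig, stores []store, picked map[uint64]int) []uint64 {
        maxPicked, minPicked := 0, math.MaxInt
        for _, s := range stores {
            if picked[s.id] > maxPicked {
                maxPicked = picked[s.id]
            }
            if picked[s.id] < minPicked {
                minPicked = picked[s.id]
            }
        }
        var candidates []uint64
        for _, s := range stores {
            if s.usedRatio >= conf.GetLowSpaceRatio() {
                continue // analogous to a filter target check failing
            }
            if picked[s.id] < maxPicked || maxPicked == minPicked {
                candidates = append(candidates, s.id)
            }
        }
        return candidates
    }

    func main() {
        stores := []store{{1, 0.2}, {2, 0.5}, {3, 0.95}}
        picked := map[uint64]int{1: 2, 2: 1}
        fmt.Println(selectCandidates(cfg{lowSpace: 0.8}, stores, picked)) // [2]
    }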
if storeCount < maxStoreTotalCount || maxStoreTotalCount == minStoreTotalCount { - if filter.Target(r.cluster.GetOpts(), store, filters) { + if filter.Target(r.cluster.GetSharedConfig(), store, filters) { candidates = append(candidates, store.GetID()) } } @@ -533,7 +533,7 @@ func (r *RegionScatterer) Put(peers map[uint64]*metapb.Peer, leaderStoreID uint6 if store == nil { continue } - if engineFilter.Target(r.cluster.GetOpts(), store).IsOK() { + if engineFilter.Target(r.cluster.GetSharedConfig(), store).IsOK() { r.ordinaryEngine.selectedPeer.Put(storeID, group) scatterDistributionCounter.WithLabelValues( fmt.Sprintf("%v", storeID), diff --git a/pkg/schedule/scatter/region_scatterer_test.go b/pkg/schedule/scatter/region_scatterer_test.go index 519630f4276a..f8d3ec1ccd3a 100644 --- a/pkg/schedule/scatter/region_scatterer_test.go +++ b/pkg/schedule/scatter/region_scatterer_test.go @@ -91,7 +91,7 @@ func scatter(re *require.Assertions, numStores, numRegions uint64, useRules bool opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) // Add ordinary stores. @@ -148,7 +148,7 @@ func scatterSpecial(re *require.Assertions, numOrdinaryStores, numSpecialStores, opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) // Add ordinary stores. @@ -226,7 +226,7 @@ func TestStoreLimit(t *testing.T) { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add stores 1~6. for i := uint64(1); i <= 5; i++ { @@ -258,7 +258,7 @@ func TestScatterCheck(t *testing.T) { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 5 stores. for i := uint64(1); i <= 5; i++ { tc.AddRegionStore(i, 0) @@ -307,7 +307,7 @@ func TestSomeStoresFilteredScatterGroupInConcurrency(t *testing.T) { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 5 connected stores. 
for i := uint64(1); i <= 5; i++ { tc.AddRegionStore(i, 0) @@ -352,7 +352,7 @@ func TestScatterGroupInConcurrency(t *testing.T) { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 5 stores. for i := uint64(1); i <= 5; i++ { tc.AddRegionStore(i, 0) @@ -424,7 +424,7 @@ func TestScatterForManyRegion(t *testing.T) { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 60 stores. for i := uint64(1); i <= 60; i++ { tc.AddRegionStore(i, 0) @@ -452,7 +452,7 @@ func TestScattersGroup(t *testing.T) { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 5 stores. for i := uint64(1); i <= 5; i++ { tc.AddRegionStore(i, 0) @@ -541,7 +541,7 @@ func TestRegionFromDifferentGroups(t *testing.T) { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 6 stores. storeCount := 6 for i := uint64(1); i <= uint64(storeCount); i++ { @@ -577,7 +577,7 @@ func TestRegionHasLearner(t *testing.T) { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 8 stores. voterCount := uint64(6) storeCount := uint64(8) @@ -665,7 +665,7 @@ func TestSelectedStoresTooFewPeers(t *testing.T) { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 4 stores. for i := uint64(1); i <= 4; i++ { tc.AddRegionStore(i, 0) @@ -702,7 +702,7 @@ func TestSelectedStoresTooManyPeers(t *testing.T) { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 4 stores. 
for i := uint64(1); i <= 5; i++ { tc.AddRegionStore(i, 0) @@ -740,7 +740,7 @@ func TestBalanceRegion(t *testing.T) { opt.SetLocationLabels([]string{"host"}) tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 6 stores in 3 hosts. for i := uint64(2); i <= 7; i++ { tc.AddLabelsStore(i, 0, map[string]string{"host": strconv.FormatUint(i/2, 10)}) diff --git a/pkg/schedule/schedulers/balance_benchmark_test.go b/pkg/schedule/schedulers/balance_benchmark_test.go index ace59e0caa96..694d5edb658f 100644 --- a/pkg/schedule/schedulers/balance_benchmark_test.go +++ b/pkg/schedule/schedulers/balance_benchmark_test.go @@ -46,7 +46,7 @@ func newBenchCluster(ruleEnable, labelEnable bool, tombstoneEnable bool) (contex ctx, cancel := context.WithCancel(context.Background()) opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), nil) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), nil) opt.GetScheduleConfig().TolerantSizeRatio = float64(storeCount) opt.SetPlacementRuleEnabled(ruleEnable) @@ -95,7 +95,7 @@ func newBenchBigCluster(storeNumInOneRack, regionNum int) (context.CancelFunc, * ctx, cancel := context.WithCancel(context.Background()) opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), nil) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), nil) opt.GetScheduleConfig().TolerantSizeRatio = float64(storeCount) opt.SetPlacementRuleEnabled(true) diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index 3c65ddbcee81..c53f147c65d3 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -224,8 +224,8 @@ func (l *balanceLeaderScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(l.conf) } -func (l *balanceLeaderScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { - allowed := l.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() +func (l *balanceLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { + allowed := l.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpLeader.String()).Inc() } @@ -324,7 +324,7 @@ func (cs *candidateStores) resortStoreWithPos(pos int) { } } -func (l *balanceLeaderScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (l *balanceLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { l.conf.mu.RLock() defer l.conf.mu.RUnlock() basePlan := plan.NewBalanceSchedulerPlan() @@ -335,7 +335,7 @@ func (l *balanceLeaderScheduler) Schedule(cluster sche.ScheduleCluster, dryRun b batch := l.conf.Batch balanceLeaderScheduleCounter.Inc() - leaderSchedulePolicy := cluster.GetOpts().GetLeaderSchedulePolicy() + leaderSchedulePolicy := cluster.GetSchedulerConfig().GetLeaderSchedulePolicy() opInfluence := l.OpController.GetOpInfluence(cluster.GetBasicCluster()) kind := constant.NewScheduleKind(constant.LeaderKind, 
leaderSchedulePolicy) solver := newSolver(basePlan, kind, cluster, opInfluence) @@ -344,8 +344,8 @@ func (l *balanceLeaderScheduler) Schedule(cluster sche.ScheduleCluster, dryRun b scoreFunc := func(store *core.StoreInfo) float64 { return store.LeaderScore(solver.kind.Policy, solver.GetOpInfluence(store.GetID())) } - sourceCandidate := newCandidateStores(filter.SelectSourceStores(stores, l.filters, cluster.GetOpts(), collector, l.filterCounter), false, scoreFunc) - targetCandidate := newCandidateStores(filter.SelectTargetStores(stores, l.filters, cluster.GetOpts(), nil, l.filterCounter), true, scoreFunc) + sourceCandidate := newCandidateStores(filter.SelectSourceStores(stores, l.filters, cluster.GetSchedulerConfig(), collector, l.filterCounter), false, scoreFunc) + targetCandidate := newCandidateStores(filter.SelectTargetStores(stores, l.filters, cluster.GetSchedulerConfig(), nil, l.filterCounter), true, scoreFunc) usedRegions := make(map[uint64]struct{}) result := make([]*operator.Operator, 0, batch) @@ -419,7 +419,7 @@ func makeInfluence(op *operator.Operator, plan *solver, usedRegions map[uint64]s storesIDs := candidate.binarySearchStores(plan.Source, plan.Target) candidateUpdateStores[id] = storesIDs } - operator.AddOpInfluence(op, plan.opInfluence, plan.ScheduleCluster.GetBasicCluster()) + operator.AddOpInfluence(op, plan.opInfluence, plan.SchedulerCluster.GetBasicCluster()) for id, candidate := range candidates { for _, pos := range candidateUpdateStores[id] { candidate.resortStoreWithPos(pos) @@ -450,7 +450,7 @@ func (l *balanceLeaderScheduler) transferLeaderOut(solver *solver, collector *pl defer func() { solver.Step-- }() targets := solver.GetFollowerStores(solver.Region) finalFilters := l.filters - conf := solver.GetOpts() + conf := solver.GetSchedulerConfig() if leaderFilter := filter.NewPlacementLeaderSafeguard(l.GetName(), conf, solver.GetBasicCluster(), solver.GetRuleManager(), solver.Region, solver.Source, false /*allowMoveLeader*/); leaderFilter != nil { finalFilters = append(l.filters, leaderFilter) } @@ -499,7 +499,7 @@ func (l *balanceLeaderScheduler) transferLeaderIn(solver *solver, collector *pla return nil } finalFilters := l.filters - conf := solver.GetOpts() + conf := solver.GetSchedulerConfig() if leaderFilter := filter.NewPlacementLeaderSafeguard(l.GetName(), conf, solver.GetBasicCluster(), solver.GetRuleManager(), solver.Region, solver.Source, false /*allowMoveLeader*/); leaderFilter != nil { finalFilters = append(l.filters, leaderFilter) } diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index 86705045e480..d27d18b086c5 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -113,15 +113,15 @@ func (s *balanceRegionScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } -func (s *balanceRegionScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { - allowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetOpts().GetRegionScheduleLimit() +func (s *balanceRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { + allowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetSchedulerConfig().GetRegionScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc() } return allowed } -func (s *balanceRegionScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *balanceRegionScheduler) 
Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { basePlan := plan.NewBalanceSchedulerPlan() var collector *plan.Collector if dryRun { @@ -129,10 +129,10 @@ func (s *balanceRegionScheduler) Schedule(cluster sche.ScheduleCluster, dryRun b } balanceRegionScheduleCounter.Inc() stores := cluster.GetStores() - opts := cluster.GetOpts() + conf := cluster.GetSchedulerConfig() snapshotFilter := filter.NewSnapshotSendFilter(stores, constant.Medium) - faultTargets := filter.SelectUnavailableTargetStores(stores, s.filters, opts, collector, s.filterCounter) - sourceStores := filter.SelectSourceStores(stores, s.filters, opts, collector, s.filterCounter) + faultTargets := filter.SelectUnavailableTargetStores(stores, s.filters, conf, collector, s.filterCounter) + sourceStores := filter.SelectSourceStores(stores, s.filters, conf, collector, s.filterCounter) opInfluence := s.OpController.GetOpInfluence(cluster.GetBasicCluster()) s.OpController.GetFastOpInfluence(cluster.GetBasicCluster(), opInfluence) kind := constant.NewScheduleKind(constant.RegionKind, constant.BySize) @@ -141,8 +141,8 @@ func (s *balanceRegionScheduler) Schedule(cluster sche.ScheduleCluster, dryRun b sort.Slice(sourceStores, func(i, j int) bool { iOp := solver.GetOpInfluence(sourceStores[i].GetID()) jOp := solver.GetOpInfluence(sourceStores[j].GetID()) - return sourceStores[i].RegionScore(opts.GetRegionScoreFormulaVersion(), opts.GetHighSpaceRatio(), opts.GetLowSpaceRatio(), iOp) > - sourceStores[j].RegionScore(opts.GetRegionScoreFormulaVersion(), opts.GetHighSpaceRatio(), opts.GetLowSpaceRatio(), jOp) + return sourceStores[i].RegionScore(conf.GetRegionScoreFormulaVersion(), conf.GetHighSpaceRatio(), conf.GetLowSpaceRatio(), iOp) > + sourceStores[j].RegionScore(conf.GetRegionScoreFormulaVersion(), conf.GetHighSpaceRatio(), conf.GetLowSpaceRatio(), jOp) }) pendingFilter := filter.NewRegionPendingFilter() @@ -239,12 +239,13 @@ func (s *balanceRegionScheduler) transferPeer(solver *solver, collector *plan.Co } // the order of the filters should be sorted by the cost of the cpu overhead. // the more expensive the filter is, the later it should be placed. + conf := solver.GetSchedulerConfig() filters := []filter.Filter{ filter.NewExcludedFilter(s.GetName(), nil, excludeTargets), - filter.NewPlacementSafeguard(s.GetName(), solver.GetOpts(), solver.GetBasicCluster(), solver.GetRuleManager(), + filter.NewPlacementSafeguard(s.GetName(), conf, solver.GetBasicCluster(), solver.GetRuleManager(), solver.Region, solver.Source, solver.fit), } - candidates := filter.NewCandidates(dstStores).FilterTarget(solver.GetOpts(), collector, s.filterCounter, filters...) + candidates := filter.NewCandidates(dstStores).FilterTarget(conf, collector, s.filterCounter, filters...) 
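From this point the schedulers are typed against sche.SchedulerCluster and read their knobs through GetSchedulerConfig(), as the balance-region hunks above show for the schedule limit and the region-score parameters. A minimal sketch of the admission gate a scheduler applies before producing operators, with SchedulerCluster, SchedulerConfig, and isScheduleAllowed as hypothetical stand-ins:

    package main

    import "fmt"

    // Hypothetical trimmed-down stand-ins for config.SchedulerConfig and
    // sche.SchedulerCluster.
    type SchedulerConfig interface{ GetRegionScheduleLimit() uint64 }

    type SchedulerCluster interface {
        GetSchedulerConfig() SchedulerConfig
        RunningRegionOperators() uint64
    }

    type options struct{ regionScheduleLimit uint64 }

    func (o *options) GetRegionScheduleLimit() uint64 { return o.regionScheduleLimit }

    type fakeCluster struct {
        opts    *options
        running uint64
    }

    func (c fakeCluster) GetSchedulerConfig() SchedulerConfig { return c.opts }
    func (c fakeCluster) RunningRegionOperators() uint64      { return c.running }

    // isScheduleAllowed mirrors the gate a scheduler applies before producing
    // operators: compare the in-flight operator count against a scheduler-only
    // limit fetched through GetSchedulerConfig().
    func isScheduleAllowed(cluster SchedulerCluster) bool {
        return cluster.RunningRegionOperators() < cluster.GetSchedulerConfig().GetRegionScheduleLimit()
    }

    func main() {
        opts := &options{regionScheduleLimit: 4}
        fmt.Println(isScheduleAllowed(fakeCluster{opts: opts, running: 2})) // true
        fmt.Println(isScheduleAllowed(fakeCluster{opts: opts, running: 4})) // false
    }

Keeping the limit behind a scheduler-only interface means a component compiled against the checker or shared view simply cannot reach it, which is the point of the split.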
if len(candidates.Stores) != 0 { solver.Step++ } diff --git a/pkg/schedule/schedulers/balance_test.go b/pkg/schedule/schedulers/balance_test.go index fd07cb1d04b5..4530217f00b7 100644 --- a/pkg/schedule/schedulers/balance_test.go +++ b/pkg/schedule/schedulers/balance_test.go @@ -229,7 +229,7 @@ type balanceLeaderSchedulerTestSuite struct { tc *mockcluster.Cluster lb Scheduler oc *operator.Controller - conf config.Config + conf config.SchedulerConfig } func TestBalanceLeaderSchedulerTestSuite(t *testing.T) { diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index 6e575c0f932a..556743563139 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ b/pkg/schedule/schedulers/balance_witness.go @@ -209,15 +209,15 @@ func (b *balanceWitnessScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(b.conf) } -func (b *balanceWitnessScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { - allowed := b.OpController.OperatorCount(operator.OpWitness) < cluster.GetOpts().GetWitnessScheduleLimit() +func (b *balanceWitnessScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { + allowed := b.OpController.OperatorCount(operator.OpWitness) < cluster.GetSchedulerConfig().GetWitnessScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(b.GetType(), operator.OpWitness.String()).Inc() } return allowed } -func (b *balanceWitnessScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (b *balanceWitnessScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { b.conf.mu.RLock() defer b.conf.mu.RUnlock() basePlan := plan.NewBalanceSchedulerPlan() @@ -236,7 +236,7 @@ func (b *balanceWitnessScheduler) Schedule(cluster sche.ScheduleCluster, dryRun scoreFunc := func(store *core.StoreInfo) float64 { return store.WitnessScore(solver.GetOpInfluence(store.GetID())) } - sourceCandidate := newCandidateStores(filter.SelectSourceStores(stores, b.filters, cluster.GetOpts(), collector, b.filterCounter), false, scoreFunc) + sourceCandidate := newCandidateStores(filter.SelectSourceStores(stores, b.filters, cluster.GetSchedulerConfig(), collector, b.filterCounter), false, scoreFunc) usedRegions := make(map[uint64]struct{}) result := make([]*operator.Operator, 0, batch) @@ -296,11 +296,11 @@ func (b *balanceWitnessScheduler) transferWitnessOut(solver *solver, collector * defer func() { solver.Step-- }() targets := solver.GetNonWitnessVoterStores(solver.Region) finalFilters := b.filters - opts := solver.GetOpts() - if witnessFilter := filter.NewPlacementWitnessSafeguard(b.GetName(), opts, solver.GetBasicCluster(), solver.GetRuleManager(), solver.Region, solver.Source, solver.fit); witnessFilter != nil { + conf := solver.GetSchedulerConfig() + if witnessFilter := filter.NewPlacementWitnessSafeguard(b.GetName(), conf, solver.GetBasicCluster(), solver.GetRuleManager(), solver.Region, solver.Source, solver.fit); witnessFilter != nil { finalFilters = append(b.filters, witnessFilter) } - targets = filter.SelectTargetStores(targets, finalFilters, opts, collector, b.filterCounter) + targets = filter.SelectTargetStores(targets, finalFilters, conf, collector, b.filterCounter) sort.Slice(targets, func(i, j int) bool { iOp := solver.GetOpInfluence(targets[i].GetID()) jOp := solver.GetOpInfluence(targets[j].GetID()) diff --git a/pkg/schedule/schedulers/balance_witness_test.go b/pkg/schedule/schedulers/balance_witness_test.go index 
aa368b4a2d8b..6973010af202 100644 --- a/pkg/schedule/schedulers/balance_witness_test.go +++ b/pkg/schedule/schedulers/balance_witness_test.go @@ -36,7 +36,7 @@ type balanceWitnessSchedulerTestSuite struct { tc *mockcluster.Cluster lb Scheduler oc *operator.Controller - conf config.Config + conf config.SchedulerConfig } func (suite *balanceWitnessSchedulerTestSuite) SetupTest() { diff --git a/pkg/schedule/schedulers/base_scheduler.go b/pkg/schedule/schedulers/base_scheduler.go index f83043c8884d..8da0f13626bf 100644 --- a/pkg/schedule/schedulers/base_scheduler.go +++ b/pkg/schedule/schedulers/base_scheduler.go @@ -88,7 +88,7 @@ func (s *BaseScheduler) GetNextInterval(interval time.Duration) time.Duration { } // Prepare does some prepare work -func (s *BaseScheduler) Prepare(cluster sche.ScheduleCluster) error { return nil } +func (s *BaseScheduler) Prepare(cluster sche.SchedulerCluster) error { return nil } // Cleanup does some cleanup work -func (s *BaseScheduler) Cleanup(cluster sche.ScheduleCluster) {} +func (s *BaseScheduler) Cleanup(cluster sche.SchedulerCluster) {} diff --git a/pkg/schedule/schedulers/diagnostic_recorder.go b/pkg/schedule/schedulers/diagnostic_recorder.go index 49c775c2b080..749853b8c267 100644 --- a/pkg/schedule/schedulers/diagnostic_recorder.go +++ b/pkg/schedule/schedulers/diagnostic_recorder.go @@ -54,13 +54,13 @@ var DiagnosableSummaryFunc = map[string]plan.Summary{ // DiagnosticRecorder is used to manage diagnostic for one scheduler. type DiagnosticRecorder struct { schedulerName string - config sc.Config + config sc.SchedulerConfig summaryFunc plan.Summary results *cache.FIFO } // NewDiagnosticRecorder creates a new DiagnosticRecorder. -func NewDiagnosticRecorder(name string, config sc.Config) *DiagnosticRecorder { +func NewDiagnosticRecorder(name string, config sc.SchedulerConfig) *DiagnosticRecorder { summaryFunc, ok := DiagnosableSummaryFunc[name] if !ok { return nil diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index df45d9af3af5..b307bf5fb73f 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -204,7 +204,7 @@ func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } -func (s *evictLeaderScheduler) Prepare(cluster sche.ScheduleCluster) error { +func (s *evictLeaderScheduler) Prepare(cluster sche.SchedulerCluster) error { s.conf.mu.RLock() defer s.conf.mu.RUnlock() var res error @@ -216,7 +216,7 @@ func (s *evictLeaderScheduler) Prepare(cluster sche.ScheduleCluster) error { return res } -func (s *evictLeaderScheduler) Cleanup(cluster sche.ScheduleCluster) { +func (s *evictLeaderScheduler) Cleanup(cluster sche.SchedulerCluster) { s.conf.mu.RLock() defer s.conf.mu.RUnlock() for id := range s.conf.StoreIDWithRanges { @@ -224,15 +224,15 @@ func (s *evictLeaderScheduler) Cleanup(cluster sche.ScheduleCluster) { } } -func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { - allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() +func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { + allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() } return allowed } -func (s *evictLeaderScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) 
([]*operator.Operator, []plan.Plan) { +func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { evictLeaderCounter.Inc() return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize), nil } @@ -257,7 +257,7 @@ type evictLeaderStoresConf interface { getKeyRangesByID(id uint64) []core.KeyRange } -func scheduleEvictLeaderBatch(name, typ string, cluster sche.ScheduleCluster, conf evictLeaderStoresConf, batchSize int) []*operator.Operator { +func scheduleEvictLeaderBatch(name, typ string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf, batchSize int) []*operator.Operator { var ops []*operator.Operator for i := 0; i < batchSize; i++ { once := scheduleEvictLeaderOnce(name, typ, cluster, conf) @@ -274,7 +274,7 @@ func scheduleEvictLeaderBatch(name, typ string, cluster sche.ScheduleCluster, co return ops } -func scheduleEvictLeaderOnce(name, typ string, cluster sche.ScheduleCluster, conf evictLeaderStoresConf) []*operator.Operator { +func scheduleEvictLeaderOnce(name, typ string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator { stores := conf.getStores() ops := make([]*operator.Operator, 0, len(stores)) for _, storeID := range stores { @@ -306,7 +306,7 @@ func scheduleEvictLeaderOnce(name, typ string, cluster sche.ScheduleCluster, con filters = append(filters, &filter.StoreStateFilter{ActionScope: name, TransferLeader: true, OperatorLevel: constant.Urgent}) candidates := filter.NewCandidates(cluster.GetFollowerStores(region)). - FilterTarget(cluster.GetOpts(), nil, nil, filters...) + FilterTarget(cluster.GetSchedulerConfig(), nil, nil, filters...) // Compatible with old TiKV transfer leader logic. target := candidates.RandomPick() targets := candidates.PickAll() diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index ebaa6e4a7bad..ead923783986 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -109,7 +109,7 @@ func (s *evictSlowStoreScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } -func (s *evictSlowStoreScheduler) Prepare(cluster sche.ScheduleCluster) error { +func (s *evictSlowStoreScheduler) Prepare(cluster sche.SchedulerCluster) error { evictStore := s.conf.evictStore() if evictStore != 0 { return cluster.SlowStoreEvicted(evictStore) @@ -117,11 +117,11 @@ func (s *evictSlowStoreScheduler) Prepare(cluster sche.ScheduleCluster) error { return nil } -func (s *evictSlowStoreScheduler) Cleanup(cluster sche.ScheduleCluster) { +func (s *evictSlowStoreScheduler) Cleanup(cluster sche.SchedulerCluster) { s.cleanupEvictLeader(cluster) } -func (s *evictSlowStoreScheduler) prepareEvictLeader(cluster sche.ScheduleCluster, storeID uint64) error { +func (s *evictSlowStoreScheduler) prepareEvictLeader(cluster sche.SchedulerCluster, storeID uint64) error { err := s.conf.setStoreAndPersist(storeID) if err != nil { log.Info("evict-slow-store-scheduler persist config failed", zap.Uint64("store-id", storeID)) @@ -131,7 +131,7 @@ func (s *evictSlowStoreScheduler) prepareEvictLeader(cluster sche.ScheduleCluste return cluster.SlowStoreEvicted(storeID) } -func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster sche.ScheduleCluster) { +func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster sche.SchedulerCluster) { evictSlowStore, err := s.conf.clearAndPersist() if err != nil { log.Info("evict-slow-store-scheduler persist config 
failed", zap.Uint64("store-id", evictSlowStore)) @@ -142,13 +142,13 @@ func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster sche.ScheduleCluste cluster.SlowStoreRecovered(evictSlowStore) } -func (s *evictSlowStoreScheduler) schedulerEvictLeader(cluster sche.ScheduleCluster) []*operator.Operator { +func (s *evictSlowStoreScheduler) schedulerEvictLeader(cluster sche.SchedulerCluster) []*operator.Operator { return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize) } -func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { +func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { if s.conf.evictStore() != 0 { - allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() + allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() } @@ -157,7 +157,7 @@ func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster return true } -func (s *evictSlowStoreScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictSlowStoreScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { evictSlowStoreCounter.Inc() var ops []*operator.Operator diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index 52d0d38012bf..067f9e517f2c 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -107,7 +107,7 @@ func (conf *evictSlowTrendSchedulerConfig) setStoreAndPersist(id uint64) error { return conf.Persist() } -func (conf *evictSlowTrendSchedulerConfig) clearAndPersist(cluster sche.ScheduleCluster) (oldID uint64, err error) { +func (conf *evictSlowTrendSchedulerConfig) clearAndPersist(cluster sche.SchedulerCluster) (oldID uint64, err error) { oldID = conf.evictedStore() if oldID == 0 { return @@ -139,7 +139,7 @@ func (s *evictSlowTrendScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } -func (s *evictSlowTrendScheduler) Prepare(cluster sche.ScheduleCluster) error { +func (s *evictSlowTrendScheduler) Prepare(cluster sche.SchedulerCluster) error { evictedStoreID := s.conf.evictedStore() if evictedStoreID == 0 { return nil @@ -147,11 +147,11 @@ func (s *evictSlowTrendScheduler) Prepare(cluster sche.ScheduleCluster) error { return cluster.SlowTrendEvicted(evictedStoreID) } -func (s *evictSlowTrendScheduler) Cleanup(cluster sche.ScheduleCluster) { +func (s *evictSlowTrendScheduler) Cleanup(cluster sche.SchedulerCluster) { s.cleanupEvictLeader(cluster) } -func (s *evictSlowTrendScheduler) prepareEvictLeader(cluster sche.ScheduleCluster, storeID uint64) error { +func (s *evictSlowTrendScheduler) prepareEvictLeader(cluster sche.SchedulerCluster, storeID uint64) error { err := s.conf.setStoreAndPersist(storeID) if err != nil { log.Info("evict-slow-trend-scheduler persist config failed", zap.Uint64("store-id", storeID)) @@ -160,7 +160,7 @@ func (s *evictSlowTrendScheduler) prepareEvictLeader(cluster sche.ScheduleCluste return cluster.SlowTrendEvicted(storeID) } -func (s *evictSlowTrendScheduler) cleanupEvictLeader(cluster sche.ScheduleCluster) { +func (s *evictSlowTrendScheduler) cleanupEvictLeader(cluster sche.SchedulerCluster) { evictedStoreID, err := 
s.conf.clearAndPersist(cluster) if err != nil { log.Info("evict-slow-trend-scheduler persist config failed", zap.Uint64("store-id", evictedStoreID)) @@ -170,7 +170,7 @@ func (s *evictSlowTrendScheduler) cleanupEvictLeader(cluster sche.ScheduleCluste } } -func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster sche.ScheduleCluster) []*operator.Operator { +func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster sche.SchedulerCluster) []*operator.Operator { store := cluster.GetStore(s.conf.evictedStore()) if store == nil { return nil @@ -179,18 +179,18 @@ func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster sche.ScheduleClust return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize) } -func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { +func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { if s.conf.evictedStore() == 0 { return true } - allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() + allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() } return allowed } -func (s *evictSlowTrendScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictSlowTrendScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { schedulerCounter.WithLabelValues(s.GetName(), "schedule").Inc() var ops []*operator.Operator @@ -270,7 +270,7 @@ func newEvictSlowTrendScheduler(opController *operator.Controller, conf *evictSl } } -func chooseEvictCandidate(cluster sche.ScheduleCluster) (slowStore *core.StoreInfo) { +func chooseEvictCandidate(cluster sche.SchedulerCluster) (slowStore *core.StoreInfo) { stores := cluster.GetStores() if len(stores) < 3 { storeSlowTrendActionStatusGauge.WithLabelValues("cand.none:too-few").Inc() @@ -312,7 +312,7 @@ func chooseEvictCandidate(cluster sche.ScheduleCluster) (slowStore *core.StoreIn } store := candidates[0] - affectedStoreThreshold := int(float64(len(stores)) * cluster.GetOpts().GetSlowStoreEvictingAffectedStoreRatioThreshold()) + affectedStoreThreshold := int(float64(len(stores)) * cluster.GetSchedulerConfig().GetSlowStoreEvictingAffectedStoreRatioThreshold()) if affectedStoreCount < affectedStoreThreshold { log.Info("evict-slow-trend-scheduler failed to confirm candidate: it only affect a few stores", zap.Uint64("store-id", store.GetID())) storeSlowTrendActionStatusGauge.WithLabelValues("cand.none:affect-a-few").Inc() @@ -330,7 +330,7 @@ func chooseEvictCandidate(cluster sche.ScheduleCluster) (slowStore *core.StoreIn return store } -func checkStoresAreUpdated(cluster sche.ScheduleCluster, slowStoreID uint64, slowStoreRecordTS time.Time) bool { +func checkStoresAreUpdated(cluster sche.SchedulerCluster, slowStoreID uint64, slowStoreRecordTS time.Time) bool { stores := cluster.GetStores() if len(stores) <= 1 { return false @@ -359,7 +359,7 @@ func checkStoresAreUpdated(cluster sche.ScheduleCluster, slowStoreID uint64, slo return updatedStores >= expected } -func checkStoreSlowerThanOthers(cluster sche.ScheduleCluster, target *core.StoreInfo) bool { +func checkStoreSlowerThanOthers(cluster sche.SchedulerCluster, target *core.StoreInfo) bool { stores := cluster.GetStores() expected := (len(stores)*2 + 1) / 3 targetSlowTrend := 
target.GetSlowTrend() @@ -390,7 +390,7 @@ func checkStoreSlowerThanOthers(cluster sche.ScheduleCluster, target *core.Store return slowerThanStoresNum >= expected } -func checkStoreCanRecover(cluster sche.ScheduleCluster, target *core.StoreInfo) bool { +func checkStoreCanRecover(cluster sche.SchedulerCluster, target *core.StoreInfo) bool { /* // // This might not be necessary, @@ -413,7 +413,7 @@ func checkStoreCanRecover(cluster sche.ScheduleCluster, target *core.StoreInfo) return checkStoreFasterThanOthers(cluster, target) } -func checkStoreFasterThanOthers(cluster sche.ScheduleCluster, target *core.StoreInfo) bool { +func checkStoreFasterThanOthers(cluster sche.SchedulerCluster, target *core.StoreInfo) bool { stores := cluster.GetStores() expected := (len(stores) + 1) / 2 targetSlowTrend := target.GetSlowTrend() diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index 18b6e7edd52f..d0c0cc9a1be0 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -152,9 +152,10 @@ func (s *grantHotRegionScheduler) EncodeConfig() ([]byte, error) { // IsScheduleAllowed returns whether the scheduler is allowed to schedule. // TODO it should check if there is any scheduler such as evict or hot region scheduler -func (s *grantHotRegionScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { - regionAllowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetOpts().GetRegionScheduleLimit() - leaderAllowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() +func (s *grantHotRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { + conf := cluster.GetSchedulerConfig() + regionAllowed := s.OpController.OperatorCount(operator.OpRegion) < conf.GetRegionScheduleLimit() + leaderAllowed := s.OpController.OperatorCount(operator.OpLeader) < conf.GetLeaderScheduleLimit() if !regionAllowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc() } @@ -226,14 +227,14 @@ func newGrantHotRegionHandler(config *grantHotRegionSchedulerConfig) http.Handle return router } -func (s *grantHotRegionScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *grantHotRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { grantHotRegionCounter.Inc() rw := s.randomRWType() s.prepareForBalance(rw, cluster) return s.dispatch(rw, cluster), nil } -func (s *grantHotRegionScheduler) dispatch(typ statistics.RWType, cluster sche.ScheduleCluster) []*operator.Operator { +func (s *grantHotRegionScheduler) dispatch(typ statistics.RWType, cluster sche.SchedulerCluster) []*operator.Operator { stLoadInfos := s.stLoadInfos[buildResourceType(typ, constant.RegionKind)] infos := make([]*statistics.StoreLoadDetail, len(stLoadInfos)) index := 0 @@ -247,7 +248,7 @@ func (s *grantHotRegionScheduler) dispatch(typ statistics.RWType, cluster sche.S return s.randomSchedule(cluster, infos) } -func (s *grantHotRegionScheduler) randomSchedule(cluster sche.ScheduleCluster, srcStores []*statistics.StoreLoadDetail) (ops []*operator.Operator) { +func (s *grantHotRegionScheduler) randomSchedule(cluster sche.SchedulerCluster, srcStores []*statistics.StoreLoadDetail) (ops []*operator.Operator) { isLeader := s.r.Int()%2 == 1 for _, srcStore := range srcStores { srcStoreID := srcStore.GetID() @@ -278,7 +279,7 @@ func (s *grantHotRegionScheduler) 
randomSchedule(cluster sche.ScheduleCluster, s return nil } -func (s *grantHotRegionScheduler) transfer(cluster sche.ScheduleCluster, regionID uint64, srcStoreID uint64, isLeader bool) (op *operator.Operator, err error) { +func (s *grantHotRegionScheduler) transfer(cluster sche.SchedulerCluster, regionID uint64, srcStoreID uint64, isLeader bool) (op *operator.Operator, err error) { srcRegion := cluster.GetRegion(regionID) if srcRegion == nil || len(srcRegion.GetDownPeers()) != 0 || len(srcRegion.GetPendingPeers()) != 0 { return nil, errs.ErrRegionRuleNotFound @@ -289,7 +290,7 @@ func (s *grantHotRegionScheduler) transfer(cluster sche.ScheduleCluster, regionI return nil, errs.ErrStoreNotFound } filters := []filter.Filter{ - filter.NewPlacementSafeguard(s.GetName(), cluster.GetOpts(), cluster.GetBasicCluster(), cluster.GetRuleManager(), srcRegion, srcStore, nil), + filter.NewPlacementSafeguard(s.GetName(), cluster.GetSchedulerConfig(), cluster.GetBasicCluster(), cluster.GetRuleManager(), srcRegion, srcStore, nil), } destStoreIDs := make([]uint64, 0, len(s.conf.StoreIDs)) @@ -304,7 +305,7 @@ func (s *grantHotRegionScheduler) transfer(cluster sche.ScheduleCluster, regionI } for _, storeID := range candidate { store := cluster.GetStore(storeID) - if !filter.Target(cluster.GetOpts(), store, filters) { + if !filter.Target(cluster.GetSchedulerConfig(), store, filters) { continue } destStoreIDs = append(destStoreIDs, storeID) diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go index 41918f96ed17..08856d101cc0 100644 --- a/pkg/schedule/schedulers/grant_leader.go +++ b/pkg/schedule/schedulers/grant_leader.go @@ -178,7 +178,7 @@ func (s *grantLeaderScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } -func (s *grantLeaderScheduler) Prepare(cluster sche.ScheduleCluster) error { +func (s *grantLeaderScheduler) Prepare(cluster sche.SchedulerCluster) error { s.conf.mu.RLock() defer s.conf.mu.RUnlock() var res error @@ -190,7 +190,7 @@ func (s *grantLeaderScheduler) Prepare(cluster sche.ScheduleCluster) error { return res } -func (s *grantLeaderScheduler) Cleanup(cluster sche.ScheduleCluster) { +func (s *grantLeaderScheduler) Cleanup(cluster sche.SchedulerCluster) { s.conf.mu.RLock() defer s.conf.mu.RUnlock() for id := range s.conf.StoreIDWithRanges { @@ -198,15 +198,15 @@ func (s *grantLeaderScheduler) Cleanup(cluster sche.ScheduleCluster) { } } -func (s *grantLeaderScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { - allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() +func (s *grantLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { + allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() } return allowed } -func (s *grantLeaderScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *grantLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { grantLeaderCounter.Inc() s.conf.mu.RLock() defer s.conf.mu.RUnlock() diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index 7984aa15698e..92a7aac4dc34 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -120,11 +120,11 @@ func newBaseHotScheduler(opController 
*operator.Controller) *baseHotScheduler { // prepareForBalance calculate the summary of pending Influence for each store and prepare the load detail for // each store, only update read or write load detail -func (h *baseHotScheduler) prepareForBalance(rw statistics.RWType, cluster sche.ScheduleCluster) { +func (h *baseHotScheduler) prepareForBalance(rw statistics.RWType, cluster sche.SchedulerCluster) { h.stInfos = statistics.SummaryStoreInfos(cluster.GetStores()) h.summaryPendingInfluence() h.storesLoads = cluster.GetStoresLoads() - isTraceRegionFlow := cluster.GetOpts().IsTraceRegionFlow() + isTraceRegionFlow := cluster.GetSchedulerConfig().IsTraceRegionFlow() prepare := func(regionStats map[uint64][]*statistics.HotPeerStat, resource constant.ResourceKind) { ty := buildResourceType(rw, resource) @@ -268,21 +268,21 @@ func (h *hotScheduler) GetNextInterval(interval time.Duration) time.Duration { return intervalGrow(h.GetMinInterval(), maxHotScheduleInterval, exponentialGrowth) } -func (h *hotScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { - allowed := h.OpController.OperatorCount(operator.OpHotRegion) < cluster.GetOpts().GetHotRegionScheduleLimit() +func (h *hotScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { + allowed := h.OpController.OperatorCount(operator.OpHotRegion) < cluster.GetSchedulerConfig().GetHotRegionScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(h.GetType(), operator.OpHotRegion.String()).Inc() } return allowed } -func (h *hotScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (h *hotScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { hotSchedulerCounter.Inc() rw := h.randomRWType() return h.dispatch(rw, cluster), nil } -func (h *hotScheduler) dispatch(typ statistics.RWType, cluster sche.ScheduleCluster) []*operator.Operator { +func (h *hotScheduler) dispatch(typ statistics.RWType, cluster sche.SchedulerCluster) []*operator.Operator { h.Lock() defer h.Unlock() h.prepareForBalance(typ, cluster) @@ -316,7 +316,7 @@ func (h *hotScheduler) tryAddPendingInfluence(op *operator.Operator, srcStore [] return true } -func (h *hotScheduler) balanceHotReadRegions(cluster sche.ScheduleCluster) []*operator.Operator { +func (h *hotScheduler) balanceHotReadRegions(cluster sche.SchedulerCluster) []*operator.Operator { leaderSolver := newBalanceSolver(h, cluster, statistics.Read, transferLeader) leaderOps := leaderSolver.solve() peerSolver := newBalanceSolver(h, cluster, statistics.Read, movePeer) @@ -359,7 +359,7 @@ func (h *hotScheduler) balanceHotReadRegions(cluster sche.ScheduleCluster) []*op return nil } -func (h *hotScheduler) balanceHotWriteRegions(cluster sche.ScheduleCluster) []*operator.Operator { +func (h *hotScheduler) balanceHotWriteRegions(cluster sche.SchedulerCluster) []*operator.Operator { // prefer to balance by peer s := h.r.Intn(100) switch { @@ -448,7 +448,7 @@ func isAvailableV1(s *solution) bool { } type balanceSolver struct { - sche.ScheduleCluster + sche.SchedulerCluster sche *hotScheduler stLoadDetail map[uint64]*statistics.StoreLoadDetail rwTy statistics.RWType @@ -528,7 +528,7 @@ func (bs *balanceSolver) init() { bs.firstPriority, bs.secondPriority = prioritiesToDim(bs.getPriorities()) bs.greatDecRatio, bs.minorDecRatio = bs.sche.conf.GetGreatDecRatio(), bs.sche.conf.GetMinorDecRatio() bs.maxPeerNum = bs.sche.conf.GetMaxPeerNumber() - bs.minHotDegree = bs.GetOpts().GetHotRegionCacheHitsThreshold() 
+ bs.minHotDegree = bs.GetSchedulerConfig().GetHotRegionCacheHitsThreshold() bs.isRaftKV2 = bs.GetStoreConfig().IsRaftKV2() switch bs.sche.conf.GetRankFormulaVersion() { @@ -569,7 +569,7 @@ func (bs *balanceSolver) isSelectedDim(dim int) bool { } func (bs *balanceSolver) getPriorities() []string { - querySupport := bs.sche.conf.checkQuerySupport(bs.ScheduleCluster) + querySupport := bs.sche.conf.checkQuerySupport(bs.SchedulerCluster) // For read, transfer-leader and move-peer have the same priority config // For write, they are different switch bs.resourceTy { @@ -584,19 +584,19 @@ func (bs *balanceSolver) getPriorities() []string { return []string{} } -func newBalanceSolver(sche *hotScheduler, cluster sche.ScheduleCluster, rwTy statistics.RWType, opTy opType) *balanceSolver { +func newBalanceSolver(sche *hotScheduler, cluster sche.SchedulerCluster, rwTy statistics.RWType, opTy opType) *balanceSolver { bs := &balanceSolver{ - ScheduleCluster: cluster, - sche: sche, - rwTy: rwTy, - opTy: opTy, + SchedulerCluster: cluster, + sche: sche, + rwTy: rwTy, + opTy: opTy, } bs.init() return bs } func (bs *balanceSolver) isValid() bool { - if bs.ScheduleCluster == nil || bs.sche == nil || bs.stLoadDetail == nil { + if bs.SchedulerCluster == nil || bs.sche == nil || bs.stLoadDetail == nil { return false } return true @@ -606,7 +606,7 @@ func (bs *balanceSolver) filterUniformStoreV1() (string, bool) { if !bs.enableExpectation() { return "", false } - // Because region is available for src and dst, so stddev is the same for both, only need to calcurate one. + // Because region is available for src and dst, so stddev is the same for both, only need to calculate one. isUniformFirstPriority, isUniformSecondPriority := bs.isUniformFirstPriority(bs.cur.srcStore), bs.isUniformSecondPriority(bs.cur.srcStore) if isUniformFirstPriority && isUniformSecondPriority { // If both dims are enough uniform, any schedule is unnecessary. 
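
The hot_region.go hunks above switch balanceSolver from embedding sche.ScheduleCluster to the narrower sche.SchedulerCluster and read thresholds through GetSchedulerConfig() instead of GetOpts(). A minimal, self-contained Go sketch of that embedding pattern follows; schedulerConfig, schedulerCluster, hotSolver and the fake types are simplified stand-ins invented for illustration, not the pd definitions.

package main

import "fmt"

// schedulerConfig is the narrow, read-only view a scheduler needs.
type schedulerConfig interface {
	GetHotRegionCacheHitsThreshold() int
}

// schedulerCluster exposes only what schedulers may call; it hands out the
// config accessor instead of a catch-all GetOpts().
type schedulerCluster interface {
	GetSchedulerConfig() schedulerConfig
}

// hotSolver embeds the narrowed interface, mirroring how balanceSolver embeds
// the scheduler-facing cluster and reads its thresholds from it.
type hotSolver struct {
	schedulerCluster
	minHotDegree int
}

func newHotSolver(cluster schedulerCluster) *hotSolver {
	s := &hotSolver{schedulerCluster: cluster}
	s.minHotDegree = s.GetSchedulerConfig().GetHotRegionCacheHitsThreshold()
	return s
}

// --- toy implementations so the sketch runs ---

type fixedConfig struct{ hits int }

func (c fixedConfig) GetHotRegionCacheHitsThreshold() int { return c.hits }

type fakeCluster struct{ conf fixedConfig }

func (f fakeCluster) GetSchedulerConfig() schedulerConfig { return f.conf }

func main() {
	solver := newHotSolver(fakeCluster{conf: fixedConfig{hits: 3}})
	fmt.Println("min hot degree:", solver.minHotDegree)
}
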
@@ -940,7 +940,7 @@ func (bs *balanceSolver) isRegionAvailable(region *core.RegionInfo) bool { return false } - if !filter.IsRegionReplicated(bs.ScheduleCluster, region) { + if !filter.IsRegionReplicated(bs.SchedulerCluster, region) { log.Debug("region has abnormal replica count", zap.String("scheduler", bs.sche.GetName()), zap.Uint64("region-id", region.GetID())) hotSchedulerAbnormalReplicaCounter.Inc() return false @@ -994,7 +994,7 @@ func (bs *balanceSolver) filterDstStores() map[uint64]*statistics.StoreLoadDetai &filter.StoreStateFilter{ActionScope: bs.sche.GetName(), MoveRegion: true, OperatorLevel: constant.High}, filter.NewExcludedFilter(bs.sche.GetName(), bs.cur.region.GetStoreIDs(), bs.cur.region.GetStoreIDs()), filter.NewSpecialUseFilter(bs.sche.GetName(), filter.SpecialUseHotRegion), - filter.NewPlacementSafeguard(bs.sche.GetName(), bs.GetOpts(), bs.GetBasicCluster(), bs.GetRuleManager(), bs.cur.region, srcStore, nil), + filter.NewPlacementSafeguard(bs.sche.GetName(), bs.GetSchedulerConfig(), bs.GetBasicCluster(), bs.GetRuleManager(), bs.cur.region, srcStore, nil), } for _, detail := range bs.stLoadDetail { candidates = append(candidates, detail) @@ -1011,7 +1011,7 @@ func (bs *balanceSolver) filterDstStores() map[uint64]*statistics.StoreLoadDetai if bs.rwTy == statistics.Read { peers := bs.cur.region.GetPeers() moveLeaderFilters := []filter.Filter{&filter.StoreStateFilter{ActionScope: bs.sche.GetName(), MoveRegion: true, OperatorLevel: constant.High}} - if leaderFilter := filter.NewPlacementLeaderSafeguard(bs.sche.GetName(), bs.GetOpts(), bs.GetBasicCluster(), bs.GetRuleManager(), bs.cur.region, srcStore, true /*allowMoveLeader*/); leaderFilter != nil { + if leaderFilter := filter.NewPlacementLeaderSafeguard(bs.sche.GetName(), bs.GetSchedulerConfig(), bs.GetBasicCluster(), bs.GetRuleManager(), bs.cur.region, srcStore, true /*allowMoveLeader*/); leaderFilter != nil { filters = append(filters, leaderFilter) } for storeID, detail := range bs.stLoadDetail { @@ -1026,12 +1026,12 @@ func (bs *balanceSolver) filterDstStores() map[uint64]*statistics.StoreLoadDetai continue } // move leader - if filter.Target(bs.GetOpts(), detail.StoreInfo, moveLeaderFilters) { + if filter.Target(bs.GetSchedulerConfig(), detail.StoreInfo, moveLeaderFilters) { candidates = append(candidates, detail) } } } else { - if leaderFilter := filter.NewPlacementLeaderSafeguard(bs.sche.GetName(), bs.GetOpts(), bs.GetBasicCluster(), bs.GetRuleManager(), bs.cur.region, srcStore, false /*allowMoveLeader*/); leaderFilter != nil { + if leaderFilter := filter.NewPlacementLeaderSafeguard(bs.sche.GetName(), bs.GetSchedulerConfig(), bs.GetBasicCluster(), bs.GetRuleManager(), bs.cur.region, srcStore, false /*allowMoveLeader*/); leaderFilter != nil { filters = append(filters, leaderFilter) } for _, peer := range bs.cur.region.GetFollowers() { @@ -1063,7 +1063,7 @@ func (bs *balanceSolver) pickDstStores(filters []filter.Filter, candidates []*st } dstToleranceRatio += tiflashToleranceRatioCorrection } - if filter.Target(bs.GetOpts(), store, filters) { + if filter.Target(bs.GetSchedulerConfig(), store, filters) { id := store.GetID() if !bs.checkDstByPriorityAndTolerance(detail.LoadPred.Max(), &detail.LoadPred.Expect, dstToleranceRatio) { hotSchedulerResultCounter.WithLabelValues("dst-store-failed-"+bs.resourceTy.String(), strconv.FormatUint(id, 10)).Inc() @@ -1458,7 +1458,7 @@ func (bs *balanceSolver) buildOperators() (ops []*operator.Operator) { if region == nil { continue } - if region.GetApproximateSize() > 
bs.GetOpts().GetMaxMovableHotPeerSize() { + if region.GetApproximateSize() > bs.GetSchedulerConfig().GetMaxMovableHotPeerSize() { hotSchedulerNeedSplitBeforeScheduleCounter.Inc() splitRegions = append(splitRegions, region) } @@ -1514,7 +1514,7 @@ func (bs *balanceSolver) createSplitOperator(regions []*core.RegionInfo, isTooHo for i, region := range regions { ids[i] = region.GetID() } - hotBuckets := bs.ScheduleCluster.BucketsStats(bs.minHotDegree, ids...) + hotBuckets := bs.SchedulerCluster.BucketsStats(bs.minHotDegree, ids...) operators := make([]*operator.Operator, 0) createFunc := func(region *core.RegionInfo) { diff --git a/pkg/schedule/schedulers/hot_region_config.go b/pkg/schedule/schedulers/hot_region_config.go index ffbe805e0bfb..356b25d13be5 100644 --- a/pkg/schedule/schedulers/hot_region_config.go +++ b/pkg/schedule/schedulers/hot_region_config.go @@ -446,14 +446,15 @@ func (conf *hotRegionSchedulerConfig) persistLocked() error { return conf.storage.SaveScheduleConfig(HotRegionName, data) } -func (conf *hotRegionSchedulerConfig) checkQuerySupport(cluster sche.ScheduleCluster) bool { - querySupport := versioninfo.IsFeatureSupported(cluster.GetOpts().GetClusterVersion(), versioninfo.HotScheduleWithQuery) +func (conf *hotRegionSchedulerConfig) checkQuerySupport(cluster sche.SchedulerCluster) bool { + version := cluster.GetSchedulerConfig().GetClusterVersion() + querySupport := versioninfo.IsFeatureSupported(version, versioninfo.HotScheduleWithQuery) conf.Lock() defer conf.Unlock() if querySupport != conf.lastQuerySupported { log.Info("query supported changed", zap.Bool("last-query-support", conf.lastQuerySupported), - zap.String("cluster-version", cluster.GetOpts().GetClusterVersion().String()), + zap.String("cluster-version", version.String()), zap.Reflect("config", conf), zap.Reflect("valid-config", conf.getValidConf())) conf.lastQuerySupported = querySupport diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go index e936f06761f2..62a1100d16be 100644 --- a/pkg/schedule/schedulers/label.go +++ b/pkg/schedule/schedulers/label.go @@ -75,20 +75,20 @@ func (s *labelScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } -func (s *labelScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { - allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() +func (s *labelScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { + allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() } return allowed } -func (s *labelScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *labelScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { labelCounter.Inc() stores := cluster.GetStores() rejectLeaderStores := make(map[uint64]struct{}) for _, s := range stores { - if cluster.GetOpts().CheckLabelProperty(config.RejectLeader, s.GetLabels()) { + if cluster.GetSchedulerConfig().CheckLabelProperty(config.RejectLeader, s.GetLabels()) { rejectLeaderStores[s.GetID()] = struct{}{} } } @@ -110,7 +110,7 @@ func (s *labelScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([] f := filter.NewExcludedFilter(s.GetName(), nil, excludeStores) target := filter.NewCandidates(cluster.GetFollowerStores(region)). 
- FilterTarget(cluster.GetOpts(), nil, nil, &filter.StoreStateFilter{ActionScope: LabelName, TransferLeader: true, OperatorLevel: constant.Medium}, f). + FilterTarget(cluster.GetSchedulerConfig(), nil, nil, &filter.StoreStateFilter{ActionScope: LabelName, TransferLeader: true, OperatorLevel: constant.Medium}, f). RandomPick() if target == nil { log.Debug("label scheduler no target found for region", zap.Uint64("region-id", region.GetID())) diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go index 71e33775c3e0..a621b5951986 100644 --- a/pkg/schedule/schedulers/random_merge.go +++ b/pkg/schedule/schedulers/random_merge.go @@ -77,19 +77,19 @@ func (s *randomMergeScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } -func (s *randomMergeScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { - allowed := s.OpController.OperatorCount(operator.OpMerge) < cluster.GetOpts().GetMergeScheduleLimit() +func (s *randomMergeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { + allowed := s.OpController.OperatorCount(operator.OpMerge) < cluster.GetSchedulerConfig().GetMergeScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpMerge.String()).Inc() } return allowed } -func (s *randomMergeScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { randomMergeCounter.Inc() store := filter.NewCandidates(cluster.GetStores()). - FilterSource(cluster.GetOpts(), nil, nil, &filter.StoreStateFilter{ActionScope: s.conf.Name, MoveRegion: true, OperatorLevel: constant.Low}). + FilterSource(cluster.GetSchedulerConfig(), nil, nil, &filter.StoreStateFilter{ActionScope: s.conf.Name, MoveRegion: true, OperatorLevel: constant.Low}). RandomPick() if store == nil { randomMergeNoSourceStoreCounter.Inc() @@ -104,7 +104,7 @@ func (s *randomMergeScheduler) Schedule(cluster sche.ScheduleCluster, dryRun boo } other, target := cluster.GetAdjacentRegions(region) - if !cluster.GetOpts().IsOneWayMergeEnabled() && ((rand.Int()%2 == 0 && other != nil) || target == nil) { + if !cluster.GetSchedulerConfig().IsOneWayMergeEnabled() && ((rand.Int()%2 == 0 && other != nil) || target == nil) { target = other } if target == nil { @@ -128,7 +128,7 @@ func (s *randomMergeScheduler) Schedule(cluster sche.ScheduleCluster, dryRun boo return ops, nil } -func (s *randomMergeScheduler) allowMerge(cluster sche.ScheduleCluster, region, target *core.RegionInfo) bool { +func (s *randomMergeScheduler) allowMerge(cluster sche.SchedulerCluster, region, target *core.RegionInfo) bool { if !filter.IsRegionHealthy(region) || !filter.IsRegionHealthy(target) { return false } diff --git a/pkg/schedule/schedulers/range_cluster.go b/pkg/schedule/schedulers/range_cluster.go index 80bff6a64c9a..e83e74145f6c 100644 --- a/pkg/schedule/schedulers/range_cluster.go +++ b/pkg/schedule/schedulers/range_cluster.go @@ -22,22 +22,22 @@ import ( // rangeCluster isolates the cluster by range. type rangeCluster struct { - sche.ScheduleCluster + sche.SchedulerCluster subCluster *core.BasicCluster // Collect all regions belong to the range. tolerantSizeRatio float64 } // genRangeCluster gets a range cluster by specifying start key and end key. // The cluster can only know the regions within [startKey, endKey). 
-func genRangeCluster(cluster sche.ScheduleCluster, startKey, endKey []byte) *rangeCluster { +func genRangeCluster(cluster sche.SchedulerCluster, startKey, endKey []byte) *rangeCluster { subCluster := core.NewBasicCluster() for _, r := range cluster.ScanRegions(startKey, endKey, -1) { origin, overlaps, rangeChanged := subCluster.SetRegion(r) subCluster.UpdateSubTree(r, origin, overlaps, rangeChanged) } return &rangeCluster{ - ScheduleCluster: cluster, - subCluster: subCluster, + SchedulerCluster: cluster, + subCluster: subCluster, } } @@ -70,7 +70,7 @@ func (r *rangeCluster) updateStoreInfo(s *core.StoreInfo) *core.StoreInfo { // GetStore searches for a store by ID. func (r *rangeCluster) GetStore(id uint64) *core.StoreInfo { - s := r.ScheduleCluster.GetStore(id) + s := r.SchedulerCluster.GetStore(id) if s == nil { return nil } @@ -79,7 +79,7 @@ func (r *rangeCluster) GetStore(id uint64) *core.StoreInfo { // GetStores returns all Stores in the cluster. func (r *rangeCluster) GetStores() []*core.StoreInfo { - stores := r.ScheduleCluster.GetStores() + stores := r.SchedulerCluster.GetStores() newStores := make([]*core.StoreInfo, 0, len(stores)) for _, s := range stores { newStores = append(newStores, r.updateStoreInfo(s)) @@ -97,7 +97,7 @@ func (r *rangeCluster) GetTolerantSizeRatio() float64 { if r.tolerantSizeRatio != 0 { return r.tolerantSizeRatio } - return r.ScheduleCluster.GetOpts().GetTolerantSizeRatio() + return r.SchedulerCluster.GetSchedulerConfig().GetTolerantSizeRatio() } // RandFollowerRegions returns a random region that has a follower on the store. @@ -117,7 +117,7 @@ func (r *rangeCluster) GetAverageRegionSize() int64 { // GetRegionStores returns all stores that contains the region's peer. func (r *rangeCluster) GetRegionStores(region *core.RegionInfo) []*core.StoreInfo { - stores := r.ScheduleCluster.GetRegionStores(region) + stores := r.SchedulerCluster.GetRegionStores(region) newStores := make([]*core.StoreInfo, 0, len(stores)) for _, s := range stores { newStores = append(newStores, r.updateStoreInfo(s)) @@ -127,7 +127,7 @@ func (r *rangeCluster) GetRegionStores(region *core.RegionInfo) []*core.StoreInf // GetFollowerStores returns all stores that contains the region's follower peer. func (r *rangeCluster) GetFollowerStores(region *core.RegionInfo) []*core.StoreInfo { - stores := r.ScheduleCluster.GetFollowerStores(region) + stores := r.SchedulerCluster.GetFollowerStores(region) newStores := make([]*core.StoreInfo, 0, len(stores)) for _, s := range stores { newStores = append(newStores, r.updateStoreInfo(s)) @@ -137,7 +137,7 @@ func (r *rangeCluster) GetFollowerStores(region *core.RegionInfo) []*core.StoreI // GetLeaderStore returns all stores that contains the region's leader peer. 
func (r *rangeCluster) GetLeaderStore(region *core.RegionInfo) *core.StoreInfo { - s := r.ScheduleCluster.GetLeaderStore(region) + s := r.SchedulerCluster.GetLeaderStore(region) if s != nil { return r.updateStoreInfo(s) } diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go index e89b0abebb87..19b606f8d284 100644 --- a/pkg/schedule/schedulers/scatter_range.go +++ b/pkg/schedule/schedulers/scatter_range.go @@ -168,27 +168,27 @@ func (l *scatterRangeScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(l.config) } -func (l *scatterRangeScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { +func (l *scatterRangeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { return l.allowBalanceLeader(cluster) || l.allowBalanceRegion(cluster) } -func (l *scatterRangeScheduler) allowBalanceLeader(cluster sche.ScheduleCluster) bool { - allowed := l.OpController.OperatorCount(operator.OpRange) < cluster.GetOpts().GetLeaderScheduleLimit() +func (l *scatterRangeScheduler) allowBalanceLeader(cluster sche.SchedulerCluster) bool { + allowed := l.OpController.OperatorCount(operator.OpRange) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpLeader.String()).Inc() } return allowed } -func (l *scatterRangeScheduler) allowBalanceRegion(cluster sche.ScheduleCluster) bool { - allowed := l.OpController.OperatorCount(operator.OpRange) < cluster.GetOpts().GetRegionScheduleLimit() +func (l *scatterRangeScheduler) allowBalanceRegion(cluster sche.SchedulerCluster) bool { + allowed := l.OpController.OperatorCount(operator.OpRange) < cluster.GetSchedulerConfig().GetRegionScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpRegion.String()).Inc() } return allowed } -func (l *scatterRangeScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (l *scatterRangeScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { scatterRangeCounter.Inc() // isolate a new cluster according to the key range c := genRangeCluster(cluster, l.config.GetStartKey(), l.config.GetEndKey()) diff --git a/pkg/schedule/schedulers/scheduler.go b/pkg/schedule/schedulers/scheduler.go index b03044e9471a..b4c425047cd5 100644 --- a/pkg/schedule/schedulers/scheduler.go +++ b/pkg/schedule/schedulers/scheduler.go @@ -40,10 +40,10 @@ type Scheduler interface { EncodeConfig() ([]byte, error) GetMinInterval() time.Duration GetNextInterval(interval time.Duration) time.Duration - Prepare(cluster sche.ScheduleCluster) error - Cleanup(cluster sche.ScheduleCluster) - Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) - IsScheduleAllowed(cluster sche.ScheduleCluster) bool + Prepare(cluster sche.SchedulerCluster) error + Cleanup(cluster sche.SchedulerCluster) + Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) + IsScheduleAllowed(cluster sche.SchedulerCluster) bool } // EncodeConfig encode the custom config for each scheduler. 
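
The scheduler.go hunk above narrows every Scheduler hook to take sche.SchedulerCluster. Below is a toy sketch of how an implementation gates scheduling against a limit read from the scheduler-facing config, which is the pattern the IsScheduleAllowed hunks in this patch repeat; all names here (scheduler, schedulerCluster, noopScheduler, staticConfig) are simplified assumptions, not the real pd types.

package main

import "fmt"

type schedulerConfig interface {
	GetLeaderScheduleLimit() uint64
}

type schedulerCluster interface {
	GetSchedulerConfig() schedulerConfig
}

// scheduler mirrors the reshaped interface: every hook now receives the
// scheduler-facing cluster rather than the whole cluster informer.
type scheduler interface {
	Prepare(cluster schedulerCluster) error
	Cleanup(cluster schedulerCluster)
	IsScheduleAllowed(cluster schedulerCluster) bool
}

type noopScheduler struct{ running uint64 }

func (s *noopScheduler) Prepare(schedulerCluster) error { return nil }
func (s *noopScheduler) Cleanup(schedulerCluster)       {}

// IsScheduleAllowed shows the common gating pattern from the diff: compare the
// in-flight operator count against a limit read from the scheduler config.
func (s *noopScheduler) IsScheduleAllowed(cluster schedulerCluster) bool {
	return s.running < cluster.GetSchedulerConfig().GetLeaderScheduleLimit()
}

// --- toy implementations so the sketch runs ---

type staticConfig struct{ limit uint64 }

func (c staticConfig) GetLeaderScheduleLimit() uint64 { return c.limit }

type staticCluster struct{ conf staticConfig }

func (c staticCluster) GetSchedulerConfig() schedulerConfig { return c.conf }

func main() {
	var s scheduler = &noopScheduler{running: 2}
	fmt.Println("allowed:", s.IsScheduleAllowed(staticCluster{conf: staticConfig{limit: 4}}))
}
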
diff --git a/pkg/schedule/schedulers/scheduler_controller.go b/pkg/schedule/schedulers/scheduler_controller.go index ec6b07196b54..1c6329fb0b1b 100644 --- a/pkg/schedule/schedulers/scheduler_controller.go +++ b/pkg/schedule/schedulers/scheduler_controller.go @@ -42,14 +42,14 @@ type Controller struct { sync.RWMutex wg sync.WaitGroup ctx context.Context - cluster sche.ScheduleCluster + cluster sche.SchedulerCluster storage endpoint.ConfigStorage schedulers map[string]*ScheduleController opController *operator.Controller } // NewController creates a scheduler controller. -func NewController(ctx context.Context, cluster sche.ScheduleCluster, storage endpoint.ConfigStorage, opController *operator.Controller) *Controller { +func NewController(ctx context.Context, cluster sche.SchedulerCluster, storage endpoint.ConfigStorage, opController *operator.Controller) *Controller { return &Controller{ ctx: ctx, cluster: cluster, @@ -109,7 +109,7 @@ func (c *Controller) CollectSchedulerMetrics() { } func (c *Controller) isSchedulingHalted() bool { - return c.cluster.GetOpts().IsSchedulingHalted() + return c.cluster.GetSchedulerConfig().IsSchedulingHalted() } // ResetSchedulerMetrics resets metrics of all schedulers. @@ -134,7 +134,7 @@ func (c *Controller) AddScheduler(scheduler Scheduler, args ...string) error { c.wg.Add(1) go c.runScheduler(s) c.schedulers[s.Scheduler.GetName()] = s - c.cluster.GetOpts().AddSchedulerCfg(s.Scheduler.GetType(), args) + c.cluster.GetSchedulerConfig().AddSchedulerCfg(s.Scheduler.GetType(), args) return nil } @@ -150,9 +150,9 @@ func (c *Controller) RemoveScheduler(name string) error { return errs.ErrSchedulerNotFound.FastGenByArgs() } - opt := c.cluster.GetOpts() - opt.RemoveSchedulerCfg(s.Scheduler.GetType()) - if err := opt.Persist(c.storage); err != nil { + conf := c.cluster.GetSchedulerConfig() + conf.RemoveSchedulerCfg(s.Scheduler.GetType()) + if err := conf.Persist(c.storage); err != nil { log.Error("the option can not persist scheduler config", errs.ZapError(err)) return err } @@ -239,7 +239,7 @@ func (c *Controller) IsSchedulerDisabled(name string) (bool, error) { if !ok { return false, errs.ErrSchedulerNotFound.FastGenByArgs() } - return c.cluster.GetOpts().IsSchedulerDisabled(s.Scheduler.GetType()), nil + return c.cluster.GetSchedulerConfig().IsSchedulerDisabled(s.Scheduler.GetType()), nil } // IsSchedulerExisted returns whether a scheduler is existed. @@ -332,7 +332,7 @@ func (c *Controller) CheckTransferWitnessLeader(region *core.RegionInfo) { // ScheduleController is used to manage a scheduler. type ScheduleController struct { Scheduler - cluster sche.ScheduleCluster + cluster sche.SchedulerCluster opController *operator.Controller nextInterval time.Duration ctx context.Context @@ -343,7 +343,7 @@ type ScheduleController struct { } // NewScheduleController creates a new ScheduleController. 
-func NewScheduleController(ctx context.Context, cluster sche.ScheduleCluster, opController *operator.Controller, s Scheduler) *ScheduleController { +func NewScheduleController(ctx context.Context, cluster sche.SchedulerCluster, opController *operator.Controller, s Scheduler) *ScheduleController { ctx, cancel := context.WithCancel(ctx) return &ScheduleController{ Scheduler: s, @@ -352,7 +352,7 @@ func NewScheduleController(ctx context.Context, cluster sche.ScheduleCluster, op nextInterval: s.GetMinInterval(), ctx: ctx, cancel: cancel, - diagnosticRecorder: NewDiagnosticRecorder(s.GetName(), cluster.GetOpts()), + diagnosticRecorder: NewDiagnosticRecorder(s.GetName(), cluster.GetSchedulerConfig()), } } @@ -450,7 +450,7 @@ func (s *ScheduleController) AllowSchedule(diagnosable bool) bool { } func (s *ScheduleController) isSchedulingHalted() bool { - return s.cluster.GetOpts().IsSchedulingHalted() + return s.cluster.GetSchedulerConfig().IsSchedulingHalted() } // IsPaused returns if a scheduler is paused. @@ -493,7 +493,7 @@ func (s *ScheduleController) IsDiagnosticAllowed() bool { // cacheCluster include cache info to improve the performance. type cacheCluster struct { - sche.ScheduleCluster + sche.SchedulerCluster stores []*core.StoreInfo } @@ -503,9 +503,9 @@ func (c *cacheCluster) GetStores() []*core.StoreInfo { } // newCacheCluster constructor for cache -func newCacheCluster(c sche.ScheduleCluster) *cacheCluster { +func newCacheCluster(c sche.SchedulerCluster) *cacheCluster { return &cacheCluster{ - ScheduleCluster: c, - stores: c.GetStores(), + SchedulerCluster: c, + stores: c.GetStores(), } } diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index d88fdb390255..1d483c45d5e2 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -34,7 +34,7 @@ import ( "github.com/tikv/pd/pkg/versioninfo" ) -func prepareSchedulersTest(needToRunStream ...bool) (context.CancelFunc, config.Config, *mockcluster.Cluster, *operator.Controller) { +func prepareSchedulersTest(needToRunStream ...bool) (context.CancelFunc, config.SchedulerConfig, *mockcluster.Cluster, *operator.Controller) { Register() ctx, cancel := context.WithCancel(context.Background()) opt := mockconfig.NewTestOptions() @@ -45,7 +45,7 @@ func prepareSchedulersTest(needToRunStream ...bool) (context.CancelFunc, config. 
} else { stream = hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, needToRunStream[0]) } - oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetOpts(), stream) + oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSchedulerConfig(), stream) return cancel, opt, tc, oc } diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go index aaa729f5a555..d5264b904281 100644 --- a/pkg/schedule/schedulers/shuffle_hot_region.go +++ b/pkg/schedule/schedulers/shuffle_hot_region.go @@ -77,10 +77,11 @@ func (s *shuffleHotRegionScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } -func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { +func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { hotRegionAllowed := s.OpController.OperatorCount(operator.OpHotRegion) < s.conf.Limit - regionAllowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetOpts().GetRegionScheduleLimit() - leaderAllowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() + conf := cluster.GetSchedulerConfig() + regionAllowed := s.OpController.OperatorCount(operator.OpRegion) < conf.GetRegionScheduleLimit() + leaderAllowed := s.OpController.OperatorCount(operator.OpLeader) < conf.GetLeaderScheduleLimit() if !hotRegionAllowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpHotRegion.String()).Inc() } @@ -93,7 +94,7 @@ func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster sche.ScheduleClust return hotRegionAllowed && regionAllowed && leaderAllowed } -func (s *shuffleHotRegionScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleHotRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { shuffleHotRegionCounter.Inc() rw := s.randomRWType() s.prepareForBalance(rw, cluster) @@ -101,7 +102,7 @@ func (s *shuffleHotRegionScheduler) Schedule(cluster sche.ScheduleCluster, dryRu return operators, nil } -func (s *shuffleHotRegionScheduler) randomSchedule(cluster sche.ScheduleCluster, loadDetail map[uint64]*statistics.StoreLoadDetail) []*operator.Operator { +func (s *shuffleHotRegionScheduler) randomSchedule(cluster sche.SchedulerCluster, loadDetail map[uint64]*statistics.StoreLoadDetail) []*operator.Operator { for _, detail := range loadDetail { if len(detail.HotPeers) < 1 { continue @@ -122,12 +123,12 @@ func (s *shuffleHotRegionScheduler) randomSchedule(cluster sche.ScheduleCluster, filters := []filter.Filter{ &filter.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true, OperatorLevel: constant.Low}, filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIDs(), srcRegion.GetStoreIDs()), - filter.NewPlacementSafeguard(s.GetName(), cluster.GetOpts(), cluster.GetBasicCluster(), cluster.GetRuleManager(), srcRegion, srcStore, nil), + filter.NewPlacementSafeguard(s.GetName(), cluster.GetSchedulerConfig(), cluster.GetBasicCluster(), cluster.GetRuleManager(), srcRegion, srcStore, nil), } stores := cluster.GetStores() destStoreIDs := make([]uint64, 0, len(stores)) for _, store := range stores { - if !filter.Target(cluster.GetOpts(), store, filters) { + if !filter.Target(cluster.GetSchedulerConfig(), store, filters) { continue } destStoreIDs = append(destStoreIDs, store.GetID()) diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go index 
f2ee8b8ff927..0e33fa802db2 100644 --- a/pkg/schedule/schedulers/shuffle_leader.go +++ b/pkg/schedule/schedulers/shuffle_leader.go @@ -78,21 +78,21 @@ func (s *shuffleLeaderScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } -func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { - allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() +func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { + allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() } return allowed } -func (s *shuffleLeaderScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { // We shuffle leaders between stores by: // 1. random select a valid store. // 2. transfer a leader to the store. shuffleLeaderCounter.Inc() targetStore := filter.NewCandidates(cluster.GetStores()). - FilterTarget(cluster.GetOpts(), nil, nil, s.filters...). + FilterTarget(cluster.GetSchedulerConfig(), nil, nil, s.filters...). RandomPick() if targetStore == nil { shuffleLeaderNoTargetStoreCounter.Inc() diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go index deb4ac5635a6..08570fe9f20a 100644 --- a/pkg/schedule/schedulers/shuffle_region.go +++ b/pkg/schedule/schedulers/shuffle_region.go @@ -80,15 +80,15 @@ func (s *shuffleRegionScheduler) EncodeConfig() ([]byte, error) { return s.conf.EncodeConfig() } -func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { - allowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetOpts().GetRegionScheduleLimit() +func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { + allowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetSchedulerConfig().GetRegionScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc() } return allowed } -func (s *shuffleRegionScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { shuffleRegionCounter.Inc() region, oldPeer := s.scheduleRemovePeer(cluster) if region == nil { @@ -112,9 +112,9 @@ func (s *shuffleRegionScheduler) Schedule(cluster sche.ScheduleCluster, dryRun b return []*operator.Operator{op}, nil } -func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster sche.ScheduleCluster) (*core.RegionInfo, *metapb.Peer) { +func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster sche.SchedulerCluster) (*core.RegionInfo, *metapb.Peer) { candidates := filter.NewCandidates(cluster.GetStores()). - FilterSource(cluster.GetOpts(), nil, nil, s.filters...). + FilterSource(cluster.GetSchedulerConfig(), nil, nil, s.filters...). 
Shuffle() pendingFilter := filter.NewRegionPendingFilter() @@ -144,16 +144,16 @@ func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster sche.ScheduleCluster return nil, nil } -func (s *shuffleRegionScheduler) scheduleAddPeer(cluster sche.ScheduleCluster, region *core.RegionInfo, oldPeer *metapb.Peer) *metapb.Peer { +func (s *shuffleRegionScheduler) scheduleAddPeer(cluster sche.SchedulerCluster, region *core.RegionInfo, oldPeer *metapb.Peer) *metapb.Peer { store := cluster.GetStore(oldPeer.GetStoreId()) if store == nil { return nil } - scoreGuard := filter.NewPlacementSafeguard(s.GetName(), cluster.GetOpts(), cluster.GetBasicCluster(), cluster.GetRuleManager(), region, store, nil) + scoreGuard := filter.NewPlacementSafeguard(s.GetName(), cluster.GetSchedulerConfig(), cluster.GetBasicCluster(), cluster.GetRuleManager(), region, store, nil) excludedFilter := filter.NewExcludedFilter(s.GetName(), nil, region.GetStoreIDs()) target := filter.NewCandidates(cluster.GetStores()). - FilterTarget(cluster.GetOpts(), nil, nil, append(s.filters, scoreGuard, excludedFilter)...). + FilterTarget(cluster.GetSchedulerConfig(), nil, nil, append(s.filters, scoreGuard, excludedFilter)...). RandomPick() if target == nil { return nil diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index 7452d2ceafae..415e62077523 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -165,7 +165,7 @@ func (s *splitBucketScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) } // IsScheduleAllowed return true if the sum of executing opSplit operator is less . -func (s *splitBucketScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { +func (s *splitBucketScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { if !cluster.GetStoreConfig().IsEnableRegionBucket() { splitBucketDisableCounter.Inc() return false @@ -180,20 +180,20 @@ func (s *splitBucketScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) b type splitBucketPlan struct { hotBuckets map[uint64][]*buckets.BucketStat - cluster sche.ScheduleCluster + cluster sche.SchedulerCluster conf *splitBucketSchedulerConfig hotRegionSplitSize int64 } // Schedule return operators if some bucket is too hot. 
-func (s *splitBucketScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *splitBucketScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { splitBucketScheduleCounter.Inc() conf := s.conf.Clone() plan := &splitBucketPlan{ conf: conf, cluster: cluster, hotBuckets: cluster.BucketsStats(conf.Degree), - hotRegionSplitSize: cluster.GetOpts().GetMaxMovableHotPeerSize(), + hotRegionSplitSize: cluster.GetSchedulerConfig().GetMaxMovableHotPeerSize(), } return s.splitBucket(plan), nil } diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go index c9b66ccef4ee..bddbc7cf0ecd 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader.go +++ b/pkg/schedule/schedulers/transfer_witness_leader.go @@ -67,16 +67,16 @@ func (s *trasferWitnessLeaderScheduler) GetType() string { return TransferWitnessLeaderType } -func (s *trasferWitnessLeaderScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { +func (s *trasferWitnessLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { return true } -func (s *trasferWitnessLeaderScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *trasferWitnessLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { transferWitnessLeaderCounter.Inc() return s.scheduleTransferWitnessLeaderBatch(s.GetName(), s.GetType(), cluster, transferWitnessLeaderBatchSize), nil } -func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name, typ string, cluster sche.ScheduleCluster, batchSize int) []*operator.Operator { +func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name, typ string, cluster sche.SchedulerCluster, batchSize int) []*operator.Operator { var ops []*operator.Operator for i := 0; i < batchSize; i++ { select { @@ -98,7 +98,7 @@ func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name, return ops } -func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeader(name, typ string, cluster sche.ScheduleCluster, region *core.RegionInfo) (*operator.Operator, error) { +func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeader(name, typ string, cluster sche.SchedulerCluster, region *core.RegionInfo) (*operator.Operator, error) { var filters []filter.Filter unhealthyPeerStores := make(map[uint64]struct{}) for _, peer := range region.GetDownPeers() { @@ -108,7 +108,7 @@ func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeader(name, typ unhealthyPeerStores[peer.GetStoreId()] = struct{}{} } filters = append(filters, filter.NewExcludedFilter(name, nil, unhealthyPeerStores), &filter.StoreStateFilter{ActionScope: name, TransferLeader: true, OperatorLevel: constant.Urgent}) - candidates := filter.NewCandidates(cluster.GetFollowerStores(region)).FilterTarget(cluster.GetOpts(), nil, nil, filters...) + candidates := filter.NewCandidates(cluster.GetFollowerStores(region)).FilterTarget(cluster.GetSchedulerConfig(), nil, nil, filters...) // Compatible with old TiKV transfer leader logic. 
target := candidates.RandomPick() targets := candidates.PickAll() diff --git a/pkg/schedule/schedulers/utils.go b/pkg/schedule/schedulers/utils.go index dabef0452792..998b03075309 100644 --- a/pkg/schedule/schedulers/utils.go +++ b/pkg/schedule/schedulers/utils.go @@ -44,7 +44,7 @@ const ( type solver struct { *plan.BalanceSchedulerPlan - sche.ScheduleCluster + sche.SchedulerCluster kind constant.ScheduleKind opInfluence operator.OpInfluence tolerantSizeRatio float64 @@ -55,10 +55,10 @@ type solver struct { targetScore float64 } -func newSolver(basePlan *plan.BalanceSchedulerPlan, kind constant.ScheduleKind, cluster sche.ScheduleCluster, opInfluence operator.OpInfluence) *solver { +func newSolver(basePlan *plan.BalanceSchedulerPlan, kind constant.ScheduleKind, cluster sche.SchedulerCluster, opInfluence operator.OpInfluence) *solver { return &solver{ BalanceSchedulerPlan: basePlan, - ScheduleCluster: cluster, + SchedulerCluster: cluster, kind: kind, opInfluence: opInfluence, tolerantSizeRatio: adjustTolerantRatio(cluster, kind), @@ -95,8 +95,7 @@ func (p *solver) sourceStoreScore(scheduleName string) float64 { influence = -influence } - opts := p.GetOpts() - if opts.IsDebugMetricsEnabled() { + if p.GetSchedulerConfig().IsDebugMetricsEnabled() { opInfluenceStatus.WithLabelValues(scheduleName, strconv.FormatUint(sourceID, 10), "source").Set(float64(influence)) tolerantResourceStatus.WithLabelValues(scheduleName).Set(float64(tolerantResource)) } @@ -107,7 +106,7 @@ func (p *solver) sourceStoreScore(scheduleName string) float64 { score = p.Source.LeaderScore(p.kind.Policy, sourceDelta) case constant.RegionKind: sourceDelta := influence*influenceAmp - tolerantResource - score = p.Source.RegionScore(opts.GetRegionScoreFormulaVersion(), opts.GetHighSpaceRatio(), opts.GetLowSpaceRatio(), sourceDelta) + score = p.Source.RegionScore(p.GetSchedulerConfig().GetRegionScoreFormulaVersion(), p.GetSchedulerConfig().GetHighSpaceRatio(), p.GetSchedulerConfig().GetLowSpaceRatio(), sourceDelta) case constant.WitnessKind: sourceDelta := influence - tolerantResource score = p.Source.WitnessScore(sourceDelta) @@ -127,8 +126,7 @@ func (p *solver) targetStoreScore(scheduleName string) float64 { influence = -influence } - opts := p.GetOpts() - if opts.IsDebugMetricsEnabled() { + if p.GetSchedulerConfig().IsDebugMetricsEnabled() { opInfluenceStatus.WithLabelValues(scheduleName, strconv.FormatUint(targetID, 10), "target").Set(float64(influence)) } var score float64 @@ -138,7 +136,7 @@ func (p *solver) targetStoreScore(scheduleName string) float64 { score = p.Target.LeaderScore(p.kind.Policy, targetDelta) case constant.RegionKind: targetDelta := influence*influenceAmp + tolerantResource - score = p.Target.RegionScore(opts.GetRegionScoreFormulaVersion(), opts.GetHighSpaceRatio(), opts.GetLowSpaceRatio(), targetDelta) + score = p.Target.RegionScore(p.GetSchedulerConfig().GetRegionScoreFormulaVersion(), p.GetSchedulerConfig().GetHighSpaceRatio(), p.GetSchedulerConfig().GetLowSpaceRatio(), targetDelta) case constant.WitnessKind: targetDelta := influence + tolerantResource score = p.Target.WitnessScore(targetDelta) @@ -182,14 +180,14 @@ func (p *solver) getTolerantResource() int64 { return p.tolerantSource } -func adjustTolerantRatio(cluster sche.ScheduleCluster, kind constant.ScheduleKind) float64 { +func adjustTolerantRatio(cluster sche.SchedulerCluster, kind constant.ScheduleKind) float64 { var tolerantSizeRatio float64 switch c := cluster.(type) { case *rangeCluster: // range cluster use a separate configuration 
tolerantSizeRatio = c.GetTolerantSizeRatio() default: - tolerantSizeRatio = cluster.GetOpts().GetTolerantSizeRatio() + tolerantSizeRatio = cluster.GetSchedulerConfig().GetTolerantSizeRatio() } if kind.Resource == constant.LeaderKind && kind.Policy == constant.ByCount { if tolerantSizeRatio == 0 { diff --git a/pkg/statistics/region_collection.go b/pkg/statistics/region_collection.go index 23791a145141..939595762828 100644 --- a/pkg/statistics/region_collection.go +++ b/pkg/statistics/region_collection.go @@ -74,7 +74,7 @@ type RegionInfo struct { // RegionStatistics is used to record the status of regions. type RegionStatistics struct { sync.RWMutex - conf sc.Config + conf sc.CheckerConfig stats map[RegionStatisticType]map[uint64]*RegionInfo offlineStats map[RegionStatisticType]map[uint64]*core.RegionInfo index map[uint64]RegionStatisticType @@ -84,7 +84,7 @@ type RegionStatistics struct { } // NewRegionStatistics creates a new RegionStatistics. -func NewRegionStatistics(conf sc.Config, ruleManager *placement.RuleManager, storeConfigManager *config.StoreConfigManager) *RegionStatistics { +func NewRegionStatistics(conf sc.CheckerConfig, ruleManager *placement.RuleManager, storeConfigManager *config.StoreConfigManager) *RegionStatistics { r := &RegionStatistics{ conf: conf, ruleManager: ruleManager, diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go index 6b7e90465421..91b9e518089e 100644 --- a/plugin/scheduler_example/evict_leader.go +++ b/plugin/scheduler_example/evict_leader.go @@ -186,7 +186,7 @@ func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) { return schedulers.EncodeConfig(s.conf) } -func (s *evictLeaderScheduler) Prepare(cluster sche.ScheduleCluster) error { +func (s *evictLeaderScheduler) Prepare(cluster sche.SchedulerCluster) error { s.conf.mu.RLock() defer s.conf.mu.RUnlock() var res error @@ -198,7 +198,7 @@ func (s *evictLeaderScheduler) Prepare(cluster sche.ScheduleCluster) error { return res } -func (s *evictLeaderScheduler) Cleanup(cluster sche.ScheduleCluster) { +func (s *evictLeaderScheduler) Cleanup(cluster sche.SchedulerCluster) { s.conf.mu.RLock() defer s.conf.mu.RUnlock() for id := range s.conf.StoreIDWitRanges { @@ -206,15 +206,15 @@ func (s *evictLeaderScheduler) Cleanup(cluster sche.ScheduleCluster) { } } -func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { - allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() +func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { + allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() } return allowed } -func (s *evictLeaderScheduler) Schedule(cluster sche.ScheduleCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { ops := make([]*operator.Operator, 0, len(s.conf.StoreIDWitRanges)) s.conf.mu.RLock() defer s.conf.mu.RUnlock() @@ -226,7 +226,7 @@ func (s *evictLeaderScheduler) Schedule(cluster sche.ScheduleCluster, dryRun boo continue } target := filter.NewCandidates(cluster.GetFollowerStores(region)). - FilterTarget(cluster.GetOpts(), nil, nil, &filter.StoreStateFilter{ActionScope: EvictLeaderName, TransferLeader: true, OperatorLevel: constant.Urgent}). 
+ FilterTarget(cluster.GetSchedulerConfig(), nil, nil, &filter.StoreStateFilter{ActionScope: EvictLeaderName, TransferLeader: true, OperatorLevel: constant.Urgent}). RandomPick() if target == nil { continue diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index f0c7d10cdf03..84dfd3cecfe3 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -196,6 +196,21 @@ func (c *RaftCluster) GetStoreConfig() sc.StoreConfig { return c.storeConfigManager.GetStoreConfig() } +// GetCheckerConfig returns the checker config. +func (c *RaftCluster) GetCheckerConfig() sc.CheckerConfig { + return c.GetOpts() +} + +// GetSchedulerConfig returns the scheduler config. +func (c *RaftCluster) GetSchedulerConfig() sc.SchedulerConfig { + return c.GetOpts() +} + +// GetSharedConfig returns the shared config. +func (c *RaftCluster) GetSharedConfig() sc.SharedConfig { + return c.GetOpts() +} + // LoadClusterStatus loads the cluster status. func (c *RaftCluster) LoadClusterStatus() (*Status, error) { bootstrapTime, err := c.loadBootstrapTime() diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index 7bb24d79cf11..c2ff966f2285 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -2408,7 +2408,7 @@ func TestCollectMetrics(t *testing.T) { stores := co.GetCluster().GetStores() regionStats := co.GetCluster().RegionWriteStats() status1 := statistics.CollectHotPeerInfos(stores, regionStats) - status2 := statistics.GetHotStatus(stores, co.GetCluster().GetStoresLoads(), regionStats, statistics.Write, co.GetCluster().GetOpts().IsTraceRegionFlow()) + status2 := statistics.GetHotStatus(stores, co.GetCluster().GetStoresLoads(), regionStats, statistics.Write, co.GetCluster().GetSchedulerConfig().IsTraceRegionFlow()) for _, s := range status2.AsLeader { s.Stats = nil } @@ -3390,7 +3390,7 @@ type mockLimitScheduler struct { kind operator.OpKind } -func (s *mockLimitScheduler) IsScheduleAllowed(cluster sche.ScheduleCluster) bool { +func (s *mockLimitScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { return s.counter.OperatorCount(s.kind) < s.limit }
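The pattern applied throughout this patch is that schedulers consume sche.SchedulerCluster and read options through GetSchedulerConfig(), checkers consume sche.CheckerCluster through GetCheckerConfig(), and RaftCluster simply hands its existing options object back from all three getters. A minimal, self-contained sketch of that shape follows; the names used here (SharedConfig, SchedulerConfig, CheckerConfig, options, cluster, isScheduleAllowed) are simplified stand-ins for illustration only, not the real pkg/schedule/core or sc definitions, and the method sets are far smaller than the actual interfaces.

// Minimal sketch of the decoupled-config shape; all names are hypothetical
// simplifications, not the actual pd interfaces.
package main

import "fmt"

// SharedConfig holds options read by both schedulers and checkers.
type SharedConfig interface {
	GetMaxReplicas() int
	IsPlacementRulesEnabled() bool
}

// SchedulerConfig layers the scheduler-only options on top of the shared ones.
type SchedulerConfig interface {
	SharedConfig
	GetLeaderScheduleLimit() uint64
	GetRegionScheduleLimit() uint64
}

// CheckerConfig layers the checker-only options on top of the shared ones.
type CheckerConfig interface {
	SharedConfig
	IsPlacementRulesCacheEnabled() bool
}

// SchedulerCluster is the scheduler-facing view of the cluster.
type SchedulerCluster interface {
	GetSchedulerConfig() SchedulerConfig
}

// CheckerCluster is the checker-facing view of the cluster.
type CheckerCluster interface {
	GetCheckerConfig() CheckerConfig
}

// options is one concrete config satisfying every interface, mirroring how
// RaftCluster returns the same options object from all three getters.
type options struct {
	maxReplicas      int
	leaderLimit      uint64
	regionLimit      uint64
	placementRules   bool
	ruleCacheEnabled bool
}

func (o *options) GetMaxReplicas() int                { return o.maxReplicas }
func (o *options) IsPlacementRulesEnabled() bool      { return o.placementRules }
func (o *options) GetLeaderScheduleLimit() uint64     { return o.leaderLimit }
func (o *options) GetRegionScheduleLimit() uint64     { return o.regionLimit }
func (o *options) IsPlacementRulesCacheEnabled() bool { return o.ruleCacheEnabled }

// cluster exposes the shared options through the role-specific getters.
type cluster struct{ opts *options }

func (c *cluster) GetSchedulerConfig() SchedulerConfig { return c.opts }
func (c *cluster) GetCheckerConfig() CheckerConfig     { return c.opts }

// isScheduleAllowed shows the scheduler-side call pattern after the refactor:
// limits are read from GetSchedulerConfig() rather than a catch-all GetOpts().
func isScheduleAllowed(c SchedulerCluster, runningLeaderOps uint64) bool {
	return runningLeaderOps < c.GetSchedulerConfig().GetLeaderScheduleLimit()
}

func main() {
	c := &cluster{opts: &options{maxReplicas: 3, leaderLimit: 4, regionLimit: 2048, placementRules: true}}
	fmt.Println(isScheduleAllowed(c, 3)) // true: 3 < 4
	fmt.Println(isScheduleAllowed(c, 4)) // false: limit reached
}

Because the concrete options type still satisfies every interface, callers keep a single configuration source, while each component's compile-time dependency narrows to the getters it actually uses, which is the decoupling the commit title refers to.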