diff --git a/go/flagutil/sets.go b/go/flagutil/sets.go
index f03123d9488..92a7e2c6800 100644
--- a/go/flagutil/sets.go
+++ b/go/flagutil/sets.go
@@ -39,14 +39,14 @@ var _ pflag.Value = (*StringSetFlag)(nil)
 // provides an implementation of pflag.Value, so it is usable in libraries like
 // cobra.
 type StringSetFlag struct {
-	set sets.String
+	set sets.Set[string]
 }
 
 // ToSet returns the underlying string set, or an empty set if the underlying
 // set is nil.
-func (set *StringSetFlag) ToSet() sets.String {
+func (set *StringSetFlag) ToSet() sets.Set[string] {
 	if set.set == nil {
-		set.set = sets.NewString()
+		set.set = sets.New[string]()
 	}
 
 	return set.set
@@ -55,7 +55,7 @@ func (set *StringSetFlag) ToSet() sets.String {
 // Set is part of the pflag.Value and flag.Value interfaces.
 func (set *StringSetFlag) Set(s string) error {
 	if set.set == nil {
-		set.set = sets.NewString(s)
+		set.set = sets.New[string](s)
 		return nil
 	}
 
@@ -69,7 +69,7 @@ func (set *StringSetFlag) String() string {
 		return ""
 	}
 
-	return strings.Join(set.set.List(), ", ")
+	return strings.Join(sets.List(set.set), ", ")
 }
 
 // Type is part of the pflag.Value interface.
diff --git a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go
index e941b78c2cd..ab844a8ffd1 100644
--- a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go
+++ b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go
@@ -48,7 +48,7 @@ var (
 	PRIMARY KEY (id)
 	) Engine=InnoDB;`
 	vschemaDDL      = "alter vschema create vindex test_vdx using hash"
-	vschemaDDLError = fmt.Sprintf("Error 1105: cannot perform Update on keyspaces/%s/VSchema as the topology server connection is read-only",
+	vschemaDDLError = fmt.Sprintf("Error 1105 (HY000): cannot perform Update on keyspaces/%s/VSchema as the topology server connection is read-only",
 		keyspaceUnshardedName)
 )
diff --git a/go/vt/topo/cell_info.go b/go/vt/topo/cell_info.go
index 468b15157c3..2af867a8ffc 100644
--- a/go/vt/topo/cell_info.go
+++ b/go/vt/topo/cell_info.go
@@ -194,7 +194,7 @@ func (ts *Server) ExpandCells(ctx context.Context, cells string) ([]string, erro
 	var (
 		err         error
 		inputCells  []string
-		outputCells = sets.NewString() // Use a set to dedupe if the input cells list includes an alias and a cell in that alias.
+		outputCells = sets.New[string]() // Use a set to dedupe if the input cells list includes an alias and a cell in that alias.
 	)
 
 	if cells == "" {
@@ -238,5 +238,5 @@ func (ts *Server) ExpandCells(ctx context.Context, cells string) ([]string, erro
 		}
 	}
 
-	return outputCells.List(), nil
+	return sets.List(outputCells), nil
 }
diff --git a/go/vt/topo/topoproto/tablet.go b/go/vt/topo/topoproto/tablet.go
index 985f7d2b6d8..395f8f0fa47 100644
--- a/go/vt/topo/topoproto/tablet.go
+++ b/go/vt/topo/topoproto/tablet.go
@@ -108,8 +108,8 @@ func ParseTabletAlias(aliasStr string) (*topodatapb.TabletAlias, error) {
 }
 
 // ParseTabletSet returns a set of tablets based on a provided comma separated list of tablets.
-func ParseTabletSet(tabletListStr string) sets.String { - set := sets.NewString() +func ParseTabletSet(tabletListStr string) sets.Set[string] { + set := sets.New[string]() if tabletListStr == "" { return set } diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go index 714f64a59cd..a7e81bb7024 100644 --- a/go/vt/vtadmin/api.go +++ b/go/vt/vtadmin/api.go @@ -1402,7 +1402,7 @@ func (api *API) GetWorkflows(ctx context.Context, req *vtadminpb.GetWorkflowsReq workflows, err := c.GetWorkflows(ctx, req.Keyspaces, cluster.GetWorkflowsOptions{ ActiveOnly: req.ActiveOnly, - IgnoreKeyspaces: sets.NewString(req.IgnoreKeyspaces...), + IgnoreKeyspaces: sets.New[string](req.IgnoreKeyspaces...), }) if err != nil { rec.RecordError(err) diff --git a/go/vt/vtadmin/cluster/cluster.go b/go/vt/vtadmin/cluster/cluster.go index 9cdbf7c7db3..068416ee359 100644 --- a/go/vt/vtadmin/cluster/cluster.go +++ b/go/vt/vtadmin/cluster/cluster.go @@ -623,7 +623,7 @@ func (c *Cluster) findTablets(ctx context.Context, filter func(*vtadminpb.Tablet // FindWorkflowsOptions is the set of options for FindWorkflows requests. type FindWorkflowsOptions struct { ActiveOnly bool - IgnoreKeyspaces sets.String + IgnoreKeyspaces sets.Set[string] Filter func(workflow *vtadminpb.Workflow) bool } @@ -658,7 +658,7 @@ func (c *Cluster) findWorkflows(ctx context.Context, keyspaces []string, opts Fi } if opts.IgnoreKeyspaces == nil { - opts.IgnoreKeyspaces = sets.NewString() + opts.IgnoreKeyspaces = sets.New[string]() } if len(keyspaces) == 0 { @@ -685,7 +685,7 @@ func (c *Cluster) findWorkflows(ctx context.Context, keyspaces []string, opts Fi span.Finish() } else if opts.IgnoreKeyspaces.Len() > 0 { log.Warningf("Cluster.findWorkflows: IgnoreKeyspaces was set, but Keyspaces was not empty; ignoring IgnoreKeyspaces in favor of explicitly checking everything in Keyspaces: (%s)", strings.Join(keyspaces, ", ")) - opts.IgnoreKeyspaces = sets.NewString() + opts.IgnoreKeyspaces = sets.New[string]() } // Annotate the parent span with some additional information about the call. 
@@ -693,7 +693,7 @@ func (c *Cluster) findWorkflows(ctx context.Context, keyspaces []string, opts Fi span.Annotate("num_keyspaces", len(keyspaces)) span.Annotate("keyspaces", strings.Join(keyspaces, ",")) span.Annotate("num_ignore_keyspaces", opts.IgnoreKeyspaces.Len()) - span.Annotate("ignore_keyspaces", strings.Join(opts.IgnoreKeyspaces.List(), ",")) + span.Annotate("ignore_keyspaces", strings.Join(sets.List(opts.IgnoreKeyspaces), ",")) } clusterpb := c.ToProto() @@ -799,7 +799,7 @@ func (c *Cluster) GetBackups(ctx context.Context, req *vtadminpb.GetBackupsReque ) for ks, shardSet := range shardsByKeyspace { - for _, shard := range shardSet.List() { + for _, shard := range sets.List(shardSet) { wg.Add(1) go func(keyspace, shard string) { @@ -856,8 +856,8 @@ func (c *Cluster) GetBackups(ctx context.Context, req *vtadminpb.GetBackupsReque return backups, nil } -func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspaceShards []string) (map[string]sets.String, error) { - shardsByKeyspace := map[string]sets.String{} +func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspaceShards []string) (map[string]sets.Set[string], error) { + shardsByKeyspace := map[string]sets.Set[string]{} if len(keyspaces) == 0 && len(keyspaceShards) == 0 { // Special case: if nothing was explicitly passed, get all shards in @@ -868,7 +868,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace } for _, ks := range kss { - shardsByKeyspace[ks.Keyspace.Name] = sets.NewString() + shardsByKeyspace[ks.Keyspace.Name] = sets.New[string]() for _, shard := range ks.Shards { shardsByKeyspace[ks.Keyspace.Name].Insert(shard.Name) } @@ -884,7 +884,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace } if _, ok := shardsByKeyspace[ks]; !ok { - shardsByKeyspace[ks] = sets.NewString(shard) + shardsByKeyspace[ks] = sets.New[string](shard) continue } @@ -897,7 +897,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace // empty set to indicate we should take all shards in the GetKeyspace // section below. 
if _, ok := shardsByKeyspace[ks]; !ok { - shardsByKeyspace[ks] = sets.NewString() + shardsByKeyspace[ks] = sets.New[string]() } } @@ -912,7 +912,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace for ksName, shardSet := range shardsByKeyspace { wg.Add(1) - go func(ksName string, shardSet sets.String) { + go func(ksName string, shardSet sets.Set[string]) { defer wg.Done() keyspace, err := c.GetKeyspace(ctx, ksName) @@ -934,7 +934,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace return } - fullShardSet := sets.NewString() + fullShardSet := sets.New[string]() for _, shard := range keyspace.Shards { fullShardSet.Insert(shard.Name) } @@ -949,7 +949,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace overlap := shardSet.Intersection(fullShardSet) if overlap.Len() != shardSet.Len() { - log.Warningf("getShardSets(): keyspace %s is missing specified shards in cluster %s: %v", ksName, c.ID, shardSet.Difference(overlap).List()) + log.Warningf("getShardSets(): keyspace %s is missing specified shards in cluster %s: %v", ksName, c.ID, sets.List(shardSet.Difference(overlap))) } m.Lock() @@ -1684,7 +1684,7 @@ func (c *Cluster) GetShardReplicationPositions(ctx context.Context, req *vtadmin ) for ks, shardSet := range shardsByKeyspace { - for _, shard := range shardSet.List() { + for _, shard := range sets.List(shardSet) { wg.Add(1) go func(keyspace, shard string) { @@ -1890,7 +1890,7 @@ func (c *Cluster) GetWorkflow(ctx context.Context, keyspace string, name string, // requests. type GetWorkflowsOptions struct { ActiveOnly bool - IgnoreKeyspaces sets.String + IgnoreKeyspaces sets.Set[string] } // GetWorkflows returns a list of Workflows in this cluster, across the given @@ -2046,7 +2046,7 @@ func (c *Cluster) reloadKeyspaceSchemas(ctx context.Context, req *vtadminpb.Relo return resp.Keyspaces, nil } - keyspaceNames := sets.NewString(req.Keyspaces...) + keyspaceNames := sets.New[string](req.Keyspaces...) for _, ks := range resp.Keyspaces { if keyspaceNames.Has(ks.Name) { @@ -2184,7 +2184,7 @@ func (c *Cluster) reloadShardSchemas(ctx context.Context, req *vtadminpb.ReloadS // reloadTabletSchemas reloads schemas in one or more tablets in the cluster. 
func (c *Cluster) reloadTabletSchemas(ctx context.Context, req *vtadminpb.ReloadSchemasRequest) ([]*vtadminpb.ReloadSchemasResponse_TabletResult, error) { - aliasSet := sets.NewString() + aliasSet := sets.New[string]() for _, alias := range req.Tablets { aliasSet.Insert(topoproto.TabletAliasString(alias)) } diff --git a/go/vt/vtadmin/cluster/cluster_internal_test.go b/go/vt/vtadmin/cluster/cluster_internal_test.go index 4080ca54b0b..66901b06682 100644 --- a/go/vt/vtadmin/cluster/cluster_internal_test.go +++ b/go/vt/vtadmin/cluster/cluster_internal_test.go @@ -453,41 +453,41 @@ func Test_getShardSets(t *testing.T) { name string keyspaces []string keyspaceShards []string - result map[string]sets.String + result map[string]sets.Set[string] shouldErr bool }{ { name: "all keyspaces and shards", keyspaces: nil, keyspaceShards: nil, - result: map[string]sets.String{ - "ks1": sets.NewString("-80", "80-"), - "ks2": sets.NewString("-"), + result: map[string]sets.Set[string]{ + "ks1": sets.New[string]("-80", "80-"), + "ks2": sets.New[string]("-"), }, }, { name: "keyspaceShards filter", keyspaces: nil, keyspaceShards: []string{"ks1/-80", "ks2/-"}, - result: map[string]sets.String{ - "ks1": sets.NewString("-80"), - "ks2": sets.NewString("-"), + result: map[string]sets.Set[string]{ + "ks1": sets.New[string]("-80"), + "ks2": sets.New[string]("-"), }, }, { name: "keyspace and shards filters", keyspaces: []string{"ks1"}, keyspaceShards: []string{"ks1/80-"}, - result: map[string]sets.String{ - "ks1": sets.NewString("80-"), + result: map[string]sets.Set[string]{ + "ks1": sets.New[string]("80-"), }, }, { name: "skipped non-existing shards and keyspaces", keyspaces: nil, keyspaceShards: []string{"ks1/-" /* does not exist */, "ks1/-80", "ks1/80-", "ks3/-" /* does not exist */}, - result: map[string]sets.String{ - "ks1": sets.NewString("-80", "80-"), + result: map[string]sets.Set[string]{ + "ks1": sets.New[string]("-80", "80-"), }, }, } diff --git a/go/vt/vtadmin/cluster/cluster_test.go b/go/vt/vtadmin/cluster/cluster_test.go index 2313de06d6b..e2317d8ab4b 100644 --- a/go/vt/vtadmin/cluster/cluster_test.go +++ b/go/vt/vtadmin/cluster/cluster_test.go @@ -977,7 +977,7 @@ func TestFindWorkflows(t *testing.T) { }, keyspaces: []string{"ks2"}, opts: cluster.FindWorkflowsOptions{ - IgnoreKeyspaces: sets.NewString("ks2"), + IgnoreKeyspaces: sets.New[string]("ks2"), }, expected: &vtadminpb.ClusterWorkflows{ Workflows: []*vtadminpb.Workflow{ @@ -1047,7 +1047,7 @@ func TestFindWorkflows(t *testing.T) { }, keyspaces: nil, opts: cluster.FindWorkflowsOptions{ - IgnoreKeyspaces: sets.NewString("ks2"), + IgnoreKeyspaces: sets.New[string]("ks2"), }, expected: &vtadminpb.ClusterWorkflows{ Workflows: []*vtadminpb.Workflow{ diff --git a/go/vt/vtadmin/http/shards.go b/go/vt/vtadmin/http/shards.go index 81cb1299913..79a5f9fdb7d 100644 --- a/go/vt/vtadmin/http/shards.go +++ b/go/vt/vtadmin/http/shards.go @@ -70,7 +70,7 @@ func DeleteShards(ctx context.Context, r Request, api *API) *JSONResponse { } shardList := r.URL.Query()["keyspace_shard"] - shardList = sets.NewString(shardList...).List() + shardList = sets.List(sets.New[string](shardList...)) shards := make([]*vtctldatapb.Shard, len(shardList)) for i, kss := range shardList { ks, shard, err := topoproto.ParseKeyspaceShard(kss) diff --git a/go/vt/vtadmin/rbac/config.go b/go/vt/vtadmin/rbac/config.go index 196b76bca04..f3c3cfd847b 100644 --- a/go/vt/vtadmin/rbac/config.go +++ b/go/vt/vtadmin/rbac/config.go @@ -89,22 +89,22 @@ func (c *Config) Reify() error { for i, rule := range 
c.Rules { resourceRules := byResource[rule.Resource] - actions := sets.NewString(rule.Actions...) + actions := sets.New[string](rule.Actions...) if actions.Has("*") && actions.Len() > 1 { // error to have wildcard and something else - rec.RecordError(fmt.Errorf("rule %d: actions list cannot include wildcard and other actions, have %v", i, actions.List())) + rec.RecordError(fmt.Errorf("rule %d: actions list cannot include wildcard and other actions, have %v", i, sets.List(actions))) } - subjects := sets.NewString(rule.Subjects...) + subjects := sets.New[string](rule.Subjects...) if subjects.Has("*") && subjects.Len() > 1 { // error to have wildcard and something else - rec.RecordError(fmt.Errorf("rule %d: subjects list cannot include wildcard and other subjects, have %v", i, subjects.List())) + rec.RecordError(fmt.Errorf("rule %d: subjects list cannot include wildcard and other subjects, have %v", i, sets.List(subjects))) } - clusters := sets.NewString(rule.Clusters...) + clusters := sets.New[string](rule.Clusters...) if clusters.Has("*") && clusters.Len() > 1 { // error to have wildcard and something else - rec.RecordError(fmt.Errorf("rule %d: clusters list cannot include wildcard and other clusters, have %v", i, clusters.List())) + rec.RecordError(fmt.Errorf("rule %d: clusters list cannot include wildcard and other clusters, have %v", i, sets.List(clusters))) } resourceRules = append(resourceRules, &Rule{ @@ -188,9 +188,9 @@ func DefaultConfig() *Config { cfg := map[string][]*Rule{ "*": { { - clusters: sets.NewString(clusters...), - actions: sets.NewString(actions...), - subjects: sets.NewString(subjects...), + clusters: sets.New[string](clusters...), + actions: sets.New[string](actions...), + subjects: sets.New[string](subjects...), }, }, } diff --git a/go/vt/vtadmin/rbac/rule.go b/go/vt/vtadmin/rbac/rule.go index 7504dfe4200..c10890f5747 100644 --- a/go/vt/vtadmin/rbac/rule.go +++ b/go/vt/vtadmin/rbac/rule.go @@ -24,9 +24,9 @@ import ( // Rule is a single rule governing access to a particular resource. type Rule struct { - clusters sets.String - actions sets.String - subjects sets.String + clusters sets.Set[string] + actions sets.Set[string] + subjects sets.Set[string] } // Allows returns true if the actor is allowed to take the specified action in diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index c635479171c..d8a7275055b 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -992,7 +992,7 @@ func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldat req.Shard, reparentutil.EmergencyReparentOptions{ NewPrimaryAlias: req.NewPrimary, - IgnoreReplicas: sets.NewString(ignoreReplicaAliases...), + IgnoreReplicas: sets.New[string](ignoreReplicaAliases...), WaitReplicasTimeout: waitReplicasTimeout, PreventCrossCellPromotion: req.PreventCrossCellPromotion, }, @@ -1587,10 +1587,10 @@ func (s *VtctldServer) GetSrvVSchemas(ctx context.Context, req *vtctldatapb.GetS // Omit any cell names in the request that don't map to existing cells if len(req.Cells) > 0 { - s1 := sets.NewString(allCells...) - s2 := sets.NewString(req.Cells...) + s1 := sets.New[string](allCells...) + s2 := sets.New[string](req.Cells...) 
- cells = s1.Intersection(s2).List() + cells = sets.List(s1.Intersection(s2)) } span.Annotate("cells", strings.Join(cells, ",")) @@ -3544,7 +3544,7 @@ func (s *VtctldServer) Validate(ctx context.Context, req *vtctldatapb.ValidateRe span, ctx := trace.NewSpan(ctx, "VtctldServer.validateAllTablets") defer span.Finish() - cellSet := sets.NewString() + cellSet := sets.New[string]() for _, keyspace := range keyspaces { getShardNamesCtx, getShardNamesCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) shards, err := s.ts.GetShardNames(getShardNamesCtx, keyspace) @@ -3575,7 +3575,7 @@ func (s *VtctldServer) Validate(ctx context.Context, req *vtctldatapb.ValidateRe } } - for _, cell := range cellSet.List() { + for _, cell := range sets.List(cellSet) { getTabletsByCellCtx, getTabletsByCellCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) aliases, err := s.ts.GetTabletAliasesByCell(getTabletsByCellCtx, cell) getTabletsByCellCancel() // don't defer in a loop diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go index 9e4ac550a8f..ef20598dce5 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go @@ -56,7 +56,7 @@ type EmergencyReparenter struct { // for callers to mutate and reuse options structs for multiple calls. type EmergencyReparentOptions struct { NewPrimaryAlias *topodatapb.TabletAlias - IgnoreReplicas sets.String + IgnoreReplicas sets.Set[string] WaitReplicasTimeout time.Duration PreventCrossCellPromotion bool diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go index 19d119a6ba2..31ffce01811 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go @@ -1964,7 +1964,7 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { }{ { name: "success", - emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404")}, + emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404")}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, @@ -2341,7 +2341,7 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { }, { name: "success in initialization", - emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404")}, + emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404")}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, @@ -3165,7 +3165,7 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { }{ { name: "success", - emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404")}, + emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404")}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, @@ -3561,7 +3561,7 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { }{ { name: "success", - emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404")}, + emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404")}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: 
map[string]error{ "zone1-0000000100": nil, @@ -4205,7 +4205,7 @@ func TestParentContextCancelled(t *testing.T) { durability, err := GetDurabilityPolicy("none") require.NoError(t, err) // Setup ERS options with a very high wait replicas timeout - emergencyReparentOps := EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404"), WaitReplicasTimeout: time.Minute, durability: durability} + emergencyReparentOps := EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404"), WaitReplicasTimeout: time.Minute, durability: durability} // Make the replica tablet return its results after 3 seconds tmc := &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go index 512b3a60221..dcda5d02cbf 100644 --- a/go/vt/vtctl/reparentutil/replication.go +++ b/go/vt/vtctl/reparentutil/replication.go @@ -215,7 +215,7 @@ func stopReplicationAndBuildStatusMaps( ev *events.Reparent, tabletMap map[string]*topo.TabletInfo, stopReplicationTimeout time.Duration, - ignoredTablets sets.String, + ignoredTablets sets.Set[string], tabletToWaitFor *topodatapb.TabletAlias, durability Durabler, logger logutil.Logger, diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go index 01f043ac827..417010a8268 100644 --- a/go/vt/vtctl/reparentutil/replication_test.go +++ b/go/vt/vtctl/reparentutil/replication_test.go @@ -285,7 +285,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { tmc *stopReplicationAndBuildStatusMapsTestTMClient tabletMap map[string]*topo.TabletInfo stopReplicasTimeout time.Duration - ignoredTablets sets.String + ignoredTablets sets.Set[string] tabletToWaitFor *topodatapb.TabletAlias expectedStatusMap map[string]*replicationdatapb.StopReplicationStatus expectedPrimaryStatusMap map[string]*replicationdatapb.PrimaryStatus @@ -334,7 +334,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }, }, - ignoredTablets: sets.NewString(), + ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000100": { Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, @@ -427,7 +427,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }, }, - ignoredTablets: sets.NewString(), + ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000100": { Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, @@ -520,7 +520,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }, }, - ignoredTablets: sets.NewString(), + ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000100": { Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, @@ -589,7 +589,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }, }, - ignoredTablets: sets.NewString("zone1-0000000100"), + ignoredTablets: sets.New[string]("zone1-0000000100"), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ 
"zone1-0000000101": { Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, @@ -657,7 +657,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }, }, - ignoredTablets: sets.NewString(), + ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, @@ -731,7 +731,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }, }, - ignoredTablets: sets.NewString(), + ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, @@ -795,7 +795,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }, }, - ignoredTablets: sets.NewString(), + ignoredTablets: sets.New[string](), expectedStatusMap: nil, expectedPrimaryStatusMap: nil, expectedTabletsReachable: nil, @@ -847,7 +847,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }, stopReplicasTimeout: time.Millisecond * 5, - ignoredTablets: sets.NewString(), + ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, @@ -903,7 +903,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }, }, - ignoredTablets: sets.NewString(), + ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, @@ -956,7 +956,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }, }, - ignoredTablets: sets.NewString(), + ignoredTablets: sets.New[string](), expectedStatusMap: nil, expectedPrimaryStatusMap: nil, expectedTabletsReachable: nil, @@ -1000,7 +1000,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }, }, - ignoredTablets: sets.NewString(), + ignoredTablets: sets.New[string](), expectedStatusMap: nil, expectedPrimaryStatusMap: nil, expectedTabletsReachable: nil, @@ -1070,7 +1070,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { Cell: "zone1", Uid: 102, }, - ignoredTablets: sets.NewString(), + ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000100": { Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go index 005c923ffe5..5fddd7fb8b3 100644 --- a/go/vt/vtctl/workflow/server.go +++ b/go/vt/vtctl/workflow/server.go @@ -310,9 +310,9 @@ func (s *Server) GetWorkflows(ctx context.Context, req 
*vtctldatapb.GetWorkflows m := sync.Mutex{} // guards access to the following maps during concurrent calls to scanWorkflow workflowsMap := make(map[string]*vtctldatapb.Workflow, len(results)) sourceKeyspaceByWorkflow := make(map[string]string, len(results)) - sourceShardsByWorkflow := make(map[string]sets.String, len(results)) + sourceShardsByWorkflow := make(map[string]sets.Set[string], len(results)) targetKeyspaceByWorkflow := make(map[string]string, len(results)) - targetShardsByWorkflow := make(map[string]sets.String, len(results)) + targetShardsByWorkflow := make(map[string]sets.Set[string], len(results)) maxVReplicationLagByWorkflow := make(map[string]float64, len(results)) // We guarantee the following invariants when this function is called for a @@ -491,8 +491,8 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows } workflowsMap[workflowName] = workflow - sourceShardsByWorkflow[workflowName] = sets.NewString() - targetShardsByWorkflow[workflowName] = sets.NewString() + sourceShardsByWorkflow[workflowName] = sets.New[string]() + targetShardsByWorkflow[workflowName] = sets.New[string]() } scanWorkflowWg.Add(1) @@ -677,12 +677,12 @@ ORDER BY workflow.Source = &vtctldatapb.Workflow_ReplicationLocation{ Keyspace: sourceKeyspace, - Shards: sourceShards.List(), + Shards: sets.List(sourceShards), } workflow.Target = &vtctldatapb.Workflow_ReplicationLocation{ Keyspace: targetKeyspace, - Shards: targetShards.List(), + Shards: sets.List(targetShards), } workflow.MaxVReplicationLag = int64(maxVReplicationLag) diff --git a/go/vt/vtctl/workflow/traffic_switcher.go b/go/vt/vtctl/workflow/traffic_switcher.go index 6464e796a00..0ed928391fe 100644 --- a/go/vt/vtctl/workflow/traffic_switcher.go +++ b/go/vt/vtctl/workflow/traffic_switcher.go @@ -308,7 +308,7 @@ func getVReplicationWorkflowSubType(row sqltypes.RowNamedValues) binlogdatapb.VR // this function should be unexported. Consequently, YOU SHOULD NOT DEPEND ON // THIS FUNCTION EXTERNALLY. func CompareShards(ctx context.Context, keyspace string, shards []*topo.ShardInfo, ts *topo.Server) error { - shardSet := sets.NewString() + shardSet := sets.New[string]() for _, si := range shards { shardSet.Insert(si.ShardName()) } @@ -318,19 +318,19 @@ func CompareShards(ctx context.Context, keyspace string, shards []*topo.ShardInf return err } - topoShardSet := sets.NewString(topoShards...) + topoShardSet := sets.New[string](topoShards...) 
if !shardSet.Equal(topoShardSet) { wfExtra := shardSet.Difference(topoShardSet) topoExtra := topoShardSet.Difference(shardSet) var rec concurrency.AllErrorRecorder if wfExtra.Len() > 0 { - wfExtraSorted := wfExtra.List() + wfExtraSorted := sets.List(wfExtra) rec.RecordError(fmt.Errorf("switch command shards not in topo: %v", wfExtraSorted)) } if topoExtra.Len() > 0 { - topoExtraSorted := topoExtra.List() + topoExtraSorted := sets.List(topoExtra) rec.RecordError(fmt.Errorf("topo shards not in switch command: %v", topoExtraSorted)) } diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go index f4e1f702794..74e7f3e084e 100644 --- a/go/vt/vttablet/tabletmanager/tm_init.go +++ b/go/vt/vttablet/tabletmanager/tm_init.go @@ -307,7 +307,7 @@ func getBuildTags(buildTags map[string]string, skipTagsCSV string) (map[string]s } } - skippedTags := sets.NewString() + skippedTags := sets.New[string]() for tag := range buildTags { for _, skipFn := range skippers { if skipFn(tag) { diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index bcf942fd6d4..dcc9af3e489 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -94,7 +94,7 @@ func (wr *Wrangler) PlannedReparentShard(ctx context.Context, keyspace, shard st // EmergencyReparentShard will make the provided tablet the primary for // the shard, when the old primary is completely unreachable. -func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard string, primaryElectTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration, ignoredTablets sets.String, preventCrossCellPromotion bool) (err error) { +func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard string, primaryElectTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration, ignoredTablets sets.Set[string], preventCrossCellPromotion bool) (err error) { _, err = reparentutil.NewEmergencyReparenter(wr.ts, wr.tmc, wr.logger).ReparentShard( ctx, keyspace, diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index 8d80fa7d68f..955782d5ff3 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -281,7 +281,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { defer moreAdvancedReplica.StopActionLoop(t) // run EmergencyReparentShard - err := wr.EmergencyReparentShard(ctx, newPrimary.Tablet.Keyspace, newPrimary.Tablet.Shard, newPrimary.Tablet.Alias, 10*time.Second, sets.NewString(), false) + err := wr.EmergencyReparentShard(ctx, newPrimary.Tablet.Keyspace, newPrimary.Tablet.Shard, newPrimary.Tablet.Alias, 10*time.Second, sets.New[string](), false) cancel() assert.NoError(t, err) diff --git a/go/vt/wrangler/vexec.go b/go/vt/wrangler/vexec.go index d29a438b8fe..0f30d35008c 100644 --- a/go/vt/wrangler/vexec.go +++ b/go/vt/wrangler/vexec.go @@ -573,8 +573,8 @@ func (wr *Wrangler) getStreams(ctx context.Context, workflow, keyspace string) ( ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() var sourceKeyspace string - sourceShards := sets.NewString() - targetShards := sets.NewString() + sourceShards := sets.New[string]() + targetShards := sets.New[string]() for primary, result := range results { var rsrStatus []*ReplicationStatus nqr := sqltypes.Proto3ToResult(result).Named() @@ -641,11 +641,11 @@ func (wr *Wrangler) getStreams(ctx context.Context, workflow, 
keyspace string) ( } rsr.SourceLocation = ReplicationLocation{ Keyspace: sourceKeyspace, - Shards: sourceShards.List(), + Shards: sets.List(sourceShards), } rsr.TargetLocation = ReplicationLocation{ Keyspace: keyspace, - Shards: targetShards.List(), + Shards: sets.List(targetShards), } return &rsr, nil @@ -668,7 +668,7 @@ func (wr *Wrangler) ListAllWorkflows(ctx context.Context, keyspace string, activ if err != nil { return nil, err } - workflowsSet := sets.NewString() + workflowsSet := sets.New[string]() for _, result := range results { if len(result.Rows) == 0 { continue @@ -681,7 +681,7 @@ func (wr *Wrangler) ListAllWorkflows(ctx context.Context, keyspace string, activ } } } - workflows := workflowsSet.List() + workflows := sets.List(workflowsSet) return workflows, nil }
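
Note on the pattern above: every hunk in this diff applies the same mechanical mapping from the deprecated `sets.String` type in `k8s.io/apimachinery/pkg/util/sets` to its generics-based replacement `sets.Set[string]` (available since apimachinery v0.26). Constructors move from `sets.NewString(...)` to `sets.New[string](...)`, and the sorted-slice accessor moves from the `List()` method to the package-level `sets.List(...)` helper, while the element and set-algebra methods (`Insert`, `Has`, `Len`, `Intersection`, `Difference`, `Equal`) keep their signatures. A minimal, self-contained sketch of the mapping (the shard names here are illustrative, not taken from the patch):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Old: s := sets.NewString("80-", "-80")
	s := sets.New[string]("80-", "-80")

	// Element operations keep the same method set as before.
	s.Insert("-")
	fmt.Println(s.Has("-80"), s.Len()) // true 3

	// Old: s.List(), a method. New: a package-level helper over any ordered
	// element type; like its predecessor, it returns a sorted slice.
	fmt.Println(sets.List(s)) // [- -80 80-]

	// Set algebra, as used by CompareShards and getShardSets above, is unchanged.
	topoShards := sets.New[string]("-80", "80-")
	fmt.Println(sets.List(s.Difference(topoShards))) // [-]
	fmt.Println(s.Equal(topoShards))                 // false
}

Like the old `List()` method, `sets.List` sorts its result, so the call sites above that feed error messages, span annotations, and test expectations keep deterministic output; `UnsortedList()` remains available where ordering does not matter.

Because `StringSetFlag` implements `pflag.Value`, the flagutil change is invisible to flag consumers. A hypothetical wiring sketch, assuming this patch is applied and the `vitess.io/vitess/go/flagutil` package is importable (the flag name and values are made up for illustration):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
	"k8s.io/apimachinery/pkg/util/sets"

	"vitess.io/vitess/go/flagutil"
)

func main() {
	var cells flagutil.StringSetFlag
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.Var(&cells, "cells", "cells to watch (repeated values are deduped)")

	if err := fs.Parse([]string{"--cells=zone1", "--cells=zone2", "--cells=zone1"}); err != nil {
		panic(err)
	}
	fmt.Println(sets.List(cells.ToSet())) // [zone1 zone2]
}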