*: use golang built-in functions rather than mathutil #56818

Merged
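For context: Go 1.21 added the built-in generic min and max functions, which is what lets every mathutil.Min/mathutil.Max call in the diff below be replaced and the //pkg/util/mathutil imports be dropped. A minimal sketch of the pattern, assuming Go 1.21 or later (identifiers and values are illustrative, not code from the repository):

package main

import "fmt"

func main() {
    // Go 1.21+ built-ins: no helper package or import is needed.
    // Values are illustrative only.
    var memlimit, quarterGiB uint64 = 64 << 20, 256 << 20
    memlimit = max(memlimit, quarterGiB) // previously: mathutil.Max(memlimit, quarterGiB)

    frac := 45
    fmt.Println(memlimit, min(frac, 30)) // previously: mathutil.Min(frac, 30)
}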
1 change: 0 additions & 1 deletion br/cmd/br/BUILD.bazel
@@ -37,7 +37,6 @@ go_library(
"//pkg/util",
"//pkg/util/gctuner",
"//pkg/util/logutil",
"//pkg/util/mathutil",
"//pkg/util/memory",
"//pkg/util/metricsutil",
"//pkg/util/redact",
3 changes: 1 addition & 2 deletions br/cmd/br/cmd.go
@@ -23,7 +23,6 @@ import (
"github.com/pingcap/tidb/pkg/config"
tidbutils "github.com/pingcap/tidb/pkg/util"
"github.com/pingcap/tidb/pkg/util/logutil"
"github.com/pingcap/tidb/pkg/util/mathutil"
"github.com/pingcap/tidb/pkg/util/memory"
"github.com/pingcap/tidb/pkg/util/redact"
"github.com/pingcap/tidb/pkg/util/size"
@@ -205,7 +204,7 @@ func Init(cmd *cobra.Command) (err error) {
memlimit := calculateMemoryLimit(memleft)
// BR command needs 256 MiB at least, if the left memory is less than 256 MiB,
// the memory limit cannot limit anyway and then finally OOM.
- memlimit = mathutil.Max(memlimit, quarterGiB)
+ memlimit = max(memlimit, quarterGiB)
log.Info("calculate the rest memory",
zap.Uint64("memtotal", memtotal), zap.Uint64("memused", memused), zap.Uint64("memlimit", memlimit))
// No need to set memory limit because the left memory is sufficient.
4 changes: 2 additions & 2 deletions br/pkg/stream/stream_metas.go
@@ -521,7 +521,7 @@ func MergeMigrations(m1 *pb.Migration, m2 *pb.Migration) *pb.Migration {
out.EditMeta = mergeMetaEdits(m1.GetEditMeta(), m2.GetEditMeta())
out.Compactions = append(out.Compactions, m1.GetCompactions()...)
out.Compactions = append(out.Compactions, m2.GetCompactions()...)
- out.TruncatedTo = mathutil.Max(m1.GetTruncatedTo(), m2.GetTruncatedTo())
+ out.TruncatedTo = max(m1.GetTruncatedTo(), m2.GetTruncatedTo())
out.DestructPrefix = append(out.DestructPrefix, m1.GetDestructPrefix()...)
out.DestructPrefix = append(out.DestructPrefix, m2.GetDestructPrefix()...)
return out
@@ -583,7 +583,7 @@ func (m MigrationExt) Load(ctx context.Context) (Migrations, error) {
if err != nil {
return errors.Annotate(err, "failed to get the truncate safepoint for base migration")
}
- t.Content.TruncatedTo = mathutil.Max(truncatedTs, t.Content.TruncatedTo)
+ t.Content.TruncatedTo = max(truncatedTs, t.Content.TruncatedTo)
}
return t.Content.Unmarshal(b)
})
1 change: 0 additions & 1 deletion cmd/importer/BUILD.bazel
@@ -26,7 +26,6 @@ go_library(
"//pkg/statistics/handle/storage",
"//pkg/statistics/handle/util",
"//pkg/types",
"//pkg/util/mathutil",
"@com_github_burntsushi_toml//:toml",
"@com_github_go_sql_driver_mysql//:mysql",
"@com_github_pingcap_errors//:errors",
6 changes: 2 additions & 4 deletions cmd/importer/data.go
@@ -19,8 +19,6 @@ import (
"math/rand"
"sync"
"time"

"github.com/pingcap/tidb/pkg/util/mathutil"
)

type datum struct {
@@ -75,8 +73,8 @@ func (d *datum) nextInt64() int64 {
defer d.Unlock()

if d.useRange {
- d.intValue = mathutil.Min(d.intValue, d.maxIntValue)
- d.intValue = mathutil.Max(d.intValue, d.minIntValue)
+ d.intValue = min(d.intValue, d.maxIntValue)
+ d.intValue = max(d.intValue, d.minIntValue)
}
d.updateRemains()
return d.intValue
1 change: 0 additions & 1 deletion pkg/autoid_service/BUILD.bazel
@@ -16,7 +16,6 @@ go_library(
"//pkg/owner",
"//pkg/util/etcd",
"//pkg/util/logutil",
"//pkg/util/mathutil",
"@com_github_pingcap_errors//:errors",
"@com_github_pingcap_failpoint//:failpoint",
"@com_github_pingcap_kvproto//pkg/autoid",
13 changes: 6 additions & 7 deletions pkg/autoid_service/autoid.go
@@ -34,7 +34,6 @@ import (
"github.com/pingcap/tidb/pkg/owner"
"github.com/pingcap/tidb/pkg/util/etcd"
"github.com/pingcap/tidb/pkg/util/logutil"
"github.com/pingcap/tidb/pkg/util/mathutil"
clientv3 "go.etcd.io/etcd/client/v3"
"go.uber.org/zap"
"google.golang.org/grpc"
@@ -98,7 +97,7 @@ func (alloc *autoIDValue) alloc4Unsigned(ctx context.Context, store kv.Storage,
if nextStep < n1 {
nextStep = n1
}
- tmpStep := int64(mathutil.Min(math.MaxUint64-uint64(newBase), uint64(nextStep)))
+ tmpStep := int64(min(math.MaxUint64-uint64(newBase), uint64(nextStep)))
// The global rest is not enough for alloc.
if tmpStep < n1 {
return errAutoincReadFailed
@@ -174,7 +173,7 @@ func (alloc *autoIDValue) alloc4Signed(ctx context.Context,
if nextStep < n1 {
nextStep = n1
}
- tmpStep := mathutil.Min(math.MaxInt64-newBase, nextStep)
+ tmpStep := min(math.MaxInt64-newBase, nextStep)
// The global rest is not enough for alloc.
if tmpStep < n1 {
return errAutoincReadFailed
@@ -229,8 +228,8 @@ func (alloc *autoIDValue) rebase4Unsigned(ctx context.Context,
}
oldValue = currentEnd
uCurrentEnd := uint64(currentEnd)
- newBase = mathutil.Max(uCurrentEnd, requiredBase)
- newEnd = mathutil.Min(math.MaxUint64-uint64(batch), newBase) + uint64(batch)
+ newBase = max(uCurrentEnd, requiredBase)
+ newEnd = min(math.MaxUint64-uint64(batch), newBase) + uint64(batch)
_, err1 = idAcc.Inc(int64(newEnd - uCurrentEnd))
return err1
})
@@ -270,8 +269,8 @@ func (alloc *autoIDValue) rebase4Signed(ctx context.Context, store kv.Storage, d
return err1
}
oldValue = currentEnd
- newBase = mathutil.Max(currentEnd, requiredBase)
- newEnd = mathutil.Min(math.MaxInt64-batch, newBase) + batch
+ newBase = max(currentEnd, requiredBase)
+ newEnd = min(math.MaxInt64-batch, newBase) + batch
_, err1 = idAcc.Inc(newEnd - currentEnd)
return err1
})
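Worth noting for the autoid changes above: the built-in min and max are generic over ordered types, but all arguments must share a single type, so the existing int64/uint64 conversions stay in place and only the helper call changes. A small sketch of that constraint, under the same Go 1.21+ assumption (values are made up for illustration, not real allocator state):

package main

import (
    "fmt"
    "math"
)

func main() {
    // Both operands passed to the built-in min must have the same type,
    // hence the explicit uint64 conversion around the signed step value.
    var newBase uint64 = math.MaxUint64 - 10 // illustrative value
    nextStep := int64(30000)
    tmpStep := int64(min(math.MaxUint64-newBase, uint64(nextStep)))
    fmt.Println(tmpStep) // 10: the remaining unsigned headroom caps the step
}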
7 changes: 3 additions & 4 deletions pkg/ddl/executor.go
@@ -67,7 +67,6 @@ import (
"github.com/pingcap/tidb/pkg/util/dbterror/exeerrors"
"github.com/pingcap/tidb/pkg/util/domainutil"
"github.com/pingcap/tidb/pkg/util/generic"
"github.com/pingcap/tidb/pkg/util/mathutil"
"github.com/pingcap/tidb/pkg/util/stringutil"
"github.com/tikv/client-go/v2/oracle"
pdhttp "github.com/tikv/pd/client/http"
@@ -2119,7 +2118,7 @@ func adjustNewBaseToNextGlobalID(ctx table.AllocatorContext, t table.Table, tp a
// If the user sends SQL `alter table t1 auto_increment = 100` to TiDB-B,
// and TiDB-B finds 100 < 30001 but returns without any handling,
// then TiDB-A may still allocate 99 for auto_increment column. This doesn't make sense for the user.
- return int64(mathutil.Max(uint64(newBase), uint64(autoID))), nil
+ return int64(max(uint64(newBase), uint64(autoID))), nil
}

// ShardRowID shards the implicit row ID by adding shard value to the row ID's first few bits.
@@ -2378,12 +2377,12 @@ func getReplacedPartitionIDs(names []string, pi *model.PartitionInfo) (firstPart
if firstPartIdx == -1 {
firstPartIdx = partIdx
} else {
- firstPartIdx = mathutil.Min[int](firstPartIdx, partIdx)
+ firstPartIdx = min(firstPartIdx, partIdx)
}
if lastPartIdx == -1 {
lastPartIdx = partIdx
} else {
- lastPartIdx = mathutil.Max[int](lastPartIdx, partIdx)
+ lastPartIdx = max(lastPartIdx, partIdx)
}
}
switch pi.Type {
11 changes: 5 additions & 6 deletions pkg/ddl/partition.go
@@ -61,7 +61,6 @@ import (
"github.com/pingcap/tidb/pkg/util/collate"
"github.com/pingcap/tidb/pkg/util/dbterror"
"github.com/pingcap/tidb/pkg/util/hack"
"github.com/pingcap/tidb/pkg/util/mathutil"
decoder "github.com/pingcap/tidb/pkg/util/rowDecoder"
"github.com/pingcap/tidb/pkg/util/slice"
"github.com/pingcap/tidb/pkg/util/stringutil"
@@ -1912,7 +1911,7 @@ func formatListPartitionValue(ctx expression.BuildContext, tblInfo *model.TableI

haveDefault := false
exprStrs := make([]string, 0)
- inValueStrs := make([]string, 0, mathutil.Max(len(pi.Columns), 1))
+ inValueStrs := make([]string, 0, max(len(pi.Columns), 1))
for i := range defs {
inValuesLoop:
for j, vs := range defs[i].InValues {
@@ -2938,9 +2937,9 @@ func (w *worker) onExchangeTablePartition(jobCtx *jobContext, job *model.Job) (v
// TODO: Fix the issue of big transactions during EXCHANGE PARTITION with AutoID.
// Similar to https://github.com/pingcap/tidb/issues/46904
newAutoIDs := model.AutoIDGroup{
- RowID: mathutil.Max(ptAutoIDs.RowID, ntAutoIDs.RowID),
- IncrementID: mathutil.Max(ptAutoIDs.IncrementID, ntAutoIDs.IncrementID),
- RandomID: mathutil.Max(ptAutoIDs.RandomID, ntAutoIDs.RandomID),
+ RowID: max(ptAutoIDs.RowID, ntAutoIDs.RowID),
+ IncrementID: max(ptAutoIDs.IncrementID, ntAutoIDs.IncrementID),
+ RandomID: max(ptAutoIDs.RandomID, ntAutoIDs.RandomID),
}
err = metaMut.GetAutoIDAccessors(ptSchemaID, pt.ID).Put(newAutoIDs)
if err != nil {
@@ -3779,7 +3778,7 @@ func newReorgPartitionWorker(i int, t table.PhysicalTable, decodeColMap map[int6
}
}
writeColOffsetMap[id] = offset
- maxOffset = mathutil.Max[int](maxOffset, offset)
+ maxOffset = max(maxOffset, offset)
}
return &reorgPartitionWorker{
backfillCtx: bCtx,
9 changes: 4 additions & 5 deletions pkg/ddl/sanity_check.go
@@ -28,7 +28,6 @@ import (
"github.com/pingcap/tidb/pkg/parser/ast"
"github.com/pingcap/tidb/pkg/sessionctx"
"github.com/pingcap/tidb/pkg/util/intest"
"github.com/pingcap/tidb/pkg/util/mathutil"
"go.uber.org/zap"
)

Expand Down Expand Up @@ -125,7 +124,7 @@ func expectedDeleteRangeCnt(ctx delRangeCntCtx, job *model.Job) (int, error) {

ret := 0
for _, arg := range args.IndexArgs {
- num := mathutil.Max(len(args.PartitionIDs), 1) // Add temporary index to del-range table.
+ num := max(len(args.PartitionIDs), 1) // Add temporary index to del-range table.
if arg.IsGlobal {
num = 1 // Global index only has one del-range.
}
@@ -144,21 +143,21 @@ func expectedDeleteRangeCnt(ctx delRangeCntCtx, job *model.Job) (int, error) {
if args.IndexArgs[0].IsVector {
return 0, nil
}
- return mathutil.Max(len(args.PartitionIDs), 1), nil
+ return max(len(args.PartitionIDs), 1), nil
case model.ActionDropColumn:
args, err := model.GetTableColumnArgs(job)
if err != nil {
return 0, errors.Trace(err)
}

- physicalCnt := mathutil.Max(len(args.PartitionIDs), 1)
+ physicalCnt := max(len(args.PartitionIDs), 1)
return physicalCnt * len(args.IndexIDs), nil
case model.ActionModifyColumn:
args, err := model.GetFinishedModifyColumnArgs(job)
if err != nil {
return 0, errors.Trace(err)
}
- physicalCnt := mathutil.Max(len(args.PartitionIDs), 1)
+ physicalCnt := max(len(args.PartitionIDs), 1)
return physicalCnt * ctx.deduplicateIdxCnt(args.IndexIDs), nil
case model.ActionMultiSchemaChange:
totalExpectedCnt := 0
5 changes: 2 additions & 3 deletions pkg/ddl/schema_version.go
@@ -25,7 +25,6 @@ import (
"github.com/pingcap/tidb/pkg/meta"
"github.com/pingcap/tidb/pkg/meta/model"
"github.com/pingcap/tidb/pkg/metrics"
"github.com/pingcap/tidb/pkg/util/mathutil"
"go.uber.org/zap"
)

@@ -208,7 +207,7 @@ func SetSchemaDiffForReorganizePartition(diff *model.SchemaDiff, job *model.Job,
droppedIDs, addedIDs := args.OldPhysicalTblIDs, args.NewPartitionIDs
if len(addedIDs) > 0 {
// to use AffectedOpts we need both new and old to have the same length
- maxParts := mathutil.Max[int](len(droppedIDs), len(addedIDs))
+ maxParts := max(len(droppedIDs), len(addedIDs))
// Also initialize them to 0!
oldIDs := make([]int64, maxParts)
copy(oldIDs, droppedIDs)
@@ -231,7 +230,7 @@ func SetSchemaDiffForPartitionModify(diff *model.SchemaDiff, job *model.Job, job
droppedIDs, addedIDs := args.OldPhysicalTblIDs, args.NewPartitionIDs
if len(addedIDs) > 0 {
// to use AffectedOpts we need both new and old to have the same length
- maxParts := mathutil.Max[int](len(droppedIDs), len(addedIDs))
+ maxParts := max(len(droppedIDs), len(addedIDs))
// Also initialize them to 0!
oldIDs := make([]int64, maxParts)
copy(oldIDs, droppedIDs)
2 changes: 1 addition & 1 deletion pkg/ddl/sequence.go
@@ -120,7 +120,7 @@ func handleSequenceOptions(seqOptions []*ast.SequenceOption, sequenceInfo *model
sequenceInfo.MinValue = model.DefaultPositiveSequenceMinValue
}
if !startSetFlag {
- sequenceInfo.Start = mathutil.Max(sequenceInfo.MinValue, model.DefaultPositiveSequenceStartValue)
+ sequenceInfo.Start = max(sequenceInfo.MinValue, model.DefaultPositiveSequenceStartValue)
}
if !maxSetFlag {
sequenceInfo.MaxValue = model.DefaultPositiveSequenceMaxValue
1 change: 0 additions & 1 deletion pkg/ddl/tests/partition/BUILD.bazel
@@ -41,7 +41,6 @@ go_test(
"//pkg/util/codec",
"//pkg/util/dbterror",
"//pkg/util/logutil",
"//pkg/util/mathutil",
"@com_github_pingcap_errors//:errors",
"@com_github_pingcap_failpoint//:failpoint",
"@com_github_stretchr_testify//assert",
3 changes: 1 addition & 2 deletions pkg/ddl/tests/partition/reorg_partition_test.go
@@ -35,7 +35,6 @@ import (
"github.com/pingcap/tidb/pkg/testkit/external"
"github.com/pingcap/tidb/pkg/testkit/testfailpoint"
"github.com/pingcap/tidb/pkg/util/dbterror"
"github.com/pingcap/tidb/pkg/util/mathutil"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
@@ -67,7 +66,7 @@ func noNewTablesAfter(t *testing.T, tk *testkit.TestKit, ctx sessionctx.Context,
defs := pt.Meta().Partition.Definitions
{
for i := range defs {
- tblID = mathutil.Max[int64](tblID, defs[i].ID)
+ tblID = max(tblID, defs[i].ID)
}
}
}
1 change: 0 additions & 1 deletion pkg/executor/importer/BUILD.bazel
@@ -66,7 +66,6 @@ go_library(
"//pkg/util/filter",
"//pkg/util/intest",
"//pkg/util/logutil",
"//pkg/util/mathutil",
"//pkg/util/promutil",
"//pkg/util/sqlexec",
"//pkg/util/sqlkiller",
3 changes: 1 addition & 2 deletions pkg/executor/importer/table_import.go
@@ -55,7 +55,6 @@ import (
"github.com/pingcap/tidb/pkg/table/tables"
tidbutil "github.com/pingcap/tidb/pkg/util"
"github.com/pingcap/tidb/pkg/util/etcd"
"github.com/pingcap/tidb/pkg/util/mathutil"
"github.com/pingcap/tidb/pkg/util/promutil"
"github.com/pingcap/tidb/pkg/util/sqlexec"
"github.com/pingcap/tidb/pkg/util/sqlkiller"
@@ -897,7 +896,7 @@ func checksumTable(ctx context.Context, se sessionctx.Context, plan *Plan, logge
logger.Warn("set tidb_backoff_weight failed", zap.Error(err))
}

- newConcurrency := mathutil.Max(plan.DistSQLScanConcurrency/distSQLScanConcurrencyFactor, local.MinDistSQLScanConcurrency)
+ newConcurrency := max(plan.DistSQLScanConcurrency/distSQLScanConcurrencyFactor, local.MinDistSQLScanConcurrency)
logger.Info("checksum with adjusted distsql scan concurrency", zap.Int("concurrency", newConcurrency))
se.GetSessionVars().SetDistSQLScanConcurrency(newConcurrency)

3 changes: 1 addition & 2 deletions pkg/expression/aggregation/avg.go
@@ -21,7 +21,6 @@ import (
"github.com/pingcap/tidb/pkg/sessionctx/stmtctx"
"github.com/pingcap/tidb/pkg/types"
"github.com/pingcap/tidb/pkg/util/chunk"
"github.com/pingcap/tidb/pkg/util/mathutil"
)

type avgFunction struct {
@@ -88,7 +87,7 @@ func (af *avgFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) {
if frac == -1 {
frac = mysql.MaxDecimalScale
}
- err = to.Round(to, mathutil.Min(frac, mysql.MaxDecimalScale), types.ModeHalfUp)
+ err = to.Round(to, min(frac, mysql.MaxDecimalScale), types.ModeHalfUp)
terror.Log(err)
d.SetMysqlDecimal(to)
}
3 changes: 1 addition & 2 deletions pkg/expression/aggregation/base_func.go
@@ -28,7 +28,6 @@ import (
"github.com/pingcap/tidb/pkg/planner/cascades/base"
"github.com/pingcap/tidb/pkg/types"
"github.com/pingcap/tidb/pkg/util/chunk"
"github.com/pingcap/tidb/pkg/util/mathutil"
"github.com/pingcap/tidb/pkg/util/size"
)

@@ -246,7 +245,7 @@ func (a *baseFuncDesc) TypeInfer4AvgSum(avgRetType *types.FieldType) {
// compatible with mysql.
func (a *baseFuncDesc) TypeInfer4AvgSum(avgRetType *types.FieldType) {
if avgRetType.GetType() == mysql.TypeNewDecimal {
- a.RetTp.SetFlen(mathutil.Min(mysql.MaxDecimalWidth, a.RetTp.GetFlen()+22))
+ a.RetTp.SetFlen(min(mysql.MaxDecimalWidth, a.RetTp.GetFlen()+22))
}
}

Expand Down