Merge branch 'master' into add-sql-require-primary-key
* master: (27 commits)
  executor: parallel cancel mpp query (pingcap#36161)
  store/copr: adjust the cop cache admission process time for paging (pingcap#36157)
  log-backup: get the restorable global-checkpoint-ts when v3 checkpoint advance is supported (pingcap#36197)
  executor: optimize cursor read point get by reading through pessimistic lock cache (pingcap#36149)
  *: add tidb_min_paging_size system variable (pingcap#36107)
  planner: handle the expected row count for pushed-down selection in mpp (pingcap#36195)
  *: support show ddl jobs for sub-jobs (pingcap#36168)
  table-filter: optimize table pattern message and unit tests (pingcap#36160)
  domain: fix unstable test TestAbnormalSessionPool (pingcap#36154)
  executor: check the error returned by `handleNoDelay` (pingcap#36105)
  log-backup: fix checkpoint display (pingcap#36166)
  store/mockstore/unistore: fix several issues of coprocessor paging in unistore (pingcap#36147)
  test: refactor restart test (pingcap#36174)
  ddl: support rename index and columns for multi-schema change (pingcap#36148)
  test: remove meaningless test and update bazel (pingcap#36136)
  planner: Reduce verbosity of logging unknown system variables (pingcap#36013)
  metrics/grafana: bring back the plan cache miss panel (pingcap#36081)
  ddl: implement table granularity DDL for SchemaTracker (pingcap#36077)
  *: bazel use jdk 17 (pingcap#36070)
  telemetry: add reviewer rule (pingcap#36084)
  ...
joycse06 committed Jul 13, 2022
2 parents a3fbeb0 + 81cf12e commit 0e25f45
Showing 134 changed files with 5,228 additions and 1,334 deletions.
5 changes: 5 additions & 0 deletions .bazelrc
@@ -1,6 +1,11 @@
startup --host_jvm_args=-Xmx8g
startup --unlimit_coredumps

build --java_language_version=17
build --java_runtime_version=17
build --tool_java_language_version=17
build --tool_java_runtime_version=17

run --color=yes
build:release --workspace_status_command=./build/print-workspace-status.sh --stamp
build:release --config=ci
1 change: 1 addition & 0 deletions .github/CODEOWNERS
@@ -2,3 +2,4 @@
/sessionctx/variable @pingcap/tidb-configuration-reviewer
/config/config.toml.example @pingcap/tidb-configuration-reviewer
/session/bootstrap.go @pingcap/tidb-configuration-reviewer
/telemetry/ @pingcap/telemetry-reviewer
1 change: 1 addition & 0 deletions br/cmd/br/BUILD.bazel
@@ -22,6 +22,7 @@ go_library(
"//br/pkg/redact",
"//br/pkg/restore",
"//br/pkg/rtree",
"//br/pkg/streamhelper/config",
"//br/pkg/summary",
"//br/pkg/task",
"//br/pkg/trace",
24 changes: 24 additions & 0 deletions br/cmd/br/stream.go
@@ -16,6 +16,7 @@ package main

import (
"github.com/pingcap/errors"
advancercfg "github.com/pingcap/tidb/br/pkg/streamhelper/config"
"github.com/pingcap/tidb/br/pkg/task"
"github.com/pingcap/tidb/br/pkg/trace"
"github.com/pingcap/tidb/br/pkg/utils"
@@ -49,6 +50,7 @@ func NewStreamCommand() *cobra.Command {
newStreamStatusCommand(),
newStreamTruncateCommand(),
newStreamCheckCommand(),
newStreamAdvancerCommand(),
)
command.SetHelpFunc(func(command *cobra.Command, strings []string) {
task.HiddenFlagsForStream(command.Root().PersistentFlags())
@@ -157,6 +159,21 @@ func newStreamCheckCommand() *cobra.Command {
return command
}

func newStreamAdvancerCommand() *cobra.Command {
command := &cobra.Command{
Use: "advancer",
Short: "Start a central worker for advancing the checkpoint. (only for debugging, this subcommand should be integrated into TiDB)",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return streamCommand(cmd, task.StreamCtl)
},
Hidden: true,
}
task.DefineStreamCommonFlags(command.Flags())
advancercfg.DefineFlagsForCheckpointAdvancerConfig(command.Flags())
return command
}

func streamCommand(command *cobra.Command, cmdName string) error {
var cfg task.StreamConfig
var err error
@@ -192,6 +209,13 @@ func streamCommand(command *cobra.Command, cmdName string) error {
if err = cfg.ParseStreamPauseFromFlags(command.Flags()); err != nil {
return errors.Trace(err)
}
case task.StreamCtl:
if err = cfg.ParseStreamCommonFromFlags(command.Flags()); err != nil {
return errors.Trace(err)
}
if err = cfg.AdvancerCfg.GetFromFlags(command.Flags()); err != nil {
return errors.Trace(err)
}
default:
if err = cfg.ParseStreamCommonFromFlags(command.Flags()); err != nil {
return errors.Trace(err)
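The advancer wiring above follows BR's define-then-parse flag pattern: a config type registers its flags on the command's FlagSet (DefineFlagsForCheckpointAdvancerConfig), and after cobra parses argv the same type reads the values back (AdvancerCfg.GetFromFlags). A minimal, self-contained sketch of that round trip — the Config type, its field, and the tick-interval flag are illustrative, not the advancer's real options:

package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag" // cobra exposes command flags as a pflag.FlagSet
)

// Config stands in for the advancer's config; the field and flag name are hypothetical.
type Config struct {
	TickInterval time.Duration
}

// DefineFlags registers the config's flags, as DefineFlagsForCheckpointAdvancerConfig does above.
func (c *Config) DefineFlags(fs *pflag.FlagSet) {
	fs.Duration("tick-interval", 12*time.Second, "interval between checkpoint advances")
}

// GetFromFlags reads the parsed values back, as AdvancerCfg.GetFromFlags does above.
func (c *Config) GetFromFlags(fs *pflag.FlagSet) error {
	var err error
	c.TickInterval, err = fs.GetDuration("tick-interval")
	return err
}

func main() {
	fs := pflag.NewFlagSet("advancer", pflag.ContinueOnError)
	cfg := new(Config)
	cfg.DefineFlags(fs)
	if err := fs.Parse([]string{"--tick-interval=30s"}); err != nil {
		panic(err)
	}
	if err := cfg.GetFromFlags(fs); err != nil {
		panic(err)
	}
	fmt.Println(cfg.TickInterval) // 30s
}

Keeping the parse step inside the switch on cmdName (see streamCommand above) lets each subcommand pull only the flags it actually defined.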
3 changes: 1 addition & 2 deletions br/pkg/conn/BUILD.bazel
@@ -19,16 +19,15 @@ go_library(
"@com_github_pingcap_errors//:errors",
"@com_github_pingcap_failpoint//:failpoint",
"@com_github_pingcap_kvproto//pkg/brpb",
"@com_github_pingcap_kvproto//pkg/logbackuppb",
"@com_github_pingcap_kvproto//pkg/metapb",
"@com_github_pingcap_log//:log",
"@com_github_tikv_client_go_v2//oracle",
"@com_github_tikv_client_go_v2//tikv",
"@com_github_tikv_client_go_v2//txnkv/txnlock",
"@com_github_tikv_pd_client//:client",
"@org_golang_google_grpc//:grpc",
"@org_golang_google_grpc//backoff",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//credentials",
"@org_golang_google_grpc//keepalive",
"@org_golang_google_grpc//status",
"@org_uber_go_zap//:zap",
214 changes: 23 additions & 191 deletions br/pkg/conn/conn.go
@@ -9,16 +9,14 @@ import (
"fmt"
"net/http"
"net/url"
"os"
"strings"
"sync"
"time"

"github.com/docker/go-units"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
backuppb "github.com/pingcap/kvproto/pkg/brpb"
logbackup "github.com/pingcap/kvproto/pkg/logbackuppb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/log"
berrors "github.com/pingcap/tidb/br/pkg/errors"
@@ -35,9 +33,7 @@ import (
pd "github.com/tikv/pd/client"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/status"
)
@@ -49,83 +45,17 @@ const (

// DefaultMergeRegionKeyCount is the default region key count, 960000.
DefaultMergeRegionKeyCount uint64 = 960000

dialTimeout = 30 * time.Second

resetRetryTimes = 3
)

// Pool is a lazy pool of gRPC channels.
// When `Get` is called, it lazily allocates a new connection if the pool is not full.
// If the pool is full, it returns the allocated channels round-robin.
type Pool struct {
mu sync.Mutex

conns []*grpc.ClientConn
next int
cap int
newConn func(ctx context.Context) (*grpc.ClientConn, error)
}

func (p *Pool) takeConns() (conns []*grpc.ClientConn) {
p.mu.Lock()
defer p.mu.Unlock()
p.conns, conns = nil, p.conns
p.next = 0
return conns
}

// Close closes the conn pool.
func (p *Pool) Close() {
for _, c := range p.takeConns() {
if err := c.Close(); err != nil {
log.Warn("failed to close clientConn", zap.String("target", c.Target()), zap.Error(err))
}
}
}

// Get tries to get an existing connection from the pool, or makes a new one if the pool is not full.
func (p *Pool) Get(ctx context.Context) (*grpc.ClientConn, error) {
p.mu.Lock()
defer p.mu.Unlock()
if len(p.conns) < p.cap {
c, err := p.newConn(ctx)
if err != nil {
return nil, err
}
p.conns = append(p.conns, c)
return c, nil
}

conn := p.conns[p.next]
p.next = (p.next + 1) % p.cap
return conn, nil
}

// NewConnPool creates a new Pool with the specified conn factory function and capacity.
func NewConnPool(capacity int, newConn func(ctx context.Context) (*grpc.ClientConn, error)) *Pool {
return &Pool{
cap: capacity,
conns: make([]*grpc.ClientConn, 0, capacity),
newConn: newConn,

mu: sync.Mutex{},
}
}

// Mgr manages connections to a TiDB cluster.
type Mgr struct {
*pdutil.PdController
tlsConf *tls.Config
dom *domain.Domain
storage kv.Storage // Used to access SQL related interfaces.
tikvStore tikv.Storage // Used to access TiKV specific interfaces.
grpcClis struct {
mu sync.Mutex
clis map[uint64]*grpc.ClientConn
}
keepalive keepalive.ClientParameters
dom *domain.Domain
storage kv.Storage // Used to access SQL related interfaces.
tikvStore tikv.Storage // Used to access TiKV specific interfaces.
ownsStorage bool

*utils.StoreManager
}

// StoreBehavior is the action to do in GetAllTiKVStores when a non-TiKV
@@ -298,122 +228,31 @@ func NewMgr(
storage: storage,
tikvStore: tikvStorage,
dom: dom,
tlsConf: tlsConf,
ownsStorage: g.OwnsStorage(),
grpcClis: struct {
mu sync.Mutex
clis map[uint64]*grpc.ClientConn
}{clis: make(map[uint64]*grpc.ClientConn)},
keepalive: keepalive,
StoreManager: utils.NewStoreManager(controller.GetPDClient(), keepalive, tlsConf),
}
return mgr, nil
}

func (mgr *Mgr) getGrpcConnLocked(ctx context.Context, storeID uint64) (*grpc.ClientConn, error) {
failpoint.Inject("hint-get-backup-client", func(v failpoint.Value) {
log.Info("failpoint hint-get-backup-client injected, "+
"process will notify the shell.", zap.Uint64("store", storeID))
if sigFile, ok := v.(string); ok {
file, err := os.Create(sigFile)
if err != nil {
log.Warn("failed to create file for notifying, skipping notify", zap.Error(err))
}
if file != nil {
file.Close()
}
}
time.Sleep(3 * time.Second)
})
store, err := mgr.GetPDClient().GetStore(ctx, storeID)
if err != nil {
return nil, errors.Trace(err)
}
opt := grpc.WithInsecure()
if mgr.tlsConf != nil {
opt = grpc.WithTransportCredentials(credentials.NewTLS(mgr.tlsConf))
}
ctx, cancel := context.WithTimeout(ctx, dialTimeout)
bfConf := backoff.DefaultConfig
bfConf.MaxDelay = time.Second * 3
addr := store.GetPeerAddress()
if addr == "" {
addr = store.GetAddress()
}
conn, err := grpc.DialContext(
ctx,
addr,
opt,
grpc.WithBlock(),
grpc.WithConnectParams(grpc.ConnectParams{Backoff: bfConf}),
grpc.WithKeepaliveParams(mgr.keepalive),
)
cancel()
if err != nil {
return nil, berrors.ErrFailedToConnect.Wrap(err).GenWithStack("failed to make connection to store %d", storeID)
}
return conn, nil
}

// GetBackupClient gets or creates a backup client.
func (mgr *Mgr) GetBackupClient(ctx context.Context, storeID uint64) (backuppb.BackupClient, error) {
if ctx.Err() != nil {
return nil, errors.Trace(ctx.Err())
}

mgr.grpcClis.mu.Lock()
defer mgr.grpcClis.mu.Unlock()

if conn, ok := mgr.grpcClis.clis[storeID]; ok {
// Find a cached backup client.
return backuppb.NewBackupClient(conn), nil
}

conn, err := mgr.getGrpcConnLocked(ctx, storeID)
if err != nil {
return nil, errors.Trace(err)
var cli backuppb.BackupClient
if err := mgr.WithConn(ctx, storeID, func(cc *grpc.ClientConn) {
cli = backuppb.NewBackupClient(cc)
}); err != nil {
return nil, err
}
// Cache the conn.
mgr.grpcClis.clis[storeID] = conn
return backuppb.NewBackupClient(conn), nil
return cli, nil
}

// ResetBackupClient resets the connection for the backup client.
func (mgr *Mgr) ResetBackupClient(ctx context.Context, storeID uint64) (backuppb.BackupClient, error) {
if ctx.Err() != nil {
return nil, errors.Trace(ctx.Err())
}

mgr.grpcClis.mu.Lock()
defer mgr.grpcClis.mu.Unlock()

if conn, ok := mgr.grpcClis.clis[storeID]; ok {
// Find a cached backup client.
log.Info("Reset backup client", zap.Uint64("storeID", storeID))
err := conn.Close()
if err != nil {
log.Warn("close backup connection failed, ignore it", zap.Uint64("storeID", storeID))
}
delete(mgr.grpcClis.clis, storeID)
}
var (
conn *grpc.ClientConn
err error
)
for retry := 0; retry < resetRetryTimes; retry++ {
conn, err = mgr.getGrpcConnLocked(ctx, storeID)
if err != nil {
log.Warn("failed to reset grpc connection, retry it",
zap.Int("retry time", retry), logutil.ShortError(err))
time.Sleep(time.Duration(retry+3) * time.Second)
continue
}
mgr.grpcClis.clis[storeID] = conn
break
}
if err != nil {
return nil, errors.Trace(err)
func (mgr *Mgr) GetLogBackupClient(ctx context.Context, storeID uint64) (logbackup.LogBackupClient, error) {
var cli logbackup.LogBackupClient
if err := mgr.WithConn(ctx, storeID, func(cc *grpc.ClientConn) {
cli = logbackup.NewLogBackupClient(cc)
}); err != nil {
return nil, err
}
return backuppb.NewBackupClient(conn), nil
return cli, nil
}

// GetStorage returns a kv storage.
@@ -423,7 +262,7 @@ func (mgr *Mgr) GetStorage() kv.Storage {

// GetTLSConfig returns the tls config.
func (mgr *Mgr) GetTLSConfig() *tls.Config {
return mgr.tlsConf
return mgr.StoreManager.TLSConfig()
}

// GetLockResolver gets the LockResolver.
@@ -436,17 +275,10 @@ func (mgr *Mgr) GetDomain() *domain.Domain {
return mgr.dom
}

// Close closes all clients in Mgr.
func (mgr *Mgr) Close() {
mgr.grpcClis.mu.Lock()
for _, cli := range mgr.grpcClis.clis {
err := cli.Close()
if err != nil {
log.Error("fail to close Mgr", zap.Error(err))
}
if mgr.StoreManager != nil {
mgr.StoreManager.Close()
}
mgr.grpcClis.mu.Unlock()

// Gracefully shutdown domain so it does not affect other TiDB DDL.
// Must close domain before closing storage, otherwise it gets stuck forever.
if mgr.ownsStorage {
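Taken together, the conn.go changes remove Mgr's private connection machinery (the Pool type, getGrpcConnLocked, and the grpcClis map) and delegate channel caching, TLS, and keepalive to the shared utils.StoreManager; backup and log-backup clients are now built inside a WithConn callback. A simplified sketch of that pattern — connCache and its fields are illustrative stand-ins, not the real StoreManager API:

package connsketch

import (
	"context"
	"sync"

	"google.golang.org/grpc"
)

// connCache is a stripped-down stand-in for utils.StoreManager: one cached
// gRPC channel per store ID, handed to callers through a callback so they
// never manage channel lifetimes themselves.
type connCache struct {
	mu    sync.Mutex
	conns map[uint64]*grpc.ClientConn
	dial  func(ctx context.Context, storeID uint64) (*grpc.ClientConn, error)
}

// WithConn runs f with the cached channel for storeID, dialing on first use;
// this mirrors the mgr.WithConn calls in GetBackupClient and GetLogBackupClient above.
func (c *connCache) WithConn(ctx context.Context, storeID uint64, f func(*grpc.ClientConn)) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	cc, ok := c.conns[storeID]
	if !ok {
		var err error
		cc, err = c.dial(ctx, storeID)
		if err != nil {
			return err
		}
		if c.conns == nil {
			c.conns = make(map[uint64]*grpc.ClientConn)
		}
		c.conns[storeID] = cc
	}
	f(cc)
	return nil
}

// Close releases every cached channel, as Mgr.Close now does via StoreManager.Close.
func (c *connCache) Close() {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, cc := range c.conns {
		_ = cc.Close() // best-effort; a real implementation would log the error
	}
	c.conns = nil
}

One consequence of this design: callers receive a typed client per call rather than holding a shared channel, so recovering from a bad connection reduces to evicting the cached entry and letting the next WithConn redial.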
4 changes: 3 additions & 1 deletion br/pkg/lightning/restore/BUILD.bazel
@@ -38,7 +38,6 @@ go_library(
"//br/pkg/utils",
"//br/pkg/version",
"//br/pkg/version/build",
"//config",
"//kv",
"//meta/autoid",
"//parser",
@@ -109,10 +108,13 @@ go_test(
"//ddl",
"//errno",
"//kv",
"//meta",
"//meta/autoid",
"//parser",
"//parser/ast",
"//parser/model",
"//parser/mysql",
"//store/mockstore",
"//store/pdtypes",
"//table/tables",
"//types",