From 78e3e228e2e78fa9ce39d23446510e7440663526 Mon Sep 17 00:00:00 2001 From: Lingyu Song Date: Tue, 8 Sep 2020 20:15:51 +0800 Subject: [PATCH 1/4] cherry pick #19425 to release-4.0 Signed-off-by: ti-srebot --- ddl/db_integration_test.go | 2 +- ddl/rollingback.go | 13 + domain/domain_test.go | 5 +- executor/executor_test.go | 27 +- executor/show.go | 2 +- executor/trace.go | 4 +- expression/integration_test.go | 14 +- go.mod | 20 + go.sum | 41 + kv/error_test.go | 2 +- metrics/server.go | 4 +- planner/core/errors_test.go | 2 +- privilege/privileges/cache.go | 2 +- server/conn.go | 4 +- sessionctx/variable/sysvar_test.go | 2 +- store/mockstore/mocktikv/cop_handler_dag.go | 4 +- .../unistore/cophandler/closure_exec.go | 877 ++++++++++++++++++ .../unistore/cophandler/cop_handler.go | 418 +++++++++ structure/structure_test.go | 2 +- table/table_test.go | 35 +- tablecodec/tablecodec_test.go | 2 +- types/errors_test.go | 2 +- util/admin/admin_test.go | 2 +- util/memory/tracker_test.go | 3 +- util/testkit/testkit.go | 2 +- 25 files changed, 1434 insertions(+), 57 deletions(-) create mode 100644 store/mockstore/unistore/cophandler/closure_exec.go create mode 100644 store/mockstore/unistore/cophandler/cop_handler.go diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index d9a0ab1f6fd89..9c469db3a12db 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -272,7 +272,7 @@ func (s *testIntegrationSuite2) TestIssue6101(c *C) { tk.MustExec("create table t1 (quantity decimal(2) unsigned);") _, err := tk.Exec("insert into t1 values (500), (-500), (~0), (-1);") terr := errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(errno.ErrWarnDataOutOfRange)) + c.Assert(terr.Code(), Equals, errors.ErrCode(errno.ErrWarnDataOutOfRange)) tk.MustExec("drop table t1") tk.MustExec("set sql_mode=''") diff --git a/ddl/rollingback.go b/ddl/rollingback.go index 8c9de46e1abac..b529834eb7526 100644 --- a/ddl/rollingback.go +++ b/ddl/rollingback.go @@ -18,6 +18,7 @@ import ( "github.com/pingcap/parser/ast" "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" + "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/util/logutil" @@ -326,6 +327,18 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) } if err != nil { +<<<<<<< HEAD +======= + if job.Error == nil { + job.Error = toTError(err) + } + if !job.Error.Equal(errCancelledDDLJob) { + job.Error = terror.GetErrClass(job.Error).Synthesize(terror.ErrCode(job.Error.Code()), + fmt.Sprintf("DDL job rollback, error msg: %s", terror.ToSQLError(job.Error).Message)) + } + job.ErrorCount++ + +>>>>>>> 449587a... 
*: using standard error to replace terror (#19425) if job.State != model.JobStateRollingback && job.State != model.JobStateCancelled { logutil.Logger(w.logCtx).Error("[ddl] run DDL job failed", zap.String("job", job.String()), zap.Error(err)) } else { diff --git a/domain/domain_test.go b/domain/domain_test.go index 1a2618d550c5a..b065c86e52428 100644 --- a/domain/domain_test.go +++ b/domain/domain_test.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/model" + "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/errno" @@ -458,6 +459,6 @@ func (*testSuite) TestSessionPool(c *C) { } func (*testSuite) TestErrorCode(c *C) { - c.Assert(int(ErrInfoSchemaExpired.ToSQLError().Code), Equals, errno.ErrInfoSchemaExpired) - c.Assert(int(ErrInfoSchemaChanged.ToSQLError().Code), Equals, errno.ErrInfoSchemaChanged) + c.Assert(int(terror.ToSQLError(ErrInfoSchemaExpired).Code), Equals, errno.ErrInfoSchemaExpired) + c.Assert(int(terror.ToSQLError(ErrInfoSchemaChanged).Code), Equals, errno.ErrInfoSchemaChanged) } diff --git a/executor/executor_test.go b/executor/executor_test.go index d49d22c2d5adf..68ac0740fc986 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -1248,12 +1248,12 @@ func (s *testSuiteP2) TestUnion(c *C) { err := tk.ExecToErr("select 1 from (select a from t limit 1 union all select a from t limit 1) tmp") c.Assert(err, NotNil) terr := errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongUsage)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrWrongUsage)) err = tk.ExecToErr("select 1 from (select a from t order by a union all select a from t limit 1) tmp") c.Assert(err, NotNil) terr = errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongUsage)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrWrongUsage)) _, err = tk.Exec("(select a from t order by a) union all select a from t limit 1 union all select a from t limit 1") c.Assert(terror.ErrorEqual(err, plannercore.ErrWrongUsage), IsTrue, Commentf("err %v", err)) @@ -1630,23 +1630,23 @@ func (s *testSuiteP1) TestJSON(c *C) { _, err = tk.Exec(`create table test_bad_json(a json default '{}')`) c.Assert(err, NotNil) terr = errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrBlobCantHaveDefault)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrBlobCantHaveDefault)) _, err = tk.Exec(`create table test_bad_json(a blob default 'hello')`) c.Assert(err, NotNil) terr = errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrBlobCantHaveDefault)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrBlobCantHaveDefault)) _, err = tk.Exec(`create table test_bad_json(a text default 'world')`) c.Assert(err, NotNil) terr = errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrBlobCantHaveDefault)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrBlobCantHaveDefault)) // check json fields cannot be used as key. _, err = tk.Exec(`create table test_bad_json(id int, a json, key (a))`) c.Assert(err, NotNil) terr = errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrJSONUsedAsKey)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrJSONUsedAsKey)) // check CAST AS JSON. 
result = tk.MustQuery(`select CAST('3' AS JSON), CAST('{}' AS JSON), CAST(null AS JSON)`) @@ -1744,7 +1744,7 @@ func (s *testSuiteP1) TestGeneratedColumnWrite(c *C) { if tt.err != 0 { c.Assert(err, NotNil, Commentf("sql is `%v`", tt.stmt)) terr := errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(tt.err), Commentf("sql is %v", tt.stmt)) + c.Assert(terr.Code(), Equals, errors.ErrCode(tt.err), Commentf("sql is %v", tt.stmt)) } else { c.Assert(err, IsNil) } @@ -1915,7 +1915,7 @@ func (s *testSuiteP1) TestGeneratedColumnRead(c *C) { if tt.err != 0 { c.Assert(err, NotNil) terr := errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(tt.err)) + c.Assert(terr.Code(), Equals, errors.ErrCode(tt.err)) } else { c.Assert(err, IsNil) } @@ -3283,7 +3283,7 @@ func (s *testSuite) TestContainDotColumn(c *C) { tk.MustExec("drop table if exists t3") _, err := tk.Exec("create table t3(s.a char);") terr := errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongTableName)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrWrongTableName)) } func (s *testSuite) TestCheckIndex(c *C) { @@ -4201,7 +4201,7 @@ func (s *testSuiteP2) TestSplitRegion(c *C) { _, err := tk.Exec(`split table t index idx1 by ("abcd");`) c.Assert(err, NotNil) terr := errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.WarnDataTruncated)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.WarnDataTruncated)) // Test for split index region. // Check min value is more than max value. @@ -5944,7 +5944,14 @@ func (s *testSplitTable) TestKillTableReader(c *C) { wg.Add(1) go func() { defer wg.Done() +<<<<<<< HEAD c.Assert(int(errors.Cause(tk.QueryToErr("select * from t")).(*terror.Error).ToSQLError().Code), Equals, int(executor.ErrQueryInterrupted.Code())) +======= + time.Sleep(1 * time.Second) + err := tk.QueryToErr("select * from t") + c.Assert(err, NotNil) + c.Assert(int(terror.ToSQLError(errors.Cause(err).(*terror.Error)).Code), Equals, int(executor.ErrQueryInterrupted.Code())) +>>>>>>> 449587a... 
*: using standard error to replace terror (#19425) }() time.Sleep(1 * time.Second) atomic.StoreUint32(&tk.Se.GetSessionVars().Killed, 1) diff --git a/executor/show.go b/executor/show.go index dea0ead1f0e42..305f3172a4ec5 100644 --- a/executor/show.go +++ b/executor/show.go @@ -1338,7 +1338,7 @@ func (e *ShowExec) fetchShowWarnings(errOnly bool) error { warn := errors.Cause(w.Err) switch x := warn.(type) { case *terror.Error: - sqlErr := x.ToSQLError() + sqlErr := terror.ToSQLError(x) e.appendRow([]interface{}{w.Level, int64(sqlErr.Code), sqlErr.Message}) default: e.appendRow([]interface{}{w.Level, int64(mysql.ErrUnknown), warn.Error()}) diff --git a/executor/trace.go b/executor/trace.go index 3d0e787db5b55..bf9150f357081 100644 --- a/executor/trace.go +++ b/executor/trace.go @@ -137,7 +137,7 @@ func (e *TraceExec) executeChild(ctx context.Context, se sqlexec.SQLExecutor) { if err != nil { var errCode uint16 if te, ok := err.(*terror.Error); ok { - errCode = te.ToSQLError().Code + errCode = terror.ToSQLError(te).Code } logutil.Eventf(ctx, "execute with error(%d): %s", errCode, err.Error()) } else { @@ -161,7 +161,7 @@ func drainRecordSet(ctx context.Context, sctx sessionctx.Context, rs sqlexec.Rec if err != nil { var errCode uint16 if te, ok := err.(*terror.Error); ok { - errCode = te.ToSQLError().Code + errCode = terror.ToSQLError(te).Code } logutil.Eventf(ctx, "execute with error(%d): %s", errCode, err.Error()) } else { diff --git a/expression/integration_test.go b/expression/integration_test.go index fec3cd1e035ee..6784b3a410ab9 100755 --- a/expression/integration_test.go +++ b/expression/integration_test.go @@ -466,7 +466,7 @@ func (s *testIntegrationSuite2) TestMathBuiltin(c *C) { _, err = session.GetRows4Test(ctx, tk.Se, rs) c.Assert(err, NotNil) terr := errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrDataOutOfRange)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrDataOutOfRange)) c.Assert(rs.Close(), IsNil) //for exp @@ -479,7 +479,7 @@ func (s *testIntegrationSuite2) TestMathBuiltin(c *C) { _, err = session.GetRows4Test(ctx, tk.Se, rs) c.Assert(err, NotNil) terr = errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrDataOutOfRange)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrDataOutOfRange)) c.Assert(rs.Close(), IsNil) tk.MustExec("drop table if exists t") tk.MustExec("create table t(a float)") @@ -489,7 +489,7 @@ func (s *testIntegrationSuite2) TestMathBuiltin(c *C) { _, err = session.GetRows4Test(ctx, tk.Se, rs) c.Assert(err, NotNil) terr = errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrDataOutOfRange)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrDataOutOfRange)) c.Assert(err.Error(), Equals, "[types:1690]DOUBLE value is out of range in 'exp(test.t.a)'") c.Assert(rs.Close(), IsNil) @@ -529,7 +529,7 @@ func (s *testIntegrationSuite2) TestMathBuiltin(c *C) { _, err = session.GetRows4Test(ctx, tk.Se, rs) c.Assert(err, NotNil) terr = errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrDataOutOfRange)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrDataOutOfRange)) c.Assert(rs.Close(), IsNil) // for round @@ -608,7 +608,7 @@ func (s *testIntegrationSuite2) TestMathBuiltin(c *C) { _, err = session.GetRows4Test(ctx, tk.Se, rs) c.Assert(err, NotNil) terr = errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrDataOutOfRange)) + c.Assert(terr.Code(), 
Equals, errors.ErrCode(mysql.ErrDataOutOfRange)) c.Assert(rs.Close(), IsNil) // for sign @@ -1226,7 +1226,7 @@ func (s *testIntegrationSuite2) TestEncryptionBuiltin(c *C) { _, err = session.GetRows4Test(ctx, tk.Se, rs) c.Assert(err, NotNil, Commentf("%v", len)) terr := errors.Cause(err).(*terror.Error) - c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrDataOutOfRange), Commentf("%v", len)) + c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrDataOutOfRange), Commentf("%v", len)) c.Assert(rs.Close(), IsNil) } tk.MustQuery("SELECT RANDOM_BYTES('1');") @@ -3691,7 +3691,7 @@ func (s *testIntegrationSuite) TestAggregationBuiltinGroupConcat(c *C) { tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning 1260 Some rows were cut by GROUPCONCAT(test.t.a)")) _, err := tk.Exec("insert into d select group_concat(a) from t") - c.Assert(errors.Cause(err).(*terror.Error).Code(), Equals, terror.ErrCode(mysql.ErrCutValueGroupConcat)) + c.Assert(errors.Cause(err).(*terror.Error).Code(), Equals, errors.ErrCode(mysql.ErrCutValueGroupConcat)) tk.Exec("set sql_mode=''") tk.MustExec("insert into d select group_concat(a) from t") diff --git a/go.mod b/go.mod index fd6a888d0b1c8..7587a39c94755 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,8 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 github.com/klauspost/cpuid v1.2.1 + github.com/mattn/go-colorable v0.1.7 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef github.com/opentracing/basictracer-go v1.0.0 @@ -32,7 +34,11 @@ require ( github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20200818080353-7aaed8998596 github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463 +<<<<<<< HEAD github.com/pingcap/parser v0.0.0-20200902091735-5e6adfc24e11 +======= + github.com/pingcap/parser v0.0.0-20200908111137-8157d6307003 +>>>>>>> 449587a... *: using standard error to replace terror (#19425) github.com/pingcap/sysutil v0.0.0-20200715082929-4c47bcac246a github.com/pingcap/tidb-tools v4.0.6-0.20200828085514-03575b185007+incompatible github.com/pingcap/tipb v0.0.0-20200618092958-4fad48b4c8c3 @@ -44,20 +50,34 @@ require ( github.com/soheilhy/cmux v0.1.4 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 +<<<<<<< HEAD github.com/tikv/pd v1.1.0-beta.0.20200907080620-6830f5bb92a2 +======= + github.com/tikv/pd v1.1.0-beta.0.20200820084926-bcfa77a7a593 + github.com/twmb/murmur3 v1.1.3 +>>>>>>> 449587a... *: using standard error to replace terror (#19425) github.com/uber-go/atomic v1.3.2 github.com/uber/jaeger-client-go v2.22.1+incompatible go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 go.uber.org/atomic v1.6.0 go.uber.org/automaxprocs v1.2.0 +<<<<<<< HEAD go.uber.org/zap v1.15.0 +======= + go.uber.org/zap v1.16.0 +>>>>>>> 449587a... *: using standard error to replace terror (#19425) golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 golang.org/x/sys v0.0.0-20200819171115-d785dc25833f golang.org/x/text v0.3.3 golang.org/x/tools v0.0.0-20200820010801-b793a1359eac +<<<<<<< HEAD +======= + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect +>>>>>>> 449587a... 
*: using standard error to replace terror (#19425) google.golang.org/grpc v1.26.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/yaml.v2 v2.3.0 // indirect sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4 sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 ) diff --git a/go.sum b/go.sum index fe45ac05adff4..409d3d832b7f9 100644 --- a/go.sum +++ b/go.sum @@ -423,6 +423,13 @@ github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTw github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 h1:58naV4XMEqm0hl9LcYo6cZoGBGiLtefMQMF/vo3XLgQ= github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +<<<<<<< HEAD +======= +github.com/pingcap/errors v0.11.5-0.20200729012136-4e113ddee29e h1:/EGWHNOyEgizEBuAujWsb9vXrPZtt1b7ooDPyjEkjDw= +github.com/pingcap/errors v0.11.5-0.20200729012136-4e113ddee29e/go.mod h1:g4vx//d6VakjJ0mk7iLBlKA8LFavV/sAVINT/1PFxeQ= +github.com/pingcap/errors v0.11.5-0.20200820035142-66eb5bf1d1cd h1:ay+wAVWHI/Z6vIik13hsK+FT9ZCNSPBElGr0qgiZpjg= +github.com/pingcap/errors v0.11.5-0.20200820035142-66eb5bf1d1cd/go.mod h1:g4vx//d6VakjJ0mk7iLBlKA8LFavV/sAVINT/1PFxeQ= +>>>>>>> 449587a... *: using standard error to replace terror (#19425) github.com/pingcap/errors v0.11.5-0.20200902104258-eba4f1d8f6de h1:mW8hC2yXTpflfyTeJgcN4aJQfwcYODde8YgjBgAy6do= github.com/pingcap/errors v0.11.5-0.20200902104258-eba4f1d8f6de/go.mod h1:g4vx//d6VakjJ0mk7iLBlKA8LFavV/sAVINT/1PFxeQ= github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= @@ -457,12 +464,24 @@ github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463 h1:Jboj+s4jSCp5E1WDgmR github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/parser v0.0.0-20200424075042-8222d8b724a4/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= github.com/pingcap/parser v0.0.0-20200507022230-f3bf29096657/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +<<<<<<< HEAD github.com/pingcap/parser v0.0.0-20200603032439-c4ecb4508d2f/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= github.com/pingcap/parser v0.0.0-20200623164729-3a18f1e5dceb/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= github.com/pingcap/parser v0.0.0-20200803072748-fdf66528323d/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= github.com/pingcap/parser v0.0.0-20200901062802-475ea5e2e0a7/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= github.com/pingcap/parser v0.0.0-20200902091735-5e6adfc24e11 h1:l269UdOdRK4NGoldejKY+9Tor7XmHU+XOuS+VQDwcH8= github.com/pingcap/parser v0.0.0-20200902091735-5e6adfc24e11/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= +======= +github.com/pingcap/parser v0.0.0-20200518090819-ec1e13b948b1/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= +github.com/pingcap/parser v0.0.0-20200522094936-3b720a0512a6/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= +github.com/pingcap/parser v0.0.0-20200609110328-c65941b9fbb3/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= +github.com/pingcap/parser v0.0.0-20200623082809-b74301ac298b/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= +github.com/pingcap/parser v0.0.0-20200730092557-34a468e9b774/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= +github.com/pingcap/parser v0.0.0-20200731033026-84f62115187c/go.mod 
h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= +github.com/pingcap/parser v0.0.0-20200813083329-a4bff035d3e2/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= +github.com/pingcap/parser v0.0.0-20200908111137-8157d6307003 h1:HozkZZaBPDYLMHPkK6vy4W0gNytzml+UwxaDxkJrr34= +github.com/pingcap/parser v0.0.0-20200908111137-8157d6307003/go.mod h1:RlLfMRJwFBSiXd2lUaWdV5pSXtrpyvZM8k5bbZWsheU= +>>>>>>> 449587a... *: using standard error to replace terror (#19425) github.com/pingcap/pd/v4 v4.0.0-rc.1.0.20200422143320-428acd53eba2/go.mod h1:s+utZtXDznOiL24VK0qGmtoHjjXNsscJx3m1n8cC56s= github.com/pingcap/pd/v4 v4.0.0-rc.2.0.20200520083007-2c251bd8f181/go.mod h1:q4HTx/bA8aKBa4S7L+SQKHvjRPXCRV0tA0yRw0qkZSA= github.com/pingcap/pd/v4 v4.0.5-0.20200817114353-e465cafe8a91 h1:zCOWP+kIzM6ZsXdu2QoM/W6+3vFZj04MYboMP2Obc0E= @@ -586,8 +605,15 @@ github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfK github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +<<<<<<< HEAD github.com/tikv/pd v1.1.0-beta.0.20200907080620-6830f5bb92a2 h1:cC5v/gn9NdcmAlpBrWI5x3MiYmQcW2k7EHccg8837p4= github.com/tikv/pd v1.1.0-beta.0.20200907080620-6830f5bb92a2/go.mod h1:6OYi62ks7nFIBtWWpOjnngr5LNos4Hvi1BzArCWAlBc= +======= +github.com/tikv/pd v1.1.0-beta.0.20200818122340-ef1a4e920b2f h1:MI6OpYRLt041T2uONJzG4BwvVp12sLQ1UVCQuDv1bpw= +github.com/tikv/pd v1.1.0-beta.0.20200818122340-ef1a4e920b2f/go.mod h1:mwZ3Lip1YXgtgBx6blADUPMxrqPGCfwABlreDzuJul8= +github.com/tikv/pd v1.1.0-beta.0.20200820084926-bcfa77a7a593 h1:e5nPFsAfRPe8ybHMKOpRiQ6G3akdpAec1++3UAK5Ny0= +github.com/tikv/pd v1.1.0-beta.0.20200820084926-bcfa77a7a593/go.mod h1:quwjWtCmawAvS+YdxtSKG08sEexLzkhQgAno59wW+lI= +>>>>>>> 449587a... 
*: using standard error to replace terror (#19425) github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= @@ -609,6 +635,7 @@ github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2t github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= @@ -619,6 +646,12 @@ github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Y github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +<<<<<<< HEAD +======= +github.com/zhangjinpeng1987/raft v0.0.0-20190624145930-deeb32d6553d/go.mod h1:1KDQ09J8MRHEtHze4at7BJZDW/doUAgkJ8w9KjEUhSo= +github.com/zhangjinpeng1987/raft v0.0.0-20200819064223-df31bb68a018 h1:T3OrqVdcH6z6SakR7WkECvGpdkfB0MAur/6zf66GPxQ= +github.com/zhangjinpeng1987/raft v0.0.0-20200819064223-df31bb68a018/go.mod h1:rTSjwgeYU2on64W50csWDlhyy0x9UYVYJUovHlYdt5s= +>>>>>>> 449587a... *: using standard error to replace terror (#19425) go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -699,6 +732,10 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +<<<<<<< HEAD +======= +golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +>>>>>>> 449587a... 
*: using standard error to replace terror (#19425) golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -771,6 +808,10 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +<<<<<<< HEAD +======= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +>>>>>>> 449587a... *: using standard error to replace terror (#19425) golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200819171115-d785dc25833f h1:KJuwZVtZBVzDmEDtB2zro9CXkD9O0dpCv4o2LHbQIAw= golang.org/x/sys v0.0.0-20200819171115-d785dc25833f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/kv/error_test.go b/kv/error_test.go index a5a608ac4849e..4fd73a0fdf013 100644 --- a/kv/error_test.go +++ b/kv/error_test.go @@ -37,7 +37,7 @@ func (s testErrorSuite) TestError(c *C) { ErrWriteConflictInTiDB, } for _, err := range kvErrs { - code := err.ToSQLError().Code + code := terror.ToSQLError(err).Code c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) } } diff --git a/metrics/server.go b/metrics/server.go index 250d35b68e061..c753f9612638b 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -14,8 +14,6 @@ package metrics import ( - "strconv" - "github.com/pingcap/errors" "github.com/pingcap/parser/terror" "github.com/prometheus/client_golang/prometheus" @@ -170,7 +168,7 @@ func ExecuteErrorToLabel(err error) string { err = errors.Cause(err) switch x := err.(type) { case *terror.Error: - return x.Class().String() + ":" + strconv.Itoa(int(x.Code())) + return string(x.RFCCode()) default: return "unknown" } diff --git a/planner/core/errors_test.go b/planner/core/errors_test.go index bce2a3563978d..2f17297484fe4 100644 --- a/planner/core/errors_test.go +++ b/planner/core/errors_test.go @@ -84,7 +84,7 @@ func (s testErrorSuite) TestError(c *C) { ErrAmbiguous, } for _, err := range kvErrs { - code := err.ToSQLError().Code + code := terror.ToSQLError(err).Code c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) } } diff --git a/privilege/privileges/cache.go b/privilege/privileges/cache.go index 9d7e2b4404fab..62284687fb929 100644 --- a/privilege/privileges/cache.go +++ b/privilege/privileges/cache.go @@ -337,7 +337,7 @@ func (p *MySQLPrivilege) LoadAll(ctx sessionctx.Context) error { func noSuchTable(err error) bool { e1 := errors.Cause(err) if e2, ok := e1.(*terror.Error); ok { - if e2.Code() == terror.ErrCode(mysql.ErrNoSuchTable) { + if terror.ErrCode(e2.Code()) == terror.ErrCode(mysql.ErrNoSuchTable) { return true } } diff --git a/server/conn.go b/server/conn.go index 0f541376f8781..406c90b9a746a 100644 --- a/server/conn.go +++ b/server/conn.go @@ -1008,12 +1008,12 @@ func (cc *clientConn) writeError(ctx context.Context, e error) error { ) originErr := errors.Cause(e) if te, ok = 
originErr.(*terror.Error); ok { - m = te.ToSQLError() + m = terror.ToSQLError(te) } else { e := errors.Cause(originErr) switch y := e.(type) { case *terror.Error: - m = y.ToSQLError() + m = terror.ToSQLError(y) default: m = mysql.NewErrf(mysql.ErrUnknown, "%s", e.Error()) } diff --git a/sessionctx/variable/sysvar_test.go b/sessionctx/variable/sysvar_test.go index eaf309db70f52..51c6eba70134a 100644 --- a/sessionctx/variable/sysvar_test.go +++ b/sessionctx/variable/sysvar_test.go @@ -89,6 +89,6 @@ func (*testSysVarSuite) TestError(c *C) { ErrUnsupportedIsolationLevel, } for _, err := range kvErrs { - c.Assert(err.ToSQLError().Code != mysql.ErrUnknown, IsTrue) + c.Assert(terror.ToSQLError(err).Code != mysql.ErrUnknown, IsTrue) } } diff --git a/store/mockstore/mocktikv/cop_handler_dag.go b/store/mockstore/mocktikv/cop_handler_dag.go index 086c797ee093f..6692fd59011a1 100644 --- a/store/mockstore/mocktikv/cop_handler_dag.go +++ b/store/mockstore/mocktikv/cop_handler_dag.go @@ -814,14 +814,14 @@ func toPBError(err error) *tipb.Error { perr := new(tipb.Error) switch x := err.(type) { case *terror.Error: - sqlErr := x.ToSQLError() + sqlErr := terror.ToSQLError(x) perr.Code = int32(sqlErr.Code) perr.Msg = sqlErr.Message default: e := errors.Cause(err) switch y := e.(type) { case *terror.Error: - tmp := y.ToSQLError() + tmp := terror.ToSQLError(y) perr.Code = int32(tmp.Code) perr.Msg = tmp.Message default: diff --git a/store/mockstore/unistore/cophandler/closure_exec.go b/store/mockstore/unistore/cophandler/closure_exec.go new file mode 100644 index 0000000000000..e0d72ed421ef7 --- /dev/null +++ b/store/mockstore/unistore/cophandler/closure_exec.go @@ -0,0 +1,877 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package cophandler + +import ( + "bytes" + "fmt" + "math" + "sort" + + "github.com/juju/errors" + "github.com/ngaut/unistore/tikv/dbreader" + "github.com/ngaut/unistore/tikv/mvcc" + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/parser/model" + "github.com/pingcap/parser/mysql" + "github.com/pingcap/parser/terror" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + mockpkg "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/rowcodec" + "github.com/pingcap/tipb/go-tipb" +) + +const chunkMaxRows = 1024 + +const ( + pkColNotExists = iota + pkColIsSigned + pkColIsUnsigned + pkColIsCommon +) + +func mapPkStatusToHandleStatus(pkStatus int) tablecodec.HandleStatus { + switch pkStatus { + case pkColNotExists: + return tablecodec.HandleNotNeeded + case pkColIsCommon | pkColIsSigned: + return tablecodec.HandleDefault + case pkColIsUnsigned: + return tablecodec.HandleIsUnsigned + } + return tablecodec.HandleDefault +} + +// buildClosureExecutor build a closureExecutor for the DAGRequest. 
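+// The executors from the request are flattened into a single closure whose processor callback handles each
+// scanned key/value pair, instead of chaining executor iterators.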
+// Currently the composition of executors are: +// tableScan|indexScan [selection] [topN | limit | agg] +func buildClosureExecutor(dagCtx *dagContext, dagReq *tipb.DAGRequest) (*closureExecutor, error) { + ce, err := newClosureExecutor(dagCtx, dagReq) + if err != nil { + return nil, errors.Trace(err) + } + executors := dagReq.Executors + scanExec := executors[0] + if scanExec.Tp == tipb.ExecType_TypeTableScan { + ce.processor = &tableScanProcessor{closureExecutor: ce} + } else { + ce.processor = &indexScanProcessor{closureExecutor: ce} + } + if len(executors) == 1 { + return ce, nil + } + if secondExec := executors[1]; secondExec.Tp == tipb.ExecType_TypeSelection { + ce.selectionCtx.conditions, err = convertToExprs(ce.sc, ce.fieldTps, secondExec.Selection.Conditions) + if err != nil { + return nil, errors.Trace(err) + } + ce.processor = &selectionProcessor{closureExecutor: ce} + } + lastExecutor := executors[len(executors)-1] + switch lastExecutor.Tp { + case tipb.ExecType_TypeLimit: + ce.limit = int(lastExecutor.Limit.Limit) + case tipb.ExecType_TypeTopN: + err = buildTopNProcessor(ce, lastExecutor.TopN) + case tipb.ExecType_TypeAggregation: + err = buildHashAggProcessor(ce, dagCtx, lastExecutor.Aggregation) + case tipb.ExecType_TypeStreamAgg: + err = buildStreamAggProcessor(ce, dagCtx, executors) + case tipb.ExecType_TypeSelection: + ce.processor = &selectionProcessor{closureExecutor: ce} + default: + panic("unknown executor type " + lastExecutor.Tp.String()) + } + if err != nil { + return nil, err + } + return ce, nil +} + +func convertToExprs(sc *stmtctx.StatementContext, fieldTps []*types.FieldType, pbExprs []*tipb.Expr) ([]expression.Expression, error) { + exprs := make([]expression.Expression, 0, len(pbExprs)) + for _, expr := range pbExprs { + e, err := expression.PBToExpr(expr, fieldTps, sc) + if err != nil { + return nil, errors.Trace(err) + } + exprs = append(exprs, e) + } + return exprs, nil +} + +func newClosureExecutor(dagCtx *dagContext, dagReq *tipb.DAGRequest) (*closureExecutor, error) { + e := &closureExecutor{ + dagContext: dagCtx, + outputOff: dagReq.OutputOffsets, + startTS: dagCtx.startTS, + limit: math.MaxInt64, + } + seCtx := mockpkg.NewContext() + seCtx.GetSessionVars().StmtCtx = e.sc + e.seCtx = seCtx + executors := dagReq.Executors + scanExec := executors[0] + switch scanExec.Tp { + case tipb.ExecType_TypeTableScan: + tblScan := executors[0].TblScan + e.unique = true + e.scanCtx.desc = tblScan.Desc + case tipb.ExecType_TypeIndexScan: + idxScan := executors[0].IdxScan + e.unique = idxScan.GetUnique() + e.scanCtx.desc = idxScan.Desc + e.initIdxScanCtx(idxScan) + default: + panic(fmt.Sprintf("unknown first executor type %s", executors[0].Tp)) + } + ranges, err := extractKVRanges(dagCtx.dbReader.StartKey, dagCtx.dbReader.EndKey, dagCtx.keyRanges, e.scanCtx.desc) + if err != nil { + return nil, errors.Trace(err) + } + if dagReq.GetCollectRangeCounts() { + e.counts = make([]int64, len(ranges)) + } + e.kvRanges = ranges + e.scanCtx.chk = chunk.NewChunkWithCapacity(e.fieldTps, 32) + if e.idxScanCtx == nil { + e.scanCtx.decoder, err = e.evalContext.newRowDecoder() + if err != nil { + return nil, errors.Trace(err) + } + } + return e, nil +} + +func (e *closureExecutor) initIdxScanCtx(idxScan *tipb.IndexScan) { + e.idxScanCtx = new(idxScanCtx) + e.idxScanCtx.columnLen = len(e.columnInfos) + e.idxScanCtx.pkStatus = pkColNotExists + + e.idxScanCtx.primaryColumnIds = idxScan.PrimaryColumnIds + + lastColumn := e.columnInfos[len(e.columnInfos)-1] + + if 
len(e.idxScanCtx.primaryColumnIds) == 0 { + if lastColumn.GetPkHandle() { + if mysql.HasUnsignedFlag(uint(lastColumn.GetFlag())) { + e.idxScanCtx.pkStatus = pkColIsUnsigned + } else { + e.idxScanCtx.pkStatus = pkColIsSigned + } + e.idxScanCtx.columnLen-- + } else if lastColumn.ColumnId == model.ExtraHandleID { + e.idxScanCtx.pkStatus = pkColIsSigned + e.idxScanCtx.columnLen-- + } + } else { + e.idxScanCtx.pkStatus = pkColIsCommon + e.idxScanCtx.columnLen -= len(e.idxScanCtx.primaryColumnIds) + } + + colInfos := make([]rowcodec.ColInfo, len(e.columnInfos)) + for i := range colInfos { + col := e.columnInfos[i] + colInfos[i] = rowcodec.ColInfo{ + ID: col.ColumnId, + Ft: e.fieldTps[i], + IsPKHandle: col.GetPkHandle(), + } + } + e.idxScanCtx.colInfos = colInfos + + colIDs := make(map[int64]int, len(colInfos)) + for i, col := range colInfos[:e.idxScanCtx.columnLen] { + colIDs[col.ID] = i + } + e.scanCtx.newCollationIds = colIDs + + // We don't need to decode handle here, and colIDs >= 0 always. + e.scanCtx.newCollationRd = rowcodec.NewByteDecoder(colInfos[:e.idxScanCtx.columnLen], []int64{-1}, nil, nil) +} + +func isCountAgg(pbAgg *tipb.Aggregation) bool { + if len(pbAgg.AggFunc) == 1 && len(pbAgg.GroupBy) == 0 { + aggFunc := pbAgg.AggFunc[0] + if aggFunc.Tp == tipb.ExprType_Count && len(aggFunc.Children) == 1 { + return true + } + } + return false +} + +func tryBuildCountProcessor(e *closureExecutor, executors []*tipb.Executor) (bool, error) { + if len(executors) > 2 { + return false, nil + } + agg := executors[1].Aggregation + if !isCountAgg(agg) { + return false, nil + } + child := agg.AggFunc[0].Children[0] + switch child.Tp { + case tipb.ExprType_ColumnRef: + _, idx, err := codec.DecodeInt(child.Val) + if err != nil { + return false, errors.Trace(err) + } + e.aggCtx.col = e.columnInfos[idx] + if e.aggCtx.col.PkHandle { + e.processor = &countStarProcessor{skipVal: skipVal(true), closureExecutor: e} + } else { + e.processor = &countColumnProcessor{closureExecutor: e} + } + case tipb.ExprType_Null, tipb.ExprType_ScalarFunc: + return false, nil + default: + e.processor = &countStarProcessor{skipVal: skipVal(true), closureExecutor: e} + } + return true, nil +} + +func buildTopNProcessor(e *closureExecutor, topN *tipb.TopN) error { + heap, conds, err := getTopNInfo(e.evalContext, topN) + if err != nil { + return errors.Trace(err) + } + + ctx := &topNCtx{ + heap: heap, + orderByExprs: conds, + sortRow: e.newTopNSortRow(), + } + + e.topNCtx = ctx + e.processor = &topNProcessor{closureExecutor: e} + return nil +} + +func buildHashAggProcessor(e *closureExecutor, ctx *dagContext, agg *tipb.Aggregation) error { + aggs, groupBys, err := getAggInfo(ctx, agg) + if err != nil { + return err + } + e.processor = &hashAggProcessor{ + closureExecutor: e, + aggExprs: aggs, + groupByExprs: groupBys, + groups: map[string]struct{}{}, + groupKeys: nil, + aggCtxsMap: map[string][]*aggregation.AggEvaluateContext{}, + } + return nil +} + +func buildStreamAggProcessor(e *closureExecutor, ctx *dagContext, executors []*tipb.Executor) error { + ok, err := tryBuildCountProcessor(e, executors) + if err != nil || ok { + return err + } + return buildHashAggProcessor(e, ctx, executors[len(executors)-1].Aggregation) +} + +// closureExecutor is an execution engine that flatten the DAGRequest.Executors to a single closure `processor` that +// process key/value pairs. We can define many closures for different kinds of requests, try to use the specially +// optimized one for some frequently used query. 
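+//
+// execute() drives the closure: for each key range it either point-gets a single unique key or scans the
+// range, feeds every key/value pair to the processor, and finally calls processor.Finish() to emit any
+// buffered output.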
+type closureExecutor struct { + *dagContext + outputOff []uint32 + seCtx sessionctx.Context + kvRanges []kv.KeyRange + startTS uint64 + ignoreLock bool + lockChecked bool + scanCtx scanCtx + idxScanCtx *idxScanCtx + selectionCtx selectionCtx + aggCtx aggCtx + topNCtx *topNCtx + + rowCount int + unique bool + limit int + + oldChunks []tipb.Chunk + oldRowBuf []byte + processor closureProcessor + + counts []int64 +} + +type closureProcessor interface { + dbreader.ScanProcessor + Finish() error +} + +type scanCtx struct { + count int + limit int + chk *chunk.Chunk + desc bool + decoder *rowcodec.ChunkDecoder + primaryColumnIds []int64 + + newCollationRd *rowcodec.BytesDecoder + newCollationIds map[int64]int +} + +type idxScanCtx struct { + pkStatus int + columnLen int + colInfos []rowcodec.ColInfo + primaryColumnIds []int64 +} + +type aggCtx struct { + col *tipb.ColumnInfo +} + +type selectionCtx struct { + conditions []expression.Expression +} + +type topNCtx struct { + heap *topNHeap + orderByExprs []expression.Expression + sortRow *sortRow +} + +func (e *closureExecutor) execute() ([]tipb.Chunk, error) { + err := e.checkRangeLock() + if err != nil { + return nil, errors.Trace(err) + } + dbReader := e.dbReader + for i, ran := range e.kvRanges { + if e.isPointGetRange(ran) { + val, err := dbReader.Get(ran.StartKey, e.startTS) + if err != nil { + return nil, errors.Trace(err) + } + if len(val) == 0 { + continue + } + if e.counts != nil { + e.counts[i]++ + } + err = e.processor.Process(ran.StartKey, val) + if err != nil { + return nil, errors.Trace(err) + } + } else { + oldCnt := e.rowCount + if e.scanCtx.desc { + err = dbReader.ReverseScan(ran.StartKey, ran.EndKey, math.MaxInt64, e.startTS, e.processor) + } else { + err = dbReader.Scan(ran.StartKey, ran.EndKey, math.MaxInt64, e.startTS, e.processor) + } + delta := int64(e.rowCount - oldCnt) + if e.counts != nil { + e.counts[i] += delta + } + if err != nil { + return nil, errors.Trace(err) + } + } + if e.rowCount == e.limit { + break + } + } + err = e.processor.Finish() + return e.oldChunks, err +} + +func (e *closureExecutor) isPointGetRange(ran kv.KeyRange) bool { + if len(e.primaryCols) > 0 { + return false + } + return e.unique && ran.IsPoint() +} + +func (e *closureExecutor) checkRangeLock() error { + if !e.ignoreLock && !e.lockChecked { + for _, ran := range e.kvRanges { + err := e.checkRangeLockForRange(ran) + if err != nil { + return err + } + } + e.lockChecked = true + } + return nil +} + +func (e *closureExecutor) checkRangeLockForRange(ran kv.KeyRange) error { + it := e.lockStore.NewIterator() + for it.Seek(ran.StartKey); it.Valid(); it.Next() { + if exceedEndKey(it.Key(), ran.EndKey) { + break + } + lock := mvcc.DecodeLock(it.Value()) + err := checkLock(lock, it.Key(), e.startTS, e.resolvedLocks) + if err != nil { + return err + } + } + return nil +} + +type countStarProcessor struct { + skipVal + *closureExecutor +} + +// countStarProcess is used for `count(*)`. +func (e *countStarProcessor) Process(key, value []byte) error { + e.rowCount++ + return nil +} + +func (e *countStarProcessor) Finish() error { + return e.countFinish() +} + +// countFinish is used for `count(*)`. 
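+// It encodes the accumulated row count as a single-datum row and appends it to the output chunks.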
+func (e *closureExecutor) countFinish() error { + d := types.NewIntDatum(int64(e.rowCount)) + rowData, err := codec.EncodeValue(e.sc, nil, d) + if err != nil { + return errors.Trace(err) + } + e.oldChunks = appendRow(e.oldChunks, rowData, 0) + return nil +} + +type countColumnProcessor struct { + skipVal + *closureExecutor +} + +func (e *countColumnProcessor) Process(key, value []byte) error { + if e.idxScanCtx != nil { + values, _, err := tablecodec.CutIndexKeyNew(key, e.idxScanCtx.columnLen) + if err != nil { + return errors.Trace(err) + } + if values[0][0] != codec.NilFlag { + e.rowCount++ + } + } else { + // Since the handle value doesn't affect the count result, we don't need to decode the handle. + isNull, err := e.scanCtx.decoder.ColumnIsNull(value, e.aggCtx.col.ColumnId, e.aggCtx.col.DefaultVal) + if err != nil { + return errors.Trace(err) + } + if !isNull { + e.rowCount++ + } + } + return nil +} + +func (e *countColumnProcessor) Finish() error { + return e.countFinish() +} + +type skipVal bool + +func (s skipVal) SkipValue() bool { + return bool(s) +} + +type tableScanProcessor struct { + skipVal + *closureExecutor +} + +func (e *tableScanProcessor) Process(key, value []byte) error { + if e.rowCount == e.limit { + return dbreader.ScanBreak + } + e.rowCount++ + err := e.tableScanProcessCore(key, value) + if e.scanCtx.chk.NumRows() == chunkMaxRows { + err = e.chunkToOldChunk(e.scanCtx.chk) + } + return err +} + +func (e *tableScanProcessor) Finish() error { + return e.scanFinish() +} + +func (e *closureExecutor) processCore(key, value []byte) error { + if e.idxScanCtx != nil { + return e.indexScanProcessCore(key, value) + } + return e.tableScanProcessCore(key, value) +} + +func (e *closureExecutor) hasSelection() bool { + return len(e.selectionCtx.conditions) > 0 +} + +func (e *closureExecutor) processSelection() (gotRow bool, err error) { + chk := e.scanCtx.chk + row := chk.GetRow(chk.NumRows() - 1) + gotRow = true + for _, expr := range e.selectionCtx.conditions { + wc := e.sc.WarningCount() + d, err := expr.Eval(row) + if err != nil { + return false, errors.Trace(err) + } + + if d.IsNull() { + gotRow = false + } else { + isBool, err := d.ToBool(e.sc) + isBool, err = expression.HandleOverflowOnSelection(e.sc, isBool, err) + if err != nil { + return false, errors.Trace(err) + } + gotRow = isBool != 0 + } + if !gotRow { + if e.sc.WarningCount() > wc { + // Deep-copy error object here, because the data it referenced is going to be truncated. 
+ warns := e.sc.TruncateWarnings(int(wc)) + for i, warn := range warns { + warns[i].Err = e.copyError(warn.Err) + } + e.sc.AppendWarnings(warns) + } + chk.TruncateTo(chk.NumRows() - 1) + break + } + } + return +} + +func (e *closureExecutor) copyError(err error) error { + if err == nil { + return nil + } + var ret error + x := errors.Cause(err) + switch y := x.(type) { + case *terror.Error: + ret = terror.ToSQLError(y) + default: + ret = errors.New(err.Error()) + } + return ret +} + +func (e *closureExecutor) tableScanProcessCore(key, value []byte) error { + handle, err := tablecodec.DecodeRowKey(key) + if err != nil { + return errors.Trace(err) + } + err = e.scanCtx.decoder.DecodeToChunk(value, handle, e.scanCtx.chk) + if err != nil { + return errors.Trace(err) + } + return nil +} + +func (e *closureExecutor) scanFinish() error { + return e.chunkToOldChunk(e.scanCtx.chk) +} + +type indexScanProcessor struct { + skipVal + *closureExecutor +} + +func (e *indexScanProcessor) Process(key, value []byte) error { + if e.rowCount == e.limit { + return dbreader.ScanBreak + } + e.rowCount++ + err := e.indexScanProcessCore(key, value) + if e.scanCtx.chk.NumRows() == chunkMaxRows { + err = e.chunkToOldChunk(e.scanCtx.chk) + } + return err +} + +func (e *indexScanProcessor) Finish() error { + return e.scanFinish() +} + +func (e *closureExecutor) indexScanProcessCore(key, value []byte) error { + handleStatus := mapPkStatusToHandleStatus(e.idxScanCtx.pkStatus) + restoredCols := make([]rowcodec.ColInfo, 0, len(e.idxScanCtx.colInfos)) + for _, c := range e.idxScanCtx.colInfos { + if c.ID != -1 { + restoredCols = append(restoredCols, c) + } + } + values, err := tablecodec.DecodeIndexKV(key, value, e.idxScanCtx.columnLen, handleStatus, restoredCols) + if err != nil { + return err + } + chk := e.scanCtx.chk + decoder := codec.NewDecoder(chk, e.sc.TimeZone) + for i, colVal := range values { + if i < len(e.fieldTps) { + _, err = decoder.DecodeOne(colVal, i, e.fieldTps[i]) + if err != nil { + return errors.Trace(err) + } + } + } + return nil +} + +func (e *closureExecutor) chunkToOldChunk(chk *chunk.Chunk) error { + var oldRow []types.Datum + for i := 0; i < chk.NumRows(); i++ { + oldRow = oldRow[:0] + for _, outputOff := range e.outputOff { + d := chk.GetRow(i).GetDatum(int(outputOff), e.fieldTps[outputOff]) + oldRow = append(oldRow, d) + } + var err error + e.oldRowBuf, err = codec.EncodeValue(e.sc, e.oldRowBuf[:0], oldRow...) 
+ if err != nil { + return errors.Trace(err) + } + e.oldChunks = appendRow(e.oldChunks, e.oldRowBuf, i) + } + chk.Reset() + return nil +} + +type selectionProcessor struct { + skipVal + *closureExecutor +} + +func (e *selectionProcessor) Process(key, value []byte) error { + if e.rowCount == e.limit { + return dbreader.ScanBreak + } + err := e.processCore(key, value) + if err != nil { + return errors.Trace(err) + } + gotRow, err := e.processSelection() + if err != nil { + return err + } + if gotRow { + e.rowCount++ + if e.scanCtx.chk.NumRows() == chunkMaxRows { + err = e.chunkToOldChunk(e.scanCtx.chk) + } + } + return err +} + +func (e *selectionProcessor) Finish() error { + return e.scanFinish() +} + +type topNProcessor struct { + skipVal + *closureExecutor +} + +func (e *topNProcessor) Process(key, value []byte) (err error) { + if err = e.processCore(key, value); err != nil { + return err + } + if e.hasSelection() { + gotRow, err1 := e.processSelection() + if err1 != nil || !gotRow { + return err1 + } + } + + ctx := e.topNCtx + row := e.scanCtx.chk.GetRow(0) + for i, expr := range ctx.orderByExprs { + d, err := expr.Eval(row) + if err != nil { + return errors.Trace(err) + } + d.Copy(&ctx.sortRow.key[i]) + } + e.scanCtx.chk.Reset() + + if ctx.heap.tryToAddRow(ctx.sortRow) { + ctx.sortRow.data[0] = safeCopy(key) + ctx.sortRow.data[1] = safeCopy(value) + ctx.sortRow = e.newTopNSortRow() + } + return errors.Trace(ctx.heap.err) +} + +func (e *closureExecutor) newTopNSortRow() *sortRow { + return &sortRow{ + key: make([]types.Datum, len(e.evalContext.columnInfos)), + data: make([][]byte, 2), + } +} + +func (e *topNProcessor) Finish() error { + ctx := e.topNCtx + sort.Sort(&ctx.heap.topNSorter) + chk := e.scanCtx.chk + for _, row := range ctx.heap.rows { + err := e.processCore(row.data[0], row.data[1]) + if err != nil { + return err + } + if chk.NumRows() == chunkMaxRows { + if err = e.chunkToOldChunk(chk); err != nil { + return errors.Trace(err) + } + } + } + return e.chunkToOldChunk(chk) +} + +type hashAggProcessor struct { + skipVal + *closureExecutor + + aggExprs []aggregation.Aggregation + groupByExprs []expression.Expression + groups map[string]struct{} + groupKeys [][]byte + aggCtxsMap map[string][]*aggregation.AggEvaluateContext +} + +func (e *hashAggProcessor) Process(key, value []byte) (err error) { + err = e.processCore(key, value) + if err != nil { + return err + } + if e.hasSelection() { + gotRow, err1 := e.processSelection() + if err1 != nil || !gotRow { + return err1 + } + } + row := e.scanCtx.chk.GetRow(e.scanCtx.chk.NumRows() - 1) + gk, err := e.getGroupKey(row) + if _, ok := e.groups[string(gk)]; !ok { + e.groups[string(gk)] = struct{}{} + e.groupKeys = append(e.groupKeys, gk) + } + // Update aggregate expressions. + aggCtxs := e.getContexts(gk) + for i, agg := range e.aggExprs { + err = agg.Update(aggCtxs[i], e.sc, row) + if err != nil { + return errors.Trace(err) + } + } + e.scanCtx.chk.Reset() + return nil +} + +func (e *hashAggProcessor) getGroupKey(row chunk.Row) ([]byte, error) { + length := len(e.groupByExprs) + if length == 0 { + return nil, nil + } + key := make([]byte, 0, 32) + for _, item := range e.groupByExprs { + v, err := item.Eval(row) + if err != nil { + return nil, errors.Trace(err) + } + b, err := codec.EncodeValue(e.sc, nil, v) + if err != nil { + return nil, errors.Trace(err) + } + key = append(key, b...) 
+ } + return key, nil +} + +func (e *hashAggProcessor) getContexts(groupKey []byte) []*aggregation.AggEvaluateContext { + aggCtxs, ok := e.aggCtxsMap[string(groupKey)] + if !ok { + aggCtxs = make([]*aggregation.AggEvaluateContext, 0, len(e.aggExprs)) + for _, agg := range e.aggExprs { + aggCtxs = append(aggCtxs, agg.CreateContext(e.sc)) + } + e.aggCtxsMap[string(groupKey)] = aggCtxs + } + return aggCtxs +} + +func (e *hashAggProcessor) Finish() error { + for i, gk := range e.groupKeys { + aggCtxs := e.getContexts(gk) + e.oldRowBuf = e.oldRowBuf[:0] + for i, agg := range e.aggExprs { + partialResults := agg.GetPartialResult(aggCtxs[i]) + var err error + e.oldRowBuf, err = codec.EncodeValue(e.sc, e.oldRowBuf, partialResults...) + if err != nil { + return err + } + } + e.oldRowBuf = append(e.oldRowBuf, gk...) + e.oldChunks = appendRow(e.oldChunks, e.oldRowBuf, i) + } + return nil +} + +func safeCopy(b []byte) []byte { + return append([]byte{}, b...) +} + +func checkLock(lock mvcc.MvccLock, key []byte, startTS uint64, resolved []uint64) error { + if isResolved(startTS, resolved) { + return nil + } + lockVisible := lock.StartTS < startTS + isWriteLock := lock.Op == uint8(kvrpcpb.Op_Put) || lock.Op == uint8(kvrpcpb.Op_Del) + isPrimaryGet := startTS == math.MaxUint64 && bytes.Equal(lock.Primary, key) + if lockVisible && isWriteLock && !isPrimaryGet { + return BuildLockErr(key, lock.Primary, lock.StartTS, uint64(lock.TTL), lock.Op) + } + return nil +} + +func isResolved(startTS uint64, resolved []uint64) bool { + for _, v := range resolved { + if startTS == v { + return true + } + } + return false +} + +func exceedEndKey(current, endKey []byte) bool { + if len(endKey) == 0 { + return false + } + return bytes.Compare(current, endKey) >= 0 +} diff --git a/store/mockstore/unistore/cophandler/cop_handler.go b/store/mockstore/unistore/cophandler/cop_handler.go new file mode 100644 index 0000000000000..787f3c1264ef9 --- /dev/null +++ b/store/mockstore/unistore/cophandler/cop_handler.go @@ -0,0 +1,418 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package cophandler + +import ( + "bytes" + "fmt" + "time" + + "github.com/golang/protobuf/proto" + "github.com/ngaut/unistore/lockstore" + "github.com/ngaut/unistore/tikv/dbreader" + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/coprocessor" + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/parser/model" + "github.com/pingcap/parser/mysql" + "github.com/pingcap/parser/terror" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/collate" + "github.com/pingcap/tidb/util/rowcodec" + "github.com/pingcap/tipb/go-tipb" +) + +// HandleCopRequest handles coprocessor request. 
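+// It dispatches on req.Tp: DAG, Analyze and Checksum requests are handled, and any other type yields a
+// response whose OtherError names the unsupported type. For example (illustrative call; a real request also
+// carries Ranges, StartTs and Context, which the DAG path reads):
+//
+//	resp := HandleCopRequest(reader, lockStore, req) // req.Data carries a marshalled tipb.DAGRequest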
+func HandleCopRequest(dbReader *dbreader.DBReader, lockStore *lockstore.MemStore, req *coprocessor.Request) *coprocessor.Response { + switch req.Tp { + case kv.ReqTypeDAG: + return handleCopDAGRequest(dbReader, lockStore, req) + case kv.ReqTypeAnalyze: + return handleCopAnalyzeRequest(dbReader, req) + case kv.ReqTypeChecksum: + return handleCopChecksumRequest(dbReader, req) + } + return &coprocessor.Response{OtherError: fmt.Sprintf("unsupported request type %d", req.GetTp())} +} + +type dagContext struct { + *evalContext + dbReader *dbreader.DBReader + lockStore *lockstore.MemStore + resolvedLocks []uint64 + dagReq *tipb.DAGRequest + keyRanges []*coprocessor.KeyRange + startTS uint64 +} + +// handleCopDAGRequest handles coprocessor DAG request. +func handleCopDAGRequest(dbReader *dbreader.DBReader, lockStore *lockstore.MemStore, req *coprocessor.Request) *coprocessor.Response { + startTime := time.Now() + resp := &coprocessor.Response{} + dagCtx, dagReq, err := buildDAG(dbReader, lockStore, req) + if err != nil { + resp.OtherError = err.Error() + return resp + } + closureExec, err := buildClosureExecutor(dagCtx, dagReq) + if err != nil { + return buildResp(nil, nil, dagReq, err, dagCtx.sc.GetWarnings(), time.Since(startTime)) + } + chunks, err := closureExec.execute() + return buildResp(chunks, closureExec.counts, dagReq, err, dagCtx.sc.GetWarnings(), time.Since(startTime)) +} + +func buildDAG(reader *dbreader.DBReader, lockStore *lockstore.MemStore, req *coprocessor.Request) (*dagContext, *tipb.DAGRequest, error) { + if len(req.Ranges) == 0 { + return nil, nil, errors.New("request range is null") + } + if req.GetTp() != kv.ReqTypeDAG { + return nil, nil, errors.Errorf("unsupported request type %d", req.GetTp()) + } + + dagReq := new(tipb.DAGRequest) + err := proto.Unmarshal(req.Data, dagReq) + if err != nil { + return nil, nil, errors.Trace(err) + } + sc := flagsToStatementContext(dagReq.Flags) + sc.TimeZone = time.FixedZone("UTC", int(dagReq.TimeZoneOffset)) + ctx := &dagContext{ + evalContext: &evalContext{sc: sc}, + dbReader: reader, + lockStore: lockStore, + dagReq: dagReq, + keyRanges: req.Ranges, + startTS: req.StartTs, + resolvedLocks: req.Context.ResolvedLocks, + } + scanExec := dagReq.Executors[0] + if scanExec.Tp == tipb.ExecType_TypeTableScan { + ctx.setColumnInfo(scanExec.TblScan.Columns) + ctx.primaryCols = scanExec.TblScan.PrimaryColumnIds + } else { + ctx.setColumnInfo(scanExec.IdxScan.Columns) + } + return ctx, dagReq, err +} + +func getAggInfo(ctx *dagContext, pbAgg *tipb.Aggregation) ([]aggregation.Aggregation, []expression.Expression, error) { + length := len(pbAgg.AggFunc) + aggs := make([]aggregation.Aggregation, 0, length) + var err error + for _, expr := range pbAgg.AggFunc { + var aggExpr aggregation.Aggregation + aggExpr, err = aggregation.NewDistAggFunc(expr, ctx.fieldTps, ctx.sc) + if err != nil { + return nil, nil, errors.Trace(err) + } + aggs = append(aggs, aggExpr) + } + groupBys, err := convertToExprs(ctx.sc, ctx.fieldTps, pbAgg.GetGroupBy()) + if err != nil { + return nil, nil, errors.Trace(err) + } + + return aggs, groupBys, nil +} + +func getTopNInfo(ctx *evalContext, topN *tipb.TopN) (heap *topNHeap, conds []expression.Expression, err error) { + pbConds := make([]*tipb.Expr, len(topN.OrderBy)) + for i, item := range topN.OrderBy { + pbConds[i] = item.Expr + } + heap = &topNHeap{ + totalCount: int(topN.Limit), + topNSorter: topNSorter{ + orderByItems: topN.OrderBy, + sc: ctx.sc, + }, + } + if conds, err = convertToExprs(ctx.sc, ctx.fieldTps, pbConds); 
err != nil { + return nil, nil, errors.Trace(err) + } + + return heap, conds, nil +} + +type evalContext struct { + colIDs map[int64]int + columnInfos []*tipb.ColumnInfo + fieldTps []*types.FieldType + primaryCols []int64 + sc *stmtctx.StatementContext +} + +func (e *evalContext) setColumnInfo(cols []*tipb.ColumnInfo) { + e.columnInfos = make([]*tipb.ColumnInfo, len(cols)) + copy(e.columnInfos, cols) + + e.colIDs = make(map[int64]int, len(e.columnInfos)) + e.fieldTps = make([]*types.FieldType, 0, len(e.columnInfos)) + for i, col := range e.columnInfos { + ft := fieldTypeFromPBColumn(col) + e.fieldTps = append(e.fieldTps, ft) + e.colIDs[col.GetColumnId()] = i + } +} + +func (e *evalContext) newRowDecoder() (*rowcodec.ChunkDecoder, error) { + var ( + pkCols []int64 + cols = make([]rowcodec.ColInfo, 0, len(e.columnInfos)) + ) + for i := range e.columnInfos { + info := e.columnInfos[i] + ft := e.fieldTps[i] + col := rowcodec.ColInfo{ + ID: info.ColumnId, + Ft: ft, + IsPKHandle: info.PkHandle, + } + cols = append(cols, col) + if info.PkHandle { + pkCols = append(pkCols, info.ColumnId) + } + } + if len(pkCols) == 0 { + if e.primaryCols != nil { + pkCols = e.primaryCols + } else { + pkCols = []int64{0} + } + } + def := func(i int, chk *chunk.Chunk) error { + info := e.columnInfos[i] + if info.PkHandle || len(info.DefaultVal) == 0 { + chk.AppendNull(i) + return nil + } + decoder := codec.NewDecoder(chk, e.sc.TimeZone) + _, err := decoder.DecodeOne(info.DefaultVal, i, e.fieldTps[i]) + if err != nil { + return err + } + return nil + } + return rowcodec.NewChunkDecoder(cols, pkCols, def, e.sc.TimeZone), nil +} + +// decodeRelatedColumnVals decodes data to Datum slice according to the row information. +func (e *evalContext) decodeRelatedColumnVals(relatedColOffsets []int, value [][]byte, row []types.Datum) error { + var err error + for _, offset := range relatedColOffsets { + row[offset], err = tablecodec.DecodeColumnValue(value[offset], e.fieldTps[offset], e.sc.TimeZone) + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +// flagsToStatementContext creates a StatementContext from a `tipb.SelectRequest.Flags`. +func flagsToStatementContext(flags uint64) *stmtctx.StatementContext { + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = (flags & model.FlagIgnoreTruncate) > 0 + sc.TruncateAsWarning = (flags & model.FlagTruncateAsWarning) > 0 + sc.InInsertStmt = (flags & model.FlagInInsertStmt) > 0 + sc.InSelectStmt = (flags & model.FlagInSelectStmt) > 0 + sc.InDeleteStmt = (flags & model.FlagInUpdateOrDeleteStmt) > 0 + sc.OverflowAsWarning = (flags & model.FlagOverflowAsWarning) > 0 + sc.IgnoreZeroInDate = (flags & model.FlagIgnoreZeroInDate) > 0 + sc.DividedByZeroAsWarning = (flags & model.FlagDividedByZeroAsWarning) > 0 + return sc +} + +// ErrLocked is returned when trying to Read/Write on a locked key. Client should +// backoff or cleanup the lock then retry. +type ErrLocked struct { + Key []byte + Primary []byte + StartTS uint64 + TTL uint64 + LockType uint8 +} + +// BuildLockErr generates ErrKeyLocked objects +func BuildLockErr(key []byte, primaryKey []byte, startTS uint64, TTL uint64, lockType uint8) *ErrLocked { + errLocked := &ErrLocked{ + Key: key, + Primary: primaryKey, + StartTS: startTS, + TTL: TTL, + LockType: lockType, + } + return errLocked +} + +// Error formats the lock to a string. 
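+// It also satisfies the error interface, so an *ErrLocked surfaced during a scan is recognized via
+// errors.Cause in buildResp and converted into a kvrpcpb.LockInfo on the response.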
+func (e *ErrLocked) Error() string { + return fmt.Sprintf("key is locked, key: %q, Type: %v, primary: %q, startTS: %v", e.Key, e.LockType, e.Primary, e.StartTS) +} + +func buildResp(chunks []tipb.Chunk, counts []int64, dagReq *tipb.DAGRequest, err error, warnings []stmtctx.SQLWarn, dur time.Duration) *coprocessor.Response { + resp := &coprocessor.Response{} + selResp := &tipb.SelectResponse{ + Error: toPBError(err), + Chunks: chunks, + OutputCounts: counts, + } + if dagReq.CollectExecutionSummaries != nil && *dagReq.CollectExecutionSummaries { + execSummary := make([]*tipb.ExecutorExecutionSummary, len(dagReq.Executors)) + for i := range execSummary { + // TODO: Add real executor execution summary information. + execSummary[i] = &tipb.ExecutorExecutionSummary{} + } + selResp.ExecutionSummaries = execSummary + } + if len(warnings) > 0 { + selResp.Warnings = make([]*tipb.Error, 0, len(warnings)) + for i := range warnings { + selResp.Warnings = append(selResp.Warnings, toPBError(warnings[i].Err)) + } + } + if locked, ok := errors.Cause(err).(*ErrLocked); ok { + resp.Locked = &kvrpcpb.LockInfo{ + Key: locked.Key, + PrimaryLock: locked.Primary, + LockVersion: locked.StartTS, + LockTtl: locked.TTL, + } + } + resp.ExecDetails = &kvrpcpb.ExecDetails{ + HandleTime: &kvrpcpb.HandleTime{ProcessMs: int64(dur / time.Millisecond)}, + } + data, err := proto.Marshal(selResp) + if err != nil { + resp.OtherError = err.Error() + return resp + } + resp.Data = data + return resp +} + +func toPBError(err error) *tipb.Error { + if err == nil { + return nil + } + perr := new(tipb.Error) + e := errors.Cause(err) + switch y := e.(type) { + case *terror.Error: + tmp := terror.ToSQLError(y) + perr.Code = int32(tmp.Code) + perr.Msg = tmp.Message + case *mysql.SQLError: + perr.Code = int32(y.Code) + perr.Msg = y.Message + default: + perr.Code = int32(1) + perr.Msg = err.Error() + } + return perr +} + +// extractKVRanges extracts kv.KeyRanges slice from a SelectRequest. 
+func extractKVRanges(startKey, endKey []byte, keyRanges []*coprocessor.KeyRange, descScan bool) (kvRanges []kv.KeyRange, err error) { + kvRanges = make([]kv.KeyRange, 0, len(keyRanges)) + for _, kran := range keyRanges { + if bytes.Compare(kran.GetStart(), kran.GetEnd()) >= 0 { + err = errors.Errorf("invalid range, start should be smaller than end: %v %v", kran.GetStart(), kran.GetEnd()) + return + } + + upperKey := kran.GetEnd() + if bytes.Compare(upperKey, startKey) <= 0 { + continue + } + lowerKey := kran.GetStart() + if len(endKey) != 0 && bytes.Compare(lowerKey, endKey) >= 0 { + break + } + r := kv.KeyRange{ + StartKey: kv.Key(maxStartKey(lowerKey, startKey)), + EndKey: kv.Key(minEndKey(upperKey, endKey)), + } + kvRanges = append(kvRanges, r) + } + if descScan { + reverseKVRanges(kvRanges) + } + return +} + +func reverseKVRanges(kvRanges []kv.KeyRange) { + for i := 0; i < len(kvRanges)/2; i++ { + j := len(kvRanges) - i - 1 + kvRanges[i], kvRanges[j] = kvRanges[j], kvRanges[i] + } +} + +func maxStartKey(rangeStartKey kv.Key, regionStartKey []byte) []byte { + if bytes.Compare([]byte(rangeStartKey), regionStartKey) > 0 { + return []byte(rangeStartKey) + } + return regionStartKey +} + +func minEndKey(rangeEndKey kv.Key, regionEndKey []byte) []byte { + if len(regionEndKey) == 0 || bytes.Compare([]byte(rangeEndKey), regionEndKey) < 0 { + return []byte(rangeEndKey) + } + return regionEndKey +} + +const rowsPerChunk = 64 + +func appendRow(chunks []tipb.Chunk, data []byte, rowCnt int) []tipb.Chunk { + if rowCnt%rowsPerChunk == 0 { + chunks = append(chunks, tipb.Chunk{}) + } + cur := &chunks[len(chunks)-1] + cur.RowsData = append(cur.RowsData, data...) + return chunks +} + +// fieldTypeFromPBColumn creates a types.FieldType from tipb.ColumnInfo. +func fieldTypeFromPBColumn(col *tipb.ColumnInfo) *types.FieldType { + return &types.FieldType{ + Tp: byte(col.GetTp()), + Flag: uint(col.Flag), + Flen: int(col.GetColumnLen()), + Decimal: int(col.GetDecimal()), + Elems: col.Elems, + Collate: mysql.Collations[uint8(collate.RestoreCollationIDIfNeeded(col.GetCollation()))], + } +} + +// handleCopChecksumRequest handles coprocessor check sum request. +func handleCopChecksumRequest(dbReader *dbreader.DBReader, req *coprocessor.Request) *coprocessor.Response { + resp := &tipb.ChecksumResponse{ + Checksum: 1, + TotalKvs: 1, + TotalBytes: 1, + } + data, err := resp.Marshal() + if err != nil { + return &coprocessor.Response{OtherError: fmt.Sprintf("marshal checksum response error: %v", err)} + } + return &coprocessor.Response{Data: data} +} diff --git a/structure/structure_test.go b/structure/structure_test.go index 62fdeb8f60159..53c7e0dbfb33e 100644 --- a/structure/structure_test.go +++ b/structure/structure_test.go @@ -406,7 +406,7 @@ func (*testTxStructureSuite) TestError(c *C) { structure.ErrWriteOnSnapshot, } for _, err := range kvErrs { - code := err.ToSQLError().Code + code := terror.ToSQLError(err).Code c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) } } diff --git a/table/table_test.go b/table/table_test.go index d3d0e5edd172c..edcfc7d3c2bc9 100644 --- a/table/table_test.go +++ b/table/table_test.go @@ -15,6 +15,7 @@ package table import ( . 
"github.com/pingcap/check" + "github.com/pingcap/parser/terror" mysql "github.com/pingcap/tidb/errno" ) @@ -30,21 +31,21 @@ func (t *testTableSuite) TestSlice(c *C) { } func (t *testTableSuite) TestErrorCode(c *C) { - c.Assert(int(ErrColumnCantNull.ToSQLError().Code), Equals, mysql.ErrBadNull) - c.Assert(int(ErrUnknownColumn.ToSQLError().Code), Equals, mysql.ErrBadField) - c.Assert(int(errDuplicateColumn.ToSQLError().Code), Equals, mysql.ErrFieldSpecifiedTwice) - c.Assert(int(errGetDefaultFailed.ToSQLError().Code), Equals, mysql.ErrFieldGetDefaultFailed) - c.Assert(int(ErrNoDefaultValue.ToSQLError().Code), Equals, mysql.ErrNoDefaultForField) - c.Assert(int(ErrIndexOutBound.ToSQLError().Code), Equals, mysql.ErrIndexOutBound) - c.Assert(int(ErrUnsupportedOp.ToSQLError().Code), Equals, mysql.ErrUnsupportedOp) - c.Assert(int(ErrRowNotFound.ToSQLError().Code), Equals, mysql.ErrRowNotFound) - c.Assert(int(ErrTableStateCantNone.ToSQLError().Code), Equals, mysql.ErrTableStateCantNone) - c.Assert(int(ErrColumnStateCantNone.ToSQLError().Code), Equals, mysql.ErrColumnStateCantNone) - c.Assert(int(ErrColumnStateNonPublic.ToSQLError().Code), Equals, mysql.ErrColumnStateNonPublic) - c.Assert(int(ErrIndexStateCantNone.ToSQLError().Code), Equals, mysql.ErrIndexStateCantNone) - c.Assert(int(ErrInvalidRecordKey.ToSQLError().Code), Equals, mysql.ErrInvalidRecordKey) - c.Assert(int(ErrTruncatedWrongValueForField.ToSQLError().Code), Equals, mysql.ErrTruncatedWrongValueForField) - c.Assert(int(ErrUnknownPartition.ToSQLError().Code), Equals, mysql.ErrUnknownPartition) - c.Assert(int(ErrNoPartitionForGivenValue.ToSQLError().Code), Equals, mysql.ErrNoPartitionForGivenValue) - c.Assert(int(ErrLockOrActiveTransaction.ToSQLError().Code), Equals, mysql.ErrLockOrActiveTransaction) + c.Assert(int(terror.ToSQLError(ErrColumnCantNull).Code), Equals, mysql.ErrBadNull) + c.Assert(int(terror.ToSQLError(ErrUnknownColumn).Code), Equals, mysql.ErrBadField) + c.Assert(int(terror.ToSQLError(errDuplicateColumn).Code), Equals, mysql.ErrFieldSpecifiedTwice) + c.Assert(int(terror.ToSQLError(errGetDefaultFailed).Code), Equals, mysql.ErrFieldGetDefaultFailed) + c.Assert(int(terror.ToSQLError(ErrNoDefaultValue).Code), Equals, mysql.ErrNoDefaultForField) + c.Assert(int(terror.ToSQLError(ErrIndexOutBound).Code), Equals, mysql.ErrIndexOutBound) + c.Assert(int(terror.ToSQLError(ErrUnsupportedOp).Code), Equals, mysql.ErrUnsupportedOp) + c.Assert(int(terror.ToSQLError(ErrRowNotFound).Code), Equals, mysql.ErrRowNotFound) + c.Assert(int(terror.ToSQLError(ErrTableStateCantNone).Code), Equals, mysql.ErrTableStateCantNone) + c.Assert(int(terror.ToSQLError(ErrColumnStateCantNone).Code), Equals, mysql.ErrColumnStateCantNone) + c.Assert(int(terror.ToSQLError(ErrColumnStateNonPublic).Code), Equals, mysql.ErrColumnStateNonPublic) + c.Assert(int(terror.ToSQLError(ErrIndexStateCantNone).Code), Equals, mysql.ErrIndexStateCantNone) + c.Assert(int(terror.ToSQLError(ErrInvalidRecordKey).Code), Equals, mysql.ErrInvalidRecordKey) + c.Assert(int(terror.ToSQLError(ErrTruncatedWrongValueForField).Code), Equals, mysql.ErrTruncatedWrongValueForField) + c.Assert(int(terror.ToSQLError(ErrUnknownPartition).Code), Equals, mysql.ErrUnknownPartition) + c.Assert(int(terror.ToSQLError(ErrNoPartitionForGivenValue).Code), Equals, mysql.ErrNoPartitionForGivenValue) + c.Assert(int(terror.ToSQLError(ErrLockOrActiveTransaction).Code), Equals, mysql.ErrLockOrActiveTransaction) } diff --git a/tablecodec/tablecodec_test.go b/tablecodec/tablecodec_test.go index 
74ad57e626f60..b56c3cd6b9901 100644 --- a/tablecodec/tablecodec_test.go +++ b/tablecodec/tablecodec_test.go @@ -528,7 +528,7 @@ func (s *testTableCodecSuite) TestError(c *C) { errInvalidIndexKey, } for _, err := range kvErrs { - code := err.ToSQLError().Code + code := terror.ToSQLError(err).Code c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) } } diff --git a/types/errors_test.go b/types/errors_test.go index 6f7c92b613f3b..56aa43bfc9b98 100644 --- a/types/errors_test.go +++ b/types/errors_test.go @@ -51,7 +51,7 @@ func (s testErrorSuite) TestError(c *C) { ErrWrongValue, } for _, err := range kvErrs { - code := err.ToSQLError().Code + code := terror.ToSQLError(err).Code c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) } } diff --git a/util/admin/admin_test.go b/util/admin/admin_test.go index 28a6a8206fa93..235efbd99d829 100644 --- a/util/admin/admin_test.go +++ b/util/admin/admin_test.go @@ -382,7 +382,7 @@ func (s *testSuite) TestError(c *C) { ErrCannotCancelDDLJob, } for _, err := range kvErrs { - code := err.ToSQLError().Code + code := terror.ToSQLError(err).Code c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) } } diff --git a/util/memory/tracker_test.go b/util/memory/tracker_test.go index c6b4191595c77..f9f498cfee1ee 100644 --- a/util/memory/tracker_test.go +++ b/util/memory/tracker_test.go @@ -21,6 +21,7 @@ import ( "github.com/cznic/mathutil" . "github.com/pingcap/check" + "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/testleak" @@ -338,5 +339,5 @@ func BenchmarkConsume(b *testing.B) { } func (s *testSuite) TestErrorCode(c *C) { - c.Assert(int(errMemExceedThreshold.ToSQLError().Code), Equals, errno.ErrMemExceedThreshold) + c.Assert(int(terror.ToSQLError(errMemExceedThreshold).Code), Equals, errno.ErrMemExceedThreshold) } diff --git a/util/testkit/testkit.go b/util/testkit/testkit.go index 1ae061c3b0adc..00f12ced1c832 100644 --- a/util/testkit/testkit.go +++ b/util/testkit/testkit.go @@ -279,7 +279,7 @@ func (tk *TestKit) MustGetErrCode(sql string, errCode int) { originErr := errors.Cause(err) tErr, ok := originErr.(*terror.Error) tk.c.Assert(ok, check.IsTrue, check.Commentf("expect type 'terror.Error', but obtain '%T'", originErr)) - sqlErr := tErr.ToSQLError() + sqlErr := terror.ToSQLError(tErr) tk.c.Assert(int(sqlErr.Code), check.Equals, errCode, check.Commentf("Assertion failed, origin err:\n %v", sqlErr)) } From d30aa2ca510072a97256e5f66b0a57073375d45d Mon Sep 17 00:00:00 2001 From: imtbkcat Date: Tue, 8 Sep 2020 20:31:08 +0800 Subject: [PATCH 2/4] fix conflict --- ddl/rollingback.go | 13 - executor/executor_test.go | 9 +- go.mod | 22 +- go.sum | 40 +- .../unistore/cophandler/closure_exec.go | 877 ------------------ .../unistore/cophandler/cop_handler.go | 418 --------- 6 files changed, 7 insertions(+), 1372 deletions(-) delete mode 100644 store/mockstore/unistore/cophandler/closure_exec.go delete mode 100644 store/mockstore/unistore/cophandler/cop_handler.go diff --git a/ddl/rollingback.go b/ddl/rollingback.go index b529834eb7526..8c9de46e1abac 100644 --- a/ddl/rollingback.go +++ b/ddl/rollingback.go @@ -18,7 +18,6 @@ import ( "github.com/pingcap/parser/ast" "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" - "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" 
"github.com/pingcap/tidb/util/logutil" @@ -327,18 +326,6 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) } if err != nil { -<<<<<<< HEAD -======= - if job.Error == nil { - job.Error = toTError(err) - } - if !job.Error.Equal(errCancelledDDLJob) { - job.Error = terror.GetErrClass(job.Error).Synthesize(terror.ErrCode(job.Error.Code()), - fmt.Sprintf("DDL job rollback, error msg: %s", terror.ToSQLError(job.Error).Message)) - } - job.ErrorCount++ - ->>>>>>> 449587a... *: using standard error to replace terror (#19425) if job.State != model.JobStateRollingback && job.State != model.JobStateCancelled { logutil.Logger(w.logCtx).Error("[ddl] run DDL job failed", zap.String("job", job.String()), zap.Error(err)) } else { diff --git a/executor/executor_test.go b/executor/executor_test.go index 68ac0740fc986..12e6507e447ec 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -5944,14 +5944,7 @@ func (s *testSplitTable) TestKillTableReader(c *C) { wg.Add(1) go func() { defer wg.Done() -<<<<<<< HEAD - c.Assert(int(errors.Cause(tk.QueryToErr("select * from t")).(*terror.Error).ToSQLError().Code), Equals, int(executor.ErrQueryInterrupted.Code())) -======= - time.Sleep(1 * time.Second) - err := tk.QueryToErr("select * from t") - c.Assert(err, NotNil) - c.Assert(int(terror.ToSQLError(errors.Cause(err).(*terror.Error)).Code), Equals, int(executor.ErrQueryInterrupted.Code())) ->>>>>>> 449587a... *: using standard error to replace terror (#19425) + c.Assert(int(terror.ToSQLError(errors.Cause(tk.QueryToErr("select * from t")).(*terror.Error)).Code), Equals, int(executor.ErrQueryInterrupted.Code())) }() time.Sleep(1 * time.Second) atomic.StoreUint32(&tk.Se.GetSessionVars().Killed, 1) diff --git a/go.mod b/go.mod index 7587a39c94755..b756b8df2e379 100644 --- a/go.mod +++ b/go.mod @@ -20,8 +20,6 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 github.com/klauspost/cpuid v1.2.1 - github.com/mattn/go-colorable v0.1.7 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef github.com/opentracing/basictracer-go v1.0.0 @@ -34,11 +32,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20200818080353-7aaed8998596 github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463 -<<<<<<< HEAD github.com/pingcap/parser v0.0.0-20200902091735-5e6adfc24e11 -======= - github.com/pingcap/parser v0.0.0-20200908111137-8157d6307003 ->>>>>>> 449587a... *: using standard error to replace terror (#19425) github.com/pingcap/sysutil v0.0.0-20200715082929-4c47bcac246a github.com/pingcap/tidb-tools v4.0.6-0.20200828085514-03575b185007+incompatible github.com/pingcap/tipb v0.0.0-20200618092958-4fad48b4c8c3 @@ -50,36 +44,24 @@ require ( github.com/soheilhy/cmux v0.1.4 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 -<<<<<<< HEAD github.com/tikv/pd v1.1.0-beta.0.20200907080620-6830f5bb92a2 -======= - github.com/tikv/pd v1.1.0-beta.0.20200820084926-bcfa77a7a593 - github.com/twmb/murmur3 v1.1.3 ->>>>>>> 449587a... 
*: using standard error to replace terror (#19425) github.com/uber-go/atomic v1.3.2 github.com/uber/jaeger-client-go v2.22.1+incompatible go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 go.uber.org/atomic v1.6.0 go.uber.org/automaxprocs v1.2.0 -<<<<<<< HEAD - go.uber.org/zap v1.15.0 -======= go.uber.org/zap v1.16.0 ->>>>>>> 449587a... *: using standard error to replace terror (#19425) golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 golang.org/x/sys v0.0.0-20200819171115-d785dc25833f golang.org/x/text v0.3.3 golang.org/x/tools v0.0.0-20200820010801-b793a1359eac -<<<<<<< HEAD -======= - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect ->>>>>>> 449587a... *: using standard error to replace terror (#19425) google.golang.org/grpc v1.26.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 - gopkg.in/yaml.v2 v2.3.0 // indirect sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4 sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 ) go 1.13 + +replace github.com/pingcap/parser => github.com/imtbkcat/parser v0.0.0-20200908120703-5fc1c5049015 diff --git a/go.sum b/go.sum index 409d3d832b7f9..21ea238d5576f 100644 --- a/go.sum +++ b/go.sum @@ -271,6 +271,8 @@ github.com/hypnoglow/gormzap v0.3.0/go.mod h1:5Wom8B7Jl2oK0Im9hs6KQ+Kl92w4Y7gKCr github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFEB7inlalqfNqw65aNkM1lGX2yt3NmbS8= github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imtbkcat/parser v0.0.0-20200908120703-5fc1c5049015 h1:LoUDOHCWv5XZw+Wz7ll5FxLTAw2bgxnXQihargJ2XxQ= +github.com/imtbkcat/parser v0.0.0-20200908120703-5fc1c5049015/go.mod h1:RlLfMRJwFBSiXd2lUaWdV5pSXtrpyvZM8k5bbZWsheU= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -423,13 +425,10 @@ github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTw github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 h1:58naV4XMEqm0hl9LcYo6cZoGBGiLtefMQMF/vo3XLgQ= github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -<<<<<<< HEAD -======= github.com/pingcap/errors v0.11.5-0.20200729012136-4e113ddee29e h1:/EGWHNOyEgizEBuAujWsb9vXrPZtt1b7ooDPyjEkjDw= github.com/pingcap/errors v0.11.5-0.20200729012136-4e113ddee29e/go.mod h1:g4vx//d6VakjJ0mk7iLBlKA8LFavV/sAVINT/1PFxeQ= github.com/pingcap/errors v0.11.5-0.20200820035142-66eb5bf1d1cd h1:ay+wAVWHI/Z6vIik13hsK+FT9ZCNSPBElGr0qgiZpjg= github.com/pingcap/errors v0.11.5-0.20200820035142-66eb5bf1d1cd/go.mod h1:g4vx//d6VakjJ0mk7iLBlKA8LFavV/sAVINT/1PFxeQ= ->>>>>>> 449587a... 
*: using standard error to replace terror (#19425) github.com/pingcap/errors v0.11.5-0.20200902104258-eba4f1d8f6de h1:mW8hC2yXTpflfyTeJgcN4aJQfwcYODde8YgjBgAy6do= github.com/pingcap/errors v0.11.5-0.20200902104258-eba4f1d8f6de/go.mod h1:g4vx//d6VakjJ0mk7iLBlKA8LFavV/sAVINT/1PFxeQ= github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= @@ -464,24 +463,12 @@ github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463 h1:Jboj+s4jSCp5E1WDgmR github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/parser v0.0.0-20200424075042-8222d8b724a4/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= github.com/pingcap/parser v0.0.0-20200507022230-f3bf29096657/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= -<<<<<<< HEAD github.com/pingcap/parser v0.0.0-20200603032439-c4ecb4508d2f/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= github.com/pingcap/parser v0.0.0-20200623164729-3a18f1e5dceb/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= github.com/pingcap/parser v0.0.0-20200803072748-fdf66528323d/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= github.com/pingcap/parser v0.0.0-20200901062802-475ea5e2e0a7/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= github.com/pingcap/parser v0.0.0-20200902091735-5e6adfc24e11 h1:l269UdOdRK4NGoldejKY+9Tor7XmHU+XOuS+VQDwcH8= github.com/pingcap/parser v0.0.0-20200902091735-5e6adfc24e11/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= -======= -github.com/pingcap/parser v0.0.0-20200518090819-ec1e13b948b1/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= -github.com/pingcap/parser v0.0.0-20200522094936-3b720a0512a6/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= -github.com/pingcap/parser v0.0.0-20200609110328-c65941b9fbb3/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= -github.com/pingcap/parser v0.0.0-20200623082809-b74301ac298b/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= -github.com/pingcap/parser v0.0.0-20200730092557-34a468e9b774/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= -github.com/pingcap/parser v0.0.0-20200731033026-84f62115187c/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= -github.com/pingcap/parser v0.0.0-20200813083329-a4bff035d3e2/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= -github.com/pingcap/parser v0.0.0-20200908111137-8157d6307003 h1:HozkZZaBPDYLMHPkK6vy4W0gNytzml+UwxaDxkJrr34= -github.com/pingcap/parser v0.0.0-20200908111137-8157d6307003/go.mod h1:RlLfMRJwFBSiXd2lUaWdV5pSXtrpyvZM8k5bbZWsheU= ->>>>>>> 449587a... 
*: using standard error to replace terror (#19425) github.com/pingcap/pd/v4 v4.0.0-rc.1.0.20200422143320-428acd53eba2/go.mod h1:s+utZtXDznOiL24VK0qGmtoHjjXNsscJx3m1n8cC56s= github.com/pingcap/pd/v4 v4.0.0-rc.2.0.20200520083007-2c251bd8f181/go.mod h1:q4HTx/bA8aKBa4S7L+SQKHvjRPXCRV0tA0yRw0qkZSA= github.com/pingcap/pd/v4 v4.0.5-0.20200817114353-e465cafe8a91 h1:zCOWP+kIzM6ZsXdu2QoM/W6+3vFZj04MYboMP2Obc0E= @@ -605,15 +592,8 @@ github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfK github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -<<<<<<< HEAD github.com/tikv/pd v1.1.0-beta.0.20200907080620-6830f5bb92a2 h1:cC5v/gn9NdcmAlpBrWI5x3MiYmQcW2k7EHccg8837p4= github.com/tikv/pd v1.1.0-beta.0.20200907080620-6830f5bb92a2/go.mod h1:6OYi62ks7nFIBtWWpOjnngr5LNos4Hvi1BzArCWAlBc= -======= -github.com/tikv/pd v1.1.0-beta.0.20200818122340-ef1a4e920b2f h1:MI6OpYRLt041T2uONJzG4BwvVp12sLQ1UVCQuDv1bpw= -github.com/tikv/pd v1.1.0-beta.0.20200818122340-ef1a4e920b2f/go.mod h1:mwZ3Lip1YXgtgBx6blADUPMxrqPGCfwABlreDzuJul8= -github.com/tikv/pd v1.1.0-beta.0.20200820084926-bcfa77a7a593 h1:e5nPFsAfRPe8ybHMKOpRiQ6G3akdpAec1++3UAK5Ny0= -github.com/tikv/pd v1.1.0-beta.0.20200820084926-bcfa77a7a593/go.mod h1:quwjWtCmawAvS+YdxtSKG08sEexLzkhQgAno59wW+lI= ->>>>>>> 449587a... *: using standard error to replace terror (#19425) github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= @@ -646,12 +626,6 @@ github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Y github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -<<<<<<< HEAD -======= -github.com/zhangjinpeng1987/raft v0.0.0-20190624145930-deeb32d6553d/go.mod h1:1KDQ09J8MRHEtHze4at7BJZDW/doUAgkJ8w9KjEUhSo= -github.com/zhangjinpeng1987/raft v0.0.0-20200819064223-df31bb68a018 h1:T3OrqVdcH6z6SakR7WkECvGpdkfB0MAur/6zf66GPxQ= -github.com/zhangjinpeng1987/raft v0.0.0-20200819064223-df31bb68a018/go.mod h1:rTSjwgeYU2on64W50csWDlhyy0x9UYVYJUovHlYdt5s= ->>>>>>> 449587a... 
*: using standard error to replace terror (#19425) go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -690,6 +664,8 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -732,10 +708,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -<<<<<<< HEAD -======= -golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= ->>>>>>> 449587a... *: using standard error to replace terror (#19425) golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -808,10 +780,6 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -<<<<<<< HEAD -======= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= ->>>>>>> 449587a... *: using standard error to replace terror (#19425) golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200819171115-d785dc25833f h1:KJuwZVtZBVzDmEDtB2zro9CXkD9O0dpCv4o2LHbQIAw= golang.org/x/sys v0.0.0-20200819171115-d785dc25833f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/store/mockstore/unistore/cophandler/closure_exec.go b/store/mockstore/unistore/cophandler/closure_exec.go deleted file mode 100644 index e0d72ed421ef7..0000000000000 --- a/store/mockstore/unistore/cophandler/closure_exec.go +++ /dev/null @@ -1,877 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package cophandler - -import ( - "bytes" - "fmt" - "math" - "sort" - - "github.com/juju/errors" - "github.com/ngaut/unistore/tikv/dbreader" - "github.com/ngaut/unistore/tikv/mvcc" - "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/parser/model" - "github.com/pingcap/parser/mysql" - "github.com/pingcap/parser/terror" - "github.com/pingcap/tidb/expression" - "github.com/pingcap/tidb/expression/aggregation" - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/sessionctx/stmtctx" - "github.com/pingcap/tidb/tablecodec" - "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/chunk" - "github.com/pingcap/tidb/util/codec" - mockpkg "github.com/pingcap/tidb/util/mock" - "github.com/pingcap/tidb/util/rowcodec" - "github.com/pingcap/tipb/go-tipb" -) - -const chunkMaxRows = 1024 - -const ( - pkColNotExists = iota - pkColIsSigned - pkColIsUnsigned - pkColIsCommon -) - -func mapPkStatusToHandleStatus(pkStatus int) tablecodec.HandleStatus { - switch pkStatus { - case pkColNotExists: - return tablecodec.HandleNotNeeded - case pkColIsCommon | pkColIsSigned: - return tablecodec.HandleDefault - case pkColIsUnsigned: - return tablecodec.HandleIsUnsigned - } - return tablecodec.HandleDefault -} - -// buildClosureExecutor build a closureExecutor for the DAGRequest. -// Currently the composition of executors are: -// tableScan|indexScan [selection] [topN | limit | agg] -func buildClosureExecutor(dagCtx *dagContext, dagReq *tipb.DAGRequest) (*closureExecutor, error) { - ce, err := newClosureExecutor(dagCtx, dagReq) - if err != nil { - return nil, errors.Trace(err) - } - executors := dagReq.Executors - scanExec := executors[0] - if scanExec.Tp == tipb.ExecType_TypeTableScan { - ce.processor = &tableScanProcessor{closureExecutor: ce} - } else { - ce.processor = &indexScanProcessor{closureExecutor: ce} - } - if len(executors) == 1 { - return ce, nil - } - if secondExec := executors[1]; secondExec.Tp == tipb.ExecType_TypeSelection { - ce.selectionCtx.conditions, err = convertToExprs(ce.sc, ce.fieldTps, secondExec.Selection.Conditions) - if err != nil { - return nil, errors.Trace(err) - } - ce.processor = &selectionProcessor{closureExecutor: ce} - } - lastExecutor := executors[len(executors)-1] - switch lastExecutor.Tp { - case tipb.ExecType_TypeLimit: - ce.limit = int(lastExecutor.Limit.Limit) - case tipb.ExecType_TypeTopN: - err = buildTopNProcessor(ce, lastExecutor.TopN) - case tipb.ExecType_TypeAggregation: - err = buildHashAggProcessor(ce, dagCtx, lastExecutor.Aggregation) - case tipb.ExecType_TypeStreamAgg: - err = buildStreamAggProcessor(ce, dagCtx, executors) - case tipb.ExecType_TypeSelection: - ce.processor = &selectionProcessor{closureExecutor: ce} - default: - panic("unknown executor type " + lastExecutor.Tp.String()) - } - if err != nil { - return nil, err - } - return ce, nil -} - -func convertToExprs(sc *stmtctx.StatementContext, fieldTps []*types.FieldType, pbExprs []*tipb.Expr) ([]expression.Expression, error) { - exprs := make([]expression.Expression, 0, len(pbExprs)) - for _, expr := range pbExprs { - e, err := expression.PBToExpr(expr, fieldTps, sc) - if err != nil { - return nil, 
errors.Trace(err) - } - exprs = append(exprs, e) - } - return exprs, nil -} - -func newClosureExecutor(dagCtx *dagContext, dagReq *tipb.DAGRequest) (*closureExecutor, error) { - e := &closureExecutor{ - dagContext: dagCtx, - outputOff: dagReq.OutputOffsets, - startTS: dagCtx.startTS, - limit: math.MaxInt64, - } - seCtx := mockpkg.NewContext() - seCtx.GetSessionVars().StmtCtx = e.sc - e.seCtx = seCtx - executors := dagReq.Executors - scanExec := executors[0] - switch scanExec.Tp { - case tipb.ExecType_TypeTableScan: - tblScan := executors[0].TblScan - e.unique = true - e.scanCtx.desc = tblScan.Desc - case tipb.ExecType_TypeIndexScan: - idxScan := executors[0].IdxScan - e.unique = idxScan.GetUnique() - e.scanCtx.desc = idxScan.Desc - e.initIdxScanCtx(idxScan) - default: - panic(fmt.Sprintf("unknown first executor type %s", executors[0].Tp)) - } - ranges, err := extractKVRanges(dagCtx.dbReader.StartKey, dagCtx.dbReader.EndKey, dagCtx.keyRanges, e.scanCtx.desc) - if err != nil { - return nil, errors.Trace(err) - } - if dagReq.GetCollectRangeCounts() { - e.counts = make([]int64, len(ranges)) - } - e.kvRanges = ranges - e.scanCtx.chk = chunk.NewChunkWithCapacity(e.fieldTps, 32) - if e.idxScanCtx == nil { - e.scanCtx.decoder, err = e.evalContext.newRowDecoder() - if err != nil { - return nil, errors.Trace(err) - } - } - return e, nil -} - -func (e *closureExecutor) initIdxScanCtx(idxScan *tipb.IndexScan) { - e.idxScanCtx = new(idxScanCtx) - e.idxScanCtx.columnLen = len(e.columnInfos) - e.idxScanCtx.pkStatus = pkColNotExists - - e.idxScanCtx.primaryColumnIds = idxScan.PrimaryColumnIds - - lastColumn := e.columnInfos[len(e.columnInfos)-1] - - if len(e.idxScanCtx.primaryColumnIds) == 0 { - if lastColumn.GetPkHandle() { - if mysql.HasUnsignedFlag(uint(lastColumn.GetFlag())) { - e.idxScanCtx.pkStatus = pkColIsUnsigned - } else { - e.idxScanCtx.pkStatus = pkColIsSigned - } - e.idxScanCtx.columnLen-- - } else if lastColumn.ColumnId == model.ExtraHandleID { - e.idxScanCtx.pkStatus = pkColIsSigned - e.idxScanCtx.columnLen-- - } - } else { - e.idxScanCtx.pkStatus = pkColIsCommon - e.idxScanCtx.columnLen -= len(e.idxScanCtx.primaryColumnIds) - } - - colInfos := make([]rowcodec.ColInfo, len(e.columnInfos)) - for i := range colInfos { - col := e.columnInfos[i] - colInfos[i] = rowcodec.ColInfo{ - ID: col.ColumnId, - Ft: e.fieldTps[i], - IsPKHandle: col.GetPkHandle(), - } - } - e.idxScanCtx.colInfos = colInfos - - colIDs := make(map[int64]int, len(colInfos)) - for i, col := range colInfos[:e.idxScanCtx.columnLen] { - colIDs[col.ID] = i - } - e.scanCtx.newCollationIds = colIDs - - // We don't need to decode handle here, and colIDs >= 0 always. 
- e.scanCtx.newCollationRd = rowcodec.NewByteDecoder(colInfos[:e.idxScanCtx.columnLen], []int64{-1}, nil, nil) -} - -func isCountAgg(pbAgg *tipb.Aggregation) bool { - if len(pbAgg.AggFunc) == 1 && len(pbAgg.GroupBy) == 0 { - aggFunc := pbAgg.AggFunc[0] - if aggFunc.Tp == tipb.ExprType_Count && len(aggFunc.Children) == 1 { - return true - } - } - return false -} - -func tryBuildCountProcessor(e *closureExecutor, executors []*tipb.Executor) (bool, error) { - if len(executors) > 2 { - return false, nil - } - agg := executors[1].Aggregation - if !isCountAgg(agg) { - return false, nil - } - child := agg.AggFunc[0].Children[0] - switch child.Tp { - case tipb.ExprType_ColumnRef: - _, idx, err := codec.DecodeInt(child.Val) - if err != nil { - return false, errors.Trace(err) - } - e.aggCtx.col = e.columnInfos[idx] - if e.aggCtx.col.PkHandle { - e.processor = &countStarProcessor{skipVal: skipVal(true), closureExecutor: e} - } else { - e.processor = &countColumnProcessor{closureExecutor: e} - } - case tipb.ExprType_Null, tipb.ExprType_ScalarFunc: - return false, nil - default: - e.processor = &countStarProcessor{skipVal: skipVal(true), closureExecutor: e} - } - return true, nil -} - -func buildTopNProcessor(e *closureExecutor, topN *tipb.TopN) error { - heap, conds, err := getTopNInfo(e.evalContext, topN) - if err != nil { - return errors.Trace(err) - } - - ctx := &topNCtx{ - heap: heap, - orderByExprs: conds, - sortRow: e.newTopNSortRow(), - } - - e.topNCtx = ctx - e.processor = &topNProcessor{closureExecutor: e} - return nil -} - -func buildHashAggProcessor(e *closureExecutor, ctx *dagContext, agg *tipb.Aggregation) error { - aggs, groupBys, err := getAggInfo(ctx, agg) - if err != nil { - return err - } - e.processor = &hashAggProcessor{ - closureExecutor: e, - aggExprs: aggs, - groupByExprs: groupBys, - groups: map[string]struct{}{}, - groupKeys: nil, - aggCtxsMap: map[string][]*aggregation.AggEvaluateContext{}, - } - return nil -} - -func buildStreamAggProcessor(e *closureExecutor, ctx *dagContext, executors []*tipb.Executor) error { - ok, err := tryBuildCountProcessor(e, executors) - if err != nil || ok { - return err - } - return buildHashAggProcessor(e, ctx, executors[len(executors)-1].Aggregation) -} - -// closureExecutor is an execution engine that flatten the DAGRequest.Executors to a single closure `processor` that -// process key/value pairs. We can define many closures for different kinds of requests, try to use the specially -// optimized one for some frequently used query. 
-type closureExecutor struct { - *dagContext - outputOff []uint32 - seCtx sessionctx.Context - kvRanges []kv.KeyRange - startTS uint64 - ignoreLock bool - lockChecked bool - scanCtx scanCtx - idxScanCtx *idxScanCtx - selectionCtx selectionCtx - aggCtx aggCtx - topNCtx *topNCtx - - rowCount int - unique bool - limit int - - oldChunks []tipb.Chunk - oldRowBuf []byte - processor closureProcessor - - counts []int64 -} - -type closureProcessor interface { - dbreader.ScanProcessor - Finish() error -} - -type scanCtx struct { - count int - limit int - chk *chunk.Chunk - desc bool - decoder *rowcodec.ChunkDecoder - primaryColumnIds []int64 - - newCollationRd *rowcodec.BytesDecoder - newCollationIds map[int64]int -} - -type idxScanCtx struct { - pkStatus int - columnLen int - colInfos []rowcodec.ColInfo - primaryColumnIds []int64 -} - -type aggCtx struct { - col *tipb.ColumnInfo -} - -type selectionCtx struct { - conditions []expression.Expression -} - -type topNCtx struct { - heap *topNHeap - orderByExprs []expression.Expression - sortRow *sortRow -} - -func (e *closureExecutor) execute() ([]tipb.Chunk, error) { - err := e.checkRangeLock() - if err != nil { - return nil, errors.Trace(err) - } - dbReader := e.dbReader - for i, ran := range e.kvRanges { - if e.isPointGetRange(ran) { - val, err := dbReader.Get(ran.StartKey, e.startTS) - if err != nil { - return nil, errors.Trace(err) - } - if len(val) == 0 { - continue - } - if e.counts != nil { - e.counts[i]++ - } - err = e.processor.Process(ran.StartKey, val) - if err != nil { - return nil, errors.Trace(err) - } - } else { - oldCnt := e.rowCount - if e.scanCtx.desc { - err = dbReader.ReverseScan(ran.StartKey, ran.EndKey, math.MaxInt64, e.startTS, e.processor) - } else { - err = dbReader.Scan(ran.StartKey, ran.EndKey, math.MaxInt64, e.startTS, e.processor) - } - delta := int64(e.rowCount - oldCnt) - if e.counts != nil { - e.counts[i] += delta - } - if err != nil { - return nil, errors.Trace(err) - } - } - if e.rowCount == e.limit { - break - } - } - err = e.processor.Finish() - return e.oldChunks, err -} - -func (e *closureExecutor) isPointGetRange(ran kv.KeyRange) bool { - if len(e.primaryCols) > 0 { - return false - } - return e.unique && ran.IsPoint() -} - -func (e *closureExecutor) checkRangeLock() error { - if !e.ignoreLock && !e.lockChecked { - for _, ran := range e.kvRanges { - err := e.checkRangeLockForRange(ran) - if err != nil { - return err - } - } - e.lockChecked = true - } - return nil -} - -func (e *closureExecutor) checkRangeLockForRange(ran kv.KeyRange) error { - it := e.lockStore.NewIterator() - for it.Seek(ran.StartKey); it.Valid(); it.Next() { - if exceedEndKey(it.Key(), ran.EndKey) { - break - } - lock := mvcc.DecodeLock(it.Value()) - err := checkLock(lock, it.Key(), e.startTS, e.resolvedLocks) - if err != nil { - return err - } - } - return nil -} - -type countStarProcessor struct { - skipVal - *closureExecutor -} - -// countStarProcess is used for `count(*)`. -func (e *countStarProcessor) Process(key, value []byte) error { - e.rowCount++ - return nil -} - -func (e *countStarProcessor) Finish() error { - return e.countFinish() -} - -// countFinish is used for `count(*)`. 
-func (e *closureExecutor) countFinish() error { - d := types.NewIntDatum(int64(e.rowCount)) - rowData, err := codec.EncodeValue(e.sc, nil, d) - if err != nil { - return errors.Trace(err) - } - e.oldChunks = appendRow(e.oldChunks, rowData, 0) - return nil -} - -type countColumnProcessor struct { - skipVal - *closureExecutor -} - -func (e *countColumnProcessor) Process(key, value []byte) error { - if e.idxScanCtx != nil { - values, _, err := tablecodec.CutIndexKeyNew(key, e.idxScanCtx.columnLen) - if err != nil { - return errors.Trace(err) - } - if values[0][0] != codec.NilFlag { - e.rowCount++ - } - } else { - // Since the handle value doesn't affect the count result, we don't need to decode the handle. - isNull, err := e.scanCtx.decoder.ColumnIsNull(value, e.aggCtx.col.ColumnId, e.aggCtx.col.DefaultVal) - if err != nil { - return errors.Trace(err) - } - if !isNull { - e.rowCount++ - } - } - return nil -} - -func (e *countColumnProcessor) Finish() error { - return e.countFinish() -} - -type skipVal bool - -func (s skipVal) SkipValue() bool { - return bool(s) -} - -type tableScanProcessor struct { - skipVal - *closureExecutor -} - -func (e *tableScanProcessor) Process(key, value []byte) error { - if e.rowCount == e.limit { - return dbreader.ScanBreak - } - e.rowCount++ - err := e.tableScanProcessCore(key, value) - if e.scanCtx.chk.NumRows() == chunkMaxRows { - err = e.chunkToOldChunk(e.scanCtx.chk) - } - return err -} - -func (e *tableScanProcessor) Finish() error { - return e.scanFinish() -} - -func (e *closureExecutor) processCore(key, value []byte) error { - if e.idxScanCtx != nil { - return e.indexScanProcessCore(key, value) - } - return e.tableScanProcessCore(key, value) -} - -func (e *closureExecutor) hasSelection() bool { - return len(e.selectionCtx.conditions) > 0 -} - -func (e *closureExecutor) processSelection() (gotRow bool, err error) { - chk := e.scanCtx.chk - row := chk.GetRow(chk.NumRows() - 1) - gotRow = true - for _, expr := range e.selectionCtx.conditions { - wc := e.sc.WarningCount() - d, err := expr.Eval(row) - if err != nil { - return false, errors.Trace(err) - } - - if d.IsNull() { - gotRow = false - } else { - isBool, err := d.ToBool(e.sc) - isBool, err = expression.HandleOverflowOnSelection(e.sc, isBool, err) - if err != nil { - return false, errors.Trace(err) - } - gotRow = isBool != 0 - } - if !gotRow { - if e.sc.WarningCount() > wc { - // Deep-copy error object here, because the data it referenced is going to be truncated. 
- warns := e.sc.TruncateWarnings(int(wc)) - for i, warn := range warns { - warns[i].Err = e.copyError(warn.Err) - } - e.sc.AppendWarnings(warns) - } - chk.TruncateTo(chk.NumRows() - 1) - break - } - } - return -} - -func (e *closureExecutor) copyError(err error) error { - if err == nil { - return nil - } - var ret error - x := errors.Cause(err) - switch y := x.(type) { - case *terror.Error: - ret = terror.ToSQLError(y) - default: - ret = errors.New(err.Error()) - } - return ret -} - -func (e *closureExecutor) tableScanProcessCore(key, value []byte) error { - handle, err := tablecodec.DecodeRowKey(key) - if err != nil { - return errors.Trace(err) - } - err = e.scanCtx.decoder.DecodeToChunk(value, handle, e.scanCtx.chk) - if err != nil { - return errors.Trace(err) - } - return nil -} - -func (e *closureExecutor) scanFinish() error { - return e.chunkToOldChunk(e.scanCtx.chk) -} - -type indexScanProcessor struct { - skipVal - *closureExecutor -} - -func (e *indexScanProcessor) Process(key, value []byte) error { - if e.rowCount == e.limit { - return dbreader.ScanBreak - } - e.rowCount++ - err := e.indexScanProcessCore(key, value) - if e.scanCtx.chk.NumRows() == chunkMaxRows { - err = e.chunkToOldChunk(e.scanCtx.chk) - } - return err -} - -func (e *indexScanProcessor) Finish() error { - return e.scanFinish() -} - -func (e *closureExecutor) indexScanProcessCore(key, value []byte) error { - handleStatus := mapPkStatusToHandleStatus(e.idxScanCtx.pkStatus) - restoredCols := make([]rowcodec.ColInfo, 0, len(e.idxScanCtx.colInfos)) - for _, c := range e.idxScanCtx.colInfos { - if c.ID != -1 { - restoredCols = append(restoredCols, c) - } - } - values, err := tablecodec.DecodeIndexKV(key, value, e.idxScanCtx.columnLen, handleStatus, restoredCols) - if err != nil { - return err - } - chk := e.scanCtx.chk - decoder := codec.NewDecoder(chk, e.sc.TimeZone) - for i, colVal := range values { - if i < len(e.fieldTps) { - _, err = decoder.DecodeOne(colVal, i, e.fieldTps[i]) - if err != nil { - return errors.Trace(err) - } - } - } - return nil -} - -func (e *closureExecutor) chunkToOldChunk(chk *chunk.Chunk) error { - var oldRow []types.Datum - for i := 0; i < chk.NumRows(); i++ { - oldRow = oldRow[:0] - for _, outputOff := range e.outputOff { - d := chk.GetRow(i).GetDatum(int(outputOff), e.fieldTps[outputOff]) - oldRow = append(oldRow, d) - } - var err error - e.oldRowBuf, err = codec.EncodeValue(e.sc, e.oldRowBuf[:0], oldRow...) 
- if err != nil { - return errors.Trace(err) - } - e.oldChunks = appendRow(e.oldChunks, e.oldRowBuf, i) - } - chk.Reset() - return nil -} - -type selectionProcessor struct { - skipVal - *closureExecutor -} - -func (e *selectionProcessor) Process(key, value []byte) error { - if e.rowCount == e.limit { - return dbreader.ScanBreak - } - err := e.processCore(key, value) - if err != nil { - return errors.Trace(err) - } - gotRow, err := e.processSelection() - if err != nil { - return err - } - if gotRow { - e.rowCount++ - if e.scanCtx.chk.NumRows() == chunkMaxRows { - err = e.chunkToOldChunk(e.scanCtx.chk) - } - } - return err -} - -func (e *selectionProcessor) Finish() error { - return e.scanFinish() -} - -type topNProcessor struct { - skipVal - *closureExecutor -} - -func (e *topNProcessor) Process(key, value []byte) (err error) { - if err = e.processCore(key, value); err != nil { - return err - } - if e.hasSelection() { - gotRow, err1 := e.processSelection() - if err1 != nil || !gotRow { - return err1 - } - } - - ctx := e.topNCtx - row := e.scanCtx.chk.GetRow(0) - for i, expr := range ctx.orderByExprs { - d, err := expr.Eval(row) - if err != nil { - return errors.Trace(err) - } - d.Copy(&ctx.sortRow.key[i]) - } - e.scanCtx.chk.Reset() - - if ctx.heap.tryToAddRow(ctx.sortRow) { - ctx.sortRow.data[0] = safeCopy(key) - ctx.sortRow.data[1] = safeCopy(value) - ctx.sortRow = e.newTopNSortRow() - } - return errors.Trace(ctx.heap.err) -} - -func (e *closureExecutor) newTopNSortRow() *sortRow { - return &sortRow{ - key: make([]types.Datum, len(e.evalContext.columnInfos)), - data: make([][]byte, 2), - } -} - -func (e *topNProcessor) Finish() error { - ctx := e.topNCtx - sort.Sort(&ctx.heap.topNSorter) - chk := e.scanCtx.chk - for _, row := range ctx.heap.rows { - err := e.processCore(row.data[0], row.data[1]) - if err != nil { - return err - } - if chk.NumRows() == chunkMaxRows { - if err = e.chunkToOldChunk(chk); err != nil { - return errors.Trace(err) - } - } - } - return e.chunkToOldChunk(chk) -} - -type hashAggProcessor struct { - skipVal - *closureExecutor - - aggExprs []aggregation.Aggregation - groupByExprs []expression.Expression - groups map[string]struct{} - groupKeys [][]byte - aggCtxsMap map[string][]*aggregation.AggEvaluateContext -} - -func (e *hashAggProcessor) Process(key, value []byte) (err error) { - err = e.processCore(key, value) - if err != nil { - return err - } - if e.hasSelection() { - gotRow, err1 := e.processSelection() - if err1 != nil || !gotRow { - return err1 - } - } - row := e.scanCtx.chk.GetRow(e.scanCtx.chk.NumRows() - 1) - gk, err := e.getGroupKey(row) - if _, ok := e.groups[string(gk)]; !ok { - e.groups[string(gk)] = struct{}{} - e.groupKeys = append(e.groupKeys, gk) - } - // Update aggregate expressions. - aggCtxs := e.getContexts(gk) - for i, agg := range e.aggExprs { - err = agg.Update(aggCtxs[i], e.sc, row) - if err != nil { - return errors.Trace(err) - } - } - e.scanCtx.chk.Reset() - return nil -} - -func (e *hashAggProcessor) getGroupKey(row chunk.Row) ([]byte, error) { - length := len(e.groupByExprs) - if length == 0 { - return nil, nil - } - key := make([]byte, 0, 32) - for _, item := range e.groupByExprs { - v, err := item.Eval(row) - if err != nil { - return nil, errors.Trace(err) - } - b, err := codec.EncodeValue(e.sc, nil, v) - if err != nil { - return nil, errors.Trace(err) - } - key = append(key, b...) 
- } - return key, nil -} - -func (e *hashAggProcessor) getContexts(groupKey []byte) []*aggregation.AggEvaluateContext { - aggCtxs, ok := e.aggCtxsMap[string(groupKey)] - if !ok { - aggCtxs = make([]*aggregation.AggEvaluateContext, 0, len(e.aggExprs)) - for _, agg := range e.aggExprs { - aggCtxs = append(aggCtxs, agg.CreateContext(e.sc)) - } - e.aggCtxsMap[string(groupKey)] = aggCtxs - } - return aggCtxs -} - -func (e *hashAggProcessor) Finish() error { - for i, gk := range e.groupKeys { - aggCtxs := e.getContexts(gk) - e.oldRowBuf = e.oldRowBuf[:0] - for i, agg := range e.aggExprs { - partialResults := agg.GetPartialResult(aggCtxs[i]) - var err error - e.oldRowBuf, err = codec.EncodeValue(e.sc, e.oldRowBuf, partialResults...) - if err != nil { - return err - } - } - e.oldRowBuf = append(e.oldRowBuf, gk...) - e.oldChunks = appendRow(e.oldChunks, e.oldRowBuf, i) - } - return nil -} - -func safeCopy(b []byte) []byte { - return append([]byte{}, b...) -} - -func checkLock(lock mvcc.MvccLock, key []byte, startTS uint64, resolved []uint64) error { - if isResolved(startTS, resolved) { - return nil - } - lockVisible := lock.StartTS < startTS - isWriteLock := lock.Op == uint8(kvrpcpb.Op_Put) || lock.Op == uint8(kvrpcpb.Op_Del) - isPrimaryGet := startTS == math.MaxUint64 && bytes.Equal(lock.Primary, key) - if lockVisible && isWriteLock && !isPrimaryGet { - return BuildLockErr(key, lock.Primary, lock.StartTS, uint64(lock.TTL), lock.Op) - } - return nil -} - -func isResolved(startTS uint64, resolved []uint64) bool { - for _, v := range resolved { - if startTS == v { - return true - } - } - return false -} - -func exceedEndKey(current, endKey []byte) bool { - if len(endKey) == 0 { - return false - } - return bytes.Compare(current, endKey) >= 0 -} diff --git a/store/mockstore/unistore/cophandler/cop_handler.go b/store/mockstore/unistore/cophandler/cop_handler.go deleted file mode 100644 index 787f3c1264ef9..0000000000000 --- a/store/mockstore/unistore/cophandler/cop_handler.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package cophandler - -import ( - "bytes" - "fmt" - "time" - - "github.com/golang/protobuf/proto" - "github.com/ngaut/unistore/lockstore" - "github.com/ngaut/unistore/tikv/dbreader" - "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/coprocessor" - "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/parser/model" - "github.com/pingcap/parser/mysql" - "github.com/pingcap/parser/terror" - "github.com/pingcap/tidb/expression" - "github.com/pingcap/tidb/expression/aggregation" - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/sessionctx/stmtctx" - "github.com/pingcap/tidb/tablecodec" - "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/chunk" - "github.com/pingcap/tidb/util/codec" - "github.com/pingcap/tidb/util/collate" - "github.com/pingcap/tidb/util/rowcodec" - "github.com/pingcap/tipb/go-tipb" -) - -// HandleCopRequest handles coprocessor request. 
-func HandleCopRequest(dbReader *dbreader.DBReader, lockStore *lockstore.MemStore, req *coprocessor.Request) *coprocessor.Response { - switch req.Tp { - case kv.ReqTypeDAG: - return handleCopDAGRequest(dbReader, lockStore, req) - case kv.ReqTypeAnalyze: - return handleCopAnalyzeRequest(dbReader, req) - case kv.ReqTypeChecksum: - return handleCopChecksumRequest(dbReader, req) - } - return &coprocessor.Response{OtherError: fmt.Sprintf("unsupported request type %d", req.GetTp())} -} - -type dagContext struct { - *evalContext - dbReader *dbreader.DBReader - lockStore *lockstore.MemStore - resolvedLocks []uint64 - dagReq *tipb.DAGRequest - keyRanges []*coprocessor.KeyRange - startTS uint64 -} - -// handleCopDAGRequest handles coprocessor DAG request. -func handleCopDAGRequest(dbReader *dbreader.DBReader, lockStore *lockstore.MemStore, req *coprocessor.Request) *coprocessor.Response { - startTime := time.Now() - resp := &coprocessor.Response{} - dagCtx, dagReq, err := buildDAG(dbReader, lockStore, req) - if err != nil { - resp.OtherError = err.Error() - return resp - } - closureExec, err := buildClosureExecutor(dagCtx, dagReq) - if err != nil { - return buildResp(nil, nil, dagReq, err, dagCtx.sc.GetWarnings(), time.Since(startTime)) - } - chunks, err := closureExec.execute() - return buildResp(chunks, closureExec.counts, dagReq, err, dagCtx.sc.GetWarnings(), time.Since(startTime)) -} - -func buildDAG(reader *dbreader.DBReader, lockStore *lockstore.MemStore, req *coprocessor.Request) (*dagContext, *tipb.DAGRequest, error) { - if len(req.Ranges) == 0 { - return nil, nil, errors.New("request range is null") - } - if req.GetTp() != kv.ReqTypeDAG { - return nil, nil, errors.Errorf("unsupported request type %d", req.GetTp()) - } - - dagReq := new(tipb.DAGRequest) - err := proto.Unmarshal(req.Data, dagReq) - if err != nil { - return nil, nil, errors.Trace(err) - } - sc := flagsToStatementContext(dagReq.Flags) - sc.TimeZone = time.FixedZone("UTC", int(dagReq.TimeZoneOffset)) - ctx := &dagContext{ - evalContext: &evalContext{sc: sc}, - dbReader: reader, - lockStore: lockStore, - dagReq: dagReq, - keyRanges: req.Ranges, - startTS: req.StartTs, - resolvedLocks: req.Context.ResolvedLocks, - } - scanExec := dagReq.Executors[0] - if scanExec.Tp == tipb.ExecType_TypeTableScan { - ctx.setColumnInfo(scanExec.TblScan.Columns) - ctx.primaryCols = scanExec.TblScan.PrimaryColumnIds - } else { - ctx.setColumnInfo(scanExec.IdxScan.Columns) - } - return ctx, dagReq, err -} - -func getAggInfo(ctx *dagContext, pbAgg *tipb.Aggregation) ([]aggregation.Aggregation, []expression.Expression, error) { - length := len(pbAgg.AggFunc) - aggs := make([]aggregation.Aggregation, 0, length) - var err error - for _, expr := range pbAgg.AggFunc { - var aggExpr aggregation.Aggregation - aggExpr, err = aggregation.NewDistAggFunc(expr, ctx.fieldTps, ctx.sc) - if err != nil { - return nil, nil, errors.Trace(err) - } - aggs = append(aggs, aggExpr) - } - groupBys, err := convertToExprs(ctx.sc, ctx.fieldTps, pbAgg.GetGroupBy()) - if err != nil { - return nil, nil, errors.Trace(err) - } - - return aggs, groupBys, nil -} - -func getTopNInfo(ctx *evalContext, topN *tipb.TopN) (heap *topNHeap, conds []expression.Expression, err error) { - pbConds := make([]*tipb.Expr, len(topN.OrderBy)) - for i, item := range topN.OrderBy { - pbConds[i] = item.Expr - } - heap = &topNHeap{ - totalCount: int(topN.Limit), - topNSorter: topNSorter{ - orderByItems: topN.OrderBy, - sc: ctx.sc, - }, - } - if conds, err = convertToExprs(ctx.sc, ctx.fieldTps, pbConds); 
err != nil { - return nil, nil, errors.Trace(err) - } - - return heap, conds, nil -} - -type evalContext struct { - colIDs map[int64]int - columnInfos []*tipb.ColumnInfo - fieldTps []*types.FieldType - primaryCols []int64 - sc *stmtctx.StatementContext -} - -func (e *evalContext) setColumnInfo(cols []*tipb.ColumnInfo) { - e.columnInfos = make([]*tipb.ColumnInfo, len(cols)) - copy(e.columnInfos, cols) - - e.colIDs = make(map[int64]int, len(e.columnInfos)) - e.fieldTps = make([]*types.FieldType, 0, len(e.columnInfos)) - for i, col := range e.columnInfos { - ft := fieldTypeFromPBColumn(col) - e.fieldTps = append(e.fieldTps, ft) - e.colIDs[col.GetColumnId()] = i - } -} - -func (e *evalContext) newRowDecoder() (*rowcodec.ChunkDecoder, error) { - var ( - pkCols []int64 - cols = make([]rowcodec.ColInfo, 0, len(e.columnInfos)) - ) - for i := range e.columnInfos { - info := e.columnInfos[i] - ft := e.fieldTps[i] - col := rowcodec.ColInfo{ - ID: info.ColumnId, - Ft: ft, - IsPKHandle: info.PkHandle, - } - cols = append(cols, col) - if info.PkHandle { - pkCols = append(pkCols, info.ColumnId) - } - } - if len(pkCols) == 0 { - if e.primaryCols != nil { - pkCols = e.primaryCols - } else { - pkCols = []int64{0} - } - } - def := func(i int, chk *chunk.Chunk) error { - info := e.columnInfos[i] - if info.PkHandle || len(info.DefaultVal) == 0 { - chk.AppendNull(i) - return nil - } - decoder := codec.NewDecoder(chk, e.sc.TimeZone) - _, err := decoder.DecodeOne(info.DefaultVal, i, e.fieldTps[i]) - if err != nil { - return err - } - return nil - } - return rowcodec.NewChunkDecoder(cols, pkCols, def, e.sc.TimeZone), nil -} - -// decodeRelatedColumnVals decodes data to Datum slice according to the row information. -func (e *evalContext) decodeRelatedColumnVals(relatedColOffsets []int, value [][]byte, row []types.Datum) error { - var err error - for _, offset := range relatedColOffsets { - row[offset], err = tablecodec.DecodeColumnValue(value[offset], e.fieldTps[offset], e.sc.TimeZone) - if err != nil { - return errors.Trace(err) - } - } - return nil -} - -// flagsToStatementContext creates a StatementContext from a `tipb.SelectRequest.Flags`. -func flagsToStatementContext(flags uint64) *stmtctx.StatementContext { - sc := new(stmtctx.StatementContext) - sc.IgnoreTruncate = (flags & model.FlagIgnoreTruncate) > 0 - sc.TruncateAsWarning = (flags & model.FlagTruncateAsWarning) > 0 - sc.InInsertStmt = (flags & model.FlagInInsertStmt) > 0 - sc.InSelectStmt = (flags & model.FlagInSelectStmt) > 0 - sc.InDeleteStmt = (flags & model.FlagInUpdateOrDeleteStmt) > 0 - sc.OverflowAsWarning = (flags & model.FlagOverflowAsWarning) > 0 - sc.IgnoreZeroInDate = (flags & model.FlagIgnoreZeroInDate) > 0 - sc.DividedByZeroAsWarning = (flags & model.FlagDividedByZeroAsWarning) > 0 - return sc -} - -// ErrLocked is returned when trying to Read/Write on a locked key. Client should -// backoff or cleanup the lock then retry. -type ErrLocked struct { - Key []byte - Primary []byte - StartTS uint64 - TTL uint64 - LockType uint8 -} - -// BuildLockErr generates ErrKeyLocked objects -func BuildLockErr(key []byte, primaryKey []byte, startTS uint64, TTL uint64, lockType uint8) *ErrLocked { - errLocked := &ErrLocked{ - Key: key, - Primary: primaryKey, - StartTS: startTS, - TTL: TTL, - LockType: lockType, - } - return errLocked -} - -// Error formats the lock to a string. 
-func (e *ErrLocked) Error() string {
-	return fmt.Sprintf("key is locked, key: %q, Type: %v, primary: %q, startTS: %v", e.Key, e.LockType, e.Primary, e.StartTS)
-}
-
-func buildResp(chunks []tipb.Chunk, counts []int64, dagReq *tipb.DAGRequest, err error, warnings []stmtctx.SQLWarn, dur time.Duration) *coprocessor.Response {
-	resp := &coprocessor.Response{}
-	selResp := &tipb.SelectResponse{
-		Error:        toPBError(err),
-		Chunks:       chunks,
-		OutputCounts: counts,
-	}
-	if dagReq.CollectExecutionSummaries != nil && *dagReq.CollectExecutionSummaries {
-		execSummary := make([]*tipb.ExecutorExecutionSummary, len(dagReq.Executors))
-		for i := range execSummary {
-			// TODO: Add real executor execution summary information.
-			execSummary[i] = &tipb.ExecutorExecutionSummary{}
-		}
-		selResp.ExecutionSummaries = execSummary
-	}
-	if len(warnings) > 0 {
-		selResp.Warnings = make([]*tipb.Error, 0, len(warnings))
-		for i := range warnings {
-			selResp.Warnings = append(selResp.Warnings, toPBError(warnings[i].Err))
-		}
-	}
-	if locked, ok := errors.Cause(err).(*ErrLocked); ok {
-		resp.Locked = &kvrpcpb.LockInfo{
-			Key:         locked.Key,
-			PrimaryLock: locked.Primary,
-			LockVersion: locked.StartTS,
-			LockTtl:     locked.TTL,
-		}
-	}
-	resp.ExecDetails = &kvrpcpb.ExecDetails{
-		HandleTime: &kvrpcpb.HandleTime{ProcessMs: int64(dur / time.Millisecond)},
-	}
-	data, err := proto.Marshal(selResp)
-	if err != nil {
-		resp.OtherError = err.Error()
-		return resp
-	}
-	resp.Data = data
-	return resp
-}
-
-func toPBError(err error) *tipb.Error {
-	if err == nil {
-		return nil
-	}
-	perr := new(tipb.Error)
-	e := errors.Cause(err)
-	switch y := e.(type) {
-	case *terror.Error:
-		tmp := terror.ToSQLError(y)
-		perr.Code = int32(tmp.Code)
-		perr.Msg = tmp.Message
-	case *mysql.SQLError:
-		perr.Code = int32(y.Code)
-		perr.Msg = y.Message
-	default:
-		perr.Code = int32(1)
-		perr.Msg = err.Error()
-	}
-	return perr
-}
-
-// extractKVRanges extracts kv.KeyRanges slice from a SelectRequest.
-func extractKVRanges(startKey, endKey []byte, keyRanges []*coprocessor.KeyRange, descScan bool) (kvRanges []kv.KeyRange, err error) {
-	kvRanges = make([]kv.KeyRange, 0, len(keyRanges))
-	for _, kran := range keyRanges {
-		if bytes.Compare(kran.GetStart(), kran.GetEnd()) >= 0 {
-			err = errors.Errorf("invalid range, start should be smaller than end: %v %v", kran.GetStart(), kran.GetEnd())
-			return
-		}
-
-		upperKey := kran.GetEnd()
-		if bytes.Compare(upperKey, startKey) <= 0 {
-			continue
-		}
-		lowerKey := kran.GetStart()
-		if len(endKey) != 0 && bytes.Compare(lowerKey, endKey) >= 0 {
-			break
-		}
-		r := kv.KeyRange{
-			StartKey: kv.Key(maxStartKey(lowerKey, startKey)),
-			EndKey:   kv.Key(minEndKey(upperKey, endKey)),
-		}
-		kvRanges = append(kvRanges, r)
-	}
-	if descScan {
-		reverseKVRanges(kvRanges)
-	}
-	return
-}
-
-func reverseKVRanges(kvRanges []kv.KeyRange) {
-	for i := 0; i < len(kvRanges)/2; i++ {
-		j := len(kvRanges) - i - 1
-		kvRanges[i], kvRanges[j] = kvRanges[j], kvRanges[i]
-	}
-}
-
-func maxStartKey(rangeStartKey kv.Key, regionStartKey []byte) []byte {
-	if bytes.Compare([]byte(rangeStartKey), regionStartKey) > 0 {
-		return []byte(rangeStartKey)
-	}
-	return regionStartKey
-}
-
-func minEndKey(rangeEndKey kv.Key, regionEndKey []byte) []byte {
-	if len(regionEndKey) == 0 || bytes.Compare([]byte(rangeEndKey), regionEndKey) < 0 {
-		return []byte(rangeEndKey)
-	}
-	return regionEndKey
-}
-
-const rowsPerChunk = 64
-
-func appendRow(chunks []tipb.Chunk, data []byte, rowCnt int) []tipb.Chunk {
-	if rowCnt%rowsPerChunk == 0 {
-		chunks = append(chunks, tipb.Chunk{})
-	}
-	cur := &chunks[len(chunks)-1]
-	cur.RowsData = append(cur.RowsData, data...)
-	return chunks
-}
-
-// fieldTypeFromPBColumn creates a types.FieldType from tipb.ColumnInfo.
-func fieldTypeFromPBColumn(col *tipb.ColumnInfo) *types.FieldType {
-	return &types.FieldType{
-		Tp:      byte(col.GetTp()),
-		Flag:    uint(col.Flag),
-		Flen:    int(col.GetColumnLen()),
-		Decimal: int(col.GetDecimal()),
-		Elems:   col.Elems,
-		Collate: mysql.Collations[uint8(collate.RestoreCollationIDIfNeeded(col.GetCollation()))],
-	}
-}
-
-// handleCopChecksumRequest handles coprocessor check sum request.
-func handleCopChecksumRequest(dbReader *dbreader.DBReader, req *coprocessor.Request) *coprocessor.Response {
-	resp := &tipb.ChecksumResponse{
-		Checksum:   1,
-		TotalKvs:   1,
-		TotalBytes: 1,
-	}
-	data, err := resp.Marshal()
-	if err != nil {
-		return &coprocessor.Response{OtherError: fmt.Sprintf("marshal checksum response error: %v", err)}
-	}
-	return &coprocessor.Response{Data: data}
-}

From 6b2a643c8554c59d5dbde0693ca522b6b470c6c8 Mon Sep 17 00:00:00 2001
From: imtbkcat
Date: Tue, 8 Sep 2020 21:38:16 +0800
Subject: [PATCH 3/4] update parser

---
 go.mod | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/go.mod b/go.mod
index b756b8df2e379..bb78336918d3e 100644
--- a/go.mod
+++ b/go.mod
@@ -32,7 +32,7 @@ require (
 	github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989
 	github.com/pingcap/kvproto v0.0.0-20200818080353-7aaed8998596
 	github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463
-	github.com/pingcap/parser v0.0.0-20200902091735-5e6adfc24e11
+	github.com/pingcap/parser v0.0.0-20200908132759-b65348b6244c
 	github.com/pingcap/sysutil v0.0.0-20200715082929-4c47bcac246a
 	github.com/pingcap/tidb-tools v4.0.6-0.20200828085514-03575b185007+incompatible
 	github.com/pingcap/tipb v0.0.0-20200618092958-4fad48b4c8c3

From fedc7afe1618e4334b0e7c77d6493a629d015603 Mon Sep 17 00:00:00 2001
From: imtbkcat
Date: Tue, 8 Sep 2020 21:49:48 +0800
Subject: [PATCH 4/4] fix ci

---
 go.mod |  2 --
 go.sum | 10 ++--------
 2 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/go.mod b/go.mod
index bb78336918d3e..7d9e7fa16ef02 100644
--- a/go.mod
+++ b/go.mod
@@ -63,5 +63,3 @@ require (
 )
 
 go 1.13
-
-replace github.com/pingcap/parser => github.com/imtbkcat/parser v0.0.0-20200908120703-5fc1c5049015
diff --git a/go.sum b/go.sum
index 21ea238d5576f..d6cec4b861e86 100644
--- a/go.sum
+++ b/go.sum
@@ -271,8 +271,6 @@ github.com/hypnoglow/gormzap v0.3.0/go.mod h1:5Wom8B7Jl2oK0Im9hs6KQ+Kl92w4Y7gKCr
 github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFEB7inlalqfNqw65aNkM1lGX2yt3NmbS8=
 github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imtbkcat/parser v0.0.0-20200908120703-5fc1c5049015 h1:LoUDOHCWv5XZw+Wz7ll5FxLTAw2bgxnXQihargJ2XxQ=
-github.com/imtbkcat/parser v0.0.0-20200908120703-5fc1c5049015/go.mod h1:RlLfMRJwFBSiXd2lUaWdV5pSXtrpyvZM8k5bbZWsheU=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -425,10 +423,6 @@ github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTw
 github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
 github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 h1:58naV4XMEqm0hl9LcYo6cZoGBGiLtefMQMF/vo3XLgQ=
 github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pingcap/errors v0.11.5-0.20200729012136-4e113ddee29e h1:/EGWHNOyEgizEBuAujWsb9vXrPZtt1b7ooDPyjEkjDw=
-github.com/pingcap/errors v0.11.5-0.20200729012136-4e113ddee29e/go.mod h1:g4vx//d6VakjJ0mk7iLBlKA8LFavV/sAVINT/1PFxeQ=
-github.com/pingcap/errors v0.11.5-0.20200820035142-66eb5bf1d1cd h1:ay+wAVWHI/Z6vIik13hsK+FT9ZCNSPBElGr0qgiZpjg=
-github.com/pingcap/errors v0.11.5-0.20200820035142-66eb5bf1d1cd/go.mod h1:g4vx//d6VakjJ0mk7iLBlKA8LFavV/sAVINT/1PFxeQ=
 github.com/pingcap/errors v0.11.5-0.20200902104258-eba4f1d8f6de h1:mW8hC2yXTpflfyTeJgcN4aJQfwcYODde8YgjBgAy6do=
 github.com/pingcap/errors v0.11.5-0.20200902104258-eba4f1d8f6de/go.mod h1:g4vx//d6VakjJ0mk7iLBlKA8LFavV/sAVINT/1PFxeQ=
 github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI=
@@ -467,8 +461,8 @@ github.com/pingcap/parser v0.0.0-20200603032439-c4ecb4508d2f/go.mod h1:9v0Edh8Ib
 github.com/pingcap/parser v0.0.0-20200623164729-3a18f1e5dceb/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0=
 github.com/pingcap/parser v0.0.0-20200803072748-fdf66528323d/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0=
 github.com/pingcap/parser v0.0.0-20200901062802-475ea5e2e0a7/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0=
-github.com/pingcap/parser v0.0.0-20200902091735-5e6adfc24e11 h1:l269UdOdRK4NGoldejKY+9Tor7XmHU+XOuS+VQDwcH8=
-github.com/pingcap/parser v0.0.0-20200902091735-5e6adfc24e11/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0=
+github.com/pingcap/parser v0.0.0-20200908132759-b65348b6244c h1:oJn1X+lZwG4LG2DV+73lppFfnfy+3wXUwpoVgtIOQq8=
+github.com/pingcap/parser v0.0.0-20200908132759-b65348b6244c/go.mod h1:RlLfMRJwFBSiXd2lUaWdV5pSXtrpyvZM8k5bbZWsheU=
 github.com/pingcap/pd/v4 v4.0.0-rc.1.0.20200422143320-428acd53eba2/go.mod h1:s+utZtXDznOiL24VK0qGmtoHjjXNsscJx3m1n8cC56s=
 github.com/pingcap/pd/v4 v4.0.0-rc.2.0.20200520083007-2c251bd8f181/go.mod h1:q4HTx/bA8aKBa4S7L+SQKHvjRPXCRV0tA0yRw0qkZSA=
 github.com/pingcap/pd/v4 v4.0.5-0.20200817114353-e465cafe8a91 h1:zCOWP+kIzM6ZsXdu2QoM/W6+3vFZj04MYboMP2Obc0E=