From aaf46836fc657bbd569217f0df506710219d8b66 Mon Sep 17 00:00:00 2001 From: xuyifan <675434007@qq.com> Date: Wed, 17 Nov 2021 13:10:24 +0800 Subject: [PATCH 01/11] implement collecting predicate columns from logical plan --- planner/core/rule_collect_predicate_column.go | 200 ++++++++++++++++++ 1 file changed, 200 insertions(+) create mode 100644 planner/core/rule_collect_predicate_column.go diff --git a/planner/core/rule_collect_predicate_column.go b/planner/core/rule_collect_predicate_column.go new file mode 100644 index 0000000000000..5c184b6823c7d --- /dev/null +++ b/planner/core/rule_collect_predicate_column.go @@ -0,0 +1,200 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + "github.com/pingcap/tidb/expression" + + "github.com/pingcap/tidb/parser/model" +) + +// predicateColumnCollector collects predicate columns from logical plan. Predicate columns are the columns whose statistics +// are utilized when making query plans, which usually occur in where conditions, join conditions and so on. +type predicateColumnCollector struct{ + // colMap maps expression.Column.UniqueID to the table columns whose statistics are utilized to calculate statistics of the column. + colMap map[int64]map[model.TableColumnID]struct{} + // predicateCols records predicate columns. + predicateCols map[model.TableColumnID]struct{} +} + +func (c *predicateColumnCollector) optimize(ctx context.Context, p LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { + c.collectPredicateColumns(p) + return p, nil +} + +func (c *predicateColumnCollector) addPredicateColumn(col *expression.Column) { + tblColIDs, ok := c.colMap[col.UniqueID] + if !ok { + // It may happen if some leaf of logical plan is LogicalMemTable/LogicalShow/LogicalShowDDLJobs. + return + } + for tblColID := range tblColIDs { + c.predicateCols[tblColID] = struct{}{} + } +} + +func (c *predicateColumnCollector) addPredicateColumnsFromExpression(expr expression.Expression) { + cols := expression.ExtractColumns(expr) + for _, col := range cols { + c.addPredicateColumn(col) + } +} + +func (c *predicateColumnCollector) addPredicateColumnsFromExpressions(exprs []expression.Expression) { + cols := make([]*expression.Column, 0, len(exprs)) + cols = expression.ExtractColumnsFromExpressions(cols, exprs, nil) + for _, col := range cols { + c.addPredicateColumn(col) + } +} + +func (c *predicateColumnCollector) updateColMap(col *expression.Column, relatedCols []*expression.Column) { + if _, ok := c.colMap[col.UniqueID]; !ok { + c.colMap[col.UniqueID] = map[model.TableColumnID]struct{}{} + } + for _, relatedCol := range relatedCols { + tblColIDs, ok := c.colMap[relatedCol.UniqueID] + if !ok { + // It may happen if some leaf of logical plan is LogicalMemTable/LogicalShow/LogicalShowDDLJobs. 
+ continue + } + for tblColID := range tblColIDs { + c.colMap[col.UniqueID][tblColID] = struct{}{} + } + } +} + +func (c *predicateColumnCollector) updateColMapFromExpression(col *expression.Column, expr expression.Expression) { + c.updateColMap(col, expression.ExtractColumns(expr)) +} + +func (c *predicateColumnCollector) updateColMapFromExpressions(col *expression.Column, exprs []expression.Expression) { + relatedCols := make([]*expression.Column, 0, len(exprs)) + relatedCols = expression.ExtractColumnsFromExpressions(relatedCols, exprs, nil) + c.updateColMap(col, relatedCols) +} + +func (ds *DataSource) updateColMapAndAddPredicateColumns(c *predicateColumnCollector) { + tblID := ds.TableInfo().ID + for _, col := range ds.Schema().Columns { + tblColID := model.TableColumnID{TableID: tblID, ColumnID: col.ID} + c.colMap[col.UniqueID] = map[model.TableColumnID]struct{}{tblColID: {}} + } + // TODO: use ds.pushedDownConds or ds.allConds? + c.addPredicateColumnsFromExpressions(ds.pushedDownConds) +} + +func (p *LogicalJoin) updateColMapAndAddPredicateColumns(c *predicateColumnCollector) { + // The only schema change is merging two schemas so there is no new column. + // Assume statistics of all the columns in EqualConditions/LeftConditions/RightConditions/OtherConditions are needed. + exprs := make([]expression.Expression, 0, len(p.EqualConditions) + len(p.LeftConditions) + len(p.RightConditions) + len(p.OtherConditions)) + for _, cond := range p.EqualConditions { + exprs = append(exprs, cond) + } + for _, cond := range p.LeftConditions { + exprs = append(exprs, cond) + } + for _, cond := range p.RightConditions { + exprs = append(exprs, cond) + } + for _, cond := range p.OtherConditions { + exprs = append(exprs, cond) + } + c.addPredicateColumnsFromExpressions(exprs) +} + +func (c *predicateColumnCollector) collectPredicateColumns(lp LogicalPlan) { + for _, child := range lp.Children() { + c.collectPredicateColumns(child) + } + switch x := lp.(type) { + case *DataSource: + x.updateColMapAndAddPredicateColumns(c) + case *LogicalIndexScan: + x.Source.updateColMapAndAddPredicateColumns(c) + // TODO: Is it redundant to add predicate columns from LogicalIndexScan.AccessConds? Is LogicalIndexScan.AccessConds a subset of LogicalIndexScan.Source.pushedDownConds. + c.addPredicateColumnsFromExpressions(x.AccessConds) + case *LogicalTableScan: + x.Source.updateColMapAndAddPredicateColumns(c) + // TODO: Is it redundant to add predicate columns from LogicalTableScan.AccessConds? Is LogicalTableScan.AccessConds a subset of LogicalTableScan.Source.pushedDownConds. + c.addPredicateColumnsFromExpressions(x.AccessConds) + case *TiKVSingleGather: + // TODO: Is it redundant? + x.Source.updateColMapAndAddPredicateColumns(c) + case *LogicalProjection: + // Schema change from children to self. + schema := x.Schema() + for i, expr := range x.Exprs { + c.updateColMapFromExpression(schema.Columns[i], expr) + } + case *LogicalSelection: + // Though the conditions in LogicalSelection are complex conditions which cannot be pushed down to DataSource, we still + // regard statistics of the columns in the conditions as needed. + c.addPredicateColumnsFromExpressions(x.Conditions) + case *LogicalAggregation: + // Just assume statistics of all the columns in GroupByItems are needed. + c.addPredicateColumnsFromExpressions(x.GroupByItems) + // Schema change from children to self. 
+ schema := x.Schema() + for i, aggFunc := range x.AggFuncs { + c.updateColMapFromExpressions(schema.Columns[i], aggFunc.Args) + } + case *LogicalJoin: + x.updateColMapAndAddPredicateColumns(c) + case *LogicalApply: + x.updateColMapAndAddPredicateColumns(c) + // Assume statistics of correlated columns are needed. + // Correlated columns can be found in LogicalApply.Children()[0].Schema(). Since we already visit LogicalApply.Children()[0], + // correlated columns must have existed in predicateColumnCollector.colMap. + for _, corCols := range x.CorCols { + c.addPredicateColumn(&corCols.Column) + } + case *LogicalTopN: + // Assume statistics of all the columns in ByItems are needed. + for _, item := range x.ByItems { + c.addPredicateColumnsFromExpression(item.Expr) + } + case *LogicalSort: + // Assume statistics of all the columns in ByItems are needed. + for _, item := range x.ByItems { + c.addPredicateColumnsFromExpression(item.Expr) + } + case *LogicalUnionAll: + // nothing to do + case *LogicalPartitionUnionAll: + // nothing to do + + case *LogicalLimit: + // nothing to do + case *LogicalMaxOneRow: + // nothing to do + case *LogicalTableDual: + // nothing to do + case *LogicalShow: + // nothing to do + case *LogicalShowDDLJobs: + // nothing to do + case *LogicalMemTable: + // nothing to do + case *LogicalLock: + // nothing to do + } + +} + +func (*predicateColumnCollector) name() string { + return "collect_predicate_columns" +} From 040c8fe7ce533eb5c9e11252f98ec5c3fa9ae341 Mon Sep 17 00:00:00 2001 From: xuyifan <675434007@qq.com> Date: Fri, 19 Nov 2021 16:03:42 +0800 Subject: [PATCH 02/11] update predicateColumnCollector --- planner/core/logical_plan_builder.go | 2 +- planner/core/logical_plans.go | 3 + planner/core/rule_collect_predicate_column.go | 88 ++++++++++++------- 3 files changed, 59 insertions(+), 34 deletions(-) diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 7e8bd7ba6688c..22dbcd0f11e2d 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -3826,7 +3826,7 @@ func (b *PlanBuilder) tryBuildCTE(ctx context.Context, tn *ast.TableName, asName } cte.recursiveRef = true - p := LogicalCTETable{name: cte.def.Name.String(), idForStorage: cte.storageID, seedStat: cte.seedStat}.Init(b.ctx, b.getSelectOffset()) + p := LogicalCTETable{name: cte.def.Name.String(), idForStorage: cte.storageID, seedStat: cte.seedStat, seedSchema: cte.seedLP.Schema()}.Init(b.ctx, b.getSelectOffset()) p.SetSchema(getResultCTESchema(cte.seedLP.Schema(), b.ctx.GetSessionVars())) p.SetOutputNames(cte.seedLP.OutputNames()) return p, nil diff --git a/planner/core/logical_plans.go b/planner/core/logical_plans.go index 2258e2b4de31d..0d2f87c02e3ba 100644 --- a/planner/core/logical_plans.go +++ b/planner/core/logical_plans.go @@ -1297,6 +1297,9 @@ type LogicalCTETable struct { seedStat *property.StatsInfo name string idForStorage int + + // seedSchema is only used in predicateColumnCollector to get column mapping + seedSchema *expression.Schema } // ExtractCorrelatedCols implements LogicalPlan interface. 
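The seedSchema field added above is consumed when the collector reaches a LogicalCTETable: the CTE table itself carries no statistics, so each of its output columns has to be traced back to the corresponding column of the CTE's seed plan. A minimal sketch of that mapping, reusing updateColMap from this series (the standalone helper name below is illustrative only; in the actual patch the logic sits in the LogicalCTETable case of collectFromPlan shown further down):

// mapCTETableColumns records that statistics of the i-th LogicalCTETable column
// come from the i-th column of the seed plan's schema (illustrative helper, not
// part of the patch).
func (c *predicateColumnCollector) mapCTETableColumns(x *LogicalCTETable) {
	for i, col := range x.Schema().Columns {
		c.updateColMap(col, []*expression.Column{x.seedSchema.Columns[i]})
	}
}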
diff --git a/planner/core/rule_collect_predicate_column.go b/planner/core/rule_collect_predicate_column.go index 5c184b6823c7d..36899f2088b77 100644 --- a/planner/core/rule_collect_predicate_column.go +++ b/planner/core/rule_collect_predicate_column.go @@ -15,26 +15,19 @@ package core import ( - "context" "github.com/pingcap/tidb/expression" - "github.com/pingcap/tidb/parser/model" ) // predicateColumnCollector collects predicate columns from logical plan. Predicate columns are the columns whose statistics // are utilized when making query plans, which usually occur in where conditions, join conditions and so on. -type predicateColumnCollector struct{ +type predicateColumnCollector struct { // colMap maps expression.Column.UniqueID to the table columns whose statistics are utilized to calculate statistics of the column. colMap map[int64]map[model.TableColumnID]struct{} // predicateCols records predicate columns. predicateCols map[model.TableColumnID]struct{} } -func (c *predicateColumnCollector) optimize(ctx context.Context, p LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { - c.collectPredicateColumns(p) - return p, nil -} - func (c *predicateColumnCollector) addPredicateColumn(col *expression.Column) { tblColIDs, ok := c.colMap[col.UniqueID] if !ok { @@ -100,7 +93,7 @@ func (ds *DataSource) updateColMapAndAddPredicateColumns(c *predicateColumnColle func (p *LogicalJoin) updateColMapAndAddPredicateColumns(c *predicateColumnCollector) { // The only schema change is merging two schemas so there is no new column. // Assume statistics of all the columns in EqualConditions/LeftConditions/RightConditions/OtherConditions are needed. - exprs := make([]expression.Expression, 0, len(p.EqualConditions) + len(p.LeftConditions) + len(p.RightConditions) + len(p.OtherConditions)) + exprs := make([]expression.Expression, 0, len(p.EqualConditions)+len(p.LeftConditions)+len(p.RightConditions)+len(p.OtherConditions)) for _, cond := range p.EqualConditions { exprs = append(exprs, cond) } @@ -116,9 +109,25 @@ func (p *LogicalJoin) updateColMapAndAddPredicateColumns(c *predicateColumnColle c.addPredicateColumnsFromExpressions(exprs) } -func (c *predicateColumnCollector) collectPredicateColumns(lp LogicalPlan) { +func (p *LogicalUnionAll) updateColMapAndAddPredicateColumns(c *predicateColumnCollector) { + // statistics of the ith column of UnionAll come from statistics of the ith column of each child. + schemas := make([]*expression.Schema, 0, len(p.Children())) + relatedCols := make([]*expression.Column, 0, len(p.Children())) + for _, child := range p.Children() { + schemas = append(schemas, child.Schema()) + } + for i, col := range p.Schema().Columns { + relatedCols = relatedCols[:0] + for j := range p.Children() { + relatedCols = append(relatedCols, schemas[j].Columns[i]) + } + c.updateColMap(col, relatedCols) + } +} + +func (c *predicateColumnCollector) collectFromPlan(lp LogicalPlan) { for _, child := range lp.Children() { - c.collectPredicateColumns(child) + c.collectFromPlan(child) } switch x := lp.(type) { case *DataSource: @@ -152,6 +161,17 @@ func (c *predicateColumnCollector) collectPredicateColumns(lp LogicalPlan) { for i, aggFunc := range x.AggFuncs { c.updateColMapFromExpressions(schema.Columns[i], aggFunc.Args) } + case *LogicalWindow: + // Statistics of the columns in LogicalWindow.PartitionBy are used in optimizeByShuffle4Window. + // It seems that we don't use statistics of the columns in LogicalWindow.OrderBy currently? 
+ for _, item := range x.PartitionBy { + c.addPredicateColumn(item.Col) + } + // Schema change from children to self. + windowColumns := x.GetWindowResultColumns() + for i, col := range windowColumns { + c.updateColMapFromExpressions(col, x.WindowFuncDescs[i].Args) + } case *LogicalJoin: x.updateColMapAndAddPredicateColumns(c) case *LogicalApply: @@ -173,28 +193,30 @@ func (c *predicateColumnCollector) collectPredicateColumns(lp LogicalPlan) { c.addPredicateColumnsFromExpression(item.Expr) } case *LogicalUnionAll: - // nothing to do + x.updateColMapAndAddPredicateColumns(c) case *LogicalPartitionUnionAll: - // nothing to do - - case *LogicalLimit: - // nothing to do - case *LogicalMaxOneRow: - // nothing to do - case *LogicalTableDual: - // nothing to do - case *LogicalShow: - // nothing to do - case *LogicalShowDDLJobs: - // nothing to do - case *LogicalMemTable: - // nothing to do - case *LogicalLock: - // nothing to do + x.updateColMapAndAddPredicateColumns(c) + case *LogicalCTE: + // Schema change from seedPlan/recursivePlan to self. + columns := x.Schema().Columns + seedColumns := x.cte.seedPartLogicalPlan.Schema().Columns + recursiveColumns := x.cte.recursivePartLogicalPlan.Schema().Columns + relatedCols := make([]*expression.Column, 0, 2) + for i, col := range columns { + relatedCols = append(relatedCols[:0], seedColumns[i], recursiveColumns[i]) + c.updateColMap(col, relatedCols) + } + // If IsDistinct is true, then we use getColsNDV to calculate row count(see (*LogicalCTE).DeriveStat). In this case + // statistics of all the columns are needed. + if x.cte.IsDistinct { + for _, col := range columns { + c.addPredicateColumn(col) + } + } + case *LogicalCTETable: + // Schema change from seedPlan to self. + for i, col := range x.Schema().Columns { + c.updateColMap(col, []*expression.Column{x.seedSchema.Columns[i]}) + } } - -} - -func (*predicateColumnCollector) name() string { - return "collect_predicate_columns" } From f5db07ea3e4b9f09ef27468c1711d4ad2ed9682b Mon Sep 17 00:00:00 2001 From: xuyifan <675434007@qq.com> Date: Mon, 6 Dec 2021 20:09:51 +0800 Subject: [PATCH 03/11] add tests --- expression/util.go | 21 +++ ...olumn.go => predicate_column_collector.go} | 39 +++- .../core/predicate_column_collector_test.go | 177 ++++++++++++++++++ 3 files changed, 228 insertions(+), 9 deletions(-) rename planner/core/{rule_collect_predicate_column.go => predicate_column_collector.go} (84%) create mode 100644 planner/core/predicate_column_collector_test.go diff --git a/expression/util.go b/expression/util.go index 7b34ef442067b..1b6a17f6e884d 100644 --- a/expression/util.go +++ b/expression/util.go @@ -166,6 +166,27 @@ func extractColumns(result []*Column, expr Expression, filter func(*Column) bool return result } +func ExtractColumnsAndCorColumns(result []*Column, expr Expression) []*Column { + switch v := expr.(type) { + case *Column: + result = append(result, v) + case *CorrelatedColumn: + result = append(result, &v.Column) + case *ScalarFunction: + for _, arg := range v.GetArgs() { + result = ExtractColumnsAndCorColumns(result, arg) + } + } + return result +} + +func ExtractColumnsAndCorColumnsFromExpressions(result []*Column, list []Expression) []*Column { + for _, expr := range list { + result = ExtractColumnsAndCorColumns(result, expr) + } + return result +} + // ExtractColumnSet extracts the different values of `UniqueId` for columns in expressions. 
func ExtractColumnSet(exprs []Expression) *intsets.Sparse { set := &intsets.Sparse{} diff --git a/planner/core/rule_collect_predicate_column.go b/planner/core/predicate_column_collector.go similarity index 84% rename from planner/core/rule_collect_predicate_column.go rename to planner/core/predicate_column_collector.go index 36899f2088b77..624b552d0ae20 100644 --- a/planner/core/rule_collect_predicate_column.go +++ b/planner/core/predicate_column_collector.go @@ -26,12 +26,24 @@ type predicateColumnCollector struct { colMap map[int64]map[model.TableColumnID]struct{} // predicateCols records predicate columns. predicateCols map[model.TableColumnID]struct{} + // cols is used to store columns collected from expressions and saves some allocation. + cols []*expression.Column +} + +func newPredicateColumnCollector() *predicateColumnCollector { + return &predicateColumnCollector{ + colMap: make(map[int64]map[model.TableColumnID]struct{}), + predicateCols: make(map[model.TableColumnID]struct{}), + // Pre-allocate a slice to reduce allocation, 8 doesn't have special meaning. + cols: make([]*expression.Column, 0, 8), + } } func (c *predicateColumnCollector) addPredicateColumn(col *expression.Column) { tblColIDs, ok := c.colMap[col.UniqueID] if !ok { // It may happen if some leaf of logical plan is LogicalMemTable/LogicalShow/LogicalShowDDLJobs. + // logutil.BgLogger().Info(fmt.Sprintf("uniqueID:%v, ID:%v, name:%s", col.UniqueID, col.ID, col.OrigName)) return } for tblColID := range tblColIDs { @@ -40,15 +52,14 @@ func (c *predicateColumnCollector) addPredicateColumn(col *expression.Column) { } func (c *predicateColumnCollector) addPredicateColumnsFromExpression(expr expression.Expression) { - cols := expression.ExtractColumns(expr) + cols := expression.ExtractColumnsAndCorColumns(c.cols[:0], expr) for _, col := range cols { c.addPredicateColumn(col) } } -func (c *predicateColumnCollector) addPredicateColumnsFromExpressions(exprs []expression.Expression) { - cols := make([]*expression.Column, 0, len(exprs)) - cols = expression.ExtractColumnsFromExpressions(cols, exprs, nil) +func (c *predicateColumnCollector) addPredicateColumnsFromExpressions(list []expression.Expression) { + cols := expression.ExtractColumnsAndCorColumnsFromExpressions(c.cols[:0], list) for _, col := range cols { c.addPredicateColumn(col) } @@ -62,6 +73,7 @@ func (c *predicateColumnCollector) updateColMap(col *expression.Column, relatedC tblColIDs, ok := c.colMap[relatedCol.UniqueID] if !ok { // It may happen if some leaf of logical plan is LogicalMemTable/LogicalShow/LogicalShowDDLJobs. 
+ // logutil.BgLogger().Info(fmt.Sprintf("uniqueID:%v, ID:%v, name:%s", col.UniqueID, col.ID, col.OrigName)) continue } for tblColID := range tblColIDs { @@ -71,13 +83,11 @@ func (c *predicateColumnCollector) updateColMap(col *expression.Column, relatedC } func (c *predicateColumnCollector) updateColMapFromExpression(col *expression.Column, expr expression.Expression) { - c.updateColMap(col, expression.ExtractColumns(expr)) + c.updateColMap(col, expression.ExtractColumnsAndCorColumns(c.cols[:0], expr)) } -func (c *predicateColumnCollector) updateColMapFromExpressions(col *expression.Column, exprs []expression.Expression) { - relatedCols := make([]*expression.Column, 0, len(exprs)) - relatedCols = expression.ExtractColumnsFromExpressions(relatedCols, exprs, nil) - c.updateColMap(col, relatedCols) +func (c *predicateColumnCollector) updateColMapFromExpressions(col *expression.Column, list []expression.Expression) { + c.updateColMap(col, expression.ExtractColumnsAndCorColumnsFromExpressions(c.cols[:0], list)) } func (ds *DataSource) updateColMapAndAddPredicateColumns(c *predicateColumnCollector) { @@ -220,3 +230,14 @@ func (c *predicateColumnCollector) collectFromPlan(lp LogicalPlan) { } } } + +// CollectPredicateColumnsForTest collects predicate columns from logical plan. It is only for test. +func CollectPredicateColumnsForTest(lp LogicalPlan) []model.TableColumnID { + collector := newPredicateColumnCollector() + collector.collectFromPlan(lp) + tblColIDs := make([]model.TableColumnID, 0, len(collector.predicateCols)) + for tblColID := range collector.predicateCols { + tblColIDs = append(tblColIDs, tblColID) + } + return tblColIDs +} diff --git a/planner/core/predicate_column_collector_test.go b/planner/core/predicate_column_collector_test.go new file mode 100644 index 0000000000000..df51904f937ca --- /dev/null +++ b/planner/core/predicate_column_collector_test.go @@ -0,0 +1,177 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core_test + +import ( + "context" + "fmt" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" + "sort" + "testing" + + "github.com/pingcap/tidb/parser/model" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/util/hint" + "github.com/stretchr/testify/require" +) + +func TestPredicateColumnCollector(t *testing.T) { + t.Parallel() + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2") + tk.MustExec("create table t1(a int, b int, c int)") + tk.MustExec("create table t2(a int, b int, c int)") + + tests := []struct { + sql string + res []string + }{ + { + sql: "select * from t1 where a > 2", + res: []string{"t1.a"}, + }, + { + sql: "select * from t1 where b in (2, 5) or c = 5", + res: []string{"t1.b", "t1.c"}, + }, + { + sql: "select * from (select a + b as ab, c from t1) as tmp where ab > 4", + res: []string{"t1.a", "t1.b"}, + }, + { + sql: "select b, count(*) from t1 group by b", + res: []string{"t1.b"}, + }, + { + sql: "select b, sum(a) from t1 group by b having sum(a) > 3", + res: []string{"t1.a", "t1.b"}, + }, + { + sql: "select count(*), sum(a), sum(c) from t1", + res: []string{}, + }, + { + sql: "(select a, b from t1) union (select a, c from t2)", + res: []string{"t1.a", "t1.b", "t2.a", "t2.c"}, + }, + { + sql: "select avg(b) over(partition by a) from t1", + res: []string{"t1.a"}, + }, + { + sql: "select * from (select avg(b) over(partition by a) as w from t1) as tmp where w > 4", + res: []string{"t1.a", "t1.b"}, + }, + { + sql: "select row_number() over(partition by a order by c) from t1", + res: []string{"t1.a"}, + }, + { + sql: "select * from t1, t2 where t1.a = t2.a", + res: []string{"t1.a", "t2.a"}, + }, + { + sql: "select * from t1 as x join t2 as y on x.b + y.c > 2", + res: []string{"t1.b", "t2.c"}, + }, + { + sql: "select * from t1 as x join t2 as y on x.a = y.a and x.b < 3 and y.c > 2", + res: []string{"t1.a", "t1.b", "t2.a", "t2.c"}, + }, + { + sql: "select x.b, y.c, sum(x.c), sum(y.b) from t1 as x join t2 as y on x.a = y.a group by x.b, y.c order by x.b", + res: []string{"t1.a", "t1.b", "t2.a", "t2.c"}, + }, + { + sql: "select * from t1 where t1.b > all(select b from t2 where t2.c > 2)", + res: []string{"t1.b", "t2.b", "t2.c"}, + }, + { + sql: "select * from t1 where t1.b > (select count(b) from t2 where t2.c > t1.a)", + res: []string{"t1.a", "t1.b", "t2.b", "t2.c"}, + }, + { + sql: "select * from t1 where t1.b > (select count(*) from t2 where t2.c > t1.a)", + res: []string{"t1.a", "t1.b", "t2.c"}, + }, + { + sql: "select * from t1 order by c", + res: []string{"t1.c"}, + }, + { + sql: "select * from t1 order by a + b limit 10", + res: []string{"t1.a", "t1.b"}, + }, + { + sql: "select * from ((select a, b from t1) union all (select a, c from t2)) as tmp where tmp.b > 2", + res: []string{"t1.b", "t2.c"}, + }, + } + + ctx := context.Background() + sctx := tk.Session() + is := dom.InfoSchema() + getColName := func(tblColID model.TableColumnID) (string, bool) { + tbl, ok := is.TableByID(tblColID.TableID) + if !ok { + return "", false + } + tblInfo := tbl.Meta() + for _, col := range tblInfo.Columns { + if tblColID.ColumnID == col.ID { + return tblInfo.Name.L + "." 
+ col.Name.L, true + } + } + return "", false + } + checkPredicateColumns := func(lp plannercore.LogicalPlan, expected []string, comment string) { + tblColIDs := plannercore.CollectPredicateColumnsForTest(lp) + cols := make([]string, 0, len(tblColIDs)) + for _, tblColID := range tblColIDs { + col, ok := getColName(tblColID) + require.Equal(t, true, ok, comment) + cols = append(cols, col) + } + sort.Strings(cols) + require.Equal(t, expected, cols, comment) + } + + for _, tt := range tests { + comment := fmt.Sprintf("for %s", tt.sql) + logutil.BgLogger().Info(comment) + stmts, err := tk.Session().Parse(ctx, tt.sql) + require.NoError(t, err, comment) + stmt := stmts[0] + err = plannercore.Preprocess(sctx, stmt, plannercore.WithPreprocessorReturn(&plannercore.PreprocessorReturn{InfoSchema: is})) + require.NoError(t, err, comment) + builder, _ := plannercore.NewPlanBuilder().Init(sctx, is, &hint.BlockHintProcessor{}) + p, err := builder.Build(ctx, stmt) + require.NoError(t, err, comment) + lp, ok := p.(plannercore.LogicalPlan) + require.Equal(t, true, ok, comment) + // We check predicate columns twice, before and after logical optimization. + logutil.BgLogger().Info("before logical opt", zap.String("lp", plannercore.ToString(lp))) + checkPredicateColumns(lp, tt.res, comment) + lp, err = plannercore.LogicalOptimize(ctx, builder.GetOptFlag(), lp) + require.NoError(t, err, comment) + logutil.BgLogger().Info("after logical opt", zap.String("lp", plannercore.ToString(lp))) + checkPredicateColumns(lp, tt.res, comment) + } +} From f22919bb71adc72bf68f7ceddf505907ffeaa770 Mon Sep 17 00:00:00 2001 From: xuyifan <675434007@qq.com> Date: Tue, 7 Dec 2021 18:02:43 +0800 Subject: [PATCH 04/11] add more tests --- planner/core/predicate_column_collector.go | 21 +++++-- .../core/predicate_column_collector_test.go | 55 +++++++++++++++++-- 2 files changed, 66 insertions(+), 10 deletions(-) diff --git a/planner/core/predicate_column_collector.go b/planner/core/predicate_column_collector.go index 624b552d0ae20..1856c76302aa0 100644 --- a/planner/core/predicate_column_collector.go +++ b/planner/core/predicate_column_collector.go @@ -43,7 +43,6 @@ func (c *predicateColumnCollector) addPredicateColumn(col *expression.Column) { tblColIDs, ok := c.colMap[col.UniqueID] if !ok { // It may happen if some leaf of logical plan is LogicalMemTable/LogicalShow/LogicalShowDDLJobs. - // logutil.BgLogger().Info(fmt.Sprintf("uniqueID:%v, ID:%v, name:%s", col.UniqueID, col.ID, col.OrigName)) return } for tblColID := range tblColIDs { @@ -73,7 +72,6 @@ func (c *predicateColumnCollector) updateColMap(col *expression.Column, relatedC tblColIDs, ok := c.colMap[relatedCol.UniqueID] if !ok { // It may happen if some leaf of logical plan is LogicalMemTable/LogicalShow/LogicalShowDDLJobs. - // logutil.BgLogger().Info(fmt.Sprintf("uniqueID:%v, ID:%v, name:%s", col.UniqueID, col.ID, col.OrigName)) continue } for tblColID := range tblColIDs { @@ -192,12 +190,12 @@ func (c *predicateColumnCollector) collectFromPlan(lp LogicalPlan) { for _, corCols := range x.CorCols { c.addPredicateColumn(&corCols.Column) } - case *LogicalTopN: + case *LogicalSort: // Assume statistics of all the columns in ByItems are needed. for _, item := range x.ByItems { c.addPredicateColumnsFromExpression(item.Expr) } - case *LogicalSort: + case *LogicalTopN: // Assume statistics of all the columns in ByItems are needed. 
for _, item := range x.ByItems { c.addPredicateColumnsFromExpression(item.Expr) @@ -207,13 +205,24 @@ func (c *predicateColumnCollector) collectFromPlan(lp LogicalPlan) { case *LogicalPartitionUnionAll: x.updateColMapAndAddPredicateColumns(c) case *LogicalCTE: + // Visit seedPartLogicalPlan and recursivePartLogicalPlan first. + c.collectFromPlan(x.cte.seedPartLogicalPlan) + if x.cte.recursivePartLogicalPlan != nil { + c.collectFromPlan(x.cte.recursivePartLogicalPlan) + } // Schema change from seedPlan/recursivePlan to self. columns := x.Schema().Columns seedColumns := x.cte.seedPartLogicalPlan.Schema().Columns - recursiveColumns := x.cte.recursivePartLogicalPlan.Schema().Columns + var recursiveColumns []*expression.Column + if x.cte.recursivePartLogicalPlan != nil { + recursiveColumns = x.cte.recursivePartLogicalPlan.Schema().Columns + } relatedCols := make([]*expression.Column, 0, 2) for i, col := range columns { - relatedCols = append(relatedCols[:0], seedColumns[i], recursiveColumns[i]) + relatedCols = append(relatedCols[:0], seedColumns[i]) + if recursiveColumns != nil { + relatedCols = append(relatedCols, recursiveColumns[i]) + } c.updateColMap(col, relatedCols) } // If IsDistinct is true, then we use getColsNDV to calculate row count(see (*LogicalCTE).DeriveStat). In this case diff --git a/planner/core/predicate_column_collector_test.go b/planner/core/predicate_column_collector_test.go index df51904f937ca..ef835ce8bb8f8 100644 --- a/planner/core/predicate_column_collector_test.go +++ b/planner/core/predicate_column_collector_test.go @@ -18,7 +18,6 @@ import ( "context" "fmt" "github.com/pingcap/tidb/util/logutil" - "go.uber.org/zap" "sort" "testing" @@ -36,93 +35,140 @@ func TestPredicateColumnCollector(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t1, t2") + tk.MustExec("set @@session.tidb_partition_prune_mode = 'static'") tk.MustExec("create table t1(a int, b int, c int)") tk.MustExec("create table t2(a int, b int, c int)") + tk.MustExec("create table t3(a int, b int, c int) partition by range(a) (partition p0 values less than (10), partition p1 values less than (20), partition p2 values less than maxvalue)") tests := []struct { sql string res []string }{ { + // DataSource sql: "select * from t1 where a > 2", res: []string{"t1.a"}, }, { + // DataSource sql: "select * from t1 where b in (2, 5) or c = 5", res: []string{"t1.b", "t1.c"}, }, { + // LogicalProjection sql: "select * from (select a + b as ab, c from t1) as tmp where ab > 4", res: []string{"t1.a", "t1.b"}, }, { + // LogicalAggregation sql: "select b, count(*) from t1 group by b", res: []string{"t1.b"}, }, { + // LogicalAggregation sql: "select b, sum(a) from t1 group by b having sum(a) > 3", res: []string{"t1.a", "t1.b"}, }, { + // LogicalAggregation sql: "select count(*), sum(a), sum(c) from t1", res: []string{}, }, { + // LogicalAggregation sql: "(select a, b from t1) union (select a, c from t2)", res: []string{"t1.a", "t1.b", "t2.a", "t2.c"}, }, { + // LogicalWindow sql: "select avg(b) over(partition by a) from t1", res: []string{"t1.a"}, }, { + // LogicalWindow sql: "select * from (select avg(b) over(partition by a) as w from t1) as tmp where w > 4", res: []string{"t1.a", "t1.b"}, }, { + // LogicalWindow sql: "select row_number() over(partition by a order by c) from t1", res: []string{"t1.a"}, }, { + // LogicalJoin sql: "select * from t1, t2 where t1.a = t2.a", res: []string{"t1.a", "t2.a"}, }, { + // LogicalJoin sql: "select * from t1 as x join t2 as y on 
x.b + y.c > 2", res: []string{"t1.b", "t2.c"}, }, { + // LogicalJoin sql: "select * from t1 as x join t2 as y on x.a = y.a and x.b < 3 and y.c > 2", res: []string{"t1.a", "t1.b", "t2.a", "t2.c"}, }, { + // LogicalJoin sql: "select x.b, y.c, sum(x.c), sum(y.b) from t1 as x join t2 as y on x.a = y.a group by x.b, y.c order by x.b", res: []string{"t1.a", "t1.b", "t2.a", "t2.c"}, }, { + // LogicalApply sql: "select * from t1 where t1.b > all(select b from t2 where t2.c > 2)", res: []string{"t1.b", "t2.b", "t2.c"}, }, { + // LogicalApply sql: "select * from t1 where t1.b > (select count(b) from t2 where t2.c > t1.a)", res: []string{"t1.a", "t1.b", "t2.b", "t2.c"}, }, { + // LogicalApply sql: "select * from t1 where t1.b > (select count(*) from t2 where t2.c > t1.a)", res: []string{"t1.a", "t1.b", "t2.c"}, }, { + // LogicalSort sql: "select * from t1 order by c", res: []string{"t1.c"}, }, { + // LogicalTopN sql: "select * from t1 order by a + b limit 10", res: []string{"t1.a", "t1.b"}, }, { + // LogicalUnionAll sql: "select * from ((select a, b from t1) union all (select a, c from t2)) as tmp where tmp.b > 2", res: []string{"t1.b", "t2.c"}, }, + { + // LogicalPartitionUnionAll + sql: "select * from t3 where a < 15 and b > 1", + res: []string{"t3.a", "t3.b"}, + }, + { + // LogicalCTE + sql: "with cte(x, y) as (select a + 1, b from t1 where b > 1) select * from cte where x > 3", + res: []string{"t1.a", "t1.b"}, + }, + { + // LogicalCTE, LogicalCTETable + sql: "with recursive cte(x, y) as (select c, 1 from t1 union all select x + 1, y from cte where x < 5) select * from cte", + res: []string{"t1.c"}, + }, + { + // LogicalCTE, LogicalCTETable + sql: "with recursive cte(x, y) as (select 1, c from t1 union all select x + 1, y from cte where x < 5) select * from cte where y > 1", + res: []string{"t1.c"}, + }, + { + // LogicalCTE, LogicalCTETable + sql: "with recursive cte(x, y) as (select a, b from t1 union select x + 1, y from cte where x < 5) select * from cte", + res: []string{"t1.a", "t1.b"}, + }, } ctx := context.Background() @@ -166,12 +212,13 @@ func TestPredicateColumnCollector(t *testing.T) { require.NoError(t, err, comment) lp, ok := p.(plannercore.LogicalPlan) require.Equal(t, true, ok, comment) - // We check predicate columns twice, before and after logical optimization. - logutil.BgLogger().Info("before logical opt", zap.String("lp", plannercore.ToString(lp))) + // We check predicate columns twice, before and after logical optimization. Some logical plan patterns may occur before + // logical optimization while others may occur after logical optimization. 
+ // logutil.BgLogger().Info("before logical opt", zap.String("lp", plannercore.ToString(lp))) checkPredicateColumns(lp, tt.res, comment) lp, err = plannercore.LogicalOptimize(ctx, builder.GetOptFlag(), lp) require.NoError(t, err, comment) - logutil.BgLogger().Info("after logical opt", zap.String("lp", plannercore.ToString(lp))) + // logutil.BgLogger().Info("after logical opt", zap.String("lp", plannercore.ToString(lp))) checkPredicateColumns(lp, tt.res, comment) } } From 0fb0e7d27b4b5443f1c899b8aa0b9ff29927a037 Mon Sep 17 00:00:00 2001 From: xuyifan <675434007@qq.com> Date: Tue, 7 Dec 2021 18:16:48 +0800 Subject: [PATCH 05/11] add comment --- expression/util.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/expression/util.go b/expression/util.go index 1b6a17f6e884d..d7b92329d51f6 100644 --- a/expression/util.go +++ b/expression/util.go @@ -166,6 +166,7 @@ func extractColumns(result []*Column, expr Expression, filter func(*Column) bool return result } +// ExtractColumnsAndCorColumns extracts columns and correlated columns from `expr` and append them to `result`. func ExtractColumnsAndCorColumns(result []*Column, expr Expression) []*Column { switch v := expr.(type) { case *Column: @@ -180,6 +181,7 @@ func ExtractColumnsAndCorColumns(result []*Column, expr Expression) []*Column { return result } +// ExtractColumnsAndCorColumnsFromExpressions extracts columns and correlated columns from expressions and append them to `result`. func ExtractColumnsAndCorColumnsFromExpressions(result []*Column, list []Expression) []*Column { for _, expr := range list { result = ExtractColumnsAndCorColumns(result, expr) From 72998e78dfb5df8ece85318fc3a6b65811c82a6e Mon Sep 17 00:00:00 2001 From: xuyifan <675434007@qq.com> Date: Wed, 8 Dec 2021 10:56:54 +0800 Subject: [PATCH 06/11] fmt --- planner/core/predicate_column_collector_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/planner/core/predicate_column_collector_test.go b/planner/core/predicate_column_collector_test.go index ef835ce8bb8f8..045db0d32ebf3 100644 --- a/planner/core/predicate_column_collector_test.go +++ b/planner/core/predicate_column_collector_test.go @@ -17,7 +17,6 @@ package core_test import ( "context" "fmt" - "github.com/pingcap/tidb/util/logutil" "sort" "testing" @@ -25,6 +24,7 @@ import ( plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/util/hint" + "github.com/pingcap/tidb/util/logutil" "github.com/stretchr/testify/require" ) From 70f97dc085bc1dc857e84117293a7652ccd163c1 Mon Sep 17 00:00:00 2001 From: xuyifan <675434007@qq.com> Date: Fri, 10 Dec 2021 13:59:47 +0800 Subject: [PATCH 07/11] upd --- ...ector.go => collect_column_stats_usage.go} | 39 ++++++++++++++++++- ....go => collect_column_stats_usage_test.go} | 6 +-- 2 files changed, 41 insertions(+), 4 deletions(-) rename planner/core/{predicate_column_collector.go => collect_column_stats_usage.go} (87%) rename planner/core/{predicate_column_collector_test.go => collect_column_stats_usage_test.go} (98%) diff --git a/planner/core/predicate_column_collector.go b/planner/core/collect_column_stats_usage.go similarity index 87% rename from planner/core/predicate_column_collector.go rename to planner/core/collect_column_stats_usage.go index 1856c76302aa0..5ae6d19459c44 100644 --- a/planner/core/predicate_column_collector.go +++ b/planner/core/collect_column_stats_usage.go @@ -17,6 +17,7 @@ package core import ( "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/model" + 
"github.com/pingcap/tidb/sessionctx" ) // predicateColumnCollector collects predicate columns from logical plan. Predicate columns are the columns whose statistics @@ -94,7 +95,7 @@ func (ds *DataSource) updateColMapAndAddPredicateColumns(c *predicateColumnColle tblColID := model.TableColumnID{TableID: tblID, ColumnID: col.ID} c.colMap[col.UniqueID] = map[model.TableColumnID]struct{}{tblColID: {}} } - // TODO: use ds.pushedDownConds or ds.allConds? + // We should use `pushedDownConds` here. `allConds` is used for partition pruning, which doesn't need stats. c.addPredicateColumnsFromExpressions(ds.pushedDownConds) } @@ -250,3 +251,39 @@ func CollectPredicateColumnsForTest(lp LogicalPlan) []model.TableColumnID { } return tblColIDs } + +// collectPredicateColumns collects predicate columns from logical plan and stores them to session. +func collectPredicateColumns(sctx sessionctx.Context, lp LogicalPlan) { + // TODO: +} + +// CollectHistNeededColumns collects histogram-needed columns from logical plan +func CollectHistNeededColumns(plan LogicalPlan) []model.TableColumnID { + colMap := map[model.TableColumnID]struct{}{} + collectHistNeededColumnsFromPlan(plan, colMap) + histColumns := make([]model.TableColumnID, 0, len(colMap)) + for col := range colMap { + histColumns = append(histColumns, col) + } + return histColumns +} + +func collectHistNeededColumnsFromPlan(plan LogicalPlan, colMap map[model.TableColumnID]struct{}) { + for _, child := range plan.Children() { + collectHistNeededColumnsFromPlan(child, colMap) + } + switch x := plan.(type) { + case *DataSource: + tblID := x.TableInfo().ID + columns := expression.ExtractColumnsFromExpressions(nil, x.pushedDownConds, nil) + for _, col := range columns { + tblColID := model.TableColumnID{TableID: tblID, ColumnID: col.ID} + colMap[tblColID] = struct{}{} + } + case *LogicalCTE: + collectHistNeededColumnsFromPlan(x.cte.seedPartLogicalPlan, colMap) + if x.cte.recursivePartLogicalPlan != nil { + collectHistNeededColumnsFromPlan(x.cte.recursivePartLogicalPlan, colMap) + } + } +} diff --git a/planner/core/predicate_column_collector_test.go b/planner/core/collect_column_stats_usage_test.go similarity index 98% rename from planner/core/predicate_column_collector_test.go rename to planner/core/collect_column_stats_usage_test.go index 045db0d32ebf3..fb9ee5e99cb1e 100644 --- a/planner/core/predicate_column_collector_test.go +++ b/planner/core/collect_column_stats_usage_test.go @@ -28,7 +28,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestPredicateColumnCollector(t *testing.T) { +func TestCollectPredicateColumns(t *testing.T) { t.Parallel() store, dom, clean := testkit.CreateMockStoreAndDomain(t) defer clean() @@ -192,11 +192,11 @@ func TestPredicateColumnCollector(t *testing.T) { cols := make([]string, 0, len(tblColIDs)) for _, tblColID := range tblColIDs { col, ok := getColName(tblColID) - require.Equal(t, true, ok, comment) + require.True(t, ok, comment) cols = append(cols, col) } sort.Strings(cols) - require.Equal(t, expected, cols, comment) + require.ElementsMatch(t, expected, cols, comment) } for _, tt := range tests { From 479f2de019f7f4e5728b55641e8514e2eb8c279b Mon Sep 17 00:00:00 2001 From: xuyifan <675434007@qq.com> Date: Fri, 10 Dec 2021 15:11:26 +0800 Subject: [PATCH 08/11] upd --- planner/core/collect_column_stats_usage_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/planner/core/collect_column_stats_usage_test.go b/planner/core/collect_column_stats_usage_test.go index fb9ee5e99cb1e..912765549fa7b 
100644 --- a/planner/core/collect_column_stats_usage_test.go +++ b/planner/core/collect_column_stats_usage_test.go @@ -17,7 +17,6 @@ package core_test import ( "context" "fmt" - "sort" "testing" "github.com/pingcap/tidb/parser/model" @@ -195,7 +194,6 @@ func TestCollectPredicateColumns(t *testing.T) { require.True(t, ok, comment) cols = append(cols, col) } - sort.Strings(cols) require.ElementsMatch(t, expected, cols, comment) } @@ -211,7 +209,7 @@ func TestCollectPredicateColumns(t *testing.T) { p, err := builder.Build(ctx, stmt) require.NoError(t, err, comment) lp, ok := p.(plannercore.LogicalPlan) - require.Equal(t, true, ok, comment) + require.True(t, ok, comment) // We check predicate columns twice, before and after logical optimization. Some logical plan patterns may occur before // logical optimization while others may occur after logical optimization. // logutil.BgLogger().Info("before logical opt", zap.String("lp", plannercore.ToString(lp))) From a295cff42df4b4f676c5f4c1bcb6f145c3c32a17 Mon Sep 17 00:00:00 2001 From: xuyifan <675434007@qq.com> Date: Fri, 10 Dec 2021 15:22:59 +0800 Subject: [PATCH 09/11] move CollectHistNeededColumns to next pr --- planner/core/collect_column_stats_usage.go | 31 ---------------------- 1 file changed, 31 deletions(-) diff --git a/planner/core/collect_column_stats_usage.go b/planner/core/collect_column_stats_usage.go index 5ae6d19459c44..45cc3ff7ea238 100644 --- a/planner/core/collect_column_stats_usage.go +++ b/planner/core/collect_column_stats_usage.go @@ -256,34 +256,3 @@ func CollectPredicateColumnsForTest(lp LogicalPlan) []model.TableColumnID { func collectPredicateColumns(sctx sessionctx.Context, lp LogicalPlan) { // TODO: } - -// CollectHistNeededColumns collects histogram-needed columns from logical plan -func CollectHistNeededColumns(plan LogicalPlan) []model.TableColumnID { - colMap := map[model.TableColumnID]struct{}{} - collectHistNeededColumnsFromPlan(plan, colMap) - histColumns := make([]model.TableColumnID, 0, len(colMap)) - for col := range colMap { - histColumns = append(histColumns, col) - } - return histColumns -} - -func collectHistNeededColumnsFromPlan(plan LogicalPlan, colMap map[model.TableColumnID]struct{}) { - for _, child := range plan.Children() { - collectHistNeededColumnsFromPlan(child, colMap) - } - switch x := plan.(type) { - case *DataSource: - tblID := x.TableInfo().ID - columns := expression.ExtractColumnsFromExpressions(nil, x.pushedDownConds, nil) - for _, col := range columns { - tblColID := model.TableColumnID{TableID: tblID, ColumnID: col.ID} - colMap[tblColID] = struct{}{} - } - case *LogicalCTE: - collectHistNeededColumnsFromPlan(x.cte.seedPartLogicalPlan, colMap) - if x.cte.recursivePartLogicalPlan != nil { - collectHistNeededColumnsFromPlan(x.cte.recursivePartLogicalPlan, colMap) - } - } -} From f7f5666fe940ae16bd04740c9bfedf6f347f359f Mon Sep 17 00:00:00 2001 From: xuyifan <675434007@qq.com> Date: Fri, 10 Dec 2021 15:40:13 +0800 Subject: [PATCH 10/11] remove todo func --- planner/core/collect_column_stats_usage.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/planner/core/collect_column_stats_usage.go b/planner/core/collect_column_stats_usage.go index 45cc3ff7ea238..6b3ca3017f2dd 100644 --- a/planner/core/collect_column_stats_usage.go +++ b/planner/core/collect_column_stats_usage.go @@ -251,8 +251,3 @@ func CollectPredicateColumnsForTest(lp LogicalPlan) []model.TableColumnID { } return tblColIDs } - -// collectPredicateColumns collects predicate columns from logical plan and stores them to session. 
-func collectPredicateColumns(sctx sessionctx.Context, lp LogicalPlan) { - // TODO: -} From 22202c3a0ffb74ea3d3b37709eb75320d26893fe Mon Sep 17 00:00:00 2001 From: xuyifan <675434007@qq.com> Date: Fri, 10 Dec 2021 16:54:07 +0800 Subject: [PATCH 11/11] fix build --- planner/core/collect_column_stats_usage.go | 1 - 1 file changed, 1 deletion(-) diff --git a/planner/core/collect_column_stats_usage.go b/planner/core/collect_column_stats_usage.go index 6b3ca3017f2dd..6396b1ddad34f 100644 --- a/planner/core/collect_column_stats_usage.go +++ b/planner/core/collect_column_stats_usage.go @@ -17,7 +17,6 @@ package core import ( "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/sessionctx" ) // predicateColumnCollector collects predicate columns from logical plan. Predicate columns are the columns whose statistics
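For reference, a minimal sketch of how the exported test hook can be driven once a logical plan has been built, mirroring the getColName/checkPredicateColumns helpers in collect_column_stats_usage_test.go. The helper name predicateColumnNames and the direct infoschema import are illustrative assumptions, not part of the patch:

package core_test

import (
	"github.com/pingcap/tidb/infoschema"
	plannercore "github.com/pingcap/tidb/planner/core"
)

// predicateColumnNames resolves the collected predicate columns to "table.column"
// strings so they can be compared against expected results.
func predicateColumnNames(is infoschema.InfoSchema, lp plannercore.LogicalPlan) []string {
	names := make([]string, 0, 8)
	for _, tblColID := range plannercore.CollectPredicateColumnsForTest(lp) {
		tbl, ok := is.TableByID(tblColID.TableID)
		if !ok {
			continue
		}
		for _, col := range tbl.Meta().Columns {
			if col.ID == tblColID.ColumnID {
				names = append(names, tbl.Meta().Name.L+"."+col.Name.L)
				break
			}
		}
	}
	return names
}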