From ecaa1c518cc9367844ebb5206f2e970461c8bf28 Mon Sep 17 00:00:00 2001 From: Hangjie Mo Date: Wed, 25 Oct 2023 14:38:04 +0800 Subject: [PATCH 01/33] executor: move IT in `pkg/executor/partition_table_test` to `tests/integrationtest` (#47957) ref pingcap/tidb#47076 --- pkg/executor/main_test.go | 3 - pkg/executor/partition_table_test.go | 1512 ----- pkg/executor/test/partitiontest/BUILD.bazel | 2 +- .../test/partitiontest/partition_test.go | 54 - pkg/executor/testdata/executor_suite_in.json | 559 -- pkg/executor/testdata/executor_suite_out.json | 5773 ----------------- .../r/executor/partition/issues.result | 461 ++ .../partition/partition_boundaries.result | 5256 +++++++++++++++ .../partition_with_expression.result | 1250 ++++ .../r/executor/partition/table.result | 545 ++ .../t/executor/partition/issues.test | 255 + .../partition/partition_boundaries.test | 1577 +++++ .../partition/partition_with_expression.test | 454 ++ .../t/executor/partition/table.test | 358 + 14 files changed, 10157 insertions(+), 7902 deletions(-) delete mode 100644 pkg/executor/testdata/executor_suite_in.json delete mode 100644 pkg/executor/testdata/executor_suite_out.json create mode 100644 tests/integrationtest/r/executor/partition/issues.result create mode 100644 tests/integrationtest/r/executor/partition/partition_boundaries.result create mode 100644 tests/integrationtest/r/executor/partition/partition_with_expression.result create mode 100644 tests/integrationtest/r/executor/partition/table.result create mode 100644 tests/integrationtest/t/executor/partition/issues.test create mode 100644 tests/integrationtest/t/executor/partition/partition_boundaries.test create mode 100644 tests/integrationtest/t/executor/partition/partition_with_expression.test create mode 100644 tests/integrationtest/t/executor/partition/table.test diff --git a/pkg/executor/main_test.go b/pkg/executor/main_test.go index 078e657065617..43223ccdc7234 100644 --- a/pkg/executor/main_test.go +++ 
b/pkg/executor/main_test.go @@ -29,15 +29,12 @@ import ( var testDataMap = make(testdata.BookKeeper) var prepareMergeSuiteData testdata.TestData -var executorSuiteData testdata.TestData var slowQuerySuiteData testdata.TestData func TestMain(m *testing.M) { testsetup.SetupForCommonTest() - testDataMap.LoadTestSuiteData("testdata", "executor_suite") testDataMap.LoadTestSuiteData("testdata", "prepare_suite") testDataMap.LoadTestSuiteData("testdata", "slow_query_suite") - executorSuiteData = testDataMap["executor_suite"] prepareMergeSuiteData = testDataMap["prepare_suite"] slowQuerySuiteData = testDataMap["slow_query_suite"] diff --git a/pkg/executor/partition_table_test.go b/pkg/executor/partition_table_test.go index 21af7c27b321e..764c689f51364 100644 --- a/pkg/executor/partition_table_test.go +++ b/pkg/executor/partition_table_test.go @@ -27,140 +27,12 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/parser/model" - "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/external" - "github.com/pingcap/tidb/pkg/testkit/testdata" "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/stretchr/testify/require" ) -func TestSetPartitionPruneMode(t *testing.T) { - store := testkit.CreateMockStore(t) - - tkInit := testkit.NewTestKit(t, store) - tkInit.MustExec(`set @@session.tidb_partition_prune_mode = DEFAULT`) - tkInit.MustQuery("show warnings").Check(testkit.Rows()) - tkInit.MustExec(`set @@global.tidb_partition_prune_mode = DEFAULT`) - tkInit.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 Please analyze all partition tables again for consistency between partition and global stats")) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("select @@global.tidb_partition_prune_mode").Check(testkit.Rows("dynamic")) - tk.MustQuery("select @@session.tidb_partition_prune_mode").Check(testkit.Rows("dynamic")) - 
tk.MustExec(`set @@session.tidb_partition_prune_mode = "static"`) - tk.MustQuery("show warnings").Check(testkit.Rows()) - tk.MustExec(`set @@global.tidb_partition_prune_mode = "static"`) - tk.MustQuery("show warnings").Check(testkit.Rows()) - tk2 := testkit.NewTestKit(t, store) - tk2.MustQuery("select @@session.tidb_partition_prune_mode").Check(testkit.Rows("static")) - tk2.MustQuery("show warnings").Check(testkit.Rows()) - tk2.MustQuery("select @@global.tidb_partition_prune_mode").Check(testkit.Rows("static")) - tk2.MustExec(`set @@session.tidb_partition_prune_mode = "dynamic"`) - tk2.MustQuery("show warnings").Sort().Check(testkit.Rows( - `Warning 1105 Please analyze all partition tables again for consistency between partition and global stats`, - `Warning 1105 Please avoid setting partition prune mode to dynamic at session level and set partition prune mode to dynamic at global level`)) - tk2.MustExec(`set @@global.tidb_partition_prune_mode = "dynamic"`) - tk2.MustQuery("show warnings").Check(testkit.Rows(`Warning 1105 Please analyze all partition tables again for consistency between partition and global stats`)) - tk3 := testkit.NewTestKit(t, store) - tk3.MustQuery("select @@global.tidb_partition_prune_mode").Check(testkit.Rows("dynamic")) - tk3.MustQuery("select @@session.tidb_partition_prune_mode").Check(testkit.Rows("dynamic")) -} - -func TestFourReader(t *testing.T) { - failpoint.Enable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune", `return(true)`) - defer failpoint.Disable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune") - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists pt") - tk.MustExec(`create table pt (id int, c int, key i_id(id), key i_c(c)) partition by range (c) ( -partition p0 values less than (4), -partition p1 values less than (7), -partition p2 values less than (10))`) - tk.MustExec("insert into pt values (0, 0), (2, 2), (4, 4), 
(6, 6), (7, 7), (9, 9), (null, null)") - - // Table reader - tk.MustQuery("select * from pt").Sort().Check(testkit.Rows("0 0", "2 2", "4 4", "6 6", "7 7", "9 9", " ")) - // Table reader: table dual - tk.MustQuery("select * from pt where c > 10").Check(testkit.Rows()) - // Table reader: one partition - tk.MustQuery("select * from pt where c > 8").Check(testkit.Rows("9 9")) - // Table reader: more than one partition - tk.MustQuery("select * from pt where c < 2 or c >= 9").Sort().Check(testkit.Rows("0 0", "9 9")) - - // Index reader - tk.MustQuery("select c from pt").Sort().Check(testkit.Rows("0", "2", "4", "6", "7", "9", "")) - tk.MustQuery("select c from pt where c > 10").Check(testkit.Rows()) - tk.MustQuery("select c from pt where c > 8").Check(testkit.Rows("9")) - tk.MustQuery("select c from pt where c < 2 or c >= 9").Sort().Check(testkit.Rows("0", "9")) - - // Index lookup - tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt").Sort().Check(testkit.Rows("0 0", "2 2", "4 4", "6 6", "7 7", "9 9", " ")) - tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 4 and c > 10").Check(testkit.Rows()) - tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c > 8").Check(testkit.Rows("9 9")) - tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c < 2 or c >= 9").Sort().Check(testkit.Rows("0 0", "9 9")) - - // Index Merge - tk.MustExec("set @@tidb_enable_index_merge = 1") - tk.MustQuery("select /*+ use_index(i_c, i_id) */ * from pt where id = 4 or c < 7").Sort().Check(testkit.Rows("0 0", "2 2", "4 4", "6 6")) -} - -func TestPartitionIndexJoin(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_enable_table_partition = 1") - tk.MustExec("set @@session.tidb_enable_list_partition = 1") - for i := 0; i < 3; i++ { - tk.MustExec("drop table if exists p, t") - if i == 0 { - // Test for range partition - 
tk.MustExec(`create table p (id int, c int, key i_id(id), key i_c(c)) partition by range (c) ( - partition p0 values less than (4), - partition p1 values less than (7), - partition p2 values less than (10))`) - } else if i == 1 { - // Test for list partition - tk.MustExec(`create table p (id int, c int, key i_id(id), key i_c(c)) partition by list (c) ( - partition p0 values in (1,2,3,4), - partition p1 values in (5,6,7), - partition p2 values in (8, 9,10))`) - } else { - // Test for hash partition - tk.MustExec(`create table p (id int, c int, key i_id(id), key i_c(c)) partition by hash(c) partitions 5;`) - } - - tk.MustExec("create table t (id int)") - tk.MustExec("insert into p values (3,3), (4,4), (6,6), (9,9)") - tk.MustExec("insert into t values (4), (9)") - - // Build indexLookUp in index join - tk.MustQuery("select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id").Sort().Check(testkit.Rows("4 4 4", "9 9 9")) - // Build index reader in index join - tk.MustQuery("select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id").Sort().Check(testkit.Rows("4", "9")) - } -} - -func TestPartitionUnionScanIndexJoin(t *testing.T) { - // For issue https://github.com/pingcap/tidb/issues/19152 - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1 (c_int int, c_str varchar(40), primary key (c_int)) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue)") - tk.MustExec("create table t2 (c_int int, c_str varchar(40), primary key (c_int, c_str)) partition by hash (c_int) partitions 4") - tk.MustExec("insert into t1 values (10, 'interesting neumann')") - tk.MustExec("insert into t2 select * from t1") - tk.MustExec("begin") - tk.MustExec("insert into t2 values (11, 'hopeful hoover');") - tk.MustQuery("select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where 
t1.c_int in (10, 11)").Check(testkit.Rows("10 interesting neumann 10 interesting neumann")) - tk.MustQuery("select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11)").Check(testkit.Rows("10 interesting neumann 10 interesting neumann")) - tk.MustExec("commit") -} - func TestPointGetwithRangeAndListPartitionTable(t *testing.T) { store := testkit.CreateMockStore(t) @@ -248,83 +120,6 @@ func TestPointGetwithRangeAndListPartitionTable(t *testing.T) { tk.MustQuery(queryOnePartition).Check(testkit.Rows(fmt.Sprintf("%v", -1))) } -func TestPartitionReaderUnderApply(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - // For issue 19458. - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(c_int int)") - tk.MustExec("insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9)") - tk.MustExec("DROP TABLE IF EXISTS `t1`") - tk.MustExec(`CREATE TABLE t1 ( - c_int int NOT NULL, - c_str varchar(40) NOT NULL, - c_datetime datetime NOT NULL, - c_timestamp timestamp NULL DEFAULT NULL, - c_double double DEFAULT NULL, - c_decimal decimal(12,6) DEFAULT NULL, - PRIMARY KEY (c_int,c_str,c_datetime) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci - PARTITION BY RANGE (c_int) - (PARTITION p0 VALUES LESS THAN (2) ENGINE = InnoDB, - PARTITION p1 VALUES LESS THAN (4) ENGINE = InnoDB, - PARTITION p2 VALUES LESS THAN (6) ENGINE = InnoDB, - PARTITION p3 VALUES LESS THAN (8) ENGINE = InnoDB, - PARTITION p4 VALUES LESS THAN (10) ENGINE = InnoDB, - PARTITION p5 VALUES LESS THAN (20) ENGINE = InnoDB, - PARTITION p6 VALUES LESS THAN (50) ENGINE = InnoDB, - PARTITION p7 VALUES LESS THAN (1000000000) ENGINE = InnoDB)`) - tk.MustExec("INSERT INTO `t1` VALUES (19,'nifty feistel','2020-02-28 04:01:28','2020-02-04 06:11:57',32.430079,1.284000),(20,'objective snyder','2020-04-15 17:55:04','2020-05-30 
22:04:13',37.690874,9.372000)") - tk.MustExec("begin") - tk.MustExec("insert into t1 values (22, 'wizardly saha', '2020-05-03 16:35:22', '2020-05-03 02:18:42', 96.534810, 0.088)") - tk.MustQuery("select c_int from t where (select min(t1.c_int) from t1 where t1.c_int > t.c_int) > (select count(*) from t1 where t1.c_int > t.c_int) order by c_int").Check(testkit.Rows( - "1", "2", "3", "4", "5", "6", "7", "8", "9")) - tk.MustExec("rollback") - - // For issue 19450. - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int))") - tk.MustExec("create table t2 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int)) partition by hash (c_int) partitions 4") - tk.MustExec("insert into t1 values (1, 'romantic robinson', 4.436), (2, 'stoic chaplygin', 9.826), (3, 'vibrant shamir', 6.300), (4, 'hungry wilson', 4.900), (5, 'naughty swartz', 9.524)") - tk.MustExec("insert into t2 select * from t1") - tk.MustQuery("select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str)").Check(testkit.Rows( - "1 romantic robinson 4.436000", - "2 stoic chaplygin 9.826000", - "3 vibrant shamir 6.300000", - "4 hungry wilson 4.900000", - "5 naughty swartz 9.524000")) - - // For issue 19450 release-4.0 - tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`) - tk.MustQuery("select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str)").Check(testkit.Rows( - "1 romantic robinson 4.436000", - "2 stoic chaplygin 9.826000", - "3 vibrant shamir 6.300000", - "4 hungry wilson 4.900000", - "5 naughty swartz 9.524000")) -} - -func TestImproveCoverage(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`create table coverage_rr ( -pk1 
varchar(35) NOT NULL, -pk2 int NOT NULL, -c int, -PRIMARY KEY (pk1,pk2)) partition by hash(pk2) partitions 4;`) - tk.MustExec("create table coverage_dt (pk1 varchar(35), pk2 int)") - tk.MustExec("insert into coverage_rr values ('ios', 3, 2),('android', 4, 7),('linux',5,1)") - tk.MustExec("insert into coverage_dt values ('apple',3),('ios',3),('linux',5)") - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - tk.MustQuery("select /*+ INL_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2);").Sort().Check(testkit.Rows("ios 3 ios 3 2", "linux 5 linux 5 1")) - tk.MustQuery("select /*+ INL_MERGE_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2);").Sort().Check(testkit.Rows("ios 3 ios 3 2", "linux 5 linux 5 1")) -} - func TestPartitionInfoDisable(t *testing.T) { store := testkit.CreateMockStore(t) @@ -829,118 +624,6 @@ func TestOrderByAndLimit(t *testing.T) { tk.MustExec(fmt.Sprintf("set global tidb_mem_oom_action=%s", originOOMAction)) } -func TestOrderByOnUnsignedPk(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table tunsigned_hash(a bigint unsigned primary key) partition by hash(a) partitions 6") - tk.MustExec("insert into tunsigned_hash values(25), (9279808998424041135)") - tk.MustQuery("select min(a) from tunsigned_hash").Check(testkit.Rows("25")) - tk.MustQuery("select max(a) from tunsigned_hash").Check(testkit.Rows("9279808998424041135")) -} - -func TestPartitionHandleWithKeepOrder(t *testing.T) { - // https://github.com/pingcap/tidb/issues/44312 - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t (id int not null, store_id int not null )" + - "partition by range (store_id)" + - "(partition p0 values less than (6)," + - "partition p1 values less than (11)," + - "partition p2 values less 
than (16)," + - "partition p3 values less than (21))") - tk.MustExec("create table t1(id int not null, store_id int not null)") - tk.MustExec("insert into t values (1, 1)") - tk.MustExec("insert into t values (2, 17)") - tk.MustExec("insert into t1 values (0, 18)") - tk.MustExec("alter table t exchange partition p3 with table t1") - tk.MustExec("alter table t add index idx(id)") - tk.MustExec("analyze table t") - tk.MustQuery("select *,_tidb_rowid from t use index(idx) order by id limit 2").Check(testkit.Rows("0 18 1", "1 1 1")) - - tk.MustExec("drop table t, t1") - tk.MustExec("create table t (a int, b int, c int, key `idx_ac`(a, c), key `idx_bc`(b, c))" + - "partition by range (b)" + - "(partition p0 values less than (6)," + - "partition p1 values less than (11)," + - "partition p2 values less than (16)," + - "partition p3 values less than (21))") - tk.MustExec("create table t1 (a int, b int, c int, key `idx_ac`(a, c), key `idx_bc`(b, c))") - tk.MustExec("insert into t values (1,2,3), (2,3,4), (3,4,5)") - tk.MustExec("insert into t1 values (1,18,3)") - tk.MustExec("alter table t exchange partition p3 with table t1") - tk.MustExec("analyze table t") - tk.MustQuery("select * from t where a = 1 or b = 5 order by c limit 2").Sort().Check(testkit.Rows("1 18 3", "1 2 3")) -} - -func TestOrderByOnHandle(t *testing.T) { - // https://github.com/pingcap/tidb/issues/44266 - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - for i := 0; i < 2; i++ { - // indexLookUp + _tidb_rowid - tk.MustExec("drop table if exists t") - tk.MustExec("CREATE TABLE `t`(" + - "`a` int(11) NOT NULL," + - "`b` int(11) DEFAULT NULL," + - "`c` int(11) DEFAULT NULL," + - "KEY `idx_b` (`b`)) PARTITION BY HASH (`a`) PARTITIONS 2;") - tk.MustExec("insert into t values (2,-1,3), (3,2,2), (1,1,1);") - if i == 1 { - tk.MustExec("analyze table t") - } - tk.MustQuery("select * from t use index(idx_b) order by b, _tidb_rowid limit 
10;").Check(testkit.Rows("2 -1 3", "1 1 1", "3 2 2")) - - // indexLookUp + pkIsHandle - tk.MustExec("drop table if exists t") - tk.MustExec("CREATE TABLE `t`(" + - "`a` int(11) NOT NULL," + - "`b` int(11) DEFAULT NULL," + - "`c` int(11) DEFAULT NULL," + - "primary key(`a`)," + - "KEY `idx_b` (`b`)) PARTITION BY HASH (`a`) PARTITIONS 2;") - tk.MustExec("insert into t values (2,-1,3), (3,2,2), (1,1,1);") - if i == 1 { - tk.MustExec("analyze table t") - } - tk.MustQuery("select * from t use index(idx_b) order by b, a limit 10;").Check(testkit.Rows("2 -1 3", "1 1 1", "3 2 2")) - - // indexMerge + _tidb_rowid - tk.MustExec("drop table if exists t") - tk.MustExec("CREATE TABLE `t`(" + - "`a` int(11) NOT NULL," + - "`b` int(11) DEFAULT NULL," + - "`c` int(11) DEFAULT NULL," + - "KEY `idx_b` (`b`)," + - "KEY `idx_c` (`c`)) PARTITION BY HASH (`a`) PARTITIONS 2;") - tk.MustExec("insert into t values (2,-1,3), (3,2,2), (1,1,1);") - if i == 1 { - tk.MustExec("analyze table t") - } - tk.MustQuery("select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by _tidb_rowid limit 10;").Check(testkit.Rows("3 2 2", "1 1 1")) - - // indexMerge + pkIsHandle - tk.MustExec("drop table if exists t") - tk.MustExec("CREATE TABLE `t`(" + - "`a` int(11) NOT NULL," + - "`b` int(11) DEFAULT NULL," + - "`c` int(11) DEFAULT NULL," + - "KEY `idx_b` (`b`)," + - "KEY `idx_c` (`c`)," + - "PRIMARY KEY (`a`)) PARTITION BY HASH (`a`) PARTITIONS 2;") - tk.MustExec("insert into t values (2,-1,3), (3,2,2), (1,1,1);") - if i == 1 { - tk.MustExec("analyze table t") - } - tk.MustQuery("select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by a limit 10;").Check(testkit.Rows("1 1 1", "3 2 2")) - } -} - func TestBatchGetandPointGetwithHashPartition(t *testing.T) { store := testkit.CreateMockStore(t) @@ -1243,70 +926,6 @@ func TestDynamicPruningUnderIndexJoin(t *testing.T) { tk.MustQuery(`select /*+ INL_JOIN(touter, tnormal) */ tnormal.* from touter join tnormal use index(idx_b) on touter.b 
= tnormal.b`).Sort().Rows()) } -func TestIssue25527(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_issue_25527") - tk.MustExec("use test_issue_25527") - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec("set @@session.tidb_enable_list_partition = ON") - - // the original case - tk.MustExec(`CREATE TABLE t ( - col1 tinyint(4) primary key - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( COL1 DIV 80 ) - PARTITIONS 6`) - tk.MustExec(`insert into t values(-128), (107)`) - tk.MustExec(`prepare stmt from 'select col1 from t where col1 in (?, ?, ?)'`) - tk.MustExec(`set @a=-128, @b=107, @c=-128`) - tk.MustQuery(`execute stmt using @a,@b,@c`).Sort().Check(testkit.Rows("-128", "107")) - - // the minimal reproducible case for hash partitioning - tk.MustExec(`CREATE TABLE t0 (a int primary key) PARTITION BY HASH( a DIV 80 ) PARTITIONS 2`) - tk.MustExec(`insert into t0 values (1)`) - tk.MustQuery(`select a from t0 where a in (1)`).Check(testkit.Rows("1")) - - // the minimal reproducible case for range partitioning - tk.MustExec(`create table t1 (a int primary key) partition by range (a+5) ( - partition p0 values less than(10), partition p1 values less than(20))`) - tk.MustExec(`insert into t1 values (5)`) - tk.MustQuery(`select a from t1 where a in (5)`).Check(testkit.Rows("5")) - - // the minimal reproducible case for list partitioning - tk.MustExec(`create table t2 (a int primary key) partition by list (a+5) ( - partition p0 values in (5, 6, 7, 8), partition p1 values in (9, 10, 11, 12))`) - tk.MustExec(`insert into t2 values (5)`) - tk.MustQuery(`select a from t2 where a in (5)`).Check(testkit.Rows("5")) -} - -func TestIssue25598(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_issue_25598") - tk.MustExec("use 
test_issue_25598") - tk.MustExec(`CREATE TABLE UK_HP16726 ( - COL1 bigint(16) DEFAULT NULL, - COL2 varchar(20) DEFAULT NULL, - COL4 datetime DEFAULT NULL, - COL3 bigint(20) DEFAULT NULL, - COL5 float DEFAULT NULL, - UNIQUE KEY UK_COL1 (COL1) /*!80000 INVISIBLE */ - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin - PARTITION BY HASH( COL1 ) - PARTITIONS 25`) - - tk.MustQuery(`select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807`).Check(testkit.Rows()) - tk.MustExec(`explain select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807`) - - tk.MustExec(`set @@tidb_partition_prune_mode = 'dynamic'`) - tk.MustQuery(`select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807`).Check(testkit.Rows()) - tk.MustExec(`explain select t1. col1, t2. 
col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807`) -} - func TestBatchGetforRangeandListPartitionTable(t *testing.T) { store := testkit.CreateMockStore(t) @@ -1720,596 +1339,6 @@ func TestPartitionTableWithDifferentJoin(t *testing.T) { tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) } -func createTable4DynamicPruneModeTestWithExpression(tk *testkit.TestKit) { - tk.MustExec("create table trange(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11));") - tk.MustExec("create table thash(a int, b int) partition by hash(a) partitions 4;") - tk.MustExec("create table t(a int, b int)") - tk.MustExec("insert into trange values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1);") - tk.MustExec("insert into thash values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1);") - tk.MustExec("insert into t values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1);") - tk.MustExec("set session tidb_partition_prune_mode='dynamic'") - tk.MustExec("analyze table trange") - tk.MustExec("analyze table thash") - tk.MustExec("analyze table t") -} - -type testData4Expression struct { - sql string - partitions []string -} - -func TestDateColWithUnequalExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_datetime_unequal_expression") - tk.MustExec("create database db_datetime_unequal_expression") - tk.MustExec("use db_datetime_unequal_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec(`create 
table tp(a datetime, b int) partition by range columns (a) (partition p0 values less than("2012-12-10 00:00:00"), partition p1 values less than("2022-12-30 00:00:00"), partition p2 values less than("2025-12-12 00:00:00"))`) - tk.MustExec(`create table t(a datetime, b int) partition by range columns (a) (partition p0 values less than("2012-12-10 00:00:00"), partition p1 values less than("2022-12-30 00:00:00"), partition p2 values less than("2025-12-12 00:00:00"))`) - tk.MustExec(`insert into tp values("2015-09-09 00:00:00", 1), ("2020-08-08 19:00:01", 2), ("2024-01-01 01:01:01", 3)`) - tk.MustExec(`insert into t values("2015-09-09 00:00:00", 1), ("2020-08-08 19:00:01", 2), ("2024-01-01 01:01:01", 3)`) - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a != '2024-01-01 01:01:01'", - partitions: []string{"all"}, - }, - { - sql: "select * from %s where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00'", - partitions: []string{"p1,p2"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestToDaysColWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_to_days_expression") - tk.MustExec("create database db_to_days_expression") - tk.MustExec("use db_to_days_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec("create table tp(a date, b int) partition by range(to_days(a)) (partition p0 values less than (737822), partition p1 values less than (738019), partition p2 values less than (738154))") - tk.MustExec("create table t(a date, b int)") - tk.MustExec("insert into tp values('2020-01-01', 1), ('2020-03-02', 2), ('2020-05-05', 3), ('2020-11-11', 4)") - 
tk.MustExec("insert into t values('2020-01-01', 1), ('2020-03-02', 2), ('2020-05-05', 3), ('2020-11-11', 4)") - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a < '2020-08-16'", - partitions: []string{"p0,p1"}, - }, - { - sql: "select * from %s where a between '2020-05-01' and '2020-10-01'", - partitions: []string{"p1,p2"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestWeekdayWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_weekday_expression") - tk.MustExec("create database db_weekday_expression") - tk.MustExec("use db_weekday_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec("create table tp(a datetime, b int) partition by range(weekday(a)) (partition p0 values less than(3), partition p1 values less than(5), partition p2 values less than(8))") - tk.MustExec("create table t(a datetime, b int)") - tk.MustExec(`insert into tp values("2020-08-17 00:00:00", 1), ("2020-08-18 00:00:00", 2), ("2020-08-19 00:00:00", 4), ("2020-08-20 00:00:00", 5), ("2020-08-21 00:00:00", 6), ("2020-08-22 00:00:00", 0)`) - tk.MustExec(`insert into t values("2020-08-17 00:00:00", 1), ("2020-08-18 00:00:00", 2), ("2020-08-19 00:00:00", 4), ("2020-08-20 00:00:00", 5), ("2020-08-21 00:00:00", 6), ("2020-08-22 00:00:00", 0)`) - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a = '2020-08-17 00:00:00'", - partitions: []string{"p0"}, - }, - { - sql: "select * from %s where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00'", - partitions: []string{"p1"}, - }, - { - sql: " select * from %s 
where a < '2020-08-19 00:00:00'", - partitions: []string{"all"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestFloorUnixTimestampAndIntColWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_floor_unix_timestamp_int_expression") - tk.MustExec("create database db_floor_unix_timestamp_int_expression") - tk.MustExec("use db_floor_unix_timestamp_int_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec("create table tp(a timestamp, b int) partition by range(floor(unix_timestamp(a))) (partition p0 values less than(1580670000), partition p1 values less than(1597622400), partition p2 values less than(1629158400))") - tk.MustExec("create table t(a timestamp, b int)") - tk.MustExec("insert into tp values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3)") - tk.MustExec("insert into t values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3)") - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a > '2020-09-11 00:00:00'", - partitions: []string{"p2"}, - }, - { - sql: "select * from %s where a < '2020-07-07 01:00:00'", - partitions: []string{"p0,p1"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestUnixTimestampAndIntColWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists 
db_unix_timestamp_int_expression") - tk.MustExec("create database db_unix_timestamp_int_expression") - tk.MustExec("use db_unix_timestamp_int_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec("create table tp(a timestamp, b int) partition by range(unix_timestamp(a)) (partition p0 values less than(1580670000), partition p1 values less than(1597622400), partition p2 values less than(1629158400))") - tk.MustExec("create table t(a timestamp, b int)") - tk.MustExec("insert into tp values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3)") - tk.MustExec("insert into t values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3)") - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a > '2020-09-11 00:00:00'", - partitions: []string{"p2"}, - }, - { - sql: "select * from %s where a < '2020-07-07 01:00:00'", - partitions: []string{"p0,p1"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestDatetimeColAndIntColWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_datetime_int_expression") - tk.MustExec("create database db_datetime_int_expression") - tk.MustExec("use db_datetime_int_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec("create table tp(a datetime, b int) partition by range columns(a) (partition p0 values less than('2020-02-02 00:00:00'), partition p1 values less than('2020-09-01 00:00:00'), partition p2 values less than('2020-12-20 00:00:00'))") - tk.MustExec("create table t(a datetime, b int)") - 
tk.MustExec("insert into tp values('2020-01-01 12:00:00', 1), ('2020-08-22 10:00:00', 2), ('2020-09-09 11:00:00', 3), ('2020-10-01 00:00:00', 4)") - tk.MustExec("insert into t values('2020-01-01 12:00:00', 1), ('2020-08-22 10:00:00', 2), ('2020-09-09 11:00:00', 3), ('2020-10-01 00:00:00', 4)") - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a < '2020-09-01 00:00:00'", - partitions: []string{"p0,p1"}, - }, - { - sql: "select * from %s where a > '2020-07-07 01:00:00'", - partitions: []string{"p1,p2"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestVarcharColAndIntColWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_varchar_int_expression") - tk.MustExec("create database db_varchar_int_expression") - tk.MustExec("use db_varchar_int_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec("create table tp(a varchar(255), b int) partition by range columns(a) (partition p0 values less than('ddd'), partition p1 values less than('ggggg'), partition p2 values less than('mmmmmm'))") - tk.MustExec("create table t(a varchar(255), b int)") - tk.MustExec("insert into tp values('aaa', 1), ('bbbb', 2), ('ccc', 3), ('dfg', 4), ('kkkk', 5), ('10', 6)") - tk.MustExec("insert into t values('aaa', 1), ('bbbb', 2), ('ccc', 3), ('dfg', 4), ('kkkk', 5), ('10', 6)") - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a < '10'", - partitions: []string{"p0"}, - }, - { - sql: "select * from %s where a > 0", - partitions: []string{"all"}, - }, - { - sql: "select * from %s where a < 0", - partitions: 
[]string{"all"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestDynamicPruneModeWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_equal_expression") - tk.MustExec("create database db_equal_expression") - tk.MustExec("use db_equal_expression") - createTable4DynamicPruneModeTestWithExpression(tk) - - tables := []string{"trange", "thash"} - tests := []testData4Expression{ - { - sql: "select * from %s where a = 2", - partitions: []string{ - "p0", - "p2", - }, - }, - { - sql: "select * from %s where a = 4 or a = 1", - partitions: []string{ - "p0,p1", - "p0,p1", - }, - }, - { - sql: "select * from %s where a = -1", - partitions: []string{ - "p0", - "p1", - }, - }, - { - sql: "select * from %s where a is NULL", - partitions: []string{ - "p0", - "p0", - }, - }, - { - sql: "select * from %s where b is NULL", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a > -1", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a >= 4 and a <= 5", - partitions: []string{ - "p1,p2", - "p0,p1", - }, - }, - { - sql: "select * from %s where a > 10", - partitions: []string{ - "dual", - "all", - }, - }, - { - sql: "select * from %s where a >=2 and a <= 3", - partitions: []string{ - "p0,p1", - "p2,p3", - }, - }, - { - sql: "select * from %s where a between 2 and 3", - partitions: []string{ - "p0,p1", - "p2,p3", - }, - }, - { - sql: "select * from %s where a < 2", - partitions: []string{ - "p0", - "all", - }, - }, - { - sql: "select * from %s where a <= 3", - partitions: []string{ - "p0,p1", - "all", - }, - }, - { - sql: "select * from %s where a in (2, 3)", - partitions: []string{ - "p0,p1", - "p2,p3", - }, - }, - { - sql: 
"select * from %s where a in (1, 5)", - partitions: []string{ - "p0,p2", - "p1", - }, - }, - { - sql: "select * from %s where a not in (1, 5)", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a = 2 and a = 2", - partitions: []string{ - "p0", - "p2", - }, - }, - { - sql: "select * from %s where a = 2 and a = 3", - partitions: []string{ - // This means that we have no partition-read plan - "", - "", - }, - }, - { - sql: "select * from %s where a < 2 and a > 0", - partitions: []string{ - "p0", - "p1", - }, - }, - { - sql: "select * from %s where a < 2 and a < 3", - partitions: []string{ - "p0", - "all", - }, - }, - { - sql: "select * from %s where a > 1 and a > 2", - partitions: []string{ - "p1,p2", - "all", - }, - }, - { - sql: "select * from %s where a = 2 or a = 3", - partitions: []string{ - "p0,p1", - "p2,p3", - }, - }, - { - sql: "select * from %s where a = 2 or a in (3)", - partitions: []string{ - "p0,p1", - "p2,p3", - }, - }, - { - sql: "select * from %s where a = 2 or a > 3", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a = 2 or a <= 1", - partitions: []string{ - "p0", - "all", - }, - }, - { - sql: "select * from %s where a = 2 or a between 2 and 2", - partitions: []string{ - "p0", - "p2", - }, - }, - { - sql: "select * from %s where a != 2", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a != 2 and a > 4", - partitions: []string{ - "p2", - "all", - }, - }, - { - sql: "select * from %s where a != 2 and a != 3", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a != 2 and a = 3", - partitions: []string{ - "p1", - "p3", - }, - }, - { - sql: "select * from %s where not (a = 2)", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where not (a > 2)", - partitions: []string{ - "p0", - "all", - }, - }, - { - sql: "select * from %s where not (a < 2)", - partitions: []string{ - "all", 
- "all", - }, - }, - // cases that partition pruning can not work - { - sql: "select * from %s where a + 1 > 4", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a - 1 > 0", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a * 2 < 0", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a << 1 < 0", - partitions: []string{ - "all", - "all", - }, - }, - // comparison between int column and string column - { - sql: "select * from %s where a > '10'", - partitions: []string{ - "dual", - "all", - }, - }, - { - sql: "select * from %s where a > '10ab'", - partitions: []string{ - "dual", - "all", - }, - }, - } - - for _, t := range tests { - for i := range t.partitions { - sql := fmt.Sprintf(t.sql, tables[i]) - tk.MustPartition(sql, t.partitions[i]).Sort().Check(tk.MustQuery(fmt.Sprintf(t.sql, "t")).Sort().Rows()) - } - } -} - -func TestAddDropPartitions(t *testing.T) { - failpoint.Enable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune", `return(true)`) - defer failpoint.Disable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune") - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_add_drop_partition") - tk.MustExec("use test_add_drop_partition") - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - - tk.MustExec(`create table t(a int) partition by range(a) ( - partition p0 values less than (5), - partition p1 values less than (10), - partition p2 values less than (15))`) - tk.MustExec(`insert into t values (2), (7), (12)`) - tk.MustPartition(`select * from t where a < 3`, "p0").Sort().Check(testkit.Rows("2")) - tk.MustPartition(`select * from t where a < 8`, "p0,p1").Sort().Check(testkit.Rows("2", "7")) - tk.MustPartition(`select * from t where a < 20`, "all").Sort().Check(testkit.Rows("12", "2", "7")) - - // remove p0 - tk.MustExec(`alter table t 
drop partition p0`) - tk.MustPartition(`select * from t where a < 3`, "p1").Sort().Check(testkit.Rows()) - tk.MustPartition(`select * from t where a < 8`, "p1").Sort().Check(testkit.Rows("7")) - tk.MustPartition(`select * from t where a < 20`, "all").Sort().Check(testkit.Rows("12", "7")) - - // add 2 more partitions - tk.MustExec(`alter table t add partition (partition p3 values less than (20))`) - tk.MustExec(`alter table t add partition (partition p4 values less than (40))`) - tk.MustExec(`insert into t values (15), (25)`) - tk.MustPartition(`select * from t where a < 3`, "p1").Sort().Check(testkit.Rows()) - tk.MustPartition(`select * from t where a < 8`, "p1").Sort().Check(testkit.Rows("7")) - tk.MustPartition(`select * from t where a < 20`, "p1,p2,p3").Sort().Check(testkit.Rows("12", "15", "7")) -} - func TestMPPQueryExplainInfo(t *testing.T) { failpoint.Enable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune", `return(true)`) defer failpoint.Disable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune") @@ -2341,66 +1370,6 @@ func TestMPPQueryExplainInfo(t *testing.T) { tk.MustPartition(`select * from t where a < 5 union all select * from t where a > 10`, "p2").Sort().Check(testkit.Rows("12", "2")) } -func TestPartitionPruningInTransaction(t *testing.T) { - failpoint.Enable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune", `return(true)`) - defer failpoint.Disable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune") - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_pruning_transaction") - defer tk.MustExec(`drop database test_pruning_transaction`) - tk.MustExec("use test_pruning_transaction") - tk.MustExec(`create table t(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11))`) - tk.MustExec("set @@tidb_partition_prune_mode = 'static'") - 
tk.MustExec(`begin`) - tk.MustPartitionByList(`select * from t`, []string{"p0", "p1", "p2"}) - tk.MustPartitionByList(`select * from t where a > 3`, []string{"p1", "p2"}) // partition pruning can work in transactions - tk.MustPartitionByList(`select * from t where a > 7`, []string{"p2"}) - tk.MustExec(`rollback`) - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec(`begin`) - tk.MustPartition(`select * from t`, "all") - tk.MustPartition(`select * from t where a > 3`, "p1,p2") // partition pruning can work in transactions - tk.MustPartition(`select * from t where a > 7`, "p2") - tk.MustExec(`rollback`) - tk.MustExec("set @@tidb_partition_prune_mode = default") -} - -func TestIssue25253(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database issue25253") - defer tk.MustExec("drop database issue25253") - tk.MustExec("use issue25253") - - tk.MustExec(`CREATE TABLE IDT_HP23902 ( - COL1 smallint DEFAULT NULL, - COL2 varchar(20) DEFAULT NULL, - COL4 datetime DEFAULT NULL, - COL3 bigint DEFAULT NULL, - COL5 float DEFAULT NULL, - KEY UK_COL1 (COL1) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin - PARTITION BY HASH( COL1+30 ) - PARTITIONS 6`) - tk.MustExec(`insert ignore into IDT_HP23902 partition(p0, p1)(col1, col3) values(-10355, 1930590137900568573), (13810, -1332233145730692137)`) - tk.MustQuery(`show warnings`).Check(testkit.Rows("Warning 1748 Found a row not matching the given partition set", - "Warning 1748 Found a row not matching the given partition set")) - tk.MustQuery(`select * from IDT_HP23902`).Check(testkit.Rows()) - - tk.MustExec(`create table t ( - a int - ) partition by range(a) ( - partition p0 values less than (10), - partition p1 values less than (20))`) - tk.MustExec(`insert ignore into t partition(p0)(a) values(12)`) - tk.MustQuery(`show warnings`).Check(testkit.Rows("Warning 1748 Found a row not matching the given 
partition set")) - tk.MustQuery(`select * from t`).Check(testkit.Rows()) -} - func TestDML(t *testing.T) { store := testkit.CreateMockStore(t) @@ -2831,34 +1800,6 @@ func TestDirectReadingWithUnionScan(t *testing.T) { tk.MustExec(`rollback`) } -func TestIssue25030(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_issue_25030") - tk.MustExec("use test_issue_25030") - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - - tk.MustExec(`CREATE TABLE tbl_936 ( - col_5410 smallint NOT NULL, - col_5411 double, - col_5412 boolean NOT NULL DEFAULT 1, - col_5413 set('Alice', 'Bob', 'Charlie', 'David') NOT NULL DEFAULT 'Charlie', - col_5414 varbinary(147) COLLATE 'binary' DEFAULT 'bvpKgYWLfyuTiOYSkj', - col_5415 timestamp NOT NULL DEFAULT '2021-07-06', - col_5416 decimal(6, 6) DEFAULT 0.49, - col_5417 text COLLATE utf8_bin, - col_5418 float DEFAULT 2048.0762299371554, - col_5419 int UNSIGNED NOT NULL DEFAULT 3152326370, - PRIMARY KEY (col_5419) ) - PARTITION BY HASH (col_5419) PARTITIONS 3`) - tk.MustQuery(`SELECT last_value(col_5414) OVER w FROM tbl_936 - WINDOW w AS (ORDER BY col_5410, col_5411, col_5412, col_5413, col_5414, col_5415, col_5416, col_5417, col_5418, col_5419) - ORDER BY col_5410, col_5411, col_5412, col_5413, col_5414, col_5415, col_5416, col_5417, col_5418, col_5419, nth_value(col_5412, 5) OVER w`). 
- Check(testkit.Rows()) // can work properly without any error or panic -} - func TestUnsignedPartitionColumn(t *testing.T) { store := testkit.CreateMockStore(t) @@ -3125,70 +2066,6 @@ func TestDirectReadingWithAgg(t *testing.T) { } } -func TestDynamicModeByDefault(t *testing.T) { - failpoint.Enable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune", `return(true)`) - defer failpoint.Disable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune") - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_dynamic_by_default") - - tk.MustExec(`create table trange(a int, b int, primary key(a) clustered, index idx_b(b)) partition by range(a) ( - partition p0 values less than(300), - partition p1 values less than(500), - partition p2 values less than(1100));`) - tk.MustExec(`create table thash(a int, b int, primary key(a) clustered, index idx_b(b)) partition by hash(a) partitions 4;`) - - for _, q := range []string{ - "explain select * from trange where a>400", - "explain select * from thash where a>=100", - } { - for _, r := range tk.MustQuery(q).Rows() { - require.NotContains(t, strings.ToLower(r[0].(string)), "partitionunion") - } - } -} - -func TestIssue24636(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_issue_24636") - tk.MustExec("use test_issue_24636") - - tk.MustExec(`CREATE TABLE t (a int, b date, c int, PRIMARY KEY (a,b)) - PARTITION BY RANGE ( TO_DAYS(b) ) ( - PARTITION p0 VALUES LESS THAN (737821), - PARTITION p1 VALUES LESS THAN (738289) - )`) - tk.MustExec(`INSERT INTO t (a, b, c) VALUES(0, '2021-05-05', 0)`) - tk.MustQuery(`select c from t use index(primary) where a=0 limit 1`).Check(testkit.Rows("0")) - - tk.MustExec(` - CREATE TABLE test_partition ( - a varchar(100) NOT NULL, - b date NOT NULL, - c varchar(100) NOT NULL, - d datetime DEFAULT NULL, - e 
datetime DEFAULT NULL, - f bigint(20) DEFAULT NULL, - g bigint(20) DEFAULT NULL, - h bigint(20) DEFAULT NULL, - i bigint(20) DEFAULT NULL, - j bigint(20) DEFAULT NULL, - k bigint(20) DEFAULT NULL, - l bigint(20) DEFAULT NULL, - PRIMARY KEY (a,b,c) /*T![clustered_index] NONCLUSTERED */ - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin - PARTITION BY RANGE ( TO_DAYS(b) ) ( - PARTITION pmin VALUES LESS THAN (737821), - PARTITION p20200601 VALUES LESS THAN (738289))`) - tk.MustExec(`INSERT INTO test_partition (a, b, c, d, e, f, g, h, i, j, k, l) VALUES('aaa', '2021-05-05', '428ff6a1-bb37-42ac-9883-33d7a29961e6', '2021-05-06 08:13:38', '2021-05-06 13:28:08', 0, 8, 3, 0, 9, 1, 0)`) - tk.MustQuery(`select c,j,l from test_partition where c='428ff6a1-bb37-42ac-9883-33d7a29961e6' and a='aaa' limit 0, 200`).Check(testkit.Rows("428ff6a1-bb37-42ac-9883-33d7a29961e6 9 0")) -} - func TestIdexMerge(t *testing.T) { store := testkit.CreateMockStore(t) @@ -3283,45 +2160,6 @@ func TestIdexMerge(t *testing.T) { } } -func TestIssue25309(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_issue_25309") - tk.MustExec("use test_issue_25309") - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - - tk.MustExec(`CREATE TABLE tbl_500 ( - col_20 tinyint(4) NOT NULL, - col_21 varchar(399) CHARACTER SET utf8 COLLATE utf8_unicode_ci DEFAULT NULL, - col_22 json DEFAULT NULL, - col_23 blob DEFAULT NULL, - col_24 mediumint(9) NOT NULL, - col_25 float NOT NULL DEFAULT '7306.384497585912', - col_26 binary(196) NOT NULL, - col_27 timestamp DEFAULT '1976-12-08 00:00:00', - col_28 bigint(20) NOT NULL, - col_29 tinyint(1) NOT NULL DEFAULT '1', - PRIMARY KEY (col_29,col_20) /*T![clustered_index] NONCLUSTERED */, - KEY idx_7 (col_28,col_20,col_26,col_27,col_21,col_24), - KEY idx_8 (col_25,col_29,col_24) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin`) - - 
tk.MustExec(`CREATE TABLE tbl_600 ( - col_60 int(11) NOT NULL DEFAULT '-776833487', - col_61 tinyint(1) NOT NULL DEFAULT '1', - col_62 tinyint(4) NOT NULL DEFAULT '-125', - PRIMARY KEY (col_62,col_60,col_61) /*T![clustered_index] NONCLUSTERED */, - KEY idx_19 (col_60) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci - PARTITION BY HASH( col_60 ) - PARTITIONS 1`) - - tk.MustExec(`insert into tbl_500 select -34, 'lrfGPPPUuZjtT', '{"obj1": {"sub_obj0": 100}}', 0x6C47636D, 1325624, 7306.3843, 'abc', '1976-12-08', 4757891479624162031, 0`) - tk.MustQuery(`select tbl_5.* from tbl_500 tbl_5 where col_24 in ( select col_62 from tbl_600 where tbl_5.col_26 < 'hSvHLdQeGBNIyOFXStV' )`).Check(testkit.Rows()) -} - func TestGlobalIndexScan(t *testing.T) { store := testkit.CreateMockStore(t) @@ -3409,35 +2247,6 @@ partition p2 values less than (10))`) failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/checkDropGlobalIndex") } -func TestIssue20028(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("set @@tidb_partition_prune_mode='static-only'") - tk.MustExec(`create table t1 (c_datetime datetime, primary key (c_datetime)) -partition by range (to_days(c_datetime)) ( partition p0 values less than (to_days('2020-02-01')), -partition p1 values less than (to_days('2020-04-01')), -partition p2 values less than (to_days('2020-06-01')), -partition p3 values less than maxvalue)`) - tk.MustExec("create table t2 (c_datetime datetime, unique key(c_datetime))") - tk.MustExec("insert into t1 values ('2020-06-26 03:24:00'), ('2020-02-21 07:15:33'), ('2020-04-27 13:50:58')") - tk.MustExec("insert into t2 values ('2020-01-10 09:36:00'), ('2020-02-04 06:00:00'), ('2020-06-12 03:45:18')") - tk.MustExec("begin") - tk.MustQuery("select * from t1 join t2 on t1.c_datetime >= t2.c_datetime for update"). - Sort(). 
- Check(testkit.Rows( - "2020-02-21 07:15:33 2020-01-10 09:36:00", - "2020-02-21 07:15:33 2020-02-04 06:00:00", - "2020-04-27 13:50:58 2020-01-10 09:36:00", - "2020-04-27 13:50:58 2020-02-04 06:00:00", - "2020-06-26 03:24:00 2020-01-10 09:36:00", - "2020-06-26 03:24:00 2020-02-04 06:00:00", - "2020-06-26 03:24:00 2020-06-12 03:45:18")) - tk.MustExec("rollback") -} - func TestSelectLockOnPartitionTable(t *testing.T) { store := testkit.CreateMockStore(t) @@ -3571,215 +2380,6 @@ partition p2 values less than (11))`) } } -func TestIssue21731(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists p, t") - tk.MustExec("set @@tidb_enable_list_partition = OFF") - // Notice that this does not really test the issue #21731 - tk.MustExec("create table t (a int, b int, unique index idx(a)) partition by list columns(b) (partition p0 values in (1), partition p1 values in (2));") -} - -type testOutput struct { - SQL string - Plan []string - Res []string -} - -func verifyPartitionResult(tk *testkit.TestKit, input []string, output []testOutput) { - for i, tt := range input { - var isSelect = false - if strings.HasPrefix(strings.ToLower(tt), "select ") { - isSelect = true - } - testdata.OnRecord(func() { - output[i].SQL = tt - if isSelect { - output[i].Plan = testdata.ConvertRowsToStrings(tk.UsedPartitions(tt).Rows()) - output[i].Res = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows()) - } else { - // Just verify SELECT (also avoid double INSERTs during record) - output[i].Res = nil - output[i].Plan = nil - } - }) - if isSelect { - tk.UsedPartitions(tt).Check(testkit.Rows(output[i].Plan...)) - tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...)) - } else { - tk.MustExec(tt) - } - } -} - -func TestRangePartitionBoundariesEq(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("SET @@tidb_partition_prune_mode = 
'dynamic'") - tk.MustExec("CREATE DATABASE TestRangePartitionBoundaries") - defer tk.MustExec("DROP DATABASE TestRangePartitionBoundaries") - tk.MustExec("USE TestRangePartitionBoundaries") - tk.MustExec("DROP TABLE IF EXISTS t") - tk.MustExec(`CREATE TABLE t -(a INT, b varchar(255)) -PARTITION BY RANGE (a) ( - PARTITION p0 VALUES LESS THAN (1000000), - PARTITION p1 VALUES LESS THAN (2000000), - PARTITION p2 VALUES LESS THAN (3000000)); -`) - - var input []string - var output []testOutput - executorSuiteData.LoadTestCases(t, &input, &output) - verifyPartitionResult(tk, input, output) -} - -func TestRangePartitionBoundariesNe(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("SET @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec("CREATE DATABASE TestRangePartitionBoundariesNe") - defer tk.MustExec("DROP DATABASE TestRangePartitionBoundariesNe") - tk.MustExec("USE TestRangePartitionBoundariesNe") - tk.MustExec("DROP TABLE IF EXISTS t") - tk.MustExec(`CREATE TABLE t -(a INT, b varchar(255)) -PARTITION BY RANGE (a) ( - PARTITION p0 VALUES LESS THAN (1), - PARTITION p1 VALUES LESS THAN (2), - PARTITION p2 VALUES LESS THAN (3), - PARTITION p3 VALUES LESS THAN (4), - PARTITION p4 VALUES LESS THAN (5), - PARTITION p5 VALUES LESS THAN (6), - PARTITION p6 VALUES LESS THAN (7))`) - - var input []string - var output []testOutput - executorSuiteData.LoadTestCases(t, &input, &output) - verifyPartitionResult(tk, input, output) -} - -func TestRangePartitionBoundariesBetweenM(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("CREATE DATABASE IF NOT EXISTS TestRangePartitionBoundariesBetweenM") - defer tk.MustExec("DROP DATABASE TestRangePartitionBoundariesBetweenM") - tk.MustExec("USE TestRangePartitionBoundariesBetweenM") - tk.MustExec("DROP TABLE IF EXISTS t") - tk.MustExec(`CREATE TABLE t -(a INT, b varchar(255)) -PARTITION BY RANGE (a) ( - PARTITION p0 
VALUES LESS THAN (1000000), - PARTITION p1 VALUES LESS THAN (2000000), - PARTITION p2 VALUES LESS THAN (3000000))`) - - var input []string - var output []testOutput - executorSuiteData.LoadTestCases(t, &input, &output) - verifyPartitionResult(tk, input, output) -} - -func TestRangePartitionBoundariesBetweenS(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("CREATE DATABASE IF NOT EXISTS TestRangePartitionBoundariesBetweenS") - defer tk.MustExec("DROP DATABASE TestRangePartitionBoundariesBetweenS") - tk.MustExec("USE TestRangePartitionBoundariesBetweenS") - tk.MustExec("DROP TABLE IF EXISTS t") - tk.MustExec(`CREATE TABLE t -(a INT, b varchar(255)) -PARTITION BY RANGE (a) ( - PARTITION p0 VALUES LESS THAN (1), - PARTITION p1 VALUES LESS THAN (2), - PARTITION p2 VALUES LESS THAN (3), - PARTITION p3 VALUES LESS THAN (4), - PARTITION p4 VALUES LESS THAN (5), - PARTITION p5 VALUES LESS THAN (6), - PARTITION p6 VALUES LESS THAN (7))`) - - var input []string - var output []testOutput - executorSuiteData.LoadTestCases(t, &input, &output) - verifyPartitionResult(tk, input, output) -} - -func TestRangePartitionBoundariesLtM(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec("create database TestRangePartitionBoundariesLtM") - defer tk.MustExec("drop database TestRangePartitionBoundariesLtM") - tk.MustExec("use TestRangePartitionBoundariesLtM") - tk.MustExec("drop table if exists t") - tk.MustExec(`CREATE TABLE t -(a INT, b varchar(255)) -PARTITION BY RANGE (a) ( - PARTITION p0 VALUES LESS THAN (1000000), - PARTITION p1 VALUES LESS THAN (2000000), - PARTITION p2 VALUES LESS THAN (3000000))`) - - var input []string - var output []testOutput - executorSuiteData.LoadTestCases(t, &input, &output) - verifyPartitionResult(tk, input, output) -} - -func TestRangePartitionBoundariesLtS(t *testing.T) { - store 
:= testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec("create database TestRangePartitionBoundariesLtS") - defer tk.MustExec("drop database TestRangePartitionBoundariesLtS") - tk.MustExec("use TestRangePartitionBoundariesLtS") - tk.MustExec("drop table if exists t") - tk.MustExec(`CREATE TABLE t -(a INT, b varchar(255)) -PARTITION BY RANGE (a) ( - PARTITION p0 VALUES LESS THAN (1), - PARTITION p1 VALUES LESS THAN (2), - PARTITION p2 VALUES LESS THAN (3), - PARTITION p3 VALUES LESS THAN (4), - PARTITION p4 VALUES LESS THAN (5), - PARTITION p5 VALUES LESS THAN (6), - PARTITION p6 VALUES LESS THAN (7))`) - - var input []string - var output []testOutput - executorSuiteData.LoadTestCases(t, &input, &output) - verifyPartitionResult(tk, input, output) -} - -func TestIssue25528(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("set @@tidb_partition_prune_mode = 'static'") - tk.MustExec("use test") - tk.MustExec("create table issue25528 (id int primary key, balance DECIMAL(10, 2), balance2 DECIMAL(10, 2) GENERATED ALWAYS AS (-balance) VIRTUAL, created_at TIMESTAMP) PARTITION BY HASH(id) PARTITIONS 8") - tk.MustExec("insert into issue25528 (id, balance, created_at) values(1, 100, '2021-06-17 22:35:20')") - tk.MustExec("begin pessimistic") - tk.MustQuery("select * from issue25528 where id = 1 for update").Check(testkit.Rows("1 100.00 -100.00 2021-06-17 22:35:20")) - - tk.MustExec("drop table if exists issue25528") - tk.MustExec("CREATE TABLE `issue25528` ( `c1` int(11) NOT NULL, `c2` int(11) DEFAULT NULL, `c3` int(11) DEFAULT NULL, `c4` int(11) DEFAULT NULL, PRIMARY KEY (`c1`) /*T![clustered_index] CLUSTERED */, KEY `k2` (`c2`), KEY `k3` (`c3`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( `c1` ) PARTITIONS 10;") - tk.MustExec("INSERT INTO issue25528 (`c1`, `c2`, `c3`, `c4`) VALUES (1, 1, 1, 1) , 
(3, 3, 3, 3) , (2, 2, 2, 2) , (4, 4, 4, 4);") - tk.MustQuery("select * from issue25528 where c1 in (3, 4) order by c2 for update;").Check(testkit.Rows("3 3 3 3", "4 4 4 4")) -} - func TestIssue26251(t *testing.T) { store := testkit.CreateMockStore(t) @@ -3952,48 +2552,6 @@ func TestIssue31024(t *testing.T) { tk2.MustExec("rollback") } -func TestIssue27346(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk1 := testkit.NewTestKit(t, store) - tk1.MustExec("create database TestIssue27346") - defer tk1.MustExec("drop database TestIssue27346") - tk1.MustExec("use TestIssue27346") - - tk1.MustExec("set @@tidb_enable_index_merge=1,@@tidb_partition_prune_mode='dynamic'") - - tk1.MustExec("DROP TABLE IF EXISTS `tbl_18`") - tk1.MustExec("CREATE TABLE `tbl_18` (`col_119` binary(16) NOT NULL DEFAULT 'skPoKiwYUi',`col_120` int(10) unsigned NOT NULL,`col_121` timestamp NOT NULL,`col_122` double NOT NULL DEFAULT '3937.1887880628115',`col_123` bigint(20) NOT NULL DEFAULT '3550098074891542725',PRIMARY KEY (`col_123`,`col_121`,`col_122`,`col_120`) CLUSTERED,UNIQUE KEY `idx_103` (`col_123`,`col_119`,`col_120`),UNIQUE KEY `idx_104` (`col_122`,`col_120`),UNIQUE KEY `idx_105` (`col_119`,`col_120`),KEY `idx_106` (`col_121`,`col_120`,`col_122`,`col_119`),KEY `idx_107` (`col_121`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci PARTITION BY HASH( `col_120` ) PARTITIONS 3") - tk1.MustExec("INSERT INTO tbl_18 (`col_119`, `col_120`, `col_121`, `col_122`, `col_123`) VALUES (X'736b506f4b6977595569000000000000', 672436701, '1974-02-24 00:00:00', 3937.1887880628115e0, -7373106839136381229), (X'736b506f4b6977595569000000000000', 2637316689, '1993-10-29 00:00:00', 3937.1887880628115e0, -4522626077860026631), (X'736b506f4b6977595569000000000000', 831809724, '1995-11-20 00:00:00', 3937.1887880628115e0, -4426441253940231780), (X'736b506f4b6977595569000000000000', 1588592628, '2001-03-28 00:00:00', 3937.1887880628115e0, 1329207475772244999), (X'736b506f4b6977595569000000000000', 
3908038471, '2031-06-06 00:00:00', 3937.1887880628115e0, -6562815696723135786), (X'736b506f4b6977595569000000000000', 1674237178, '2001-10-24 00:00:00', 3937.1887880628115e0, -6459065549188938772), (X'736b506f4b6977595569000000000000', 3507075493, '2010-03-25 00:00:00', 3937.1887880628115e0, -4329597025765326929), (X'736b506f4b6977595569000000000000', 1276461709, '2019-07-20 00:00:00', 3937.1887880628115e0, 3550098074891542725)") - - tk1.MustQuery("select col_120,col_122,col_123 from tbl_18 where tbl_18.col_122 = 4763.320888074281 and not( tbl_18.col_121 in ( '2032-11-01' , '1975-05-21' , '1994-05-16' , '1984-01-15' ) ) or not( tbl_18.col_121 >= '2008-10-24' ) order by tbl_18.col_119,tbl_18.col_120,tbl_18.col_121,tbl_18.col_122,tbl_18.col_123 limit 919 for update").Sort().Check(testkit.Rows( - "1588592628 3937.1887880628115 1329207475772244999", - "1674237178 3937.1887880628115 -6459065549188938772", - "2637316689 3937.1887880628115 -4522626077860026631", - "672436701 3937.1887880628115 -7373106839136381229", - "831809724 3937.1887880628115 -4426441253940231780")) - tk1.MustQuery("select /*+ use_index_merge( tbl_18 ) */ col_120,col_122,col_123 from tbl_18 where tbl_18.col_122 = 4763.320888074281 and not( tbl_18.col_121 in ( '2032-11-01' , '1975-05-21' , '1994-05-16' , '1984-01-15' ) ) or not( tbl_18.col_121 >= '2008-10-24' ) order by tbl_18.col_119,tbl_18.col_120,tbl_18.col_121,tbl_18.col_122,tbl_18.col_123 limit 919 for update").Sort().Check(testkit.Rows( - "1588592628 3937.1887880628115 1329207475772244999", - "1674237178 3937.1887880628115 -6459065549188938772", - "2637316689 3937.1887880628115 -4522626077860026631", - "672436701 3937.1887880628115 -7373106839136381229", - "831809724 3937.1887880628115 -4426441253940231780")) -} - -func TestIssue35181(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("create database TestIssue35181") - tk.MustExec("use TestIssue35181") - tk.MustExec("CREATE TABLE `t` (`a` 
int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL) PARTITION BY RANGE (`a`) (PARTITION `p0` VALUES LESS THAN (2021), PARTITION `p1` VALUES LESS THAN (3000))") - - tk.MustExec("set @@tidb_partition_prune_mode = 'static'") - tk.MustExec(`insert into t select * from t where a=3000`) - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec(`insert into t select * from t where a=3000`) -} - func TestIssue21732(t *testing.T) { failpoint.Enable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune", `return(true)`) defer failpoint.Disable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune") @@ -4224,73 +2782,3 @@ func TestGlobalIndexMerge(t *testing.T) { tk.MustQuery("select /*+ use_index_merge(t, uidx_ac, idx_bc) */ * from t where a=1 or b=2").Sort().Check( testkit.Rows("1 1 1 1", "2 2 2 2")) } - -func TestIssue39999(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec(`create schema test39999`) - tk.MustExec(`use test39999`) - tk.MustExec(`set @@tidb_opt_advanced_join_hint=0`) - tk.MustExec(`drop table if exists c, t`) - tk.MustExec("CREATE TABLE `c` (" + - "`serial_id` varchar(24)," + - "`occur_trade_date` date," + - "`txt_account_id` varchar(24)," + - "`capital_sub_class` varchar(10)," + - "`occur_amount` decimal(16,2)," + - "`broker` varchar(10)," + - "PRIMARY KEY (`txt_account_id`,`occur_trade_date`,`serial_id`) /*T![clustered_index] CLUSTERED */," + - "KEY `idx_serial_id` (`serial_id`)" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci " + - "PARTITION BY RANGE COLUMNS(`serial_id`) (" + - "PARTITION `p202209` VALUES LESS THAN ('20221001')," + - "PARTITION `p202210` VALUES LESS THAN ('20221101')," + - "PARTITION `p202211` VALUES LESS THAN ('20221201')" + - ")") - - tk.MustExec("CREATE TABLE `t` ( " + - "`txn_account_id` varchar(24), " + - "`account_id` varchar(32), " + - "`broker` varchar(10), " + - "PRIMARY KEY (`txn_account_id`) /*T![clustered_index] CLUSTERED */ 
" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci") - - tk.MustExec("INSERT INTO `c` (serial_id, txt_account_id, capital_sub_class, occur_trade_date, occur_amount, broker) VALUES ('2022111700196920','04482786','CUST','2022-11-17',-2.01,'0009')") - tk.MustExec("INSERT INTO `t` VALUES ('04482786','1142927','0009')") - - tk.MustExec(`set tidb_partition_prune_mode='dynamic'`) - tk.MustExec(`analyze table c`) - tk.MustExec(`analyze table t`) - query := `select - /*+ inl_join(c) */ - c.occur_amount -from - c - join t on c.txt_account_id = t.txn_account_id - and t.broker = '0009' - and c.occur_trade_date = '2022-11-17'` - tk.MustQuery("explain " + query).Check(testkit.Rows(""+ - "IndexJoin_22 1.00 root inner join, inner:TableReader_21, outer key:test39999.t.txn_account_id, inner key:test39999.c.txt_account_id, equal cond:eq(test39999.t.txn_account_id, test39999.c.txt_account_id)", - "├─TableReader_27(Build) 1.00 root data:Selection_26", - "│ └─Selection_26 1.00 cop[tikv] eq(test39999.t.broker, \"0009\")", - "│ └─TableFullScan_25 1.00 cop[tikv] table:t keep order:false", - "└─TableReader_21(Probe) 1.00 root partition:all data:Selection_20", - " └─Selection_20 1.00 cop[tikv] eq(test39999.c.occur_trade_date, 2022-11-17 00:00:00.000000)", - " └─TableRangeScan_19 1.00 cop[tikv] table:c range: decided by [eq(test39999.c.txt_account_id, test39999.t.txn_account_id) eq(test39999.c.occur_trade_date, 2022-11-17 00:00:00.000000)], keep order:false")) - tk.MustQuery(query).Check(testkit.Rows("-2.01")) - - // Add the missing partition key part. 
- tk.MustExec(`alter table t add column serial_id varchar(24) default '2022111700196920'`) - query += ` and c.serial_id = t.serial_id` - tk.MustQuery(query).Check(testkit.Rows("-2.01")) - tk.MustQuery("explain " + query).Check(testkit.Rows(""+ - `IndexJoin_20 0.80 root inner join, inner:TableReader_19, outer key:test39999.t.txn_account_id, test39999.t.serial_id, inner key:test39999.c.txt_account_id, test39999.c.serial_id, equal cond:eq(test39999.t.serial_id, test39999.c.serial_id), eq(test39999.t.txn_account_id, test39999.c.txt_account_id)`, - `├─TableReader_25(Build) 0.80 root data:Selection_24`, - `│ └─Selection_24 0.80 cop[tikv] eq(test39999.t.broker, "0009"), not(isnull(test39999.t.serial_id))`, - `│ └─TableFullScan_23 1.00 cop[tikv] table:t keep order:false`, - `└─TableReader_19(Probe) 0.80 root partition:all data:Selection_18`, - ` └─Selection_18 0.80 cop[tikv] eq(test39999.c.occur_trade_date, 2022-11-17 00:00:00.000000)`, - ` └─TableRangeScan_17 0.80 cop[tikv] table:c range: decided by [eq(test39999.c.txt_account_id, test39999.t.txn_account_id) eq(test39999.c.serial_id, test39999.t.serial_id) eq(test39999.c.occur_trade_date, 2022-11-17 00:00:00.000000)], keep order:false`)) -} diff --git a/pkg/executor/test/partitiontest/BUILD.bazel b/pkg/executor/test/partitiontest/BUILD.bazel index 1f2f0adfeeb71..eba32c340a02d 100644 --- a/pkg/executor/test/partitiontest/BUILD.bazel +++ b/pkg/executor/test/partitiontest/BUILD.bazel @@ -9,7 +9,7 @@ go_test( ], flaky = True, race = "on", - shard_count = 5, + shard_count = 4, deps = [ "//pkg/testkit", "@com_github_pingcap_failpoint//:failpoint", diff --git a/pkg/executor/test/partitiontest/partition_test.go b/pkg/executor/test/partitiontest/partition_test.go index 28e9002060e27..cbffd97769b9e 100644 --- a/pkg/executor/test/partitiontest/partition_test.go +++ b/pkg/executor/test/partitiontest/partition_test.go @@ -447,57 +447,3 @@ func TestPartitionedTableDelete(t *testing.T) { tk.CheckExecResult(1, 0) tk.MustExec(`drop table 
t1;`) } - -func TestPartitionOnMissing(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("create schema OnMissing") - tk.MustExec("use OnMissing") - tk.MustExec(`set global tidb_partition_prune_mode='dynamic'`) - tk.MustExec(`set session tidb_partition_prune_mode='dynamic'`) - - tk.MustExec(`CREATE TABLE tt1 ( - id INT NOT NULL, - listid INT, - name varchar(10), - primary key (listid) clustered - ) - PARTITION BY LIST (listid) ( - PARTITION p1 VALUES IN (1), - PARTITION p2 VALUES IN (2), - PARTITION p3 VALUES IN (3), - PARTITION p4 VALUES IN (4) - )`) - - tk.MustExec(`CREATE TABLE tt2 ( - id INT NOT NULL, - listid INT - )`) - - tk.MustExec(`create index idx_listid on tt1(id,listid)`) - tk.MustExec(`create index idx_listid on tt2(listid)`) - - tk.MustExec(`insert into tt1 values(1,1,1)`) - tk.MustExec(`insert into tt1 values(2,2,2)`) - tk.MustExec(`insert into tt1 values(3,3,3)`) - tk.MustExec(`insert into tt1 values(4,4,4)`) - tk.MustExec(`insert into tt2 values(1,1)`) - tk.MustExec(`insert into tt2 values(2,2)`) - tk.MustExec(`insert into tt2 values(3,3)`) - tk.MustExec(`insert into tt2 values(4,4)`) - tk.MustExec(`insert into tt2 values(5,5)`) - - tk.MustExec(`analyze table tt1`) - tk.MustExec(`analyze table tt2`) - - tk.MustQuery(`select /*+ inl_join(tt1)*/ count(*) from tt2 - left join tt1 on tt1.listid=tt2.listid and tt1.id=tt2.id`).Check(testkit.Rows("5")) - tk.MustQuery(`select /*+ inl_join(tt1)*/ count(*) from tt2 - left join tt1 on tt1.listid=tt2.listid`).Check(testkit.Rows("5")) - tk.MustQuery(`explain format = 'brief' select /*+ inl_join(tt1)*/ count(*) from tt2 - left join tt1 on tt1.listid=tt2.listid`).Check(testkit.Rows(""+ - "StreamAgg 1.00 root funcs:count(Column#13)->Column#7", - "└─IndexReader 1.00 root index:StreamAgg", - " └─StreamAgg 1.00 cop[tikv] funcs:count(1)->Column#13", - " └─IndexFullScan 5.00 cop[tikv] table:tt2, index:idx_listid(listid) keep order:false")) -} diff --git 
a/pkg/executor/testdata/executor_suite_in.json b/pkg/executor/testdata/executor_suite_in.json deleted file mode 100644 index 484014cd62126..0000000000000 --- a/pkg/executor/testdata/executor_suite_in.json +++ /dev/null @@ -1,559 +0,0 @@ -[ - { - "name": "TestRangePartitionBoundariesEq", - "cases": [ - "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", - "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", - "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", - "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", - "ANALYZE TABLE t", - "SELECT * FROM t WHERE a = -2147483648", - "SELECT * FROM t WHERE a IN (-2147483648)", - "SELECT * FROM t WHERE a = 0", - "SELECT * FROM t WHERE a IN (0)", - "SELECT * FROM t WHERE a = 999998", - "SELECT * FROM t WHERE a IN (999998)", - "SELECT * FROM t WHERE a = 999999", - "SELECT * FROM t WHERE a IN (999999)", - "SELECT * FROM t WHERE a = 1000000", - "SELECT * FROM t WHERE a IN (1000000)", - "SELECT * FROM t WHERE a = 1000001", - "SELECT * FROM t WHERE a IN (1000001)", - "SELECT * FROM t WHERE a = 1000002", - "SELECT * FROM t WHERE a IN (1000002)", - "SELECT * FROM t WHERE a = 3000000", - "SELECT * FROM t WHERE a IN (3000000)", - "SELECT * FROM t WHERE a = 3000001", - "SELECT * FROM t WHERE a IN (3000001)", - "SELECT * FROM t WHERE a IN (-2147483648, -2147483647)", - "SELECT * FROM t WHERE a IN (-2147483647, -2147483646)", - "SELECT * FROM t WHERE a IN (999997, 999998, 999999)", - "SELECT * FROM t WHERE a IN (999998, 999999, 1000000)", - "SELECT * FROM t WHERE a IN (999999, 1000000, 1000001)", - "SELECT * FROM t WHERE a IN (1000000, 1000001, 1000002)", - "SELECT * FROM t WHERE a IN (1999997, 1999998, 
1999999)", - "SELECT * FROM t WHERE a IN (1999998, 1999999, 2000000)", - "SELECT * FROM t WHERE a IN (1999999, 2000000, 2000001)", - "SELECT * FROM t WHERE a IN (2000000, 2000001, 2000002)", - "SELECT * FROM t WHERE a IN (2999997, 2999998, 2999999)", - "SELECT * FROM t WHERE a IN (2999998, 2999999, 3000000)", - "SELECT * FROM t WHERE a IN (2999999, 3000000, 3000001)", - "SELECT * FROM t WHERE a IN (3000000, 3000001, 3000002)" - ] - }, - { - "name": "TestRangePartitionBoundariesNe", - "cases": [ - "INSERT INTO t VALUES (0, '0 Filler...')", - "INSERT INTO t VALUES (1, '1 Filler...')", - "INSERT INTO t VALUES (2, '2 Filler...')", - "INSERT INTO t VALUES (3, '3 Filler...')", - "INSERT INTO t VALUES (4, '4 Filler...')", - "INSERT INTO t VALUES (5, '5 Filler...')", - "INSERT INTO t VALUES (6, '6 Filler...')", - "ANALYZE TABLE t", - "SELECT * FROM t WHERE a != -1", - "SELECT * FROM t WHERE 1 = 1 AND a != -1", - "SELECT * FROM t WHERE a NOT IN (-2, -1)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1", - "SELECT * FROM t WHERE a != 0", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0", - "SELECT * FROM t WHERE a != 1", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1", - "SELECT * FROM t WHERE a != 2", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2", - "SELECT * FROM t WHERE a != 3", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3", - "SELECT * FROM t WHERE a != 4", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 
1 AND a != 2 AND a != 3 AND a != 4", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4", - "SELECT * FROM t WHERE a != 5", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5", - "SELECT * FROM t WHERE a != 6", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6", - "SELECT * FROM t WHERE a != 7", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6 AND a != 7", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6, 7)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7" - ] - }, - { - "name": "TestRangePartitionBoundariesBetweenM", - "cases": [ - "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", - "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", - "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", - "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", - "ANALYZE TABLE t", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND 
-2147483647", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646", - "SELECT * FROM t WHERE a BETWEEN 0 AND -1", - "SELECT * FROM t WHERE a BETWEEN 0 AND 0", - "SELECT * FROM t WHERE a BETWEEN 0 AND 1", - "SELECT * FROM t WHERE a BETWEEN 0 AND 2", - "SELECT * FROM t WHERE a BETWEEN 0 AND 10", - "SELECT * FROM t WHERE a BETWEEN 0 AND 999998", - "SELECT * FROM t WHERE a BETWEEN 0 AND 999999", - "SELECT * FROM t WHERE a BETWEEN 0 AND 1000000", - "SELECT * FROM t WHERE a BETWEEN 0 AND 1000001", - "SELECT * FROM t WHERE a BETWEEN 0 AND 1000002", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 999997", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 999998", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 999999", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 999998", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 999999", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 
2000000", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 
3000002", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003" - ] - }, - { - "name": "TestRangePartitionBoundariesBetweenS", - "cases": [ - "INSERT INTO t VALUES (0, '0 Filler...')", - "INSERT INTO t VALUES (1, '1 Filler...')", - "INSERT INTO t VALUES (2, '2 Filler...')", - "INSERT INTO t VALUES (3, '3 Filler...')", - "INSERT INTO t VALUES (4, '4 Filler...')", - "INSERT INTO t VALUES (5, '5 Filler...')", - "INSERT INTO t VALUES (6, '6 Filler...')", - "ANALYZE TABLE t", - "SELECT * FROM t WHERE a BETWEEN 2 AND -1", - "SELECT * FROM t WHERE a BETWEEN -1 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 0", - "SELECT * FROM t WHERE a BETWEEN 0 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 1", - "SELECT * FROM t WHERE a BETWEEN 1 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 2", - "SELECT * FROM t WHERE a BETWEEN 2 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 3", - "SELECT * FROM t WHERE a BETWEEN 3 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 4", - "SELECT * FROM t WHERE a BETWEEN 4 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 5", - "SELECT * FROM t WHERE a BETWEEN 5 AND 4", - "SELECT * FROM t WHERE a BETWEEN 
2 AND 6", - "SELECT * FROM t WHERE a BETWEEN 6 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 7", - "SELECT * FROM t WHERE a BETWEEN 7 AND 4" - ] - }, - { - "name": "TestRangePartitionBoundariesLtM", - "cases": [ - "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", - "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", - "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", - "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", - "ANALYZE TABLE t", - "SELECT * FROM t WHERE a < -2147483648", - "SELECT * FROM t WHERE a > -2147483648", - "SELECT * FROM t WHERE a <= -2147483648", - "SELECT * FROM t WHERE a >= -2147483648", - "SELECT * FROM t WHERE a < 0", - "SELECT * FROM t WHERE a > 0", - "SELECT * FROM t WHERE a <= 0", - "SELECT * FROM t WHERE a >= 0", - "SELECT * FROM t WHERE a < 999998", - "SELECT * FROM t WHERE a > 999998", - "SELECT * FROM t WHERE a <= 999998", - "SELECT * FROM t WHERE a >= 999998", - "SELECT * FROM t WHERE a < 999999", - "SELECT * FROM t WHERE a > 999999", - "SELECT * FROM t WHERE a <= 999999", - "SELECT * FROM t WHERE a >= 999999", - "SELECT * FROM t WHERE a < 1000000", - "SELECT * FROM t WHERE a > 1000000", - "SELECT * FROM t WHERE a <= 1000000", - "SELECT * FROM t WHERE a >= 1000000", - "SELECT * FROM t WHERE a < 1000001", - "SELECT * FROM t WHERE a > 1000001", - "SELECT * FROM t WHERE a <= 1000001", - "SELECT * FROM t WHERE a >= 1000001", - "SELECT * FROM t WHERE a < 1000002", - "SELECT * FROM t WHERE a > 1000002", - "SELECT * FROM t WHERE a <= 1000002", - "SELECT * FROM t WHERE a >= 1000002", - "SELECT * FROM t WHERE a < 3000000", - "SELECT * FROM t WHERE a > 3000000", - "SELECT * FROM t WHERE a <= 3000000", - 
"SELECT * FROM t WHERE a >= 3000000", - "SELECT * FROM t WHERE a < 3000001", - "SELECT * FROM t WHERE a > 3000001", - "SELECT * FROM t WHERE a <= 3000001", - "SELECT * FROM t WHERE a >= 3000001", - "SELECT * FROM t WHERE a < 999997", - "SELECT * FROM t WHERE a > 999997", - "SELECT * FROM t WHERE a <= 999997", - "SELECT * FROM t WHERE a >= 999997", - "SELECT * FROM t WHERE a >= 999997 AND a <= 999999", - "SELECT * FROM t WHERE a > 999997 AND a <= 999999", - "SELECT * FROM t WHERE a > 999997 AND a < 999999", - "SELECT * FROM t WHERE a > 999997 AND a <= 999999", - "SELECT * FROM t WHERE a < 999998", - "SELECT * FROM t WHERE a > 999998", - "SELECT * FROM t WHERE a <= 999998", - "SELECT * FROM t WHERE a >= 999998", - "SELECT * FROM t WHERE a >= 999998 AND a <= 1000000", - "SELECT * FROM t WHERE a > 999998 AND a <= 1000000", - "SELECT * FROM t WHERE a > 999998 AND a < 1000000", - "SELECT * FROM t WHERE a > 999998 AND a <= 1000000", - "SELECT * FROM t WHERE a < 999999", - "SELECT * FROM t WHERE a > 999999", - "SELECT * FROM t WHERE a <= 999999", - "SELECT * FROM t WHERE a >= 999999", - "SELECT * FROM t WHERE a >= 999999 AND a <= 1000001", - "SELECT * FROM t WHERE a > 999999 AND a <= 1000001", - "SELECT * FROM t WHERE a > 999999 AND a < 1000001", - "SELECT * FROM t WHERE a > 999999 AND a <= 1000001", - "SELECT * FROM t WHERE a < 1000000", - "SELECT * FROM t WHERE a > 1000000", - "SELECT * FROM t WHERE a <= 1000000", - "SELECT * FROM t WHERE a >= 1000000", - "SELECT * FROM t WHERE a >= 1000000 AND a <= 1000002", - "SELECT * FROM t WHERE a > 1000000 AND a <= 1000002", - "SELECT * FROM t WHERE a > 1000000 AND a < 1000002", - "SELECT * FROM t WHERE a > 1000000 AND a <= 1000002", - "SELECT * FROM t WHERE a < 1999997", - "SELECT * FROM t WHERE a > 1999997", - "SELECT * FROM t WHERE a <= 1999997", - "SELECT * FROM t WHERE a >= 1999997", - "SELECT * FROM t WHERE a >= 1999997 AND a <= 1999999", - "SELECT * FROM t WHERE a > 1999997 AND a <= 1999999", - "SELECT * FROM t WHERE a > 
1999997 AND a < 1999999", - "SELECT * FROM t WHERE a > 1999997 AND a <= 1999999", - "SELECT * FROM t WHERE a < 1999998", - "SELECT * FROM t WHERE a > 1999998", - "SELECT * FROM t WHERE a <= 1999998", - "SELECT * FROM t WHERE a >= 1999998", - "SELECT * FROM t WHERE a >= 1999998 AND a <= 2000000", - "SELECT * FROM t WHERE a > 1999998 AND a <= 2000000", - "SELECT * FROM t WHERE a > 1999998 AND a < 2000000", - "SELECT * FROM t WHERE a > 1999998 AND a <= 2000000", - "SELECT * FROM t WHERE a < 1999999", - "SELECT * FROM t WHERE a > 1999999", - "SELECT * FROM t WHERE a <= 1999999", - "SELECT * FROM t WHERE a >= 1999999", - "SELECT * FROM t WHERE a >= 1999999 AND a <= 2000001", - "SELECT * FROM t WHERE a > 1999999 AND a <= 2000001", - "SELECT * FROM t WHERE a > 1999999 AND a < 2000001", - "SELECT * FROM t WHERE a > 1999999 AND a <= 2000001", - "SELECT * FROM t WHERE a < 2000000", - "SELECT * FROM t WHERE a > 2000000", - "SELECT * FROM t WHERE a <= 2000000", - "SELECT * FROM t WHERE a >= 2000000", - "SELECT * FROM t WHERE a >= 2000000 AND a <= 2000002", - "SELECT * FROM t WHERE a > 2000000 AND a <= 2000002", - "SELECT * FROM t WHERE a > 2000000 AND a < 2000002", - "SELECT * FROM t WHERE a > 2000000 AND a <= 2000002", - "SELECT * FROM t WHERE a < 2999997", - "SELECT * FROM t WHERE a > 2999997", - "SELECT * FROM t WHERE a <= 2999997", - "SELECT * FROM t WHERE a >= 2999997", - "SELECT * FROM t WHERE a >= 2999997 AND a <= 2999999", - "SELECT * FROM t WHERE a > 2999997 AND a <= 2999999", - "SELECT * FROM t WHERE a > 2999997 AND a < 2999999", - "SELECT * FROM t WHERE a > 2999997 AND a <= 2999999", - "SELECT * FROM t WHERE a < 2999998", - "SELECT * FROM t WHERE a > 2999998", - "SELECT * FROM t WHERE a <= 2999998", - "SELECT * FROM t WHERE a >= 2999998", - "SELECT * FROM t WHERE a >= 2999998 AND a <= 3000000", - "SELECT * FROM t WHERE a > 2999998 AND a <= 3000000", - "SELECT * FROM t WHERE a > 2999998 AND a < 3000000", - "SELECT * FROM t WHERE a > 2999998 AND a <= 3000000", - 
"SELECT * FROM t WHERE a < 2999999", - "SELECT * FROM t WHERE a > 2999999", - "SELECT * FROM t WHERE a <= 2999999", - "SELECT * FROM t WHERE a >= 2999999", - "SELECT * FROM t WHERE a >= 2999999 AND a <= 3000001", - "SELECT * FROM t WHERE a > 2999999 AND a <= 3000001", - "SELECT * FROM t WHERE a > 2999999 AND a < 3000001", - "SELECT * FROM t WHERE a > 2999999 AND a <= 3000001", - "SELECT * FROM t WHERE a < 3000000", - "SELECT * FROM t WHERE a > 3000000", - "SELECT * FROM t WHERE a <= 3000000", - "SELECT * FROM t WHERE a >= 3000000", - "SELECT * FROM t WHERE a >= 3000000 AND a <= 3000002", - "SELECT * FROM t WHERE a > 3000000 AND a <= 3000002", - "SELECT * FROM t WHERE a > 3000000 AND a < 3000002", - "SELECT * FROM t WHERE a > 3000000 AND a <= 3000002" - ] - }, - { - "name": "TestRangePartitionBoundariesLtS", - "cases": [ - "INSERT INTO t VALUES (0, '0 Filler...')", - "INSERT INTO t VALUES (1, '1 Filler...')", - "INSERT INTO t VALUES (2, '2 Filler...')", - "INSERT INTO t VALUES (3, '3 Filler...')", - "INSERT INTO t VALUES (4, '4 Filler...')", - "INSERT INTO t VALUES (5, '5 Filler...')", - "INSERT INTO t VALUES (6, '6 Filler...')", - "ANALYZE TABLE t", - "SELECT * FROM t WHERE a < -1", - "SELECT * FROM t WHERE a > -1", - "SELECT * FROM t WHERE a <= -1", - "SELECT * FROM t WHERE a >= -1", - "SELECT * FROM t WHERE a < 2 OR a > -1", - "SELECT * FROM t WHERE a > 2 AND a < -1", - "SELECT * FROM t WHERE NOT (a < 2 OR a > -1)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < -1)", - "SELECT * FROM t WHERE a < 2 OR a >= -1", - "SELECT * FROM t WHERE a >= 2 AND a < -1", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= -1)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < -1)", - "SELECT * FROM t WHERE a <= 2 OR a > -1", - "SELECT * FROM t WHERE a > 2 AND a <= -1", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > -1)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= -1)", - "SELECT * FROM t WHERE a <= 2 OR a >= -1", - "SELECT * FROM t WHERE a >= 2 AND a <= -1", - "SELECT * FROM t WHERE NOT (a <= 2 
OR a >= -1)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= -1)", - "SELECT * FROM t WHERE a < 0", - "SELECT * FROM t WHERE a > 0", - "SELECT * FROM t WHERE a <= 0", - "SELECT * FROM t WHERE a >= 0", - "SELECT * FROM t WHERE a < 2 OR a > 0", - "SELECT * FROM t WHERE a > 2 AND a < 0", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 0)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 0)", - "SELECT * FROM t WHERE a < 2 OR a >= 0", - "SELECT * FROM t WHERE a >= 2 AND a < 0", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 0)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 0)", - "SELECT * FROM t WHERE a <= 2 OR a > 0", - "SELECT * FROM t WHERE a > 2 AND a <= 0", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 0)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 0)", - "SELECT * FROM t WHERE a <= 2 OR a >= 0", - "SELECT * FROM t WHERE a >= 2 AND a <= 0", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 0)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 0)", - "SELECT * FROM t WHERE a < 1", - "SELECT * FROM t WHERE a > 1", - "SELECT * FROM t WHERE a <= 1", - "SELECT * FROM t WHERE a >= 1", - "SELECT * FROM t WHERE a < 2 OR a > 1", - "SELECT * FROM t WHERE a > 2 AND a < 1", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 1)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 1)", - "SELECT * FROM t WHERE a < 2 OR a >= 1", - "SELECT * FROM t WHERE a >= 2 AND a < 1", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 1)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 1)", - "SELECT * FROM t WHERE a <= 2 OR a > 1", - "SELECT * FROM t WHERE a > 2 AND a <= 1", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 1)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 1)", - "SELECT * FROM t WHERE a <= 2 OR a >= 1", - "SELECT * FROM t WHERE a >= 2 AND a <= 1", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 1)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 1)", - "SELECT * FROM t WHERE a < 2", - "SELECT * FROM t WHERE a > 2", - "SELECT * FROM t WHERE a <= 2", - "SELECT * FROM t WHERE a >= 2", - "SELECT * FROM t WHERE a < 2 OR a > 2", - 
"SELECT * FROM t WHERE a > 2 AND a < 2", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 2)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 2)", - "SELECT * FROM t WHERE a < 2 OR a >= 2", - "SELECT * FROM t WHERE a >= 2 AND a < 2", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 2)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 2)", - "SELECT * FROM t WHERE a <= 2 OR a > 2", - "SELECT * FROM t WHERE a > 2 AND a <= 2", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 2)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 2)", - "SELECT * FROM t WHERE a <= 2 OR a >= 2", - "SELECT * FROM t WHERE a >= 2 AND a <= 2", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 2)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 2)", - "SELECT * FROM t WHERE a < 3", - "SELECT * FROM t WHERE a > 3", - "SELECT * FROM t WHERE a <= 3", - "SELECT * FROM t WHERE a >= 3", - "SELECT * FROM t WHERE a < 2 OR a > 3", - "SELECT * FROM t WHERE a > 2 AND a < 3", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 3)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 3)", - "SELECT * FROM t WHERE a < 2 OR a >= 3", - "SELECT * FROM t WHERE a >= 2 AND a < 3", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 3)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 3)", - "SELECT * FROM t WHERE a <= 2 OR a > 3", - "SELECT * FROM t WHERE a > 2 AND a <= 3", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 3)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 3)", - "SELECT * FROM t WHERE a <= 2 OR a >= 3", - "SELECT * FROM t WHERE a >= 2 AND a <= 3", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 3)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 3)", - "SELECT * FROM t WHERE a < 4", - "SELECT * FROM t WHERE a > 4", - "SELECT * FROM t WHERE a <= 4", - "SELECT * FROM t WHERE a >= 4", - "SELECT * FROM t WHERE a < 2 OR a > 4", - "SELECT * FROM t WHERE a > 2 AND a < 4", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 4)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 4)", - "SELECT * FROM t WHERE a < 2 OR a >= 4", - "SELECT * FROM t WHERE a >= 2 AND a < 4", - "SELECT * FROM t 
WHERE NOT (a < 2 OR a >= 4)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 4)", - "SELECT * FROM t WHERE a <= 2 OR a > 4", - "SELECT * FROM t WHERE a > 2 AND a <= 4", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 4)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 4)", - "SELECT * FROM t WHERE a <= 2 OR a >= 4", - "SELECT * FROM t WHERE a >= 2 AND a <= 4", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 4)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 4)", - "SELECT * FROM t WHERE a < 5", - "SELECT * FROM t WHERE a > 5", - "SELECT * FROM t WHERE a <= 5", - "SELECT * FROM t WHERE a >= 5", - "SELECT * FROM t WHERE a < 2 OR a > 5", - "SELECT * FROM t WHERE a > 2 AND a < 5", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 5)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 5)", - "SELECT * FROM t WHERE a < 2 OR a >= 5", - "SELECT * FROM t WHERE a >= 2 AND a < 5", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 5)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 5)", - "SELECT * FROM t WHERE a <= 2 OR a > 5", - "SELECT * FROM t WHERE a > 2 AND a <= 5", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 5)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 5)", - "SELECT * FROM t WHERE a <= 2 OR a >= 5", - "SELECT * FROM t WHERE a >= 2 AND a <= 5", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 5)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 5)", - "SELECT * FROM t WHERE a < 6", - "SELECT * FROM t WHERE a > 6", - "SELECT * FROM t WHERE a <= 6", - "SELECT * FROM t WHERE a >= 6", - "SELECT * FROM t WHERE a < 2 OR a > 6", - "SELECT * FROM t WHERE a > 2 AND a < 6", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 6)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 6)", - "SELECT * FROM t WHERE a < 2 OR a >= 6", - "SELECT * FROM t WHERE a >= 2 AND a < 6", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 6)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 6)", - "SELECT * FROM t WHERE a <= 2 OR a > 6", - "SELECT * FROM t WHERE a > 2 AND a <= 6", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 6)", - "SELECT * FROM t WHERE 
NOT (a > 2 AND a <= 6)", - "SELECT * FROM t WHERE a <= 2 OR a >= 6", - "SELECT * FROM t WHERE a >= 2 AND a <= 6", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 6)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 6)", - "SELECT * FROM t WHERE a < 7", - "SELECT * FROM t WHERE a > 7", - "SELECT * FROM t WHERE a <= 7", - "SELECT * FROM t WHERE a >= 7", - "SELECT * FROM t WHERE a < 2 OR a > 7", - "SELECT * FROM t WHERE a > 2 AND a < 7", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 7)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 7)", - "SELECT * FROM t WHERE a < 2 OR a >= 7", - "SELECT * FROM t WHERE a >= 2 AND a < 7", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 7)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 7)", - "SELECT * FROM t WHERE a <= 2 OR a > 7", - "SELECT * FROM t WHERE a > 2 AND a <= 7", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 7)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 7)", - "SELECT * FROM t WHERE a <= 2 OR a >= 7", - "SELECT * FROM t WHERE a >= 2 AND a <= 7", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 7)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 7)" - ] - } -] diff --git a/pkg/executor/testdata/executor_suite_out.json b/pkg/executor/testdata/executor_suite_out.json deleted file mode 100644 index 3ed1e0bfd0868..0000000000000 --- a/pkg/executor/testdata/executor_suite_out.json +++ /dev/null @@ -1,5773 +0,0 @@ -[ - { - "Name": "TestRangePartitionBoundariesEq", - "Cases": [ - { - "SQL": "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", - "Plan": 
null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "ANALYZE TABLE t", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a = -2147483648", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (-2147483648)", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 0", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (0)", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 999998", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (999998)", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (999999)", - "Plan": [ - "p0" - ], - "Res": [ - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 1000000", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1000000)", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 1000001", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1000001)", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1000002)", - "Plan": [ - "p1" - ], - "Res": [ - "1000002 1000002 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 3000000", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a IN (3000000)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a = 3000001", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a IN (3000001)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a IN (-2147483648, -2147483647)", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (-2147483647, -2147483646)", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a IN (999997, 999998, 999999)", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (999998, 999999, 1000000)", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (999999, 1000000, 1000001)", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1000000, 1000001, 1000002)", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1999997, 1999998, 1999999)", - "Plan": [ - "p1" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1999998, 1999999, 2000000)", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1999999, 2000000, 2000001)", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (2000000, 2000001, 2000002)", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (2999997, 2999998, 2999999)", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (2999998, 2999999, 3000000)", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (2999999, 3000000, 3000001)", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (3000000, 3000001, 3000002)", - "Plan": [ - "dual" - ], - "Res": null - } - ] - }, - { - "Name": "TestRangePartitionBoundariesNe", - "Cases": [ - { - "SQL": "INSERT INTO t VALUES (0, '0 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (1, '1 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (2, '2 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (3, '3 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (4, '4 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (5, '5 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (6, '6 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "ANALYZE TABLE t", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a != -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", 
- "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a != 0", - "Plan": [ - "all" - ], - "Res": [ - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0", - "Plan": [ - "all" - ], - "Res": [ - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0)", - "Plan": [ - "all" - ], - "Res": [ - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1", - "Plan": [ - "all" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1)", - "Plan": [ - "all" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 2", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2", - "Plan": [ - "all" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2)", - "Plan": [ - "all" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 3", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3", - "Plan": [ - "all" - ], - "Res": [ - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3)", - "Plan": [ - "all" - ], - "Res": [ - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3", - "Plan": [ - "p0 p1 p2 p3" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 4", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4", - "Plan": [ - "all" - ], - "Res": [ - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4)", - "Plan": [ - "all" - ], - "Res": [ - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4", - "Plan": [ - "p0 p1 p2 p3 p4" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 5", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5", - "Plan": [ - "all" - ], - "Res": [ - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5)", - "Plan": [ - "all" - ], - "Res": [ - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5", - "Plan": [ - "p0 p1 p2 p3 p4 p5" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 6", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6", - "Plan": [ - "all" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6)", - "Plan": [ - "all" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 7", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6 AND a != 7", - "Plan": [ - "all" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6, 7)", - "Plan": [ - "all" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - } - ] - }, - { - "Name": "TestRangePartitionBoundariesBetweenM", - "Cases": [ - { - "SQL": "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "ANALYZE TABLE t", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND -1", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 0", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 1", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 2", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 10", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 999998", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler...", - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 999999", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 1000001", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 1000002", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 999997", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 999998", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 999998", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000", - "Plan": [ - "p1" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001", - "Plan": [ - "p1" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003", - "Plan": [ - "p1" - ], - "Res": [ - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004", - "Plan": [ - "p1" - ], - "Res": [ - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012", - "Plan": [ - "p1" - ], - "Res": [ - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * 
FROM t WHERE a BETWEEN 3000001 AND 3000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003", - "Plan": [ - "dual" - ], - "Res": null - } - ] - }, - { - "Name": "TestRangePartitionBoundariesBetweenS", - "Cases": [ - { - "SQL": "INSERT INTO t VALUES (0, '0 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (1, '1 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (2, '2 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (3, '3 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (4, '4 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (5, '5 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (6, '6 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "ANALYZE TABLE t", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND -1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -1 AND 4", - "Plan": [ - "p0 p1 p2 p3 p4" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 0", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 4", - "Plan": [ - "p0 p1 p2 p3 p4" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1 AND 4", - "Plan": [ - "p1 p2 p3 p4" - ], - "Res": [ - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 2", - "Plan": [ - "p2" - ], - "Res": [ - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 4", - "Plan": [ - "p2 p3 p4" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 3", - "Plan": [ - "p2 p3" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3 AND 4", - "Plan": [ - "p3 p4" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 4", - "Plan": [ - "p2 p3 p4" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 4 AND 4", - "Plan": [ - "p4" - ], - "Res": [ - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 5", - "Plan": [ - "p2 p3 p4 p5" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 5 AND 4", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 6", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 6 AND 4", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 7", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 7 AND 4", - "Plan": [ - "dual" - ], - "Res": null - } - ] - }, - { - "Name": "TestRangePartitionBoundariesLtM", - "Cases": [ - { - "SQL": "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "ANALYZE TABLE t", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a < -2147483648", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > -2147483648", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= -2147483648", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= -2147483648", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 0", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 0", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 0", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 0", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 999998", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999998", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 999998", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999998", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 999999", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999999", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 999999", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999999", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1000000", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1000001", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000001", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1000001", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1000001", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1000002", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000002", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1000002", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1000002", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 3000000", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 3000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a <= 3000000", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 3000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a < 3000001", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a <= 3000001", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a < 999997", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999997", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 999997", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999997", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999997 AND a <= 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999997 AND a <= 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999997 AND a < 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999997 AND a <= 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 999998", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999998", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 999998", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999998", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999998 AND a <= 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999998 AND a <= 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999998 AND a < 1000000", - "Plan": [ - "p0" - ], - "Res": [ - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999998 AND a <= 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 999999", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999999", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 999999", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999999", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999999 AND a <= 1000001", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999999 AND a <= 1000001", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999999 AND a < 1000001", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999999 AND a <= 1000001", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1000000", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1000000 AND a <= 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000000 AND a <= 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000000 AND a < 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000000 AND a <= 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1999997", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999997", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1999997", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1999997", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1999997 AND a <= 1999999", - "Plan": [ - "p1" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999997 AND a <= 1999999", - "Plan": [ - "p1" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999997 AND a < 1999999", - "Plan": [ - "p1" - ], - "Res": [ - "1999998 1999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999997 AND a <= 1999999", - "Plan": [ - "p1" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1999998", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999998", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1999998", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1999998", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1999998 AND a <= 2000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999998 AND a <= 2000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999998 AND a < 2000000", - "Plan": [ - "p1" - ], - "Res": [ - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999998 AND a <= 2000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1999999", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999999", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1999999", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1999999", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1999999 AND a <= 2000001", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999999 AND a <= 2000001", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999999 AND a < 2000001", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999999 AND a <= 2000001", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2000000", - "Plan": [ - "p2" - ], - "Res": [ - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2000000", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2000000", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2000000 AND a <= 2000002", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2000000 AND a <= 2000002", - "Plan": [ - "p2" - ], - "Res": [ - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2000000 AND a < 2000002", - "Plan": [ - "p2" - ], - "Res": [ - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2000000 AND a <= 2000002", - "Plan": [ - "p2" - ], - "Res": [ - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2999997", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999997", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2999997", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2999997", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2999997 AND a <= 2999999", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999997 AND a <= 2999999", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999997 AND a < 2999999", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999997 AND a <= 2999999", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2999998", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999998", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2999998", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2999998", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2999998 AND a <= 3000000", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999998 AND a <= 3000000", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999998 AND a < 3000000", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999998 AND a <= 3000000", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2999999", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999999", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2999999", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2999999", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2999999 AND a <= 3000001", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999999 AND a <= 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999999 AND a < 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999999 AND a <= 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a < 3000000", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 3000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a <= 3000000", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 3000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a >= 3000000 AND a <= 3000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > 3000000 AND a <= 3000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > 3000000 AND a < 3000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > 3000000 AND a <= 3000002", - "Plan": [ - "dual" - ], - "Res": null - } - ] - }, - { - "Name": "TestRangePartitionBoundariesLtS", - "Cases": [ - { - "SQL": "INSERT INTO t VALUES (0, '0 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (1, '1 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (2, '2 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (3, '3 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (4, '4 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (5, '5 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (6, '6 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "ANALYZE TABLE t", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a < -1", - 
"Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= -1", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a >= -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < -1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > -1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < -1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < -1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= -1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < -1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= -1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > -1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= -1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= -1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= -1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= -1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 0", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > 0", - "Plan": [ - "p1 p2 p3 p4 p5 p6" - ], - "Res": [ - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 0", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 0", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 0", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 0", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 0)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 0)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 0", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 0", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 0)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 0)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 0", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 0", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 0)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 0)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 0", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 0", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 0)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 0)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1", - "Plan": [ - "p1 p2 p3 p4 p5 p6" - ], - "Res": [ - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 2", - "Plan": [ - "p0 p1 p3 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 2", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 2)", - "Plan": [ - "p2" - ], - "Res": [ - "2 2 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 2)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 2", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 2", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 2)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 2)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 2", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 2", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 2)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 2)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 2", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 2", - "Plan": [ - "p2" - ], - "Res": [ - "2 2 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 2)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 2)", - "Plan": [ - "p0 p1 p3 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 3", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 3", - "Plan": [ - "p4 p5 p6" - ], - "Res": [ - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 3", - "Plan": [ - "p0 p1 p2 p3" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 3", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 3", - "Plan": [ - "p0 p1 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 3", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 3)", - "Plan": [ - "p2 p3" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 3)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 3", - "Plan": [ - "p0 p1 p3 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 3", - "Plan": [ - "p2" - ], - "Res": [ - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 3)", - "Plan": [ - "p2" - ], - "Res": [ - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 3)", - "Plan": [ - "p0 p1 p3 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 3", - "Plan": [ - "p0 p1 p2 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 3", - "Plan": [ - "p3" - ], - "Res": [ - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 3)", - "Plan": [ - "p3" - ], - "Res": [ - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 3)", - "Plan": [ - "p0 p1 p2 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 3", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 3", - "Plan": [ - "p2 p3" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 3)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 3)", - "Plan": [ - "p0 p1 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 4", - "Plan": [ - "p0 p1 p2 p3" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 4", - "Plan": [ - "p5 p6" - ], - "Res": [ - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 4", - "Plan": [ - "p0 p1 p2 p3 p4" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 4", - "Plan": [ - "p4 p5 p6" - ], - "Res": [ - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 4", - "Plan": [ - "p0 p1 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 4", - "Plan": [ - "p3" - ], - "Res": [ - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 4)", - "Plan": [ - "p2 p3 p4" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 4)", - "Plan": [ - "p0 p1 p2 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 4", - "Plan": [ - "p0 p1 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 4", - "Plan": [ - "p2 p3" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 4)", - "Plan": [ - "p2 p3" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 4)", - "Plan": [ - "p0 p1 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 4", - "Plan": [ - "p0 p1 p2 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 4", - "Plan": [ - "p3 p4" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 4)", - "Plan": [ - "p3 p4" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 4)", - "Plan": [ - "p0 p1 p2 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 4", - "Plan": [ - "p0 p1 p2 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 4", - "Plan": [ - "p2 p3 p4" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 4)", - "Plan": [ - "p3" - ], - "Res": [ - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 4)", - "Plan": [ - "p0 p1 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 5", - "Plan": [ - "p0 p1 p2 p3 p4" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 5", - "Plan": [ - "p6" - ], - "Res": [ - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 5", - "Plan": [ - "p0 p1 p2 p3 p4 p5" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 5", - "Plan": [ - "p5 p6" - ], - "Res": [ - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 5", - "Plan": [ - "p0 p1 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 5", - "Plan": [ - "p3 p4" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 5)", - "Plan": [ - "p2 p3 p4 p5" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 5)", - "Plan": [ - "p0 p1 p2 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 5", - "Plan": [ - "p0 p1 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 5", - "Plan": [ - "p2 p3 p4" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 5)", - "Plan": [ - "p2 p3 p4" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 5)", - "Plan": [ - "p0 p1 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 5", - "Plan": [ - "p0 p1 p2 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 5", - "Plan": [ - "p3 p4 p5" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 5)", - "Plan": [ - "p3 p4 p5" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 5)", - "Plan": [ - "p0 p1 p2 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 5", - "Plan": [ - "p0 p1 p2 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 5", - "Plan": [ - "p2 p3 p4 p5" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 5)", - "Plan": [ - "p3 p4" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 5)", - "Plan": [ - "p0 p1 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 6", - "Plan": [ - "p0 p1 p2 p3 p4 p5" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 6", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a <= 6", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 6", - "Plan": [ - "p6" - ], - "Res": [ - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 6", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 6", - "Plan": [ - "p3 p4 p5" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 6)", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 6)", - "Plan": [ - "p0 p1 p2 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 6", - "Plan": [ - "p0 p1 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 6", - "Plan": [ - "p2 p3 p4 p5" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 6)", - "Plan": [ - "p2 p3 p4 p5" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 6)", - "Plan": [ - "p0 p1 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 6", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 6", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 6)", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 6)", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 6", - "Plan": [ - "p0 p1 p2 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 6", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 6)", - "Plan": [ - "p3 p4 p5" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 6)", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 7", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 7", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a <= 7", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 7", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 7", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 7", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 7)", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 7)", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 7", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 7", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 7)", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 7)", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 7", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 7", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 7)", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 7)", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 7", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 7", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 7)", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 7)", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - } - ] - } -] diff --git a/tests/integrationtest/r/executor/partition/issues.result b/tests/integrationtest/r/executor/partition/issues.result new file mode 100644 index 0000000000000..36acea78dcc46 --- /dev/null +++ b/tests/integrationtest/r/executor/partition/issues.result @@ -0,0 +1,461 @@ +drop table if exists t, t0, t1, t2; +set @@tidb_partition_prune_mode = 'dynamic'; +set @@session.tidb_enable_list_partition = ON; +CREATE TABLE t ( +col1 tinyint(4) primary key +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( COL1 DIV 80 ) +PARTITIONS 6; +insert into t values(-128), (107); +prepare stmt from 'select col1 from t where col1 in (?, ?, ?)'; +set @a=-128, @b=107, @c=-128; +execute stmt using @a,@b,@c; +col1 +-128 +107 +CREATE TABLE t0 (a int primary key) PARTITION BY HASH( a DIV 80 ) PARTITIONS 2; +insert into t0 values (1); +select a from t0 where a in (1); +a +1 +create table t1 (a int primary key) partition by range (a+5) ( +partition p0 values less than(10), partition p1 values less than(20)); +insert into t1 values (5); +select a from t1 where a in (5); +a +5 +create table t2 (a int primary key) partition by list (a+5) ( +partition p0 values in (5, 6, 7, 8), partition p1 values in (9, 10, 11, 12)); +insert into t2 values (5); +select a from t2 where a in (5); +a +5 +set @@tidb_partition_prune_mode = default; +set @@session.tidb_enable_list_partition = default; +drop table if exists UK_HP16726; +CREATE TABLE UK_HP16726 ( +COL1 bigint(16) DEFAULT NULL, +COL2 varchar(20) DEFAULT NULL, +COL4 datetime DEFAULT NULL, +COL3 bigint(20) DEFAULT NULL, +COL5 float DEFAULT NULL, +UNIQUE KEY UK_COL1 (COL1) 
/*!80000 INVISIBLE */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY HASH( COL1 ) +PARTITIONS 25; +select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807; +col1 col1 +explain format='brief' select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807; +id estRows task access object operator info +HashAgg 71666.67 root group by:executor__partition__issues.uk_hp16726.col1, executor__partition__issues.uk_hp16726.col1, funcs:firstrow(executor__partition__issues.uk_hp16726.col1)->executor__partition__issues.uk_hp16726.col1, funcs:firstrow(executor__partition__issues.uk_hp16726.col1)->executor__partition__issues.uk_hp16726.col1 +└─HashJoin 111979.17 root inner join, equal:[eq(executor__partition__issues.uk_hp16726.col1, executor__partition__issues.uk_hp16726.col1)] + ├─PartitionUnion(Build) 89583.33 root + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p0 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p1 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, 
-9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p2 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p3 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p4 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p5 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p6 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), 
not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p7 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p8 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p9 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p10 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p11 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p12 
keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p13 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p14 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p15 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p16 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p17 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] 
gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p18 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p19 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p20 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p21 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p22 keep order:false, stats:pseudo + │ ├─TableReader 3583.33 root data:Selection + │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 
9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p23 keep order:false, stats:pseudo + │ └─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p24 keep order:false, stats:pseudo + └─PartitionUnion(Probe) 89583.33 root + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p0 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p1 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p2 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 
cop[tikv] table:t1, partition:p3 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p4 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p5 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p6 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p7 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p8 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] 
gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p9 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p10 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p11 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p12 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p13 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), 
not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p14 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p15 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p16 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p17 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p18 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p19 keep order:false, 
stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p20 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p21 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p22 keep order:false, stats:pseudo + ├─TableReader 3583.33 root data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p23 keep order:false, stats:pseudo + └─TableReader 3583.33 root data:Selection + └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p24 keep order:false, stats:pseudo +set @@tidb_partition_prune_mode = 'dynamic'; +analyze table UK_HP16726; +select t1. col1, t2. 
col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807; +col1 col1 +explain format='brief' select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807; +id estRows task access object operator info +HashAgg 2866.67 root group by:executor__partition__issues.uk_hp16726.col1, executor__partition__issues.uk_hp16726.col1, funcs:firstrow(executor__partition__issues.uk_hp16726.col1)->executor__partition__issues.uk_hp16726.col1, funcs:firstrow(executor__partition__issues.uk_hp16726.col1)->executor__partition__issues.uk_hp16726.col1 +└─HashJoin 4479.17 root inner join, equal:[eq(executor__partition__issues.uk_hp16726.col1, executor__partition__issues.uk_hp16726.col1)] + ├─TableReader(Build) 3583.33 root partition:all data:Selection + │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 3583.33 root partition:all data:Selection + └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1)) + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +set @@tidb_partition_prune_mode = default; +drop table if exists IDT_HP23902, t; +CREATE TABLE IDT_HP23902 ( +COL1 smallint DEFAULT NULL, +COL2 varchar(20) DEFAULT NULL, +COL4 datetime DEFAULT NULL, +COL3 bigint DEFAULT NULL, +COL5 float DEFAULT NULL, +KEY UK_COL1 (COL1) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 
COLLATE=utf8mb4_bin +PARTITION BY HASH( COL1+30 ) +PARTITIONS 6; +insert ignore into IDT_HP23902 partition(p0, p1)(col1, col3) values(-10355, 1930590137900568573), (13810, -1332233145730692137); +show warnings; +Level Code Message +Warning 1748 Found a row not matching the given partition set +Warning 1748 Found a row not matching the given partition set +select * from IDT_HP23902; +COL1 COL2 COL4 COL3 COL5 +create table t ( +a int +) partition by range(a) ( +partition p0 values less than (10), +partition p1 values less than (20)); +insert ignore into t partition(p0)(a) values(12); +show warnings; +Level Code Message +Warning 1748 Found a row not matching the given partition set +select * from t; +a +drop table if exists tbl_936; +set @@tidb_partition_prune_mode = 'dynamic'; +CREATE TABLE tbl_936 ( +col_5410 smallint NOT NULL, +col_5411 double, +col_5412 boolean NOT NULL DEFAULT 1, +col_5413 set('Alice', 'Bob', 'Charlie', 'David') NOT NULL DEFAULT 'Charlie', +col_5414 varbinary(147) COLLATE 'binary' DEFAULT 'bvpKgYWLfyuTiOYSkj', +col_5415 timestamp NOT NULL DEFAULT '2021-07-06', +col_5416 decimal(6, 6) DEFAULT 0.49, +col_5417 text COLLATE utf8_bin, +col_5418 float DEFAULT 2048.0762299371554, +col_5419 int UNSIGNED NOT NULL DEFAULT 3152326370, +PRIMARY KEY (col_5419) ) +PARTITION BY HASH (col_5419) PARTITIONS 3; +SELECT last_value(col_5414) OVER w FROM tbl_936 +WINDOW w AS (ORDER BY col_5410, col_5411, col_5412, col_5413, col_5414, col_5415, col_5416, col_5417, col_5418, col_5419) +ORDER BY col_5410, col_5411, col_5412, col_5413, col_5414, col_5415, col_5416, col_5417, col_5418, col_5419, nth_value(col_5412, 5) OVER w; +last_value(col_5414) OVER w +set @@tidb_partition_prune_mode = default; +drop table if exists t; +CREATE TABLE t (a int, b date, c int, PRIMARY KEY (a,b)) +PARTITION BY RANGE ( TO_DAYS(b) ) ( +PARTITION p0 VALUES LESS THAN (737821), +PARTITION p1 VALUES LESS THAN (738289) +); +INSERT INTO t (a, b, c) VALUES(0, '2021-05-05', 0); +select c from t use 
index(primary) where a=0 limit 1; +c +0 +CREATE TABLE test_partition ( +a varchar(100) NOT NULL, +b date NOT NULL, +c varchar(100) NOT NULL, +d datetime DEFAULT NULL, +e datetime DEFAULT NULL, +f bigint(20) DEFAULT NULL, +g bigint(20) DEFAULT NULL, +h bigint(20) DEFAULT NULL, +i bigint(20) DEFAULT NULL, +j bigint(20) DEFAULT NULL, +k bigint(20) DEFAULT NULL, +l bigint(20) DEFAULT NULL, +PRIMARY KEY (a,b,c) /*T![clustered_index] NONCLUSTERED */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY RANGE ( TO_DAYS(b) ) ( +PARTITION pmin VALUES LESS THAN (737821), +PARTITION p20200601 VALUES LESS THAN (738289)); +INSERT INTO test_partition (a, b, c, d, e, f, g, h, i, j, k, l) VALUES('aaa', '2021-05-05', '428ff6a1-bb37-42ac-9883-33d7a29961e6', '2021-05-06 08:13:38', '2021-05-06 13:28:08', 0, 8, 3, 0, 9, 1, 0); +select c,j,l from test_partition where c='428ff6a1-bb37-42ac-9883-33d7a29961e6' and a='aaa' limit 0, 200; +c j l +428ff6a1-bb37-42ac-9883-33d7a29961e6 9 0 +drop table if exists tbl_500, tbl_600; +set @@tidb_partition_prune_mode = 'dynamic'; +CREATE TABLE tbl_500 ( +col_20 tinyint(4) NOT NULL, +col_21 varchar(399) CHARACTER SET utf8 COLLATE utf8_unicode_ci DEFAULT NULL, +col_22 json DEFAULT NULL, +col_23 blob DEFAULT NULL, +col_24 mediumint(9) NOT NULL, +col_25 float NOT NULL DEFAULT '7306.384497585912', +col_26 binary(196) NOT NULL, +col_27 timestamp DEFAULT '1976-12-08 00:00:00', +col_28 bigint(20) NOT NULL, +col_29 tinyint(1) NOT NULL DEFAULT '1', +PRIMARY KEY (col_29,col_20) /*T![clustered_index] NONCLUSTERED */, +KEY idx_7 (col_28,col_20,col_26,col_27,col_21,col_24), +KEY idx_8 (col_25,col_29,col_24) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +CREATE TABLE tbl_600 ( +col_60 int(11) NOT NULL DEFAULT '-776833487', +col_61 tinyint(1) NOT NULL DEFAULT '1', +col_62 tinyint(4) NOT NULL DEFAULT '-125', +PRIMARY KEY (col_62,col_60,col_61) /*T![clustered_index] NONCLUSTERED */, +KEY idx_19 (col_60) +) ENGINE=InnoDB DEFAULT 
CHARSET=utf8 COLLATE=utf8_unicode_ci +PARTITION BY HASH( col_60 ) +PARTITIONS 1; +insert into tbl_500 select -34, 'lrfGPPPUuZjtT', '{"obj1": {"sub_obj0": 100}}', 0x6C47636D, 1325624, 7306.3843, 'abc', '1976-12-08', 4757891479624162031, 0; +select tbl_5.* from tbl_500 tbl_5 where col_24 in ( select col_62 from tbl_600 where tbl_5.col_26 < 'hSvHLdQeGBNIyOFXStV' ); +col_20 col_21 col_22 col_23 col_24 col_25 col_26 col_27 col_28 col_29 +set @@tidb_partition_prune_mode = default; +drop table if exists t1, t2; +set @@tidb_partition_prune_mode='static-only'; +create table t1 (c_datetime datetime, primary key (c_datetime)) +partition by range (to_days(c_datetime)) ( partition p0 values less than (to_days('2020-02-01')), +partition p1 values less than (to_days('2020-04-01')), +partition p2 values less than (to_days('2020-06-01')), +partition p3 values less than maxvalue); +create table t2 (c_datetime datetime, unique key(c_datetime)); +insert into t1 values ('2020-06-26 03:24:00'), ('2020-02-21 07:15:33'), ('2020-04-27 13:50:58'); +insert into t2 values ('2020-01-10 09:36:00'), ('2020-02-04 06:00:00'), ('2020-06-12 03:45:18'); +begin; +select * from t1 join t2 on t1.c_datetime >= t2.c_datetime for update; +c_datetime c_datetime +2020-02-21 07:15:33 2020-01-10 09:36:00 +2020-02-21 07:15:33 2020-02-04 06:00:00 +2020-04-27 13:50:58 2020-01-10 09:36:00 +2020-04-27 13:50:58 2020-02-04 06:00:00 +2020-06-26 03:24:00 2020-01-10 09:36:00 +2020-06-26 03:24:00 2020-02-04 06:00:00 +2020-06-26 03:24:00 2020-06-12 03:45:18 +rollback; +set @@tidb_partition_prune_mode = default; +drop table if exists p, t; +set @@tidb_enable_list_partition = OFF; +create table t (a int, b int, unique index idx(a)) partition by list columns(b) (partition p0 values in (1), partition p1 values in (2)); +set @@tidb_enable_list_partition = default; +drop table if exists issue25528; +set @@tidb_partition_prune_mode = 'static'; +create table issue25528 (id int primary key, balance DECIMAL(10, 2), balance2 
DECIMAL(10, 2) GENERATED ALWAYS AS (-balance) VIRTUAL, created_at TIMESTAMP) PARTITION BY HASH(id) PARTITIONS 8; +insert into issue25528 (id, balance, created_at) values(1, 100, '2021-06-17 22:35:20'); +begin pessimistic; +select * from issue25528 where id = 1 for update; +id balance balance2 created_at +1 100.00 -100.00 2021-06-17 22:35:20 +drop table if exists issue25528; +CREATE TABLE `issue25528` ( `c1` int(11) NOT NULL, `c2` int(11) DEFAULT NULL, `c3` int(11) DEFAULT NULL, `c4` int(11) DEFAULT NULL, PRIMARY KEY (`c1`) /*T![clustered_index] CLUSTERED */, KEY `k2` (`c2`), KEY `k3` (`c3`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( `c1` ) PARTITIONS 10; +INSERT INTO issue25528 (`c1`, `c2`, `c3`, `c4`) VALUES (1, 1, 1, 1) , (3, 3, 3, 3) , (2, 2, 2, 2) , (4, 4, 4, 4); +select * from issue25528 where c1 in (3, 4) order by c2 for update; +c1 c2 c3 c4 +3 3 3 3 +4 4 4 4 +rollback; +set @@tidb_enable_list_partition = default; +set @@tidb_enable_index_merge=1,@@tidb_partition_prune_mode='dynamic'; +DROP TABLE IF EXISTS `tbl_18`; +CREATE TABLE `tbl_18` (`col_119` binary(16) NOT NULL DEFAULT 'skPoKiwYUi',`col_120` int(10) unsigned NOT NULL,`col_121` timestamp NOT NULL,`col_122` double NOT NULL DEFAULT '3937.1887880628115',`col_123` bigint(20) NOT NULL DEFAULT '3550098074891542725',PRIMARY KEY (`col_123`,`col_121`,`col_122`,`col_120`) CLUSTERED,UNIQUE KEY `idx_103` (`col_123`,`col_119`,`col_120`),UNIQUE KEY `idx_104` (`col_122`,`col_120`),UNIQUE KEY `idx_105` (`col_119`,`col_120`),KEY `idx_106` (`col_121`,`col_120`,`col_122`,`col_119`),KEY `idx_107` (`col_121`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci PARTITION BY HASH( `col_120` ) PARTITIONS 3; +INSERT INTO tbl_18 (`col_119`, `col_120`, `col_121`, `col_122`, `col_123`) VALUES (X'736b506f4b6977595569000000000000', 672436701, '1974-02-24 00:00:00', 3937.1887880628115e0, -7373106839136381229), (X'736b506f4b6977595569000000000000', 2637316689, '1993-10-29 00:00:00', 
3937.1887880628115e0, -4522626077860026631), (X'736b506f4b6977595569000000000000', 831809724, '1995-11-20 00:00:00', 3937.1887880628115e0, -4426441253940231780), (X'736b506f4b6977595569000000000000', 1588592628, '2001-03-28 00:00:00', 3937.1887880628115e0, 1329207475772244999), (X'736b506f4b6977595569000000000000', 3908038471, '2031-06-06 00:00:00', 3937.1887880628115e0, -6562815696723135786), (X'736b506f4b6977595569000000000000', 1674237178, '2001-10-24 00:00:00', 3937.1887880628115e0, -6459065549188938772), (X'736b506f4b6977595569000000000000', 3507075493, '2010-03-25 00:00:00', 3937.1887880628115e0, -4329597025765326929), (X'736b506f4b6977595569000000000000', 1276461709, '2019-07-20 00:00:00', 3937.1887880628115e0, 3550098074891542725); +select col_120,col_122,col_123 from tbl_18 where tbl_18.col_122 = 4763.320888074281 and not( tbl_18.col_121 in ( '2032-11-01' , '1975-05-21' , '1994-05-16' , '1984-01-15' ) ) or not( tbl_18.col_121 >= '2008-10-24' ) order by tbl_18.col_119,tbl_18.col_120,tbl_18.col_121,tbl_18.col_122,tbl_18.col_123 limit 919 for update; +col_120 col_122 col_123 +1588592628 3937.1887880628115 1329207475772244999 +1674237178 3937.1887880628115 -6459065549188938772 +2637316689 3937.1887880628115 -4522626077860026631 +672436701 3937.1887880628115 -7373106839136381229 +831809724 3937.1887880628115 -4426441253940231780 +select /*+ use_index_merge( tbl_18 ) */ col_120,col_122,col_123 from tbl_18 where tbl_18.col_122 = 4763.320888074281 and not( tbl_18.col_121 in ( '2032-11-01' , '1975-05-21' , '1994-05-16' , '1984-01-15' ) ) or not( tbl_18.col_121 >= '2008-10-24' ) order by tbl_18.col_119,tbl_18.col_120,tbl_18.col_121,tbl_18.col_122,tbl_18.col_123 limit 919 for update; +col_120 col_122 col_123 +1588592628 3937.1887880628115 1329207475772244999 +1674237178 3937.1887880628115 -6459065549188938772 +2637316689 3937.1887880628115 -4522626077860026631 +672436701 3937.1887880628115 -7373106839136381229 +831809724 3937.1887880628115 -4426441253940231780 +set 
@@tidb_enable_index_merge=default,@@tidb_partition_prune_mode=default; +drop table if exists t; +CREATE TABLE `t` (`a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL) PARTITION BY RANGE (`a`) (PARTITION `p0` VALUES LESS THAN (2021), PARTITION `p1` VALUES LESS THAN (3000)); +set @@tidb_partition_prune_mode = 'static'; +insert into t select * from t where a=3000; +set @@tidb_partition_prune_mode = 'dynamic'; +insert into t select * from t where a=3000; +set @@tidb_partition_prune_mode = default; +set @@tidb_opt_advanced_join_hint=0; +drop table if exists c, t; +CREATE TABLE `c` (`serial_id` varchar(24),`occur_trade_date` date,`txt_account_id` varchar(24),`capital_sub_class` varchar(10),`occur_amount` decimal(16,2),`broker` varchar(10),PRIMARY KEY (`txt_account_id`,`occur_trade_date`,`serial_id`) /*T![clustered_index] CLUSTERED */,KEY `idx_serial_id` (`serial_id`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci PARTITION BY RANGE COLUMNS(`serial_id`) (PARTITION `p202209` VALUES LESS THAN ('20221001'),PARTITION `p202210` VALUES LESS THAN ('20221101'),PARTITION `p202211` VALUES LESS THAN ('20221201')); +CREATE TABLE `t` ( `txn_account_id` varchar(24), `account_id` varchar(32), `broker` varchar(10), PRIMARY KEY (`txn_account_id`) /*T![clustered_index] CLUSTERED */ ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; +INSERT INTO `c` (serial_id, txt_account_id, capital_sub_class, occur_trade_date, occur_amount, broker) VALUES ('2022111700196920','04482786','CUST','2022-11-17',-2.01,'0009'); +INSERT INTO `t` VALUES ('04482786','1142927','0009'); +set tidb_partition_prune_mode='dynamic'; +analyze table c; +analyze table t; +explain select +/*+ inl_join(c) */ +c.occur_amount +from +c +join t on c.txt_account_id = t.txn_account_id +and t.broker = '0009' +and c.occur_trade_date = '2022-11-17'; +id estRows task access object operator info +IndexJoin_22 1.00 root inner join, inner:TableReader_21, outer key:executor__partition__issues.t.txn_account_id, inner 
key:executor__partition__issues.c.txt_account_id, equal cond:eq(executor__partition__issues.t.txn_account_id, executor__partition__issues.c.txt_account_id) +├─TableReader_27(Build) 1.00 root data:Selection_26 +│ └─Selection_26 1.00 cop[tikv] eq(executor__partition__issues.t.broker, "0009") +│ └─TableFullScan_25 1.00 cop[tikv] table:t keep order:false +└─TableReader_21(Probe) 1.00 root partition:all data:Selection_20 + └─Selection_20 1.00 cop[tikv] eq(executor__partition__issues.c.occur_trade_date, 2022-11-17 00:00:00.000000) + └─TableRangeScan_19 1.00 cop[tikv] table:c range: decided by [eq(executor__partition__issues.c.txt_account_id, executor__partition__issues.t.txn_account_id) eq(executor__partition__issues.c.occur_trade_date, 2022-11-17 00:00:00.000000)], keep order:false +select +/*+ inl_join(c) */ +c.occur_amount +from +c +join t on c.txt_account_id = t.txn_account_id +and t.broker = '0009' +and c.occur_trade_date = '2022-11-17'; +occur_amount +-2.01 +alter table t add column serial_id varchar(24) default '2022111700196920'; +select +/*+ inl_join(c) */ +c.occur_amount +from +c +join t on c.txt_account_id = t.txn_account_id +and t.broker = '0009' +and c.occur_trade_date = '2022-11-17' and c.serial_id = t.serial_id; +occur_amount +-2.01 +explain select +/*+ inl_join(c) */ +c.occur_amount +from +c +join t on c.txt_account_id = t.txn_account_id +and t.broker = '0009' +and c.occur_trade_date = '2022-11-17' and c.serial_id = t.serial_id; +id estRows task access object operator info +IndexJoin_20 0.80 root inner join, inner:TableReader_19, outer key:executor__partition__issues.t.txn_account_id, executor__partition__issues.t.serial_id, inner key:executor__partition__issues.c.txt_account_id, executor__partition__issues.c.serial_id, equal cond:eq(executor__partition__issues.t.serial_id, executor__partition__issues.c.serial_id), eq(executor__partition__issues.t.txn_account_id, executor__partition__issues.c.txt_account_id) +├─TableReader_25(Build) 0.80 root 
data:Selection_24 +│ └─Selection_24 0.80 cop[tikv] eq(executor__partition__issues.t.broker, "0009"), not(isnull(executor__partition__issues.t.serial_id)) +│ └─TableFullScan_23 1.00 cop[tikv] table:t keep order:false +└─TableReader_19(Probe) 0.80 root partition:all data:Selection_18 + └─Selection_18 0.80 cop[tikv] eq(executor__partition__issues.c.occur_trade_date, 2022-11-17 00:00:00.000000) + └─TableRangeScan_17 0.80 cop[tikv] table:c range: decided by [eq(executor__partition__issues.c.txt_account_id, executor__partition__issues.t.txn_account_id) eq(executor__partition__issues.c.serial_id, executor__partition__issues.t.serial_id) eq(executor__partition__issues.c.occur_trade_date, 2022-11-17 00:00:00.000000)], keep order:false +set @@tidb_opt_advanced_join_hint=default; +set tidb_partition_prune_mode=default; diff --git a/tests/integrationtest/r/executor/partition/partition_boundaries.result b/tests/integrationtest/r/executor/partition/partition_boundaries.result new file mode 100644 index 0000000000000..6c4c9152c8eda --- /dev/null +++ b/tests/integrationtest/r/executor/partition/partition_boundaries.result @@ -0,0 +1,5256 @@ +SET @@tidb_partition_prune_mode = 'dynamic'; +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( +PARTITION p0 VALUES LESS THAN (1000000), +PARTITION p1 VALUES LESS THAN (2000000), +PARTITION p2 VALUES LESS THAN (3000000)); +INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...'); +INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...'); +INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...'); +INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM 
t WHERE a = -2147483648; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, -2147483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a = -2147483648; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a IN (-2147483648); +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, -2147483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (-2147483648); +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a = 0; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a = 0; +a b +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a IN (0); +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (0); +a b +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a = 999998; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a = 999998; +a b +999998 999998 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a IN (999998); +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (999998); +a b +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a = 999999; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a = 999999; +a b +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a IN (999999); +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (999999); +a b +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a = 1000000; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a = 1000000; +a b +1000000 1000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a IN (1000000); +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (1000000); +a b +1000000 1000000 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a = 1000001; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a = 1000001; +a b +1000001 1000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a IN (1000001); +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (1000001); +a b +1000001 1000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a = 1000002; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a = 1000002; +a b +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a IN (1000002); +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (1000002); +a b +1000002 1000002 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a = 3000000; +id estRows task access object operator info +TableDual 0.00 root rows:0 +SELECT * FROM t WHERE a = 3000000; +a b +explain format='brief' SELECT * FROM t WHERE a IN (3000000); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (3000000); +a b +explain format='brief' SELECT * FROM t WHERE a = 3000001; +id estRows task access object operator info +TableDual 0.00 root rows:0 +SELECT * FROM t WHERE a = 3000001; +a b +explain format='brief' SELECT * FROM t WHERE a IN (3000001); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (3000001); +a b +explain format='brief' SELECT * FROM t WHERE a IN (-2147483648, -2147483647); +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, -2147483648, -2147483647) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (-2147483648, -2147483647); +a b +-2147483648 MIN_INT filler... 
+explain format='brief' SELECT * FROM t WHERE a IN (-2147483647, -2147483646); +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, -2147483647, -2147483646) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (-2147483647, -2147483646); +a b +explain format='brief' SELECT * FROM t WHERE a IN (999997, 999998, 999999); +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 999997, 999998, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (999997, 999998, 999999); +a b +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a IN (999998, 999999, 1000000); +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1 data:Selection +└─Selection 3.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 999998, 999999, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (999998, 999999, 1000000); +a b +1000000 1000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a IN (999999, 1000000, 1000001); +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1 data:Selection +└─Selection 3.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 999999, 1000000, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (999999, 1000000, 1000001); +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a IN (1000000, 1000001, 1000002); +id estRows task access object operator info +TableReader 3.00 root partition:p1 data:Selection +└─Selection 3.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 1000000, 1000001, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (1000000, 1000001, 1000002); +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a IN (1999997, 1999998, 1999999); +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 1999997, 1999998, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (1999997, 1999998, 1999999); +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a IN (1999998, 1999999, 2000000); +id estRows task access object operator info +TableReader 3.00 root partition:p1,p2 data:Selection +└─Selection 3.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 1999998, 1999999, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (1999998, 1999999, 2000000); +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a IN (1999999, 2000000, 2000001); +id estRows task access object operator info +TableReader 3.00 root partition:p1,p2 data:Selection +└─Selection 3.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 1999999, 2000000, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (1999999, 2000000, 2000001); +a b +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a IN (2000000, 2000001, 2000002); +id estRows task access object operator info +TableReader 3.00 root partition:p2 data:Selection +└─Selection 3.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 2000000, 2000001, 2000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (2000000, 2000001, 2000002); +a b +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a IN (2999997, 2999998, 2999999); +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 2999997, 2999998, 2999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (2999997, 2999998, 2999999); +a b +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a IN (2999998, 2999999, 3000000); +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 2999998, 2999999, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (2999998, 2999999, 3000000); +a b +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a IN (2999999, 3000000, 3000001); +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 2999999, 3000000, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (2999999, 3000000, 3000001); +a b +2999999 2999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a IN (3000000, 3000001, 3000002); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 3000000, 3000001, 3000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a IN (3000000, 3000001, 3000002); +a b +SET @@tidb_partition_prune_mode = default; +SET @@tidb_partition_prune_mode = 'dynamic'; +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( +PARTITION p0 VALUES LESS THAN (1), +PARTITION p1 VALUES LESS THAN (2), +PARTITION p2 VALUES LESS THAN (3), +PARTITION p3 VALUES LESS THAN (4), +PARTITION p4 VALUES LESS THAN (5), +PARTITION p5 VALUES LESS THAN (6), +PARTITION p6 VALUES LESS THAN (7)); +INSERT INTO t VALUES (0, '0 Filler...'); +INSERT INTO t VALUES (1, '1 Filler...'); +INSERT INTO t VALUES (2, '2 Filler...'); +INSERT INTO t VALUES (3, '3 Filler...'); +INSERT INTO t VALUES (4, '4 Filler...'); +INSERT INTO t VALUES (5, '5 Filler...'); +INSERT INTO t VALUES (6, '6 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a != -1; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != -1; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... 
+4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] or(0, eq(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1; +a b +explain format='brief' SELECT * FROM t WHERE a != 0; +id estRows task access object operator info +TableReader 6.00 root partition:all data:Selection +└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != 0; +a b +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0; +id estRows task access object operator info +TableReader 6.00 root partition:all data:Selection +└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0; +a b +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0); +id estRows task access object operator info +TableReader 6.00 root partition:all data:Selection +└─Selection 6.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0); +a b +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] or(0, or(eq(executor__partition__partition_boundaries.t.a, -1), eq(executor__partition__partition_boundaries.t.a, 0))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0; +a b +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a != 1; +id estRows task access object operator info +TableReader 6.00 root partition:all data:Selection +└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != 1; +a b +0 0 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1; +id estRows task access object operator info +TableReader 5.00 root partition:all data:Selection +└─Selection 5.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1); +id estRows task access object operator info +TableReader 5.00 root partition:all data:Selection +└─Selection 5.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(or(0, eq(executor__partition__partition_boundaries.t.a, -1)), or(eq(executor__partition__partition_boundaries.t.a, 0), eq(executor__partition__partition_boundaries.t.a, 1))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1; +a b +0 0 Filler... +1 1 Filler... +explain format='brief' SELECT * FROM t WHERE a != 2; +id estRows task access object operator info +TableReader 6.00 root partition:all data:Selection +└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != 2; +a b +0 0 Filler... +1 1 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2; +id estRows task access object operator info +TableReader 4.00 root partition:all data:Selection +└─Selection 4.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1), ne(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2; +a b +3 3 Filler... 
+4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2); +id estRows task access object operator info +TableReader 4.00 root partition:all data:Selection +└─Selection 4.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2); +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(or(0, eq(executor__partition__partition_boundaries.t.a, -1)), or(eq(executor__partition__partition_boundaries.t.a, 0), or(eq(executor__partition__partition_boundaries.t.a, 1), eq(executor__partition__partition_boundaries.t.a, 2)))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a != 3; +id estRows task access object operator info +TableReader 6.00 root partition:all data:Selection +└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != 3; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3; +id estRows task access object operator info +TableReader 3.00 root partition:all data:Selection +└─Selection 3.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1), ne(executor__partition__partition_boundaries.t.a, 2), ne(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3; +a b +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3); +id estRows task access object operator info +TableReader 3.00 root partition:all data:Selection +└─Selection 3.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1, 2, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3); +a b +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p3 data:Selection +└─Selection 4.00 cop[tikv] or(or(0, or(eq(executor__partition__partition_boundaries.t.a, -1), eq(executor__partition__partition_boundaries.t.a, 0))), or(eq(executor__partition__partition_boundaries.t.a, 1), or(eq(executor__partition__partition_boundaries.t.a, 2), eq(executor__partition__partition_boundaries.t.a, 3)))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... 
+explain format='brief' SELECT * FROM t WHERE a != 4; +id estRows task access object operator info +TableReader 6.00 root partition:all data:Selection +└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4; +id estRows task access object operator info +TableReader 2.00 root partition:all data:Selection +└─Selection 2.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1), ne(executor__partition__partition_boundaries.t.a, 2), ne(executor__partition__partition_boundaries.t.a, 3), ne(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4; +a b +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4); +id estRows task access object operator info +TableReader 2.00 root partition:all data:Selection +└─Selection 2.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1, 2, 3, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4); +a b +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p3,p4 data:Selection +└─Selection 5.00 cop[tikv] or(or(0, or(eq(executor__partition__partition_boundaries.t.a, -1), eq(executor__partition__partition_boundaries.t.a, 0))), or(or(eq(executor__partition__partition_boundaries.t.a, 1), eq(executor__partition__partition_boundaries.t.a, 2)), or(eq(executor__partition__partition_boundaries.t.a, 3), eq(executor__partition__partition_boundaries.t.a, 4)))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a != 5; +id estRows task access object operator info +TableReader 6.00 root partition:all data:Selection +└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != 5; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5; +id estRows task access object operator info +TableReader 1.00 root partition:all data:Selection +└─Selection 1.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1), ne(executor__partition__partition_boundaries.t.a, 2), ne(executor__partition__partition_boundaries.t.a, 3), ne(executor__partition__partition_boundaries.t.a, 4), ne(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5; +a b +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5); +id estRows task access object operator info +TableReader 1.00 root partition:all data:Selection +└─Selection 1.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1, 2, 3, 4, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5); +a b +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p3,p4,p5 data:Selection +└─Selection 6.00 cop[tikv] or(or(or(0, eq(executor__partition__partition_boundaries.t.a, -1)), or(eq(executor__partition__partition_boundaries.t.a, 0), eq(executor__partition__partition_boundaries.t.a, 1))), or(or(eq(executor__partition__partition_boundaries.t.a, 2), eq(executor__partition__partition_boundaries.t.a, 3)), or(eq(executor__partition__partition_boundaries.t.a, 4), eq(executor__partition__partition_boundaries.t.a, 5)))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE a != 6; +id estRows task access object operator info +TableReader 6.00 root partition:all data:Selection +└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != 6; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... 
+explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6; +id estRows task access object operator info +TableReader 0.00 root partition:all data:Selection +└─Selection 0.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1), ne(executor__partition__partition_boundaries.t.a, 2), ne(executor__partition__partition_boundaries.t.a, 3), ne(executor__partition__partition_boundaries.t.a, 4), ne(executor__partition__partition_boundaries.t.a, 5), ne(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6; +a b +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6); +id estRows task access object operator info +TableReader 0.00 root partition:all data:Selection +└─Selection 0.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1, 2, 3, 4, 5, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6); +a b +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(or(or(0, eq(executor__partition__partition_boundaries.t.a, -1)), or(eq(executor__partition__partition_boundaries.t.a, 0), eq(executor__partition__partition_boundaries.t.a, 1))), or(or(eq(executor__partition__partition_boundaries.t.a, 2), eq(executor__partition__partition_boundaries.t.a, 3)), or(eq(executor__partition__partition_boundaries.t.a, 4), or(eq(executor__partition__partition_boundaries.t.a, 5), 
eq(executor__partition__partition_boundaries.t.a, 6))))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a != 7; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != 7; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6 AND a != 7; +id estRows task access object operator info +TableReader 0.00 root partition:all data:Selection +└─Selection 0.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1), ne(executor__partition__partition_boundaries.t.a, 2), ne(executor__partition__partition_boundaries.t.a, 3), ne(executor__partition__partition_boundaries.t.a, 4), ne(executor__partition__partition_boundaries.t.a, 5), ne(executor__partition__partition_boundaries.t.a, 6), ne(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6 AND a != 7; +a b +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6, 7); +id estRows task access object operator info +TableReader 0.00 root partition:all data:Selection +└─Selection 0.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1, 
2, 3, 4, 5, 6, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6, 7); +a b +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(or(or(0, eq(executor__partition__partition_boundaries.t.a, -1)), or(eq(executor__partition__partition_boundaries.t.a, 0), or(eq(executor__partition__partition_boundaries.t.a, 1), eq(executor__partition__partition_boundaries.t.a, 2)))), or(or(eq(executor__partition__partition_boundaries.t.a, 3), eq(executor__partition__partition_boundaries.t.a, 4)), or(eq(executor__partition__partition_boundaries.t.a, 5), or(eq(executor__partition__partition_boundaries.t.a, 6), eq(executor__partition__partition_boundaries.t.a, 7))))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+SET @@tidb_partition_prune_mode = default; +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( +PARTITION p0 VALUES LESS THAN (1000000), +PARTITION p1 VALUES LESS THAN (2000000), +PARTITION p2 VALUES LESS THAN (3000000)); +INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...'); +INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...'); +INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...'); +INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2147483649) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2147483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648; +a b +-2147483648 MIN_INT filler... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2147483647) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2147483646) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2147483638) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2146483650) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650; +a b +-2147483648 MIN_INT filler... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2146483649) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2146483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2146483647) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2146483646) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646; +a b +-2147483648 MIN_INT filler... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND -1; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND -1; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 0; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 0; +a b +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 1; +a b +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 2; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 2; +a b +0 0 Filler... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 10; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 10) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 10; +a b +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 999998; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 999998; +a b +0 0 Filler... +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 999999; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 999999; +a b +0 0 Filler... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1000000; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 1000000; +a b +0 0 Filler... +1000000 1000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1000001; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 1000001; +a b +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1000002; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 1000002; +a b +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 999997; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 999997; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 999998; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 999998; +a b +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 999999; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 999999; +a b +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000; +a b +1000000 1000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1000008) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1999996) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999; +id estRows task access object operator info +TableReader 7.00 root partition:p0,p1 data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000; +id estRows task access object operator info +TableReader 8.00 root partition:all data:Selection +└─Selection 8.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 999998; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 999998; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 999999; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 999999; +a b +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000; +a b +1000000 1000000 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1000009) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001; +id estRows task access object operator info +TableReader 8.00 root partition:all data:Selection +└─Selection 8.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000; +a b +1000000 1000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002; +id estRows task access object operator info +TableReader 3.00 root partition:p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010; +id estRows task access object operator info +TableReader 3.00 root partition:p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000010) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998; +id estRows task access object operator info +TableReader 4.00 root partition:p1 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999; +id estRows task access object operator info +TableReader 5.00 root partition:p1 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002; +id estRows task access object operator info +TableReader 8.00 root partition:p1,p2 data:Selection +└─Selection 8.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 2000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000; +id estRows task access object operator info +TableReader 0.00 root partition:p1 data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001; +a b +1000001 1000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 1000003) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 1000011) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999; +id estRows task access object operator info +TableReader 4.00 root partition:p1 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000; +id estRows task access object operator info +TableReader 5.00 root partition:p1,p2 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 2000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 2000003) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001; +id estRows task access object operator info +TableReader 0.00 root partition:p1 data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002; +a b +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 1000003) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003; +a b +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 1000004) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004; +a b +1000002 1000002 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 1000012) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012; +a b +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000; +id estRows task access object operator info +TableReader 4.00 root partition:p1,p2 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001; +id estRows task access object operator info +TableReader 5.00 root partition:p1,p2 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 2000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 2000003) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 2000004) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 2999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] 
ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000010) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 4000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 4000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001; +a b 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 4000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 3000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] 
ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 3000003) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 3000011) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 3999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 4000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 4000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001; +a b 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 4000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 4000003) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003; +a b +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( +PARTITION p0 VALUES LESS THAN (1), +PARTITION p1 VALUES LESS THAN (2), +PARTITION p2 VALUES LESS THAN (3), +PARTITION p3 VALUES LESS THAN (4), +PARTITION p4 VALUES LESS THAN (5), +PARTITION p5 VALUES LESS THAN (6), +PARTITION p6 VALUES LESS THAN (7)); +INSERT INTO t VALUES (0, '0 Filler...'); +INSERT INTO t VALUES (1, '1 Filler...'); +INSERT INTO t VALUES (2, '2 Filler...'); +INSERT INTO t VALUES (3, '3 Filler...'); +INSERT INTO t VALUES (4, '4 Filler...'); +INSERT INTO t VALUES (5, '5 Filler...'); +INSERT INTO t VALUES (6, '6 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND -1; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND -1; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 
-1 AND 4; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p3,p4 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -1), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -1 AND 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 0; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 0; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 4; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p3,p4 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 1; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 1; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1 AND 4; +id estRows task access object operator info +TableReader 4.00 root partition:p1,p2,p3,p4 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1 AND 4; +a b +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 2; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 2; +a b +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 4; +id estRows task access object operator info +TableReader 3.00 root partition:p2,p3,p4 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 4; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 3; +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 3; +a b +2 2 Filler... +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3 AND 4; +id estRows task access object operator info +TableReader 2.00 root partition:p3,p4 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3 AND 4; +a b +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 4; +id estRows task access object operator info +TableReader 3.00 root partition:p2,p3,p4 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 4; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 4 AND 4; +id estRows task access object operator info +TableReader 1.00 root partition:p4 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 4), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 4 AND 4; +a b +4 4 Filler... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 5; +id estRows task access object operator info +TableReader 4.00 root partition:p2,p3,p4,p5 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 5; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 5 AND 4; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 5), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 5 AND 4; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 6; +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 6; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 6 AND 4; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 6), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 6 AND 4; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 7; +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 7; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 7 AND 4; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 7), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 7 AND 4; +a b +set @@tidb_partition_prune_mode = 'dynamic'; +drop table if exists t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( +PARTITION p0 VALUES LESS THAN (1000000), +PARTITION p1 VALUES LESS THAN (2000000), +PARTITION p2 VALUES LESS THAN (3000000)); +INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...'); +INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...'); +INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, 
'2999999 Filler ...'); +INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a < -2147483648; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, -2147483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < -2147483648; +a b +explain format='brief' SELECT * FROM t WHERE a > -2147483648; +id estRows task access object operator info +TableReader 13.00 root partition:all data:Selection +└─Selection 13.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, -2147483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > -2147483648; +a b +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= -2147483648; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, -2147483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= -2147483648; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a >= -2147483648; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= -2147483648; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... 
+1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 0; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 0; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a > 0; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 0; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 0; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 0; +a b +-2147483648 MIN_INT filler... +0 0 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 0; +id estRows task access object operator info +TableReader 13.00 root partition:all data:Selection +└─Selection 13.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 0; +a b +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 999998; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a > 999998; +id estRows task access object operator info +TableReader 11.00 root partition:all data:Selection +└─Selection 11.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a <= 999998; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 999998; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 999999; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 999999; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999999; +id estRows task access object operator info +TableReader 10.00 root partition:p1,p2 data:Selection +└─Selection 10.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... 
+2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 999999; +id estRows task access object operator info +TableReader 4.00 root partition:p0 data:Selection +└─Selection 4.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 999999; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 999999; +id estRows task access object operator info +TableReader 11.00 root partition:all data:Selection +└─Selection 11.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 1000000; +id estRows task access object operator info +TableReader 4.00 root partition:p0 data:Selection +└─Selection 4.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 1000000; +id estRows task access object operator info +TableReader 9.00 root partition:p1,p2 data:Selection +└─Selection 9.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000000; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 1000000; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1000000; +id estRows task access object operator info +TableReader 10.00 root partition:p1,p2 data:Selection +└─Selection 10.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1000000; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a < 1000001; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1000001; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1000001; +id estRows task access object operator info +TableReader 8.00 root partition:p1,p2 data:Selection +└─Selection 8.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000001; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 1000001; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1 data:Selection +└─Selection 6.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1000001; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1000001; +id estRows task access object operator info +TableReader 9.00 root partition:p1,p2 data:Selection +└─Selection 9.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1000001; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... 
+2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 1000002; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1 data:Selection +└─Selection 6.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1000002; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1000002; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000002; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 1000002; +id estRows task access object operator info +TableReader 7.00 root partition:p0,p1 data:Selection +└─Selection 7.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1000002; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a >= 1000002; +id estRows task access object operator info +TableReader 8.00 root partition:p1,p2 data:Selection +└─Selection 8.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1000002; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 3000000; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 3000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 3000000; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 3000000; +a b +explain format='brief' SELECT * FROM t WHERE a <= 3000000; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 3000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 3000000; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 3000000; +a b +explain format='brief' SELECT * FROM t WHERE a < 3000001; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 3000001; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... 
+2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 3000001; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 3000001; +a b +explain format='brief' SELECT * FROM t WHERE a <= 3000001; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 3000001; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 3000001; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 3000001; +a b +explain format='brief' SELECT * FROM t WHERE a < 999997; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 999997; +a b +-2147483648 MIN_INT filler... +0 0 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 999997; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999997; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 999997; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 999997; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 999997; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999997; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a >= 999997 AND a <= 999999; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999997), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999997 AND a <= 999999; +a b +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999997 AND a <= 999999; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999997), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999997 AND a <= 999999; +a b +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999997 AND a < 999999; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999997), lt(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999997 AND a < 999999; +a b +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999997 AND a <= 999999; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999997), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999997 AND a <= 999999; +a b +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a < 999998; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a > 999998; +id estRows task access object operator info +TableReader 11.00 root partition:all data:Selection +└─Selection 11.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 999998; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 999998; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... 
+2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 999998 AND a <= 1000000; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999998 AND a <= 1000000; +a b +1000000 1000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +a b +1000000 1000000 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999998 AND a < 1000000; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999998), lt(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999998 AND a < 1000000; +a b +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +a b +1000000 1000000 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 999999; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 999999; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999999; +id estRows task access object operator info +TableReader 10.00 root partition:p1,p2 data:Selection +└─Selection 10.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 999999; +id estRows task access object operator info +TableReader 4.00 root partition:p0 data:Selection +└─Selection 4.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 999999; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a >= 999999; +id estRows task access object operator info +TableReader 11.00 root partition:all data:Selection +└─Selection 11.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 999999 AND a <= 1000001; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999999 AND a <= 1000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 999999 AND a < 1000001; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999999), lt(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999999 AND a < 1000001; +a b +1000000 1000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 1000000; +id estRows task access object operator info +TableReader 4.00 root partition:p0 data:Selection +└─Selection 4.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1000000; +id estRows task access object operator info +TableReader 9.00 root partition:p1,p2 data:Selection +└─Selection 9.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000000; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a <= 1000000; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1000000; +id estRows task access object operator info +TableReader 10.00 root partition:p1,p2 data:Selection +└─Selection 10.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1000000; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1000000 AND a <= 1000002; +id estRows task access object operator info +TableReader 3.00 root partition:p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1000000 AND a <= 1000002; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1000000 AND a < 1000002; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000000), lt(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000000 AND a < 1000002; +a b +1000001 1000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 1999997; +id estRows task access object operator info +TableReader 7.00 root partition:p0,p1 data:Selection +└─Selection 7.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1999997; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 1999997; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999997; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 1999997; +id estRows task access object operator info +TableReader 7.00 root partition:p0,p1 data:Selection +└─Selection 7.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1999997; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1999997; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1999997; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a >= 1999997 AND a <= 1999999; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1999997), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1999997 AND a <= 1999999; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999997), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999997 AND a < 1999999; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999997), lt(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999997 AND a < 1999999; +a b +1999998 1999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999997), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a < 1999998; +id estRows task access object operator info +TableReader 7.00 root partition:p0,p1 data:Selection +└─Selection 7.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999998; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2 data:Selection +└─Selection 6.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999998; +a b +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 1999998; +id estRows task access object operator info +TableReader 8.00 root partition:p0,p1 data:Selection +└─Selection 8.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1999998; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1999998; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... 
+2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1999998 AND a <= 2000000; +id estRows task access object operator info +TableReader 3.00 root partition:p1,p2 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1999998), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1999998 AND a <= 2000000; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +id estRows task access object operator info +TableReader 2.00 root partition:p1,p2 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999998), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +a b +1999999 1999999 Filler ... +2000000 2000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a < 2000000; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999998), lt(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999998 AND a < 2000000; +a b +1999999 1999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +id estRows task access object operator info +TableReader 2.00 root partition:p1,p2 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999998), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +a b +1999999 1999999 Filler ... +2000000 2000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 1999999; +id estRows task access object operator info +TableReader 8.00 root partition:p0,p1 data:Selection +└─Selection 8.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1999999; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999999; +id estRows task access object operator info +TableReader 5.00 root partition:p2 data:Selection +└─Selection 5.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999999; +a b +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 1999999; +id estRows task access object operator info +TableReader 9.00 root partition:p0,p1 data:Selection +└─Selection 9.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1999999; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... 
+1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1999999; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1999999; +a b +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1999999 AND a <= 2000001; +id estRows task access object operator info +TableReader 3.00 root partition:p1,p2 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1999999), le(executor__partition__partition_boundaries.t.a, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1999999 AND a <= 2000001; +a b +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999999 AND a <= 2000001; +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999999), le(executor__partition__partition_boundaries.t.a, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999999 AND a <= 2000001; +a b +2000000 2000000 Filler ... +2000001 2000001 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 1999999 AND a < 2000001; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999999), lt(executor__partition__partition_boundaries.t.a, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999999 AND a < 2000001; +a b +2000000 2000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999999 AND a <= 2000001; +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999999), le(executor__partition__partition_boundaries.t.a, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999999 AND a <= 2000001; +a b +2000000 2000000 Filler ... +2000001 2000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 2000000; +id estRows task access object operator info +TableReader 9.00 root partition:p0,p1 data:Selection +└─Selection 9.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 2000000; +id estRows task access object operator info +TableReader 4.00 root partition:p2 data:Selection +└─Selection 4.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2000000; +a b +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a <= 2000000; +id estRows task access object operator info +TableReader 10.00 root partition:all data:Selection +└─Selection 10.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 2000000; +id estRows task access object operator info +TableReader 5.00 root partition:p2 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2000000; +a b +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 2000000 AND a <= 2000002; +id estRows task access object operator info +TableReader 3.00 root partition:p2 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2000000), le(executor__partition__partition_boundaries.t.a, 2000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2000000 AND a <= 2000002; +a b +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 2000000 AND a <= 2000002; +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2000000), le(executor__partition__partition_boundaries.t.a, 2000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2000000 AND a <= 2000002; +a b +2000001 2000001 Filler ... +2000002 2000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 2000000 AND a < 2000002; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2000000), lt(executor__partition__partition_boundaries.t.a, 2000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2000000 AND a < 2000002; +a b +2000001 2000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 2000000 AND a <= 2000002; +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2000000), le(executor__partition__partition_boundaries.t.a, 2000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2000000 AND a <= 2000002; +a b +2000001 2000001 Filler ... +2000002 2000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 2999997; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 2999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2999997; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... 
+2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 2999997; +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2999997; +a b +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 2999997; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 2999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2999997; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 2999997; +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2999997; +a b +2999998 2999998 Filler ... +2999999 2999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a >= 2999997 AND a <= 2999999; +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2999997), le(executor__partition__partition_boundaries.t.a, 2999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2999997 AND a <= 2999999; +a b +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 2999997 AND a <= 2999999; +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999997), le(executor__partition__partition_boundaries.t.a, 2999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2999997 AND a <= 2999999; +a b +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 2999997 AND a < 2999999; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999997), lt(executor__partition__partition_boundaries.t.a, 2999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2999997 AND a < 2999999; +a b +2999998 2999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 2999997 AND a <= 2999999; +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999997), le(executor__partition__partition_boundaries.t.a, 2999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2999997 AND a <= 2999999; +a b +2999998 2999998 Filler ... +2999999 2999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a < 2999998; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 2999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 2999998; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2999998; +a b +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 2999998; +id estRows task access object operator info +TableReader 13.00 root partition:all data:Selection +└─Selection 13.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 2999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a >= 2999998; +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2999998; +a b +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 2999998 AND a <= 3000000; +id estRows task access object operator info +TableReader 2.00 root partition:p2 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2999998), le(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2999998 AND a <= 3000000; +a b +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 2999998 AND a <= 3000000; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999998), le(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2999998 AND a <= 3000000; +a b +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 2999998 AND a < 3000000; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999998), lt(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2999998 AND a < 3000000; +a b +2999999 2999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 2999998 AND a <= 3000000; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999998), le(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2999998 AND a <= 3000000; +a b +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 2999999; +id estRows task access object operator info +TableReader 13.00 root partition:all data:Selection +└─Selection 13.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 2999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2999999; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 2999999; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2999999; +a b +explain format='brief' SELECT * FROM t WHERE a <= 2999999; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 2999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2999999; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... 
+1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 2999999; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2999999; +a b +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 2999999 AND a <= 3000001; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2999999), le(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2999999 AND a <= 3000001; +a b +2999999 2999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 2999999 AND a <= 3000001; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999999), le(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2999999 AND a <= 3000001; +a b +explain format='brief' SELECT * FROM t WHERE a > 2999999 AND a < 3000001; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999999), lt(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2999999 AND a < 3000001; +a b +explain format='brief' SELECT * FROM t WHERE a > 2999999 AND a <= 3000001; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999999), le(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2999999 AND a <= 3000001; +a b +explain format='brief' SELECT * FROM t WHERE a < 3000000; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 3000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... 
+999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 3000000; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 3000000; +a b +explain format='brief' SELECT * FROM t WHERE a <= 3000000; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 3000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a >= 3000000; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 3000000; +a b +explain format='brief' SELECT * FROM t WHERE a >= 3000000 AND a <= 3000002; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 3000000 AND a <= 3000002; +a b +explain format='brief' SELECT * FROM t WHERE a > 3000000 AND a <= 3000002; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 3000000 AND a <= 3000002; +a b +explain format='brief' SELECT * FROM t WHERE a > 3000000 AND a < 3000002; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3000000), lt(executor__partition__partition_boundaries.t.a, 3000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 3000000 AND a < 3000002; +a b +explain format='brief' SELECT * FROM t WHERE a > 3000000 AND a <= 3000002; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000002) + └─TableFullScan 
14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 3000000 AND a <= 3000002; +a b +set @@tidb_partition_prune_mode = default; +set @@tidb_partition_prune_mode = 'dynamic'; +drop table if exists t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( +PARTITION p0 VALUES LESS THAN (1), +PARTITION p1 VALUES LESS THAN (2), +PARTITION p2 VALUES LESS THAN (3), +PARTITION p3 VALUES LESS THAN (4), +PARTITION p4 VALUES LESS THAN (5), +PARTITION p5 VALUES LESS THAN (6), +PARTITION p6 VALUES LESS THAN (7)); +INSERT INTO t VALUES (0, '0 Filler...'); +INSERT INTO t VALUES (1, '1 Filler...'); +INSERT INTO t VALUES (2, '2 Filler...'); +INSERT INTO t VALUES (3, '3 Filler...'); +INSERT INTO t VALUES (4, '4 Filler...'); +INSERT INTO t VALUES (5, '5 Filler...'); +INSERT INTO t VALUES (6, '6 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a < -1; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < -1; +a b +explain format='brief' SELECT * FROM t WHERE a > -1; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > -1; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a <= -1; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= -1; +a b +explain format='brief' SELECT * FROM t WHERE a >= -1; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= -1; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > -1; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > -1; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a < -1; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < -1; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > -1); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > -1); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < -1); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < -1); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= -1; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= -1; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < -1; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < -1; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= -1); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= -1); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < -1); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < -1); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > -1; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > -1; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= -1; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= -1; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > -1); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > -1); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= -1); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= -1); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= -1; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= -1; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= -1; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= -1; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= -1); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= -1); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= -1); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, -1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= -1); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a < 0; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 0; +a b +explain format='brief' SELECT * FROM t WHERE a > 0; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2,p3,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 0; +a b +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 0; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 0; +a b +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 0; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 0; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 0; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 0; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... 
+5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 0; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 0; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 0); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 0); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 0); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 0); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 0; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 0; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 0; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 0; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 0); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 0); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 0); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 0); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 0; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 0; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 0; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 0; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 0); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 0); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 0); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 0); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 0; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 0; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 0; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 0; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 0); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 0); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 0); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 0)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 0); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 1; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1; +a b +0 0 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 1; +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 1; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1; +a b +0 0 Filler... +1 1 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 1; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2,p3,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1; +a b +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 1; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 1; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 1; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 1; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 1); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 1); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 1); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 1); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 1; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 1; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 1; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 1; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 1); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 1); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 1); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 1); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 1; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 1; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 1; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 1; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 1); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 1); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 1); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 1); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 1; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 1; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 1; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 1; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 1); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 1); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 1); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 1)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 1); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2; +a b +0 0 Filler... +1 1 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 2; +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2; +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 2; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p3,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 2; +a b +0 0 Filler... +1 1 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 2; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 2; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 2); +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 2); +a b +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 2); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 2); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 2; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 2; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 2; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 2; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 2); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 2); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 2); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 2); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 2; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 2; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 2; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 2; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 2); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 2); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 2); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 2); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 2; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 2; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 2; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 2; +a b +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 2); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 2); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 2); +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p3,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 2); +a b +0 0 Filler... +1 1 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 3; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 3; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 3; +id estRows task access object operator info +TableReader 3.00 root partition:p4,p5,p6 data:Selection +└─Selection 3.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 3; +a b +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 3; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p3 data:Selection +└─Selection 4.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 3; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 3; +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 3; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 3; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 3; +a b +0 0 Filler... +1 1 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 3; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 3; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 3); +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 3); +a b +2 2 Filler... +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 3); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 3); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 3; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p3,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 3; +a b +0 0 Filler... +1 1 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 3; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 3; +a b +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 3); +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 3); +a b +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 3); +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p3,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 3); +a b +0 0 Filler... +1 1 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 3; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 3; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 3; +id estRows task access object operator info +TableReader 1.00 root partition:p3 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 3; +a b +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 3); +id estRows task access object operator info +TableReader 1.00 root partition:p3 data:Selection +└─Selection 1.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 3); +a b +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 3); +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 3); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 3; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 3; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 3; +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 3; +a b +2 2 Filler... +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 3); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 3); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 3); +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 3); +a b +0 0 Filler... +1 1 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 4; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p3 data:Selection +└─Selection 4.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 4; +id estRows task access object operator info +TableReader 2.00 root partition:p5,p6 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 4; +a b +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 4; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p3,p4 data:Selection +└─Selection 5.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 4; +id estRows task access object operator info +TableReader 3.00 root partition:p4,p5,p6 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 4; +a b +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 4; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 4; +a b +0 0 Filler... +1 1 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 4; +id estRows task access object operator info +TableReader 1.00 root partition:p3 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 4; +a b +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 4); +id estRows task access object operator info +TableReader 3.00 root partition:p2,p3,p4 data:Selection +└─Selection 3.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 4); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 4); +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 4); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 4; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 4; +a b +0 0 Filler... +1 1 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 4; +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 4; +a b +2 2 Filler... +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 4); +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 4); +a b +2 2 Filler... +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 4); +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 4); +a b +0 0 Filler... +1 1 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 4; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 4; +id estRows task access object operator info +TableReader 2.00 root partition:p3,p4 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 4; +a b +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 4); +id estRows task access object operator info +TableReader 2.00 root partition:p3,p4 data:Selection +└─Selection 2.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 4); +a b +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 4); +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 4); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 4; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 4; +id estRows task access object operator info +TableReader 3.00 root partition:p2,p3,p4 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 4; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 4); +id estRows task access object operator info +TableReader 1.00 root partition:p3 data:Selection +└─Selection 1.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 4); +a b +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 4); +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 4); +a b +0 0 Filler... +1 1 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 5; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p3,p4 data:Selection +└─Selection 5.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 5; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 5; +id estRows task access object operator info +TableReader 1.00 root partition:p6 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 5; +a b +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 5; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p3,p4,p5 data:Selection +└─Selection 6.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 5; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 5; +id estRows task access object operator info +TableReader 2.00 root partition:p5,p6 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 5; +a b +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 5; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p6 data:Selection +└─Selection 3.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 5; +a b +0 0 Filler... +1 1 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 5; +id estRows task access object operator info +TableReader 2.00 root partition:p3,p4 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 5; +a b +3 3 Filler... 
+4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 5); +id estRows task access object operator info +TableReader 4.00 root partition:p2,p3,p4,p5 data:Selection +└─Selection 4.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 5); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 5); +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 5); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 5; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 5; +a b +0 0 Filler... +1 1 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 5; +id estRows task access object operator info +TableReader 3.00 root partition:p2,p3,p4 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 5; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 5); +id estRows task access object operator info +TableReader 3.00 root partition:p2,p3,p4 data:Selection +└─Selection 3.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 5); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 5); +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 5); +a b +0 0 Filler... +1 1 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 5; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p6 data:Selection +└─Selection 4.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 5; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 5; +id estRows task access object operator info +TableReader 3.00 root partition:p3,p4,p5 data:Selection +└─Selection 3.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 5; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 5); +id estRows task access object operator info +TableReader 3.00 root partition:p3,p4,p5 data:Selection +└─Selection 3.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 5); +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 5); +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p6 data:Selection +└─Selection 4.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 5); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 5; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 5; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 5; +id estRows task access object operator info +TableReader 4.00 root partition:p2,p3,p4,p5 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 5; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 5); +id estRows task access object operator info +TableReader 2.00 root partition:p3,p4 data:Selection +└─Selection 2.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 5); +a b +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 5); +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p6 data:Selection +└─Selection 3.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 5); +a b +0 0 Filler... +1 1 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 6; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p3,p4,p5 data:Selection +└─Selection 6.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 6; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 6; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 6; +a b +explain format='brief' SELECT * FROM t WHERE a <= 6; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 6; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 6; +id estRows task access object operator info +TableReader 1.00 root partition:p6 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 6; +a b +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 6; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 6; +a b +0 0 Filler... +1 1 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 6; +id estRows task access object operator info +TableReader 3.00 root partition:p3,p4,p5 data:Selection +└─Selection 3.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 6; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 6); +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 6); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 6); +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p6 data:Selection +└─Selection 4.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 6); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 6; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p6 data:Selection +└─Selection 3.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 6; +a b +0 0 Filler... +1 1 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 6; +id estRows task access object operator info +TableReader 4.00 root partition:p2,p3,p4,p5 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 6; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 6); +id estRows task access object operator info +TableReader 4.00 root partition:p2,p3,p4,p5 data:Selection +└─Selection 4.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 6); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 6); +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p6 data:Selection +└─Selection 3.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 6); +a b +0 0 Filler... +1 1 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 6; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 6; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 6; +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 6; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 6); +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 6); +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 6); +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 6); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 6; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p6 data:Selection +└─Selection 4.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 6; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 6; +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 6; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 6); +id estRows task access object operator info +TableReader 3.00 root partition:p3,p4,p5 data:Selection +└─Selection 3.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 6); +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 6); +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 6); +a b +0 0 Filler... +1 1 Filler... +explain format='brief' SELECT * FROM t WHERE a < 7; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 7; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a > 7; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 7; +a b +explain format='brief' SELECT * FROM t WHERE a <= 7; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 7; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 7; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 7; +a b +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 7; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 7; +a b +0 0 Filler... +1 1 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 7; +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 7; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 7); +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 7); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 7); +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 7); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 7; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 7; +a b +0 0 Filler... +1 1 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 7; +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 7; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 7); +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 7); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 7); +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 7); +a b +0 0 Filler... +1 1 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 7; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 7; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 7; +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 7; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 7); +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 7); +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 7); +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 7); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 7; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 7; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 7; +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 7; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 7); +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 7); +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 7); +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 7); +a b +0 0 Filler... +1 1 Filler... +set @@tidb_partition_prune_mode = default; diff --git a/tests/integrationtest/r/executor/partition/partition_with_expression.result b/tests/integrationtest/r/executor/partition/partition_with_expression.result new file mode 100644 index 0000000000000..5435f6baeb059 --- /dev/null +++ b/tests/integrationtest/r/executor/partition/partition_with_expression.result @@ -0,0 +1,1250 @@ +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a datetime, b int) partition by range columns (a) (partition p0 values less than("2012-12-10 00:00:00"), partition p1 values less than("2022-12-30 00:00:00"), partition p2 values less than("2025-12-12 00:00:00")); +create table t(a datetime, b int) partition by range columns (a) (partition p0 values less than("2012-12-10 00:00:00"), partition p1 values less than("2022-12-30 00:00:00"), partition p2 values less than("2025-12-12 00:00:00")); +insert into tp values("2015-09-09 00:00:00", 1), ("2020-08-08 19:00:01", 2), ("2024-01-01 01:01:01", 3); +insert into t values("2015-09-09 00:00:00", 
1), ("2020-08-08 19:00:01", 2), ("2024-01-01 01:01:01", 3); +analyze table tp; +analyze table t; +explain format='brief' select * from tp where a != '2024-01-01 01:01:01'; +id estRows task access object operator info +TableReader 2.00 root partition:all data:Selection +└─Selection 2.00 cop[tikv] ne(executor__partition__partition_with_expression.tp.a, 2024-01-01 01:01:01.000000) + └─TableFullScan 3.00 cop[tikv] table:tp keep order:false +select * from tp where a != '2024-01-01 01:01:01'; +a b +2015-09-09 00:00:00 1 +2020-08-08 19:00:01 2 +select * from t where a != '2024-01-01 01:01:01'; +a b +2015-09-09 00:00:00 1 +2020-08-08 19:00:01 2 +explain format='brief' select * from tp where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00'; +id estRows task access object operator info +TableReader 1.00 root partition:p1,p2 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_with_expression.tp.a, 2015-09-09 00:00:00.000000), ne(executor__partition__partition_with_expression.tp.a, 2024-01-01 01:01:01.000000) + └─TableFullScan 3.00 cop[tikv] table:tp keep order:false +select * from tp where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00'; +a b +2020-08-08 19:00:01 2 +select * from t where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00'; +a b +2020-08-08 19:00:01 2 +set tidb_partition_prune_mode=default; +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a datetime, b int) partition by range(weekday(a)) (partition p0 values less than(3), partition p1 values less than(5), partition p2 values less than(8)); +create table t(a datetime, b int); +insert into tp values("2020-08-17 00:00:00", 1), ("2020-08-18 00:00:00", 2), ("2020-08-19 00:00:00", 4), ("2020-08-20 00:00:00", 5), ("2020-08-21 00:00:00", 6), ("2020-08-22 00:00:00", 0); +insert into t values("2020-08-17 00:00:00", 1), ("2020-08-18 00:00:00", 2), ("2020-08-19 00:00:00", 4), ("2020-08-20 00:00:00", 5), ("2020-08-21 00:00:00", 6), 
("2020-08-22 00:00:00", 0); +analyze table tp; +analyze table t; +explain format='brief' select * from tp where a = '2020-08-17 00:00:00'; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.tp.a, 2020-08-17 00:00:00.000000) + └─TableFullScan 6.00 cop[tikv] table:tp keep order:false +select * from tp where a = '2020-08-17 00:00:00'; +a b +2020-08-17 00:00:00 1 +select * from t where a = '2020-08-17 00:00:00'; +a b +2020-08-17 00:00:00 1 +explain format='brief' select * from tp where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00'; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.tp.a, 2020-08-20 00:00:00.000000) + └─TableFullScan 6.00 cop[tikv] table:tp keep order:false +select * from tp where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00'; +a b +2020-08-20 00:00:00 5 +select * from t where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00'; +a b +2020-08-20 00:00:00 5 +explain format='brief' select * from tp where a < '2020-08-19 00:00:00'; +id estRows task access object operator info +TableReader 2.00 root partition:all data:Selection +└─Selection 2.00 cop[tikv] lt(executor__partition__partition_with_expression.tp.a, 2020-08-19 00:00:00.000000) + └─TableFullScan 6.00 cop[tikv] table:tp keep order:false +select * from tp where a < '2020-08-19 00:00:00'; +a b +2020-08-17 00:00:00 1 +2020-08-18 00:00:00 2 +select * from t where a < '2020-08-19 00:00:00'; +a b +2020-08-17 00:00:00 1 +2020-08-18 00:00:00 2 +set tidb_partition_prune_mode=default; +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a timestamp, b int) partition by range(floor(unix_timestamp(a))) (partition p0 values less than(1580670000), partition p1 values less than(1597622400), partition p2 values less 
than(1629158400)); +create table t(a timestamp, b int); +insert into tp values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3); +insert into t values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3); +analyze table tp; +analyze table t; +explain select * from tp where a > '2020-09-11 00:00:00'; +id estRows task access object operator info +TableReader_7 1.00 root partition:p2 data:Selection_6 +└─Selection_6 1.00 cop[tikv] gt(executor__partition__partition_with_expression.tp.a, 2020-09-11 00:00:00.000000) + └─TableFullScan_5 4.00 cop[tikv] table:tp keep order:false +select * from tp where a > '2020-09-11 00:00:00'; +a b +2020-10-01 14:13:15 3 +select * from t where a > '2020-09-11 00:00:00'; +a b +2020-10-01 14:13:15 3 +explain select * from tp where a < '2020-07-07 01:00:00'; +id estRows task access object operator info +TableReader_7 1.00 root partition:p0,p1 data:Selection_6 +└─Selection_6 1.00 cop[tikv] lt(executor__partition__partition_with_expression.tp.a, 2020-07-07 01:00:00.000000) + └─TableFullScan_5 4.00 cop[tikv] table:tp keep order:false +select * from tp where a < '2020-07-07 01:00:00'; +a b +2020-01-01 19:00:00 1 +select * from t where a < '2020-07-07 01:00:00'; +a b +2020-01-01 19:00:00 1 +set tidb_partition_prune_mode=default; +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a timestamp, b int) partition by range(unix_timestamp(a)) (partition p0 values less than(1580670000), partition p1 values less than(1597622400), partition p2 values less than(1629158400)); +create table t(a timestamp, b int); +insert into tp values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3); +insert into t values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3); +analyze table tp; +analyze table 
t; +explain select * from tp where a > '2020-09-11 00:00:00'; +id estRows task access object operator info +TableReader_7 1.00 root partition:p2 data:Selection_6 +└─Selection_6 1.00 cop[tikv] gt(executor__partition__partition_with_expression.tp.a, 2020-09-11 00:00:00.000000) + └─TableFullScan_5 4.00 cop[tikv] table:tp keep order:false +select * from tp where a > '2020-09-11 00:00:00'; +a b +2020-10-01 14:13:15 3 +select * from t where a > '2020-09-11 00:00:00'; +a b +2020-10-01 14:13:15 3 +explain select * from tp where a < '2020-07-07 01:00:00'; +id estRows task access object operator info +TableReader_7 1.00 root partition:p0,p1 data:Selection_6 +└─Selection_6 1.00 cop[tikv] lt(executor__partition__partition_with_expression.tp.a, 2020-07-07 01:00:00.000000) + └─TableFullScan_5 4.00 cop[tikv] table:tp keep order:false +select * from tp where a < '2020-07-07 01:00:00'; +a b +2020-01-01 19:00:00 1 +select * from t where a < '2020-07-07 01:00:00'; +a b +2020-01-01 19:00:00 1 +set tidb_partition_prune_mode=default; +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a datetime, b int) partition by range columns(a) (partition p0 values less than('2020-02-02 00:00:00'), partition p1 values less than('2020-09-01 00:00:00'), partition p2 values less than('2020-12-20 00:00:00')); +create table t(a datetime, b int); +insert into tp values('2020-01-01 12:00:00', 1), ('2020-08-22 10:00:00', 2), ('2020-09-09 11:00:00', 3), ('2020-10-01 00:00:00', 4); +insert into t values('2020-01-01 12:00:00', 1), ('2020-08-22 10:00:00', 2), ('2020-09-09 11:00:00', 3), ('2020-10-01 00:00:00', 4); +analyze table tp; +analyze table t; +explain select * from tp where a < '2020-09-01 00:00:00'; +id estRows task access object operator info +TableReader_7 2.00 root partition:p0,p1 data:Selection_6 +└─Selection_6 2.00 cop[tikv] lt(executor__partition__partition_with_expression.tp.a, 2020-09-01 00:00:00.000000) + └─TableFullScan_5 4.00 cop[tikv] table:tp keep 
order:false +select * from tp where a < '2020-09-01 00:00:00'; +a b +2020-01-01 12:00:00 1 +2020-08-22 10:00:00 2 +select * from t where a < '2020-09-01 00:00:00'; +a b +2020-01-01 12:00:00 1 +2020-08-22 10:00:00 2 +explain select * from tp where a > '2020-07-07 01:00:00'; +id estRows task access object operator info +TableReader_7 3.00 root partition:p1,p2 data:Selection_6 +└─Selection_6 3.00 cop[tikv] gt(executor__partition__partition_with_expression.tp.a, 2020-07-07 01:00:00.000000) + └─TableFullScan_5 4.00 cop[tikv] table:tp keep order:false +select * from tp where a > '2020-07-07 01:00:00'; +a b +2020-08-22 10:00:00 2 +2020-09-09 11:00:00 3 +2020-10-01 00:00:00 4 +select * from t where a > '2020-07-07 01:00:00'; +a b +2020-08-22 10:00:00 2 +2020-09-09 11:00:00 3 +2020-10-01 00:00:00 4 +set tidb_partition_prune_mode=default; +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a varchar(255), b int) partition by range columns(a) (partition p0 values less than('ddd'), partition p1 values less than('ggggg'), partition p2 values less than('mmmmmm')); +create table t(a varchar(255), b int); +insert into tp values('aaa', 1), ('bbbb', 2), ('ccc', 3), ('dfg', 4), ('kkkk', 5), ('10', 6); +insert into t values('aaa', 1), ('bbbb', 2), ('ccc', 3), ('dfg', 4), ('kkkk', 5), ('10', 6); +analyze table tp; +analyze table t; +explain select * from tp where a < '10'; +id estRows task access object operator info +TableReader_7 0.00 root partition:p0 data:Selection_6 +└─Selection_6 0.00 cop[tikv] lt(executor__partition__partition_with_expression.tp.a, "10") + └─TableFullScan_5 6.00 cop[tikv] table:tp keep order:false +select * from tp where a < '10'; +a b +select * from t where a < '10'; +a b +explain select * from tp where a > 0; +id estRows task access object operator info +TableReader_7 4.80 root partition:all data:Selection_6 +└─Selection_6 4.80 cop[tikv] gt(cast(executor__partition__partition_with_expression.tp.a, double BINARY), 0) + 
└─TableFullScan_5 6.00 cop[tikv] table:tp keep order:false +select * from tp where a > 0; +a b +10 6 +select * from t where a > 0; +a b +10 6 +explain select * from tp where a < 0; +id estRows task access object operator info +TableReader_7 4.80 root partition:all data:Selection_6 +└─Selection_6 4.80 cop[tikv] lt(cast(executor__partition__partition_with_expression.tp.a, double BINARY), 0) + └─TableFullScan_5 6.00 cop[tikv] table:tp keep order:false +select * from tp where a < 0; +a b +select * from t where a < 0; +a b +set tidb_partition_prune_mode=default; +drop table if exists trange, thash, t; +create table trange(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11)); +create table thash(a int, b int) partition by hash(a) partitions 4; +create table t(a int, b int); +insert into trange values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1); +insert into thash values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1); +insert into t values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1); +set session tidb_partition_prune_mode='dynamic'; +analyze table trange; +analyze table thash; +analyze table t; +SELECT * from t where a = 2; +a b +2 1 +explain format='brief' select * from trange where a = 2; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.trange.a, 2) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a = 2; +a b +2 1 +explain format='brief' select * from thash where a = 2; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] 
eq(executor__partition__partition_with_expression.thash.a, 2) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a = 2; +a b +2 1 +SELECT * from t where a = 4 or a = 1; +a b +1 NULL +1 NULL +1 1 +4 3 +explain format='brief' select * from trange where a = 4 or a = 1; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1 data:Selection +└─Selection 4.00 cop[tikv] or(eq(executor__partition__partition_with_expression.trange.a, 4), eq(executor__partition__partition_with_expression.trange.a, 1)) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a = 4 or a = 1; +a b +1 NULL +1 NULL +1 1 +4 3 +explain format='brief' select * from thash where a = 4 or a = 1; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1 data:Selection +└─Selection 4.00 cop[tikv] or(eq(executor__partition__partition_with_expression.thash.a, 4), eq(executor__partition__partition_with_expression.thash.a, 1)) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a = 4 or a = 1; +a b +1 NULL +1 NULL +1 1 +4 3 +SELECT * from t where a = -1; +a b +explain format='brief' select * from trange where a = -1; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] eq(executor__partition__partition_with_expression.trange.a, -1) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a = -1; +a b +explain format='brief' select * from thash where a = -1; +id estRows task access object operator info +TableReader 0.00 root partition:p1 data:Selection +└─Selection 0.00 cop[tikv] eq(executor__partition__partition_with_expression.thash.a, -1) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a = -1; +a b +SELECT * from t where a is NULL; +a b +NULL NULL +NULL 1 +explain format='brief' select * from 
trange where a is NULL; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] isnull(executor__partition__partition_with_expression.trange.a) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a is NULL; +a b +NULL NULL +NULL 1 +explain format='brief' select * from thash where a is NULL; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] isnull(executor__partition__partition_with_expression.thash.a) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a is NULL; +a b +NULL NULL +NULL 1 +SELECT * from t where b is NULL; +a b +NULL NULL +1 NULL +1 NULL +10 NULL +explain format='brief' select * from trange where b is NULL; +id estRows task access object operator info +TableReader 4.00 root partition:all data:Selection +└─Selection 4.00 cop[tikv] isnull(executor__partition__partition_with_expression.trange.b) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where b is NULL; +a b +NULL NULL +1 NULL +1 NULL +10 NULL +explain format='brief' select * from thash where b is NULL; +id estRows task access object operator info +TableReader 4.00 root partition:all data:Selection +└─Selection 4.00 cop[tikv] isnull(executor__partition__partition_with_expression.thash.b) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where b is NULL; +a b +NULL NULL +1 NULL +1 NULL +10 NULL +SELECT * from t where a > -1; +a b +1 NULL +1 NULL +1 1 +10 NULL +2 1 +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from trange where a > -1; +id estRows task access object operator info +TableReader 11.00 root partition:all data:Selection +└─Selection 11.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, -1) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * 
from trange where a > -1; +a b +1 NULL +1 NULL +1 1 +10 NULL +2 1 +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from thash where a > -1; +id estRows task access object operator info +TableReader 11.00 root partition:all data:Selection +└─Selection 11.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, -1) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a > -1; +a b +1 NULL +1 NULL +1 1 +10 NULL +2 1 +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +SELECT * from t where a >= 4 and a <= 5; +a b +4 3 +5 5 +explain format='brief' select * from trange where a >= 4 and a <= 5; +id estRows task access object operator info +TableReader 2.00 root partition:p1,p2 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_with_expression.trange.a, 4), le(executor__partition__partition_with_expression.trange.a, 5) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a >= 4 and a <= 5; +a b +4 3 +5 5 +explain format='brief' select * from thash where a >= 4 and a <= 5; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_with_expression.thash.a, 4), le(executor__partition__partition_with_expression.thash.a, 5) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a >= 4 and a <= 5; +a b +4 3 +5 5 +SELECT * from t where a > 10; +a b +explain format='brief' select * from trange where a > 10; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, 10) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a > 10; +a b +explain format='brief' select * from thash where a > 10; +id estRows task access object operator info +TableReader 0.00 root partition:all 
data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, 10) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a > 10; +a b +SELECT * from t where a >=2 and a <= 3; +a b +2 1 +3 2 +explain format='brief' select * from trange where a >=2 and a <= 3; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_with_expression.trange.a, 2), le(executor__partition__partition_with_expression.trange.a, 3) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a >=2 and a <= 3; +a b +2 1 +3 2 +explain format='brief' select * from thash where a >=2 and a <= 3; +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_with_expression.thash.a, 2), le(executor__partition__partition_with_expression.thash.a, 3) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a >=2 and a <= 3; +a b +2 1 +3 2 +SELECT * from t where a between 2 and 3; +a b +2 1 +3 2 +explain format='brief' select * from trange where a between 2 and 3; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_with_expression.trange.a, 2), le(executor__partition__partition_with_expression.trange.a, 3) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a between 2 and 3; +a b +2 1 +3 2 +explain format='brief' select * from thash where a between 2 and 3; +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_with_expression.thash.a, 2), le(executor__partition__partition_with_expression.thash.a, 3) + 
└─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a between 2 and 3; +a b +2 1 +3 2 +SELECT * from t where a < 2; +a b +1 NULL +1 NULL +1 1 +explain format='brief' select * from trange where a < 2; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__partition_with_expression.trange.a, 2) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a < 2; +a b +1 NULL +1 NULL +1 1 +explain format='brief' select * from thash where a < 2; +id estRows task access object operator info +TableReader 3.00 root partition:all data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__partition_with_expression.thash.a, 2) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a < 2; +a b +1 NULL +1 NULL +1 1 +SELECT * from t where a <= 3; +a b +1 NULL +1 NULL +1 1 +2 1 +3 2 +explain format='brief' select * from trange where a <= 3; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] le(executor__partition__partition_with_expression.trange.a, 3) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a <= 3; +a b +1 NULL +1 NULL +1 1 +2 1 +3 2 +explain format='brief' select * from thash where a <= 3; +id estRows task access object operator info +TableReader 5.00 root partition:all data:Selection +└─Selection 5.00 cop[tikv] le(executor__partition__partition_with_expression.thash.a, 3) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a <= 3; +a b +1 NULL +1 NULL +1 1 +2 1 +3 2 +SELECT * from t where a in (2, 3); +a b +2 1 +3 2 +explain format='brief' select * from trange where a in (2, 3); +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] 
in(executor__partition__partition_with_expression.trange.a, 2, 3) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a in (2, 3); +a b +2 1 +3 2 +explain format='brief' select * from thash where a in (2, 3); +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] in(executor__partition__partition_with_expression.thash.a, 2, 3) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a in (2, 3); +a b +2 1 +3 2 +SELECT * from t where a in (1, 5); +a b +1 NULL +1 NULL +1 1 +5 5 +explain format='brief' select * from trange where a in (1, 5); +id estRows task access object operator info +TableReader 4.00 root partition:p0,p2 data:Selection +└─Selection 4.00 cop[tikv] in(executor__partition__partition_with_expression.trange.a, 1, 5) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a in (1, 5); +a b +1 NULL +1 NULL +1 1 +5 5 +explain format='brief' select * from thash where a in (1, 5); +id estRows task access object operator info +TableReader 4.00 root partition:p1 data:Selection +└─Selection 4.00 cop[tikv] in(executor__partition__partition_with_expression.thash.a, 1, 5) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a in (1, 5); +a b +1 NULL +1 NULL +1 1 +5 5 +SELECT * from t where a not in (1, 5); +a b +10 NULL +2 1 +3 2 +4 3 +6 7 +7 7 +7 7 +explain format='brief' select * from trange where a not in (1, 5); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] not(in(executor__partition__partition_with_expression.trange.a, 1, 5)) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a not in (1, 5); +a b +10 NULL +2 1 +3 2 +4 3 +6 7 +7 7 +7 7 +explain format='brief' select * from thash where a not in (1, 5); +id estRows task access 
object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] not(in(executor__partition__partition_with_expression.thash.a, 1, 5)) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a not in (1, 5); +a b +10 NULL +2 1 +3 2 +4 3 +6 7 +7 7 +7 7 +SELECT * from t where a = 2 and a = 2; +a b +2 1 +explain format='brief' select * from trange where a = 2 and a = 2; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.trange.a, 2) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a = 2 and a = 2; +a b +2 1 +explain format='brief' select * from thash where a = 2 and a = 2; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.thash.a, 2) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a = 2 and a = 2; +a b +2 1 +SELECT * from t where a = 2 and a = 3; +a b +explain format='brief' select * from trange where a = 2 and a = 3; +id estRows task access object operator info +TableDual 0.00 root rows:0 +SELECT * from trange where a = 2 and a = 3; +a b +explain format='brief' select * from thash where a = 2 and a = 3; +id estRows task access object operator info +TableDual 0.00 root rows:0 +SELECT * from thash where a = 2 and a = 3; +a b +SELECT * from t where a < 2 and a > 0; +a b +1 NULL +1 NULL +1 1 +explain format='brief' select * from trange where a < 2 and a > 0; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, 0), lt(executor__partition__partition_with_expression.trange.a, 2) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * 
from trange where a < 2 and a > 0; +a b +1 NULL +1 NULL +1 1 +explain format='brief' select * from thash where a < 2 and a > 0; +id estRows task access object operator info +TableReader 3.00 root partition:p1 data:Selection +└─Selection 3.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, 0), lt(executor__partition__partition_with_expression.thash.a, 2) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a < 2 and a > 0; +a b +1 NULL +1 NULL +1 1 +SELECT * from t where a < 2 and a < 3; +a b +1 NULL +1 NULL +1 1 +explain format='brief' select * from trange where a < 2 and a < 3; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__partition_with_expression.trange.a, 2), lt(executor__partition__partition_with_expression.trange.a, 3) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a < 2 and a < 3; +a b +1 NULL +1 NULL +1 1 +explain format='brief' select * from thash where a < 2 and a < 3; +id estRows task access object operator info +TableReader 3.00 root partition:all data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__partition_with_expression.thash.a, 2), lt(executor__partition__partition_with_expression.thash.a, 3) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a < 2 and a < 3; +a b +1 NULL +1 NULL +1 1 +SELECT * from t where a > 1 and a > 2; +a b +10 NULL +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from trange where a > 1 and a > 2; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, 1), gt(executor__partition__partition_with_expression.trange.a, 2) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a > 1 and a > 2; +a b 
+10 NULL +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from thash where a > 1 and a > 2; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, 1), gt(executor__partition__partition_with_expression.thash.a, 2) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a > 1 and a > 2; +a b +10 NULL +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +SELECT * from t where a = 2 or a = 3; +a b +2 1 +3 2 +explain format='brief' select * from trange where a = 2 or a = 3; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(eq(executor__partition__partition_with_expression.trange.a, 2), eq(executor__partition__partition_with_expression.trange.a, 3)) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a = 2 or a = 3; +a b +2 1 +3 2 +explain format='brief' select * from thash where a = 2 or a = 3; +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] or(eq(executor__partition__partition_with_expression.thash.a, 2), eq(executor__partition__partition_with_expression.thash.a, 3)) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a = 2 or a = 3; +a b +2 1 +3 2 +SELECT * from t where a = 2 or a in (3); +a b +2 1 +3 2 +explain format='brief' select * from trange where a = 2 or a in (3); +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(eq(executor__partition__partition_with_expression.trange.a, 2), eq(executor__partition__partition_with_expression.trange.a, 3)) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a = 2 or a in (3); +a b +2 1 +3 2 +explain format='brief' select * 
from thash where a = 2 or a in (3); +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] or(eq(executor__partition__partition_with_expression.thash.a, 2), eq(executor__partition__partition_with_expression.thash.a, 3)) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a = 2 or a in (3); +a b +2 1 +3 2 +SELECT * from t where a = 2 or a > 3; +a b +10 NULL +2 1 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from trange where a = 2 or a > 3; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(eq(executor__partition__partition_with_expression.trange.a, 2), gt(executor__partition__partition_with_expression.trange.a, 3)) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a = 2 or a > 3; +a b +10 NULL +2 1 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from thash where a = 2 or a > 3; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(eq(executor__partition__partition_with_expression.thash.a, 2), gt(executor__partition__partition_with_expression.thash.a, 3)) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a = 2 or a > 3; +a b +10 NULL +2 1 +4 3 +5 5 +6 7 +7 7 +7 7 +SELECT * from t where a = 2 or a <= 1; +a b +1 NULL +1 NULL +1 1 +2 1 +explain format='brief' select * from trange where a = 2 or a <= 1; +id estRows task access object operator info +TableReader 4.00 root partition:p0 data:Selection +└─Selection 4.00 cop[tikv] or(eq(executor__partition__partition_with_expression.trange.a, 2), le(executor__partition__partition_with_expression.trange.a, 1)) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a = 2 or a <= 1; +a b +1 NULL +1 NULL +1 1 +2 1 +explain 
format='brief' select * from thash where a = 2 or a <= 1; +id estRows task access object operator info +TableReader 4.00 root partition:all data:Selection +└─Selection 4.00 cop[tikv] or(eq(executor__partition__partition_with_expression.thash.a, 2), le(executor__partition__partition_with_expression.thash.a, 1)) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a = 2 or a <= 1; +a b +1 NULL +1 NULL +1 1 +2 1 +SELECT * from t where a = 2 or a between 2 and 2; +a b +2 1 +explain format='brief' select * from trange where a = 2 or a between 2 and 2; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] or(eq(executor__partition__partition_with_expression.trange.a, 2), and(ge(executor__partition__partition_with_expression.trange.a, 2), le(executor__partition__partition_with_expression.trange.a, 2))) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a = 2 or a between 2 and 2; +a b +2 1 +explain format='brief' select * from thash where a = 2 or a between 2 and 2; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] or(eq(executor__partition__partition_with_expression.thash.a, 2), and(ge(executor__partition__partition_with_expression.thash.a, 2), le(executor__partition__partition_with_expression.thash.a, 2))) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a = 2 or a between 2 and 2; +a b +2 1 +SELECT * from t where a != 2; +a b +1 NULL +1 NULL +1 1 +10 NULL +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from trange where a != 2; +id estRows task access object operator info +TableReader 10.00 root partition:all data:Selection +└─Selection 10.00 cop[tikv] ne(executor__partition__partition_with_expression.trange.a, 2) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from 
trange where a != 2; +a b +1 NULL +1 NULL +1 1 +10 NULL +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from thash where a != 2; +id estRows task access object operator info +TableReader 10.00 root partition:all data:Selection +└─Selection 10.00 cop[tikv] ne(executor__partition__partition_with_expression.thash.a, 2) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a != 2; +a b +1 NULL +1 NULL +1 1 +10 NULL +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +SELECT * from t where a != 2 and a > 4; +a b +10 NULL +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from trange where a != 2 and a > 4; +id estRows task access object operator info +TableReader 5.00 root partition:p2 data:Selection +└─Selection 5.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, 4), ne(executor__partition__partition_with_expression.trange.a, 2) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a != 2 and a > 4; +a b +10 NULL +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from thash where a != 2 and a > 4; +id estRows task access object operator info +TableReader 5.00 root partition:all data:Selection +└─Selection 5.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, 4), ne(executor__partition__partition_with_expression.thash.a, 2) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a != 2 and a > 4; +a b +10 NULL +5 5 +6 7 +7 7 +7 7 +SELECT * from t where a != 2 and a != 3; +a b +1 NULL +1 NULL +1 1 +10 NULL +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from trange where a != 2 and a != 3; +id estRows task access object operator info +TableReader 9.00 root partition:all data:Selection +└─Selection 9.00 cop[tikv] ne(executor__partition__partition_with_expression.trange.a, 2), ne(executor__partition__partition_with_expression.trange.a, 3) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from 
trange where a != 2 and a != 3; +a b +1 NULL +1 NULL +1 1 +10 NULL +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from thash where a != 2 and a != 3; +id estRows task access object operator info +TableReader 9.00 root partition:all data:Selection +└─Selection 9.00 cop[tikv] ne(executor__partition__partition_with_expression.thash.a, 2), ne(executor__partition__partition_with_expression.thash.a, 3) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a != 2 and a != 3; +a b +1 NULL +1 NULL +1 1 +10 NULL +4 3 +5 5 +6 7 +7 7 +7 7 +SELECT * from t where a != 2 and a = 3; +a b +3 2 +explain format='brief' select * from trange where a != 2 and a = 3; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.trange.a, 3) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a != 2 and a = 3; +a b +3 2 +explain format='brief' select * from thash where a != 2 and a = 3; +id estRows task access object operator info +TableReader 1.00 root partition:p3 data:Selection +└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.thash.a, 3) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a != 2 and a = 3; +a b +3 2 +SELECT * from t where not (a = 2); +a b +1 NULL +1 NULL +1 1 +10 NULL +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from trange where not (a = 2); +id estRows task access object operator info +TableReader 10.00 root partition:all data:Selection +└─Selection 10.00 cop[tikv] ne(executor__partition__partition_with_expression.trange.a, 2) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where not (a = 2); +a b +1 NULL +1 NULL +1 1 +10 NULL +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from thash where not (a = 2); +id estRows task access object 
operator info +TableReader 10.00 root partition:all data:Selection +└─Selection 10.00 cop[tikv] ne(executor__partition__partition_with_expression.thash.a, 2) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where not (a = 2); +a b +1 NULL +1 NULL +1 1 +10 NULL +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +SELECT * from t where not (a > 2); +a b +1 NULL +1 NULL +1 1 +2 1 +explain format='brief' select * from trange where not (a > 2); +id estRows task access object operator info +TableReader 4.00 root partition:p0 data:Selection +└─Selection 4.00 cop[tikv] le(executor__partition__partition_with_expression.trange.a, 2) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where not (a > 2); +a b +1 NULL +1 NULL +1 1 +2 1 +explain format='brief' select * from thash where not (a > 2); +id estRows task access object operator info +TableReader 4.00 root partition:all data:Selection +└─Selection 4.00 cop[tikv] le(executor__partition__partition_with_expression.thash.a, 2) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where not (a > 2); +a b +1 NULL +1 NULL +1 1 +2 1 +SELECT * from t where not (a < 2); +a b +10 NULL +2 1 +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from trange where not (a < 2); +id estRows task access object operator info +TableReader 8.00 root partition:all data:Selection +└─Selection 8.00 cop[tikv] ge(executor__partition__partition_with_expression.trange.a, 2) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where not (a < 2); +a b +10 NULL +2 1 +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from thash where not (a < 2); +id estRows task access object operator info +TableReader 8.00 root partition:all data:Selection +└─Selection 8.00 cop[tikv] ge(executor__partition__partition_with_expression.thash.a, 2) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where not 
(a < 2); +a b +10 NULL +2 1 +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +SELECT * from t where a + 1 > 4; +a b +10 NULL +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from trange where a + 1 > 4; +id estRows task access object operator info +TableReader 10.40 root partition:all data:Selection +└─Selection 10.40 cop[tikv] gt(plus(executor__partition__partition_with_expression.trange.a, 1), 4) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a + 1 > 4; +a b +10 NULL +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from thash where a + 1 > 4; +id estRows task access object operator info +TableReader 10.40 root partition:all data:Selection +└─Selection 10.40 cop[tikv] gt(plus(executor__partition__partition_with_expression.thash.a, 1), 4) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a + 1 > 4; +a b +10 NULL +4 3 +5 5 +6 7 +7 7 +7 7 +SELECT * from t where a - 1 > 0; +a b +10 NULL +2 1 +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from trange where a - 1 > 0; +id estRows task access object operator info +TableReader 10.40 root partition:all data:Selection +└─Selection 10.40 cop[tikv] gt(minus(executor__partition__partition_with_expression.trange.a, 1), 0) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a - 1 > 0; +a b +10 NULL +2 1 +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +explain format='brief' select * from thash where a - 1 > 0; +id estRows task access object operator info +TableReader 10.40 root partition:all data:Selection +└─Selection 10.40 cop[tikv] gt(minus(executor__partition__partition_with_expression.thash.a, 1), 0) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a - 1 > 0; +a b +10 NULL +2 1 +3 2 +4 3 +5 5 +6 7 +7 7 +7 7 +SELECT * from t where a * 2 < 0; +a b +explain format='brief' select * from trange where a * 2 < 0; +id estRows task access object operator info +TableReader 
10.40 root partition:all data:Selection +└─Selection 10.40 cop[tikv] lt(mul(executor__partition__partition_with_expression.trange.a, 2), 0) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a * 2 < 0; +a b +explain format='brief' select * from thash where a * 2 < 0; +id estRows task access object operator info +TableReader 10.40 root partition:all data:Selection +└─Selection 10.40 cop[tikv] lt(mul(executor__partition__partition_with_expression.thash.a, 2), 0) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a * 2 < 0; +a b +SELECT * from t where a << 1 < 0; +a b +explain format='brief' select * from trange where a << 1 < 0; +id estRows task access object operator info +TableReader 10.40 root partition:all data:Selection +└─Selection 10.40 cop[tikv] lt(leftshift(executor__partition__partition_with_expression.trange.a, 1), 0) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a << 1 < 0; +a b +explain format='brief' select * from thash where a << 1 < 0; +id estRows task access object operator info +TableReader 10.40 root partition:all data:Selection +└─Selection 10.40 cop[tikv] lt(leftshift(executor__partition__partition_with_expression.thash.a, 1), 0) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a << 1 < 0; +a b +SELECT * from t where a > '10'; +a b +explain format='brief' select * from trange where a > '10'; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, 10) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a > '10'; +a b +explain format='brief' select * from thash where a > '10'; +id estRows task access object operator info +TableReader 0.00 root partition:all data:Selection +└─Selection 0.00 cop[tikv] 
gt(executor__partition__partition_with_expression.thash.a, 10) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a > '10'; +a b +SELECT * from t where a > '10ab'; +a b +explain format='brief' select * from trange where a > '10ab'; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, 10) + └─TableFullScan 13.00 cop[tikv] table:trange keep order:false +SELECT * from trange where a > '10ab'; +a b +explain format='brief' select * from thash where a > '10ab'; +id estRows task access object operator info +TableReader 0.00 root partition:all data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, 10) + └─TableFullScan 13.00 cop[tikv] table:thash keep order:false +SELECT * from thash where a > '10ab'; +a b +set tidb_partition_prune_mode=default; diff --git a/tests/integrationtest/r/executor/partition/table.result b/tests/integrationtest/r/executor/partition/table.result new file mode 100644 index 0000000000000..99c99010a747a --- /dev/null +++ b/tests/integrationtest/r/executor/partition/table.result @@ -0,0 +1,545 @@ +set @@session.tidb_partition_prune_mode = DEFAULT; +show warnings; +Level Code Message +set @@global.tidb_partition_prune_mode = DEFAULT; +show warnings; +Level Code Message +Warning 1105 Please analyze all partition tables again for consistency between partition and global stats +select @@global.tidb_partition_prune_mode; +@@global.tidb_partition_prune_mode +dynamic +select @@session.tidb_partition_prune_mode; +@@session.tidb_partition_prune_mode +dynamic +set @@session.tidb_partition_prune_mode = "static"; +show warnings; +Level Code Message +set @@global.tidb_partition_prune_mode = "static"; +show warnings; +Level Code Message +select @@session.tidb_partition_prune_mode; +@@session.tidb_partition_prune_mode +static +show warnings; +Level 
Code Message +select @@global.tidb_partition_prune_mode; +@@global.tidb_partition_prune_mode +static +set @@session.tidb_partition_prune_mode = "dynamic"; +show warnings; +Level Code Message +Warning 1105 Please analyze all partition tables again for consistency between partition and global stats +Warning 1105 Please avoid setting partition prune mode to dynamic at session level and set partition prune mode to dynamic at global level +set @@global.tidb_partition_prune_mode = "dynamic"; +show warnings; +Level Code Message +Warning 1105 Please analyze all partition tables again for consistency between partition and global stats +select @@global.tidb_partition_prune_mode; +@@global.tidb_partition_prune_mode +dynamic +select @@session.tidb_partition_prune_mode; +@@session.tidb_partition_prune_mode +dynamic +set @@session.tidb_partition_prune_mode = DEFAULT; +set @@global.tidb_partition_prune_mode = DEFAULT; +drop table if exists pt; +create table pt (id int, c int, key i_id(id), key i_c(c)) partition by range (c) ( +partition p0 values less than (4), +partition p1 values less than (7), +partition p2 values less than (10)); +analyze table pt; +insert into pt values (0, 0), (2, 2), (4, 4), (6, 6), (7, 7), (9, 9), (null, null); +select * from pt; +id c +NULL NULL +0 0 +2 2 +4 4 +6 6 +7 7 +9 9 +select * from pt where c > 10; +id c +select * from pt where c > 8; +id c +9 9 +select * from pt where c < 2 or c >= 9; +id c +0 0 +9 9 +select c from pt; +c +NULL +0 +2 +4 +6 +7 +9 +select c from pt where c > 10; +c +select c from pt where c > 8; +c +9 +select c from pt where c < 2 or c >= 9; +c +0 +9 +select /*+ use_index(pt, i_id) */ * from pt; +id c +NULL NULL +0 0 +2 2 +4 4 +6 6 +7 7 +9 9 +select /*+ use_index(pt, i_id) */ * from pt where id < 4 and c > 10; +id c +select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c > 8; +id c +9 9 +select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c < 2 or c >= 9; +id c +0 0 +9 9 +set @@tidb_enable_index_merge = 1; 
+select /*+ use_index(i_c, i_id) */ * from pt where id = 4 or c < 7; +id c +0 0 +2 2 +4 4 +6 6 +set @@tidb_enable_index_merge = DEFAULT; +drop table if exists p, t; +create table p (id int, c int, key i_id(id), key i_c(c)) partition by range (c) ( +partition p0 values less than (4), +partition p1 values less than (7), +partition p2 values less than (10)); +create table t (id int); +insert into p values (3,3), (4,4), (6,6), (9,9); +insert into t values (4), (9); +select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id; +id c id +4 4 4 +9 9 9 +select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id; +id +4 +9 +drop table if exists p, t; +create table p (id int, c int, key i_id(id), key i_c(c)) partition by list (c) ( +partition p0 values in (1,2,3,4), +partition p1 values in (5,6,7), +partition p2 values in (8, 9,10)); +create table t (id int); +insert into p values (3,3), (4,4), (6,6), (9,9); +insert into t values (4), (9); +select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id; +id c id +4 4 4 +9 9 9 +select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id; +id +4 +9 +drop table if exists p, t; +create table p (id int, c int, key i_id(id), key i_c(c)) partition by hash(c) partitions 5; +create table t (id int); +insert into p values (3,3), (4,4), (6,6), (9,9); +insert into t values (4), (9); +select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id; +id c id +4 4 4 +9 9 9 +select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id; +id +4 +9 +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), primary key (c_int)) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue); +create table t2 (c_int int, c_str varchar(40), primary key (c_int, c_str)) partition by hash (c_int) partitions 4; +insert into t1 values (10, 'interesting neumann'); +insert into t2 select * from t1; +begin; +insert into t2 values (11, 'hopeful hoover'); +select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = 
t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11); +c_int c_str c_int c_str +10 interesting neumann 10 interesting neumann +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11); +c_int c_str c_int c_str +10 interesting neumann 10 interesting neumann +commit; +drop table if exists t; +create table t(c_int int); +insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9); +DROP TABLE IF EXISTS `t1`; +CREATE TABLE t1 ( +c_int int NOT NULL, +c_str varchar(40) NOT NULL, +c_datetime datetime NOT NULL, +c_timestamp timestamp NULL DEFAULT NULL, +c_double double DEFAULT NULL, +c_decimal decimal(12,6) DEFAULT NULL, +PRIMARY KEY (c_int,c_str,c_datetime) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci +PARTITION BY RANGE (c_int) +(PARTITION p0 VALUES LESS THAN (2) ENGINE = InnoDB, +PARTITION p1 VALUES LESS THAN (4) ENGINE = InnoDB, +PARTITION p2 VALUES LESS THAN (6) ENGINE = InnoDB, +PARTITION p3 VALUES LESS THAN (8) ENGINE = InnoDB, +PARTITION p4 VALUES LESS THAN (10) ENGINE = InnoDB, +PARTITION p5 VALUES LESS THAN (20) ENGINE = InnoDB, +PARTITION p6 VALUES LESS THAN (50) ENGINE = InnoDB, +PARTITION p7 VALUES LESS THAN (1000000000) ENGINE = InnoDB); +INSERT INTO `t1` VALUES (19,'nifty feistel','2020-02-28 04:01:28','2020-02-04 06:11:57',32.430079,1.284000),(20,'objective snyder','2020-04-15 17:55:04','2020-05-30 22:04:13',37.690874,9.372000); +begin; +insert into t1 values (22, 'wizardly saha', '2020-05-03 16:35:22', '2020-05-03 02:18:42', 96.534810, 0.088); +select c_int from t where (select min(t1.c_int) from t1 where t1.c_int > t.c_int) > (select count(*) from t1 where t1.c_int > t.c_int) order by c_int; +c_int +1 +2 +3 +4 +5 +6 +7 +8 +9 +rollback; +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int)); +create table t2 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int)) 
partition by hash (c_int) partitions 4; +insert into t1 values (1, 'romantic robinson', 4.436), (2, 'stoic chaplygin', 9.826), (3, 'vibrant shamir', 6.300), (4, 'hungry wilson', 4.900), (5, 'naughty swartz', 9.524); +insert into t2 select * from t1; +select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str); +c_int c_str c_decimal +1 romantic robinson 4.436000 +2 stoic chaplygin 9.826000 +3 vibrant shamir 6.300000 +4 hungry wilson 4.900000 +5 naughty swartz 9.524000 +set @@tidb_partition_prune_mode='static'; +select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str); +c_int c_str c_decimal +1 romantic robinson 4.436000 +2 stoic chaplygin 9.826000 +3 vibrant shamir 6.300000 +4 hungry wilson 4.900000 +5 naughty swartz 9.524000 +set @@tidb_partition_prune_mode=default; +drop table if exists coverage_rr, coverage_dt; +create table coverage_rr ( +pk1 varchar(35) NOT NULL, +pk2 int NOT NULL, +c int, +PRIMARY KEY (pk1,pk2)) partition by hash(pk2) partitions 4; +create table coverage_dt (pk1 varchar(35), pk2 int); +insert into coverage_rr values ('ios', 3, 2),('android', 4, 7),('linux',5,1); +insert into coverage_dt values ('apple',3),('ios',3),('linux',5); +set @@tidb_partition_prune_mode = 'dynamic'; +select /*+ INL_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2); +pk1 pk2 pk1 pk2 c +ios 3 ios 3 2 +linux 5 linux 5 1 +select /*+ INL_MERGE_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2); +pk1 pk2 pk1 pk2 c +ios 3 ios 3 2 +linux 5 linux 5 1 +set @@tidb_partition_prune_mode = default; +drop table if exists tunsigned_hash; +create table tunsigned_hash(a bigint unsigned primary key) partition by hash(a) partitions 6; +insert into tunsigned_hash values(25), (9279808998424041135); +select min(a) from tunsigned_hash; +min(a) 
+25 +select max(a) from tunsigned_hash; +max(a) +9279808998424041135 +drop table if exists t, t1; +create table t (id int not null, store_id int not null )partition by range (store_id)(partition p0 values less than (6),partition p1 values less than (11),partition p2 values less than (16),partition p3 values less than (21)); +create table t1(id int not null, store_id int not null); +insert into t values (1, 1); +insert into t values (2, 17); +insert into t1 values (0, 18); +alter table t exchange partition p3 with table t1; +alter table t add index idx(id); +analyze table t; +select *,_tidb_rowid from t use index(idx) order by id limit 2; +id store_id _tidb_rowid +0 18 1 +1 1 1 +drop table t, t1; +create table t (a int, b int, c int, key `idx_ac`(a, c), key `idx_bc`(b, c))partition by range (b)(partition p0 values less than (6),partition p1 values less than (11),partition p2 values less than (16),partition p3 values less than (21)); +create table t1 (a int, b int, c int, key `idx_ac`(a, c), key `idx_bc`(b, c)); +insert into t values (1,2,3), (2,3,4), (3,4,5); +insert into t1 values (1,18,3); +alter table t exchange partition p3 with table t1; +analyze table t; +select * from t where a = 1 or b = 5 order by c limit 2; +a b c +1 18 3 +1 2 3 +drop table if exists t; +CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,KEY `idx_b` (`b`)) PARTITION BY HASH (`a`) PARTITIONS 2; +insert into t values (2,-1,3), (3,2,2), (1,1,1); +select * from t use index(idx_b) order by b, _tidb_rowid limit 10; +a b c +2 -1 3 +1 1 1 +3 2 2 +analyze table t; +select * from t use index(idx_b) order by b, _tidb_rowid limit 10; +a b c +2 -1 3 +1 1 1 +3 2 2 +drop table if exists t; +CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,primary key(`a`),KEY `idx_b` (`b`)) PARTITION BY HASH (`a`) PARTITIONS 2; +insert into t values (2,-1,3), (3,2,2), (1,1,1); +select * from t use index(idx_b) order by b, a limit 10; +a b c +2 -1 
3 +1 1 1 +3 2 2 +analyze table t; +select * from t use index(idx_b) order by b, a limit 10; +a b c +2 -1 3 +1 1 1 +3 2 2 +drop table if exists t; +CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,KEY `idx_b` (`b`),KEY `idx_c` (`c`)) PARTITION BY HASH (`a`) PARTITIONS 2; +insert into t values (2,-1,3), (3,2,2), (1,1,1); +select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by _tidb_rowid limit 10; +a b c +3 2 2 +1 1 1 +analyze table t; +select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by _tidb_rowid limit 10; +a b c +3 2 2 +1 1 1 +drop table if exists t; +CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,KEY `idx_b` (`b`),KEY `idx_c` (`c`),PRIMARY KEY (`a`)) PARTITION BY HASH (`a`) PARTITIONS 2; +insert into t values (2,-1,3), (3,2,2), (1,1,1); +select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by a limit 10; +a b c +1 1 1 +3 2 2 +analyze table t; +select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by a limit 10; +a b c +1 1 1 +3 2 2 +drop table if exists trange, thash; +create table trange(a int, b int, primary key(a) clustered, index idx_b(b)) partition by range(a) ( +partition p0 values less than(300), +partition p1 values less than(500), +partition p2 values less than(1100)); +create table thash(a int, b int, primary key(a) clustered, index idx_b(b)) partition by hash(a) partitions 4; +analyze table thash, trange; +explain format='brief' select * from trange where a>400; +id estRows task access object operator info +TableReader 3333.33 root partition:p1,p2 data:TableRangeScan +└─TableRangeScan 3333.33 cop[tikv] table:trange range:(400,+inf], keep order:false, stats:pseudo +explain format='brief' select * from thash where a>=100; +id estRows task access object operator info +TableReader 3333.33 root partition:all data:TableRangeScan +└─TableRangeScan 3333.33 cop[tikv] table:thash range:[100,+inf], keep order:false, 
stats:pseudo +drop table if exists t; +set @@tidb_partition_prune_mode = 'dynamic'; +create table t(a int) partition by range(a) ( +partition p0 values less than (5), +partition p1 values less than (10), +partition p2 values less than (15)); +insert into t values (2), (7), (12); +analyze table t; +explain format='brief' select * from t where a < 3; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] lt(executor__partition__table.t.a, 3) + └─TableFullScan 3.00 cop[tikv] table:t keep order:false +select * from t where a < 3; +a +2 +explain format='brief' select * from t where a < 8; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] lt(executor__partition__table.t.a, 8) + └─TableFullScan 3.00 cop[tikv] table:t keep order:false +select * from t where a < 8; +a +2 +7 +explain format='brief' select * from t where a < 20; +id estRows task access object operator info +TableReader 3.00 root partition:all data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__table.t.a, 20) + └─TableFullScan 3.00 cop[tikv] table:t keep order:false +select * from t where a < 20; +a +12 +2 +7 +alter table t drop partition p0; +explain format='brief' select * from t where a < 3; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] lt(executor__partition__table.t.a, 3) + └─TableFullScan 3.00 cop[tikv] table:t keep order:false +select * from t where a < 3; +a +explain format='brief' select * from t where a < 8; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] lt(executor__partition__table.t.a, 8) + └─TableFullScan 3.00 cop[tikv] table:t keep order:false +select * from t where a < 8; +a +7 +explain format='brief' select * from t where a < 20; +id estRows task access object operator info 
+TableReader 3.00 root partition:all data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__table.t.a, 20) + └─TableFullScan 3.00 cop[tikv] table:t keep order:false +select * from t where a < 20; +a +12 +7 +alter table t add partition (partition p3 values less than (20)); +alter table t add partition (partition p4 values less than (40)); +insert into t values (15), (25); +explain format='brief' select * from t where a < 3; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] lt(executor__partition__table.t.a, 3) + └─TableFullScan 3.00 cop[tikv] table:t keep order:false +select * from t where a < 3; +a +explain format='brief' select * from t where a < 8; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] lt(executor__partition__table.t.a, 8) + └─TableFullScan 3.00 cop[tikv] table:t keep order:false +select * from t where a < 8; +a +7 +explain format='brief' select * from t where a < 20; +id estRows task access object operator info +TableReader 3.00 root partition:p1,p2,p3 data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__table.t.a, 20) + └─TableFullScan 3.00 cop[tikv] table:t keep order:false +select * from t where a < 20; +a +12 +15 +7 +drop table if exists t; +create table t(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11)); +analyze table t; +set @@tidb_partition_prune_mode = 'static'; +begin; +explain format='brief' select * from t; +id estRows task access object operator info +PartitionUnion 30000.00 root +├─TableReader 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t, partition:p0 keep order:false, stats:pseudo +├─TableReader 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t, partition:p1 keep order:false, stats:pseudo +└─TableReader 10000.00 
root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t, partition:p2 keep order:false, stats:pseudo +select * from t; +a b +explain format='brief' select * from t where a > 3; +id estRows task access object operator info +PartitionUnion 6666.67 root +├─TableReader 3333.33 root data:Selection +│ └─Selection 3333.33 cop[tikv] gt(executor__partition__table.t.a, 3) +│ └─TableFullScan 10000.00 cop[tikv] table:t, partition:p1 keep order:false, stats:pseudo +└─TableReader 3333.33 root data:Selection + └─Selection 3333.33 cop[tikv] gt(executor__partition__table.t.a, 3) + └─TableFullScan 10000.00 cop[tikv] table:t, partition:p2 keep order:false, stats:pseudo +select * from t where a > 3; +a b +explain format='brief' select * from t where a > 7; +id estRows task access object operator info +TableReader 3333.33 root data:Selection +└─Selection 3333.33 cop[tikv] gt(executor__partition__table.t.a, 7) + └─TableFullScan 10000.00 cop[tikv] table:t, partition:p2 keep order:false, stats:pseudo +select * from t where a > 7; +a b +rollback; +set @@tidb_partition_prune_mode = 'dynamic'; +begin; +explain format='brief' select * from t; +id estRows task access object operator info +TableReader 10000.00 root partition:all data:TableFullScan +└─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select * from t; +a b +explain format='brief' select * from t where a > 3; +id estRows task access object operator info +TableReader 3333.33 root partition:p1,p2 data:Selection +└─Selection 3333.33 cop[tikv] gt(executor__partition__table.t.a, 3) + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select * from t where a > 3; +a b +explain format='brief' select * from t where a > 7; +id estRows task access object operator info +TableReader 3333.33 root partition:p2 data:Selection +└─Selection 3333.33 cop[tikv] gt(executor__partition__table.t.a, 7) + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select * from t 
where a > 7; +a b +rollback; +set @@tidb_partition_prune_mode = default; +drop table if exists tt1, tt2; +set global tidb_partition_prune_mode='dynamic'; +set session tidb_partition_prune_mode='dynamic'; +CREATE TABLE tt1 ( +id INT NOT NULL, +listid INT, +name varchar(10), +primary key (listid) clustered +) +PARTITION BY LIST (listid) ( +PARTITION p1 VALUES IN (1), +PARTITION p2 VALUES IN (2), +PARTITION p3 VALUES IN (3), +PARTITION p4 VALUES IN (4) +); +CREATE TABLE tt2 ( +id INT NOT NULL, +listid INT +); +create index idx_listid on tt1(id,listid); +create index idx_listid on tt2(listid); +insert into tt1 values(1,1,1); +insert into tt1 values(2,2,2); +insert into tt1 values(3,3,3); +insert into tt1 values(4,4,4); +insert into tt2 values(1,1); +insert into tt2 values(2,2); +insert into tt2 values(3,3); +insert into tt2 values(4,4); +insert into tt2 values(5,5); +analyze table tt1; +analyze table tt2; +select /*+ inl_join(tt1)*/ count(*) from tt2 +left join tt1 on tt1.listid=tt2.listid and tt1.id=tt2.id; +count(*) +5 +select /*+ inl_join(tt1)*/ count(*) from tt2 +left join tt1 on tt1.listid=tt2.listid; +count(*) +5 +explain format = 'brief' select /*+ inl_join(tt1)*/ count(*) from tt2 +left join tt1 on tt1.listid=tt2.listid; +id estRows task access object operator info +StreamAgg 1.00 root funcs:count(Column#13)->Column#7 +└─IndexReader 1.00 root index:StreamAgg + └─StreamAgg 1.00 cop[tikv] funcs:count(1)->Column#13 + └─IndexFullScan 5.00 cop[tikv] table:tt2, index:idx_listid(listid) keep order:false +set global tidb_partition_prune_mode=default; +set session tidb_partition_prune_mode=default; diff --git a/tests/integrationtest/t/executor/partition/issues.test b/tests/integrationtest/t/executor/partition/issues.test new file mode 100644 index 0000000000000..5ffd753115768 --- /dev/null +++ b/tests/integrationtest/t/executor/partition/issues.test @@ -0,0 +1,255 @@ +# TestIssue25527 +drop table if exists t, t0, t1, t2; +set @@tidb_partition_prune_mode = 'dynamic'; 
+set @@session.tidb_enable_list_partition = ON; +CREATE TABLE t ( + col1 tinyint(4) primary key +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( COL1 DIV 80 ) +PARTITIONS 6; +insert into t values(-128), (107); +prepare stmt from 'select col1 from t where col1 in (?, ?, ?)'; +set @a=-128, @b=107, @c=-128; +--sorted_result +execute stmt using @a,@b,@c; +CREATE TABLE t0 (a int primary key) PARTITION BY HASH( a DIV 80 ) PARTITIONS 2; +insert into t0 values (1); +select a from t0 where a in (1); +create table t1 (a int primary key) partition by range (a+5) ( + partition p0 values less than(10), partition p1 values less than(20)); +insert into t1 values (5); +select a from t1 where a in (5); +create table t2 (a int primary key) partition by list (a+5) ( + partition p0 values in (5, 6, 7, 8), partition p1 values in (9, 10, 11, 12)); +insert into t2 values (5); +select a from t2 where a in (5); +set @@tidb_partition_prune_mode = default; +set @@session.tidb_enable_list_partition = default; + +# TestIssue25598 +drop table if exists UK_HP16726; +CREATE TABLE UK_HP16726 ( + COL1 bigint(16) DEFAULT NULL, + COL2 varchar(20) DEFAULT NULL, + COL4 datetime DEFAULT NULL, + COL3 bigint(20) DEFAULT NULL, + COL5 float DEFAULT NULL, + UNIQUE KEY UK_COL1 (COL1) /*!80000 INVISIBLE */ + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin + PARTITION BY HASH( COL1 ) + PARTITIONS 25; +select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807; +explain format='brief' select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807; +set @@tidb_partition_prune_mode = 'dynamic'; +analyze table UK_HP16726; +select t1. col1, t2. 
col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807; +explain format='brief' select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807; +set @@tidb_partition_prune_mode = default; + +# TestIssue25253 +drop table if exists IDT_HP23902, t; +CREATE TABLE IDT_HP23902 ( + COL1 smallint DEFAULT NULL, + COL2 varchar(20) DEFAULT NULL, + COL4 datetime DEFAULT NULL, + COL3 bigint DEFAULT NULL, + COL5 float DEFAULT NULL, + KEY UK_COL1 (COL1) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY HASH( COL1+30 ) +PARTITIONS 6; +insert ignore into IDT_HP23902 partition(p0, p1)(col1, col3) values(-10355, 1930590137900568573), (13810, -1332233145730692137); +show warnings; +select * from IDT_HP23902; +create table t ( + a int +) partition by range(a) ( + partition p0 values less than (10), + partition p1 values less than (20)); +insert ignore into t partition(p0)(a) values(12); +show warnings; +select * from t; + +# TestIssue25030 +drop table if exists tbl_936; +set @@tidb_partition_prune_mode = 'dynamic'; +CREATE TABLE tbl_936 ( + col_5410 smallint NOT NULL, + col_5411 double, + col_5412 boolean NOT NULL DEFAULT 1, + col_5413 set('Alice', 'Bob', 'Charlie', 'David') NOT NULL DEFAULT 'Charlie', + col_5414 varbinary(147) COLLATE 'binary' DEFAULT 'bvpKgYWLfyuTiOYSkj', + col_5415 timestamp NOT NULL DEFAULT '2021-07-06', + col_5416 decimal(6, 6) DEFAULT 0.49, + col_5417 text COLLATE utf8_bin, + col_5418 float DEFAULT 2048.0762299371554, + col_5419 int UNSIGNED NOT NULL DEFAULT 3152326370, + PRIMARY KEY (col_5419) ) + PARTITION BY HASH (col_5419) PARTITIONS 3; +SELECT last_value(col_5414) OVER w FROM tbl_936 + WINDOW w AS (ORDER BY col_5410, col_5411, col_5412, col_5413, col_5414, col_5415, col_5416, 
col_5417, col_5418, col_5419) + ORDER BY col_5410, col_5411, col_5412, col_5413, col_5414, col_5415, col_5416, col_5417, col_5418, col_5419, nth_value(col_5412, 5) OVER w; +set @@tidb_partition_prune_mode = default; + +# TestIssue24636 +drop table if exists t; +CREATE TABLE t (a int, b date, c int, PRIMARY KEY (a,b)) +PARTITION BY RANGE ( TO_DAYS(b) ) ( + PARTITION p0 VALUES LESS THAN (737821), + PARTITION p1 VALUES LESS THAN (738289) +); +INSERT INTO t (a, b, c) VALUES(0, '2021-05-05', 0); +select c from t use index(primary) where a=0 limit 1; +CREATE TABLE test_partition ( + a varchar(100) NOT NULL, + b date NOT NULL, + c varchar(100) NOT NULL, + d datetime DEFAULT NULL, + e datetime DEFAULT NULL, + f bigint(20) DEFAULT NULL, + g bigint(20) DEFAULT NULL, + h bigint(20) DEFAULT NULL, + i bigint(20) DEFAULT NULL, + j bigint(20) DEFAULT NULL, + k bigint(20) DEFAULT NULL, + l bigint(20) DEFAULT NULL, + PRIMARY KEY (a,b,c) /*T![clustered_index] NONCLUSTERED */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY RANGE ( TO_DAYS(b) ) ( + PARTITION pmin VALUES LESS THAN (737821), + PARTITION p20200601 VALUES LESS THAN (738289)); +INSERT INTO test_partition (a, b, c, d, e, f, g, h, i, j, k, l) VALUES('aaa', '2021-05-05', '428ff6a1-bb37-42ac-9883-33d7a29961e6', '2021-05-06 08:13:38', '2021-05-06 13:28:08', 0, 8, 3, 0, 9, 1, 0); +select c,j,l from test_partition where c='428ff6a1-bb37-42ac-9883-33d7a29961e6' and a='aaa' limit 0, 200; + +# TestIssue25309 +drop table if exists tbl_500, tbl_600; +set @@tidb_partition_prune_mode = 'dynamic'; +CREATE TABLE tbl_500 ( + col_20 tinyint(4) NOT NULL, + col_21 varchar(399) CHARACTER SET utf8 COLLATE utf8_unicode_ci DEFAULT NULL, + col_22 json DEFAULT NULL, + col_23 blob DEFAULT NULL, + col_24 mediumint(9) NOT NULL, + col_25 float NOT NULL DEFAULT '7306.384497585912', + col_26 binary(196) NOT NULL, + col_27 timestamp DEFAULT '1976-12-08 00:00:00', + col_28 bigint(20) NOT NULL, + col_29 tinyint(1) NOT NULL DEFAULT 
'1', + PRIMARY KEY (col_29,col_20) /*T![clustered_index] NONCLUSTERED */, + KEY idx_7 (col_28,col_20,col_26,col_27,col_21,col_24), + KEY idx_8 (col_25,col_29,col_24) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +CREATE TABLE tbl_600 ( + col_60 int(11) NOT NULL DEFAULT '-776833487', + col_61 tinyint(1) NOT NULL DEFAULT '1', + col_62 tinyint(4) NOT NULL DEFAULT '-125', + PRIMARY KEY (col_62,col_60,col_61) /*T![clustered_index] NONCLUSTERED */, + KEY idx_19 (col_60) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci + PARTITION BY HASH( col_60 ) + PARTITIONS 1; +insert into tbl_500 select -34, 'lrfGPPPUuZjtT', '{"obj1": {"sub_obj0": 100}}', 0x6C47636D, 1325624, 7306.3843, 'abc', '1976-12-08', 4757891479624162031, 0; +select tbl_5.* from tbl_500 tbl_5 where col_24 in ( select col_62 from tbl_600 where tbl_5.col_26 < 'hSvHLdQeGBNIyOFXStV' ); +set @@tidb_partition_prune_mode = default; + +# TestIssue20028 +drop table if exists t1, t2; +set @@tidb_partition_prune_mode='static-only'; +create table t1 (c_datetime datetime, primary key (c_datetime)) +partition by range (to_days(c_datetime)) ( partition p0 values less than (to_days('2020-02-01')), +partition p1 values less than (to_days('2020-04-01')), +partition p2 values less than (to_days('2020-06-01')), +partition p3 values less than maxvalue); +create table t2 (c_datetime datetime, unique key(c_datetime)); +insert into t1 values ('2020-06-26 03:24:00'), ('2020-02-21 07:15:33'), ('2020-04-27 13:50:58'); +insert into t2 values ('2020-01-10 09:36:00'), ('2020-02-04 06:00:00'), ('2020-06-12 03:45:18'); +begin; +--sorted_result +select * from t1 join t2 on t1.c_datetime >= t2.c_datetime for update; +rollback; +set @@tidb_partition_prune_mode = default; + +# TestIssue21731 +drop table if exists p, t; +set @@tidb_enable_list_partition = OFF; +create table t (a int, b int, unique index idx(a)) partition by list columns(b) (partition p0 values in (1), partition p1 values in (2)); +set 
@@tidb_enable_list_partition = default; + +# TestIssue25528 +drop table if exists issue25528; +set @@tidb_partition_prune_mode = 'static'; +create table issue25528 (id int primary key, balance DECIMAL(10, 2), balance2 DECIMAL(10, 2) GENERATED ALWAYS AS (-balance) VIRTUAL, created_at TIMESTAMP) PARTITION BY HASH(id) PARTITIONS 8; +insert into issue25528 (id, balance, created_at) values(1, 100, '2021-06-17 22:35:20'); +begin pessimistic; +select * from issue25528 where id = 1 for update; +drop table if exists issue25528; +CREATE TABLE `issue25528` ( `c1` int(11) NOT NULL, `c2` int(11) DEFAULT NULL, `c3` int(11) DEFAULT NULL, `c4` int(11) DEFAULT NULL, PRIMARY KEY (`c1`) /*T![clustered_index] CLUSTERED */, KEY `k2` (`c2`), KEY `k3` (`c3`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( `c1` ) PARTITIONS 10; +INSERT INTO issue25528 (`c1`, `c2`, `c3`, `c4`) VALUES (1, 1, 1, 1) , (3, 3, 3, 3) , (2, 2, 2, 2) , (4, 4, 4, 4); +select * from issue25528 where c1 in (3, 4) order by c2 for update; +rollback; +set @@tidb_enable_list_partition = default; + +# TestIssue27346 +set @@tidb_enable_index_merge=1,@@tidb_partition_prune_mode='dynamic'; +DROP TABLE IF EXISTS `tbl_18`; +CREATE TABLE `tbl_18` (`col_119` binary(16) NOT NULL DEFAULT 'skPoKiwYUi',`col_120` int(10) unsigned NOT NULL,`col_121` timestamp NOT NULL,`col_122` double NOT NULL DEFAULT '3937.1887880628115',`col_123` bigint(20) NOT NULL DEFAULT '3550098074891542725',PRIMARY KEY (`col_123`,`col_121`,`col_122`,`col_120`) CLUSTERED,UNIQUE KEY `idx_103` (`col_123`,`col_119`,`col_120`),UNIQUE KEY `idx_104` (`col_122`,`col_120`),UNIQUE KEY `idx_105` (`col_119`,`col_120`),KEY `idx_106` (`col_121`,`col_120`,`col_122`,`col_119`),KEY `idx_107` (`col_121`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci PARTITION BY HASH( `col_120` ) PARTITIONS 3; +INSERT INTO tbl_18 (`col_119`, `col_120`, `col_121`, `col_122`, `col_123`) VALUES (X'736b506f4b6977595569000000000000', 672436701, '1974-02-24 
00:00:00', 3937.1887880628115e0, -7373106839136381229), (X'736b506f4b6977595569000000000000', 2637316689, '1993-10-29 00:00:00', 3937.1887880628115e0, -4522626077860026631), (X'736b506f4b6977595569000000000000', 831809724, '1995-11-20 00:00:00', 3937.1887880628115e0, -4426441253940231780), (X'736b506f4b6977595569000000000000', 1588592628, '2001-03-28 00:00:00', 3937.1887880628115e0, 1329207475772244999), (X'736b506f4b6977595569000000000000', 3908038471, '2031-06-06 00:00:00', 3937.1887880628115e0, -6562815696723135786), (X'736b506f4b6977595569000000000000', 1674237178, '2001-10-24 00:00:00', 3937.1887880628115e0, -6459065549188938772), (X'736b506f4b6977595569000000000000', 3507075493, '2010-03-25 00:00:00', 3937.1887880628115e0, -4329597025765326929), (X'736b506f4b6977595569000000000000', 1276461709, '2019-07-20 00:00:00', 3937.1887880628115e0, 3550098074891542725); +--sorted_result +select col_120,col_122,col_123 from tbl_18 where tbl_18.col_122 = 4763.320888074281 and not( tbl_18.col_121 in ( '2032-11-01' , '1975-05-21' , '1994-05-16' , '1984-01-15' ) ) or not( tbl_18.col_121 >= '2008-10-24' ) order by tbl_18.col_119,tbl_18.col_120,tbl_18.col_121,tbl_18.col_122,tbl_18.col_123 limit 919 for update; +--sorted_result +select /*+ use_index_merge( tbl_18 ) */ col_120,col_122,col_123 from tbl_18 where tbl_18.col_122 = 4763.320888074281 and not( tbl_18.col_121 in ( '2032-11-01' , '1975-05-21' , '1994-05-16' , '1984-01-15' ) ) or not( tbl_18.col_121 >= '2008-10-24' ) order by tbl_18.col_119,tbl_18.col_120,tbl_18.col_121,tbl_18.col_122,tbl_18.col_123 limit 919 for update; +set @@tidb_enable_index_merge=default,@@tidb_partition_prune_mode=default; + +# TestIssue35181 +drop table if exists t; +CREATE TABLE `t` (`a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL) PARTITION BY RANGE (`a`) (PARTITION `p0` VALUES LESS THAN (2021), PARTITION `p1` VALUES LESS THAN (3000)); +set @@tidb_partition_prune_mode = 'static'; +insert into t select * from t where a=3000; +set 
@@tidb_partition_prune_mode = 'dynamic'; +insert into t select * from t where a=3000; +set @@tidb_partition_prune_mode = default; + +# TestIssue39999 +set @@tidb_opt_advanced_join_hint=0; +drop table if exists c, t; +CREATE TABLE `c` (`serial_id` varchar(24),`occur_trade_date` date,`txt_account_id` varchar(24),`capital_sub_class` varchar(10),`occur_amount` decimal(16,2),`broker` varchar(10),PRIMARY KEY (`txt_account_id`,`occur_trade_date`,`serial_id`) /*T![clustered_index] CLUSTERED */,KEY `idx_serial_id` (`serial_id`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci PARTITION BY RANGE COLUMNS(`serial_id`) (PARTITION `p202209` VALUES LESS THAN ('20221001'),PARTITION `p202210` VALUES LESS THAN ('20221101'),PARTITION `p202211` VALUES LESS THAN ('20221201')); +CREATE TABLE `t` ( `txn_account_id` varchar(24), `account_id` varchar(32), `broker` varchar(10), PRIMARY KEY (`txn_account_id`) /*T![clustered_index] CLUSTERED */ ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; +INSERT INTO `c` (serial_id, txt_account_id, capital_sub_class, occur_trade_date, occur_amount, broker) VALUES ('2022111700196920','04482786','CUST','2022-11-17',-2.01,'0009'); +INSERT INTO `t` VALUES ('04482786','1142927','0009'); +set tidb_partition_prune_mode='dynamic'; +analyze table c; +analyze table t; +explain select + /*+ inl_join(c) */ + c.occur_amount +from + c + join t on c.txt_account_id = t.txn_account_id + and t.broker = '0009' + and c.occur_trade_date = '2022-11-17'; +select + /*+ inl_join(c) */ + c.occur_amount +from + c + join t on c.txt_account_id = t.txn_account_id + and t.broker = '0009' + and c.occur_trade_date = '2022-11-17'; +alter table t add column serial_id varchar(24) default '2022111700196920'; +select + /*+ inl_join(c) */ + c.occur_amount +from + c + join t on c.txt_account_id = t.txn_account_id + and t.broker = '0009' + and c.occur_trade_date = '2022-11-17' and c.serial_id = t.serial_id; +explain select + /*+ inl_join(c) */ + c.occur_amount +from + c 
+ join t on c.txt_account_id = t.txn_account_id + and t.broker = '0009' + and c.occur_trade_date = '2022-11-17' and c.serial_id = t.serial_id; +set @@tidb_opt_advanced_join_hint=default; +set tidb_partition_prune_mode=default; diff --git a/tests/integrationtest/t/executor/partition/partition_boundaries.test b/tests/integrationtest/t/executor/partition/partition_boundaries.test new file mode 100644 index 0000000000000..7194aac64c3ab --- /dev/null +++ b/tests/integrationtest/t/executor/partition/partition_boundaries.test @@ -0,0 +1,1577 @@ +# TestRangePartitionBoundariesEq +SET @@tidb_partition_prune_mode = 'dynamic'; +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1000000), + PARTITION p1 VALUES LESS THAN (2000000), + PARTITION p2 VALUES LESS THAN (3000000)); +INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...'); +INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...'); +INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...'); +INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a = -2147483648; +--sorted_result +SELECT * FROM t WHERE a = -2147483648; +explain format='brief' SELECT * FROM t WHERE a IN (-2147483648); +--sorted_result +SELECT * FROM t WHERE a IN (-2147483648); +explain format='brief' SELECT * FROM t WHERE a = 0; +--sorted_result +SELECT * FROM t WHERE a = 0; +explain format='brief' SELECT * FROM t WHERE a IN (0); +--sorted_result +SELECT * FROM t WHERE a IN (0); +explain format='brief' SELECT * FROM t WHERE a = 999998; +--sorted_result +SELECT * FROM t WHERE a = 999998; +explain format='brief' SELECT * FROM 
t WHERE a IN (999998); +--sorted_result +SELECT * FROM t WHERE a IN (999998); +explain format='brief' SELECT * FROM t WHERE a = 999999; +--sorted_result +SELECT * FROM t WHERE a = 999999; +explain format='brief' SELECT * FROM t WHERE a IN (999999); +--sorted_result +SELECT * FROM t WHERE a IN (999999); +explain format='brief' SELECT * FROM t WHERE a = 1000000; +--sorted_result +SELECT * FROM t WHERE a = 1000000; +explain format='brief' SELECT * FROM t WHERE a IN (1000000); +--sorted_result +SELECT * FROM t WHERE a IN (1000000); +explain format='brief' SELECT * FROM t WHERE a = 1000001; +--sorted_result +SELECT * FROM t WHERE a = 1000001; +explain format='brief' SELECT * FROM t WHERE a IN (1000001); +--sorted_result +SELECT * FROM t WHERE a IN (1000001); +explain format='brief' SELECT * FROM t WHERE a = 1000002; +--sorted_result +SELECT * FROM t WHERE a = 1000002; +explain format='brief' SELECT * FROM t WHERE a IN (1000002); +--sorted_result +SELECT * FROM t WHERE a IN (1000002); +explain format='brief' SELECT * FROM t WHERE a = 3000000; +--sorted_result +SELECT * FROM t WHERE a = 3000000; +explain format='brief' SELECT * FROM t WHERE a IN (3000000); +--sorted_result +SELECT * FROM t WHERE a IN (3000000); +explain format='brief' SELECT * FROM t WHERE a = 3000001; +--sorted_result +SELECT * FROM t WHERE a = 3000001; +explain format='brief' SELECT * FROM t WHERE a IN (3000001); +--sorted_result +SELECT * FROM t WHERE a IN (3000001); +explain format='brief' SELECT * FROM t WHERE a IN (-2147483648, -2147483647); +--sorted_result +SELECT * FROM t WHERE a IN (-2147483648, -2147483647); +explain format='brief' SELECT * FROM t WHERE a IN (-2147483647, -2147483646); +--sorted_result +SELECT * FROM t WHERE a IN (-2147483647, -2147483646); +explain format='brief' SELECT * FROM t WHERE a IN (999997, 999998, 999999); +--sorted_result +SELECT * FROM t WHERE a IN (999997, 999998, 999999); +explain format='brief' SELECT * FROM t WHERE a IN (999998, 999999, 1000000); 
+--sorted_result +SELECT * FROM t WHERE a IN (999998, 999999, 1000000); +explain format='brief' SELECT * FROM t WHERE a IN (999999, 1000000, 1000001); +--sorted_result +SELECT * FROM t WHERE a IN (999999, 1000000, 1000001); +explain format='brief' SELECT * FROM t WHERE a IN (1000000, 1000001, 1000002); +--sorted_result +SELECT * FROM t WHERE a IN (1000000, 1000001, 1000002); +explain format='brief' SELECT * FROM t WHERE a IN (1999997, 1999998, 1999999); +--sorted_result +SELECT * FROM t WHERE a IN (1999997, 1999998, 1999999); +explain format='brief' SELECT * FROM t WHERE a IN (1999998, 1999999, 2000000); +--sorted_result +SELECT * FROM t WHERE a IN (1999998, 1999999, 2000000); +explain format='brief' SELECT * FROM t WHERE a IN (1999999, 2000000, 2000001); +--sorted_result +SELECT * FROM t WHERE a IN (1999999, 2000000, 2000001); +explain format='brief' SELECT * FROM t WHERE a IN (2000000, 2000001, 2000002); +--sorted_result +SELECT * FROM t WHERE a IN (2000000, 2000001, 2000002); +explain format='brief' SELECT * FROM t WHERE a IN (2999997, 2999998, 2999999); +--sorted_result +SELECT * FROM t WHERE a IN (2999997, 2999998, 2999999); +explain format='brief' SELECT * FROM t WHERE a IN (2999998, 2999999, 3000000); +--sorted_result +SELECT * FROM t WHERE a IN (2999998, 2999999, 3000000); +explain format='brief' SELECT * FROM t WHERE a IN (2999999, 3000000, 3000001); +--sorted_result +SELECT * FROM t WHERE a IN (2999999, 3000000, 3000001); +explain format='brief' SELECT * FROM t WHERE a IN (3000000, 3000001, 3000002); +--sorted_result +SELECT * FROM t WHERE a IN (3000000, 3000001, 3000002); +SET @@tidb_partition_prune_mode = default; + +# TestRangePartitionBoundariesNe +SET @@tidb_partition_prune_mode = 'dynamic'; +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1), + PARTITION p1 VALUES LESS THAN (2), + PARTITION p2 VALUES LESS THAN (3), + PARTITION p3 VALUES LESS THAN (4), + PARTITION p4 VALUES 
LESS THAN (5), + PARTITION p5 VALUES LESS THAN (6), + PARTITION p6 VALUES LESS THAN (7)); +INSERT INTO t VALUES (0, '0 Filler...'); +INSERT INTO t VALUES (1, '1 Filler...'); +INSERT INTO t VALUES (2, '2 Filler...'); +INSERT INTO t VALUES (3, '3 Filler...'); +INSERT INTO t VALUES (4, '4 Filler...'); +INSERT INTO t VALUES (5, '5 Filler...'); +INSERT INTO t VALUES (6, '6 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a != -1; +--sorted_result +SELECT * FROM t WHERE a != -1; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1; +explain format='brief' SELECT * FROM t WHERE a != 0; +--sorted_result +SELECT * FROM t WHERE a != 0; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0; +explain format='brief' SELECT * FROM t WHERE a != 1; +--sorted_result +SELECT * FROM t WHERE a != 1; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1; +explain format='brief' SELECT * FROM t WHERE a 
!= 2; +--sorted_result +SELECT * FROM t WHERE a != 2; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2; +explain format='brief' SELECT * FROM t WHERE a != 3; +--sorted_result +SELECT * FROM t WHERE a != 3; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3; +explain format='brief' SELECT * FROM t WHERE a != 4; +--sorted_result +SELECT * FROM t WHERE a != 4; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4; +explain format='brief' SELECT * FROM t WHERE a != 5; +--sorted_result +SELECT * FROM 
t WHERE a != 5; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5; +explain format='brief' SELECT * FROM t WHERE a != 6; +--sorted_result +SELECT * FROM t WHERE a != 6; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6; +explain format='brief' SELECT * FROM t WHERE a != 7; +--sorted_result +SELECT * FROM t WHERE a != 7; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6 AND a != 7; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6 AND a != 7; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6, 7); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, 
-1, 0, 1, 2, 3, 4, 5, 6, 7); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7; +SET @@tidb_partition_prune_mode = default; + +# TestRangePartitionBoundariesBetweenM +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1000000), + PARTITION p1 VALUES LESS THAN (2000000), + PARTITION p2 VALUES LESS THAN (3000000)); +INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...'); +INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...'); +INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...'); +INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638; +explain format='brief' 
SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND -1; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND -1; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 0; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 0; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 1; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 2; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 2; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 10; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 10; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 1000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 1000001; +explain format='brief' SELECT * FROM t 
WHERE a BETWEEN 0 AND 1000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 1000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 999997; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 999997; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000; +explain 
format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999; +--sorted_result 
+SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003; +explain 
format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002; 
+--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000; 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003; + +# TestRangePartitionBoundariesBetweenS +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1), + PARTITION p1 VALUES LESS THAN (2), + PARTITION p2 VALUES LESS THAN (3), + PARTITION p3 VALUES LESS THAN (4), + PARTITION p4 VALUES LESS THAN (5), + PARTITION p5 VALUES LESS THAN (6), + PARTITION p6 VALUES LESS THAN (7)); +INSERT INTO t VALUES (0, '0 Filler...'); +INSERT INTO t VALUES (1, '1 Filler...'); +INSERT INTO t VALUES (2, '2 Filler...'); +INSERT INTO t VALUES (3, '3 Filler...'); +INSERT INTO t VALUES (4, '4 Filler...'); +INSERT INTO t VALUES (5, '5 Filler...'); +INSERT INTO t VALUES (6, '6 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND -1; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND -1; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -1 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -1 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 0; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 0; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 1; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 1; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 2; +--sorted_result +SELECT * FROM t 
WHERE a BETWEEN 2 AND 2; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 3; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 3; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 4 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 4 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 5; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 5; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 5 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 5 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 6; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 6; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 6 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 6 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 7; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 7; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 7 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 7 AND 4; + +# TestRangePartitionBoundariesLtM +set @@tidb_partition_prune_mode = 'dynamic'; +drop table if exists t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1000000), + PARTITION p1 VALUES LESS THAN (2000000), + PARTITION p2 VALUES LESS THAN (3000000)); +INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...'); +INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), 
(2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...'); +INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...'); +INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a < -2147483648; +--sorted_result +SELECT * FROM t WHERE a < -2147483648; +explain format='brief' SELECT * FROM t WHERE a > -2147483648; +--sorted_result +SELECT * FROM t WHERE a > -2147483648; +explain format='brief' SELECT * FROM t WHERE a <= -2147483648; +--sorted_result +SELECT * FROM t WHERE a <= -2147483648; +explain format='brief' SELECT * FROM t WHERE a >= -2147483648; +--sorted_result +SELECT * FROM t WHERE a >= -2147483648; +explain format='brief' SELECT * FROM t WHERE a < 0; +--sorted_result +SELECT * FROM t WHERE a < 0; +explain format='brief' SELECT * FROM t WHERE a > 0; +--sorted_result +SELECT * FROM t WHERE a > 0; +explain format='brief' SELECT * FROM t WHERE a <= 0; +--sorted_result +SELECT * FROM t WHERE a <= 0; +explain format='brief' SELECT * FROM t WHERE a >= 0; +--sorted_result +SELECT * FROM t WHERE a >= 0; +explain format='brief' SELECT * FROM t WHERE a < 999998; +--sorted_result +SELECT * FROM t WHERE a < 999998; +explain format='brief' SELECT * FROM t WHERE a > 999998; +--sorted_result +SELECT * FROM t WHERE a > 999998; +explain format='brief' SELECT * FROM t WHERE a <= 999998; +--sorted_result +SELECT * FROM t WHERE a <= 999998; +explain format='brief' SELECT * FROM t WHERE a >= 999998; +--sorted_result +SELECT * FROM t WHERE a >= 999998; +explain format='brief' SELECT * FROM t WHERE a < 999999; +--sorted_result +SELECT * FROM t WHERE a < 999999; +explain format='brief' SELECT * FROM t WHERE a > 999999; +--sorted_result +SELECT * FROM t WHERE a > 999999; +explain format='brief' SELECT * FROM t WHERE a <= 999999; +--sorted_result +SELECT * FROM t WHERE a <= 999999; +explain format='brief' SELECT * FROM t WHERE a >= 999999; +--sorted_result +SELECT * 
FROM t WHERE a >= 999999; +explain format='brief' SELECT * FROM t WHERE a < 1000000; +--sorted_result +SELECT * FROM t WHERE a < 1000000; +explain format='brief' SELECT * FROM t WHERE a > 1000000; +--sorted_result +SELECT * FROM t WHERE a > 1000000; +explain format='brief' SELECT * FROM t WHERE a <= 1000000; +--sorted_result +SELECT * FROM t WHERE a <= 1000000; +explain format='brief' SELECT * FROM t WHERE a >= 1000000; +--sorted_result +SELECT * FROM t WHERE a >= 1000000; +explain format='brief' SELECT * FROM t WHERE a < 1000001; +--sorted_result +SELECT * FROM t WHERE a < 1000001; +explain format='brief' SELECT * FROM t WHERE a > 1000001; +--sorted_result +SELECT * FROM t WHERE a > 1000001; +explain format='brief' SELECT * FROM t WHERE a <= 1000001; +--sorted_result +SELECT * FROM t WHERE a <= 1000001; +explain format='brief' SELECT * FROM t WHERE a >= 1000001; +--sorted_result +SELECT * FROM t WHERE a >= 1000001; +explain format='brief' SELECT * FROM t WHERE a < 1000002; +--sorted_result +SELECT * FROM t WHERE a < 1000002; +explain format='brief' SELECT * FROM t WHERE a > 1000002; +--sorted_result +SELECT * FROM t WHERE a > 1000002; +explain format='brief' SELECT * FROM t WHERE a <= 1000002; +--sorted_result +SELECT * FROM t WHERE a <= 1000002; +explain format='brief' SELECT * FROM t WHERE a >= 1000002; +--sorted_result +SELECT * FROM t WHERE a >= 1000002; +explain format='brief' SELECT * FROM t WHERE a < 3000000; +--sorted_result +SELECT * FROM t WHERE a < 3000000; +explain format='brief' SELECT * FROM t WHERE a > 3000000; +--sorted_result +SELECT * FROM t WHERE a > 3000000; +explain format='brief' SELECT * FROM t WHERE a <= 3000000; +--sorted_result +SELECT * FROM t WHERE a <= 3000000; +explain format='brief' SELECT * FROM t WHERE a >= 3000000; +--sorted_result +SELECT * FROM t WHERE a >= 3000000; +explain format='brief' SELECT * FROM t WHERE a < 3000001; +--sorted_result +SELECT * FROM t WHERE a < 3000001; +explain format='brief' SELECT * FROM t WHERE a > 
3000001; +--sorted_result +SELECT * FROM t WHERE a > 3000001; +explain format='brief' SELECT * FROM t WHERE a <= 3000001; +--sorted_result +SELECT * FROM t WHERE a <= 3000001; +explain format='brief' SELECT * FROM t WHERE a >= 3000001; +--sorted_result +SELECT * FROM t WHERE a >= 3000001; +explain format='brief' SELECT * FROM t WHERE a < 999997; +--sorted_result +SELECT * FROM t WHERE a < 999997; +explain format='brief' SELECT * FROM t WHERE a > 999997; +--sorted_result +SELECT * FROM t WHERE a > 999997; +explain format='brief' SELECT * FROM t WHERE a <= 999997; +--sorted_result +SELECT * FROM t WHERE a <= 999997; +explain format='brief' SELECT * FROM t WHERE a >= 999997; +--sorted_result +SELECT * FROM t WHERE a >= 999997; +explain format='brief' SELECT * FROM t WHERE a >= 999997 AND a <= 999999; +--sorted_result +SELECT * FROM t WHERE a >= 999997 AND a <= 999999; +explain format='brief' SELECT * FROM t WHERE a > 999997 AND a <= 999999; +--sorted_result +SELECT * FROM t WHERE a > 999997 AND a <= 999999; +explain format='brief' SELECT * FROM t WHERE a > 999997 AND a < 999999; +--sorted_result +SELECT * FROM t WHERE a > 999997 AND a < 999999; +explain format='brief' SELECT * FROM t WHERE a > 999997 AND a <= 999999; +--sorted_result +SELECT * FROM t WHERE a > 999997 AND a <= 999999; +explain format='brief' SELECT * FROM t WHERE a < 999998; +--sorted_result +SELECT * FROM t WHERE a < 999998; +explain format='brief' SELECT * FROM t WHERE a > 999998; +--sorted_result +SELECT * FROM t WHERE a > 999998; +explain format='brief' SELECT * FROM t WHERE a <= 999998; +--sorted_result +SELECT * FROM t WHERE a <= 999998; +explain format='brief' SELECT * FROM t WHERE a >= 999998; +--sorted_result +SELECT * FROM t WHERE a >= 999998; +explain format='brief' SELECT * FROM t WHERE a >= 999998 AND a <= 1000000; +--sorted_result +SELECT * FROM t WHERE a >= 999998 AND a <= 1000000; +explain format='brief' SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +--sorted_result +SELECT * FROM 
t WHERE a > 999998 AND a <= 1000000; +explain format='brief' SELECT * FROM t WHERE a > 999998 AND a < 1000000; +--sorted_result +SELECT * FROM t WHERE a > 999998 AND a < 1000000; +explain format='brief' SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +--sorted_result +SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +explain format='brief' SELECT * FROM t WHERE a < 999999; +--sorted_result +SELECT * FROM t WHERE a < 999999; +explain format='brief' SELECT * FROM t WHERE a > 999999; +--sorted_result +SELECT * FROM t WHERE a > 999999; +explain format='brief' SELECT * FROM t WHERE a <= 999999; +--sorted_result +SELECT * FROM t WHERE a <= 999999; +explain format='brief' SELECT * FROM t WHERE a >= 999999; +--sorted_result +SELECT * FROM t WHERE a >= 999999; +explain format='brief' SELECT * FROM t WHERE a >= 999999 AND a <= 1000001; +--sorted_result +SELECT * FROM t WHERE a >= 999999 AND a <= 1000001; +explain format='brief' SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +--sorted_result +SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +explain format='brief' SELECT * FROM t WHERE a > 999999 AND a < 1000001; +--sorted_result +SELECT * FROM t WHERE a > 999999 AND a < 1000001; +explain format='brief' SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +--sorted_result +SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +explain format='brief' SELECT * FROM t WHERE a < 1000000; +--sorted_result +SELECT * FROM t WHERE a < 1000000; +explain format='brief' SELECT * FROM t WHERE a > 1000000; +--sorted_result +SELECT * FROM t WHERE a > 1000000; +explain format='brief' SELECT * FROM t WHERE a <= 1000000; +--sorted_result +SELECT * FROM t WHERE a <= 1000000; +explain format='brief' SELECT * FROM t WHERE a >= 1000000; +--sorted_result +SELECT * FROM t WHERE a >= 1000000; +explain format='brief' SELECT * FROM t WHERE a >= 1000000 AND a <= 1000002; +--sorted_result +SELECT * FROM t WHERE a >= 1000000 AND a <= 1000002; +explain format='brief' SELECT * FROM t WHERE a > 
1000000 AND a <= 1000002; +--sorted_result +SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +explain format='brief' SELECT * FROM t WHERE a > 1000000 AND a < 1000002; +--sorted_result +SELECT * FROM t WHERE a > 1000000 AND a < 1000002; +explain format='brief' SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +--sorted_result +SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +explain format='brief' SELECT * FROM t WHERE a < 1999997; +--sorted_result +SELECT * FROM t WHERE a < 1999997; +explain format='brief' SELECT * FROM t WHERE a > 1999997; +--sorted_result +SELECT * FROM t WHERE a > 1999997; +explain format='brief' SELECT * FROM t WHERE a <= 1999997; +--sorted_result +SELECT * FROM t WHERE a <= 1999997; +explain format='brief' SELECT * FROM t WHERE a >= 1999997; +--sorted_result +SELECT * FROM t WHERE a >= 1999997; +explain format='brief' SELECT * FROM t WHERE a >= 1999997 AND a <= 1999999; +--sorted_result +SELECT * FROM t WHERE a >= 1999997 AND a <= 1999999; +explain format='brief' SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +--sorted_result +SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +explain format='brief' SELECT * FROM t WHERE a > 1999997 AND a < 1999999; +--sorted_result +SELECT * FROM t WHERE a > 1999997 AND a < 1999999; +explain format='brief' SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +--sorted_result +SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +explain format='brief' SELECT * FROM t WHERE a < 1999998; +--sorted_result +SELECT * FROM t WHERE a < 1999998; +explain format='brief' SELECT * FROM t WHERE a > 1999998; +--sorted_result +SELECT * FROM t WHERE a > 1999998; +explain format='brief' SELECT * FROM t WHERE a <= 1999998; +--sorted_result +SELECT * FROM t WHERE a <= 1999998; +explain format='brief' SELECT * FROM t WHERE a >= 1999998; +--sorted_result +SELECT * FROM t WHERE a >= 1999998; +explain format='brief' SELECT * FROM t WHERE a >= 1999998 AND a <= 2000000; +--sorted_result +SELECT * FROM t WHERE a >= 
1999998 AND a <= 2000000; +explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +--sorted_result +SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a < 2000000; +--sorted_result +SELECT * FROM t WHERE a > 1999998 AND a < 2000000; +explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +--sorted_result +SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +explain format='brief' SELECT * FROM t WHERE a < 1999999; +--sorted_result +SELECT * FROM t WHERE a < 1999999; +explain format='brief' SELECT * FROM t WHERE a > 1999999; +--sorted_result +SELECT * FROM t WHERE a > 1999999; +explain format='brief' SELECT * FROM t WHERE a <= 1999999; +--sorted_result +SELECT * FROM t WHERE a <= 1999999; +explain format='brief' SELECT * FROM t WHERE a >= 1999999; +--sorted_result +SELECT * FROM t WHERE a >= 1999999; +explain format='brief' SELECT * FROM t WHERE a >= 1999999 AND a <= 2000001; +--sorted_result +SELECT * FROM t WHERE a >= 1999999 AND a <= 2000001; +explain format='brief' SELECT * FROM t WHERE a > 1999999 AND a <= 2000001; +--sorted_result +SELECT * FROM t WHERE a > 1999999 AND a <= 2000001; +explain format='brief' SELECT * FROM t WHERE a > 1999999 AND a < 2000001; +--sorted_result +SELECT * FROM t WHERE a > 1999999 AND a < 2000001; +explain format='brief' SELECT * FROM t WHERE a > 1999999 AND a <= 2000001; +--sorted_result +SELECT * FROM t WHERE a > 1999999 AND a <= 2000001; +explain format='brief' SELECT * FROM t WHERE a < 2000000; +--sorted_result +SELECT * FROM t WHERE a < 2000000; +explain format='brief' SELECT * FROM t WHERE a > 2000000; +--sorted_result +SELECT * FROM t WHERE a > 2000000; +explain format='brief' SELECT * FROM t WHERE a <= 2000000; +--sorted_result +SELECT * FROM t WHERE a <= 2000000; +explain format='brief' SELECT * FROM t WHERE a >= 2000000; +--sorted_result +SELECT * FROM t WHERE a >= 2000000; +explain format='brief' SELECT * FROM t WHERE a 
>= 2000000 AND a <= 2000002; +--sorted_result +SELECT * FROM t WHERE a >= 2000000 AND a <= 2000002; +explain format='brief' SELECT * FROM t WHERE a > 2000000 AND a <= 2000002; +--sorted_result +SELECT * FROM t WHERE a > 2000000 AND a <= 2000002; +explain format='brief' SELECT * FROM t WHERE a > 2000000 AND a < 2000002; +--sorted_result +SELECT * FROM t WHERE a > 2000000 AND a < 2000002; +explain format='brief' SELECT * FROM t WHERE a > 2000000 AND a <= 2000002; +--sorted_result +SELECT * FROM t WHERE a > 2000000 AND a <= 2000002; +explain format='brief' SELECT * FROM t WHERE a < 2999997; +--sorted_result +SELECT * FROM t WHERE a < 2999997; +explain format='brief' SELECT * FROM t WHERE a > 2999997; +--sorted_result +SELECT * FROM t WHERE a > 2999997; +explain format='brief' SELECT * FROM t WHERE a <= 2999997; +--sorted_result +SELECT * FROM t WHERE a <= 2999997; +explain format='brief' SELECT * FROM t WHERE a >= 2999997; +--sorted_result +SELECT * FROM t WHERE a >= 2999997; +explain format='brief' SELECT * FROM t WHERE a >= 2999997 AND a <= 2999999; +--sorted_result +SELECT * FROM t WHERE a >= 2999997 AND a <= 2999999; +explain format='brief' SELECT * FROM t WHERE a > 2999997 AND a <= 2999999; +--sorted_result +SELECT * FROM t WHERE a > 2999997 AND a <= 2999999; +explain format='brief' SELECT * FROM t WHERE a > 2999997 AND a < 2999999; +--sorted_result +SELECT * FROM t WHERE a > 2999997 AND a < 2999999; +explain format='brief' SELECT * FROM t WHERE a > 2999997 AND a <= 2999999; +--sorted_result +SELECT * FROM t WHERE a > 2999997 AND a <= 2999999; +explain format='brief' SELECT * FROM t WHERE a < 2999998; +--sorted_result +SELECT * FROM t WHERE a < 2999998; +explain format='brief' SELECT * FROM t WHERE a > 2999998; +--sorted_result +SELECT * FROM t WHERE a > 2999998; +explain format='brief' SELECT * FROM t WHERE a <= 2999998; +--sorted_result +SELECT * FROM t WHERE a <= 2999998; +explain format='brief' SELECT * FROM t WHERE a >= 2999998; +--sorted_result +SELECT * 
FROM t WHERE a >= 2999998; +explain format='brief' SELECT * FROM t WHERE a >= 2999998 AND a <= 3000000; +--sorted_result +SELECT * FROM t WHERE a >= 2999998 AND a <= 3000000; +explain format='brief' SELECT * FROM t WHERE a > 2999998 AND a <= 3000000; +--sorted_result +SELECT * FROM t WHERE a > 2999998 AND a <= 3000000; +explain format='brief' SELECT * FROM t WHERE a > 2999998 AND a < 3000000; +--sorted_result +SELECT * FROM t WHERE a > 2999998 AND a < 3000000; +explain format='brief' SELECT * FROM t WHERE a > 2999998 AND a <= 3000000; +--sorted_result +SELECT * FROM t WHERE a > 2999998 AND a <= 3000000; +explain format='brief' SELECT * FROM t WHERE a < 2999999; +--sorted_result +SELECT * FROM t WHERE a < 2999999; +explain format='brief' SELECT * FROM t WHERE a > 2999999; +--sorted_result +SELECT * FROM t WHERE a > 2999999; +explain format='brief' SELECT * FROM t WHERE a <= 2999999; +--sorted_result +SELECT * FROM t WHERE a <= 2999999; +explain format='brief' SELECT * FROM t WHERE a >= 2999999; +--sorted_result +SELECT * FROM t WHERE a >= 2999999; +explain format='brief' SELECT * FROM t WHERE a >= 2999999 AND a <= 3000001; +--sorted_result +SELECT * FROM t WHERE a >= 2999999 AND a <= 3000001; +explain format='brief' SELECT * FROM t WHERE a > 2999999 AND a <= 3000001; +--sorted_result +SELECT * FROM t WHERE a > 2999999 AND a <= 3000001; +explain format='brief' SELECT * FROM t WHERE a > 2999999 AND a < 3000001; +--sorted_result +SELECT * FROM t WHERE a > 2999999 AND a < 3000001; +explain format='brief' SELECT * FROM t WHERE a > 2999999 AND a <= 3000001; +--sorted_result +SELECT * FROM t WHERE a > 2999999 AND a <= 3000001; +explain format='brief' SELECT * FROM t WHERE a < 3000000; +--sorted_result +SELECT * FROM t WHERE a < 3000000; +explain format='brief' SELECT * FROM t WHERE a > 3000000; +--sorted_result +SELECT * FROM t WHERE a > 3000000; +explain format='brief' SELECT * FROM t WHERE a <= 3000000; +--sorted_result +SELECT * FROM t WHERE a <= 3000000; +explain 
format='brief' SELECT * FROM t WHERE a >= 3000000; +--sorted_result +SELECT * FROM t WHERE a >= 3000000; +explain format='brief' SELECT * FROM t WHERE a >= 3000000 AND a <= 3000002; +--sorted_result +SELECT * FROM t WHERE a >= 3000000 AND a <= 3000002; +explain format='brief' SELECT * FROM t WHERE a > 3000000 AND a <= 3000002; +--sorted_result +SELECT * FROM t WHERE a > 3000000 AND a <= 3000002; +explain format='brief' SELECT * FROM t WHERE a > 3000000 AND a < 3000002; +--sorted_result +SELECT * FROM t WHERE a > 3000000 AND a < 3000002; +explain format='brief' SELECT * FROM t WHERE a > 3000000 AND a <= 3000002; +--sorted_result +SELECT * FROM t WHERE a > 3000000 AND a <= 3000002; +set @@tidb_partition_prune_mode = default; + +# TestRangePartitionBoundariesLtS +set @@tidb_partition_prune_mode = 'dynamic'; +drop table if exists t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1), + PARTITION p1 VALUES LESS THAN (2), + PARTITION p2 VALUES LESS THAN (3), + PARTITION p3 VALUES LESS THAN (4), + PARTITION p4 VALUES LESS THAN (5), + PARTITION p5 VALUES LESS THAN (6), + PARTITION p6 VALUES LESS THAN (7)); +INSERT INTO t VALUES (0, '0 Filler...'); +INSERT INTO t VALUES (1, '1 Filler...'); +INSERT INTO t VALUES (2, '2 Filler...'); +INSERT INTO t VALUES (3, '3 Filler...'); +INSERT INTO t VALUES (4, '4 Filler...'); +INSERT INTO t VALUES (5, '5 Filler...'); +INSERT INTO t VALUES (6, '6 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a < -1; +--sorted_result +SELECT * FROM t WHERE a < -1; +explain format='brief' SELECT * FROM t WHERE a > -1; +--sorted_result +SELECT * FROM t WHERE a > -1; +explain format='brief' SELECT * FROM t WHERE a <= -1; +--sorted_result +SELECT * FROM t WHERE a <= -1; +explain format='brief' SELECT * FROM t WHERE a >= -1; +--sorted_result +SELECT * FROM t WHERE a >= -1; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > -1; +--sorted_result +SELECT * FROM t WHERE a < 
2 OR a > -1; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < -1; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < -1; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > -1); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > -1); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < -1); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < -1); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= -1; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= -1; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < -1; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < -1; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= -1); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= -1); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < -1); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < -1); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > -1; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > -1; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= -1; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= -1; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > -1); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > -1); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= -1); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= -1); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= -1; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= -1; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= -1; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= -1; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= -1); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= -1); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= -1); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= -1); +explain format='brief' SELECT * FROM t 
WHERE a < 0; +--sorted_result +SELECT * FROM t WHERE a < 0; +explain format='brief' SELECT * FROM t WHERE a > 0; +--sorted_result +SELECT * FROM t WHERE a > 0; +explain format='brief' SELECT * FROM t WHERE a <= 0; +--sorted_result +SELECT * FROM t WHERE a <= 0; +explain format='brief' SELECT * FROM t WHERE a >= 0; +--sorted_result +SELECT * FROM t WHERE a >= 0; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 0; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 0; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 0; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 0; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 0); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 0); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 0); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 0); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 0; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 0; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 0; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 0; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 0); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 0); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 0); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 0); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 0; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 0; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 0; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 0; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 0); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 0); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 0); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 0); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 0; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 
0; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 0; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 0; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 0); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 0); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 0); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 0); +explain format='brief' SELECT * FROM t WHERE a < 1; +--sorted_result +SELECT * FROM t WHERE a < 1; +explain format='brief' SELECT * FROM t WHERE a > 1; +--sorted_result +SELECT * FROM t WHERE a > 1; +explain format='brief' SELECT * FROM t WHERE a <= 1; +--sorted_result +SELECT * FROM t WHERE a <= 1; +explain format='brief' SELECT * FROM t WHERE a >= 1; +--sorted_result +SELECT * FROM t WHERE a >= 1; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 1; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 1; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 1; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 1; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 1); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 1); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 1); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 1); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 1; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 1; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 1; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 1; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 1); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 1); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 1); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 1); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 1; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 1; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 1; 
+--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 1; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 1); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 1); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 1); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 1); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 1; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 1; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 1; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 1; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 1); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 1); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 1); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 1); +explain format='brief' SELECT * FROM t WHERE a < 2; +--sorted_result +SELECT * FROM t WHERE a < 2; +explain format='brief' SELECT * FROM t WHERE a > 2; +--sorted_result +SELECT * FROM t WHERE a > 2; +explain format='brief' SELECT * FROM t WHERE a <= 2; +--sorted_result +SELECT * FROM t WHERE a <= 2; +explain format='brief' SELECT * FROM t WHERE a >= 2; +--sorted_result +SELECT * FROM t WHERE a >= 2; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 2; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 2; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 2; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 2; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 2); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 2); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 2); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 2); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 2; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 2; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 2; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 2; +explain 
format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 2); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 2); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 2); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 2); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 2; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 2; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 2; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 2; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 2); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 2); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 2); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 2); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 2; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 2; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 2; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 2; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 2); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 2); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 2); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 2); +explain format='brief' SELECT * FROM t WHERE a < 3; +--sorted_result +SELECT * FROM t WHERE a < 3; +explain format='brief' SELECT * FROM t WHERE a > 3; +--sorted_result +SELECT * FROM t WHERE a > 3; +explain format='brief' SELECT * FROM t WHERE a <= 3; +--sorted_result +SELECT * FROM t WHERE a <= 3; +explain format='brief' SELECT * FROM t WHERE a >= 3; +--sorted_result +SELECT * FROM t WHERE a >= 3; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 3; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 3; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 3; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 3; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 3); 
+--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 3); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 3); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 3); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 3; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 3; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 3; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 3; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 3); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 3); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 3); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 3); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 3; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 3; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 3; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 3; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 3); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 3); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 3); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 3); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 3; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 3; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 3; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 3; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 3); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 3); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 3); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 3); +explain format='brief' SELECT * FROM t WHERE a < 4; +--sorted_result +SELECT * FROM t WHERE a < 4; +explain format='brief' SELECT * FROM t WHERE a > 4; +--sorted_result +SELECT * FROM t WHERE a > 4; +explain format='brief' SELECT * FROM t WHERE a <= 4; +--sorted_result 
+SELECT * FROM t WHERE a <= 4; +explain format='brief' SELECT * FROM t WHERE a >= 4; +--sorted_result +SELECT * FROM t WHERE a >= 4; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 4; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 4; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 4; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 4; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 4); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 4); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 4); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 4); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 4; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 4; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 4; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 4; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 4); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 4); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 4); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 4); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 4; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 4; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 4; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 4; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 4); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 4); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 4); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 4); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 4; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 4; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 4; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 4; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 4); +--sorted_result +SELECT * FROM t 
WHERE NOT (a <= 2 OR a >= 4); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 4); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 4); +explain format='brief' SELECT * FROM t WHERE a < 5; +--sorted_result +SELECT * FROM t WHERE a < 5; +explain format='brief' SELECT * FROM t WHERE a > 5; +--sorted_result +SELECT * FROM t WHERE a > 5; +explain format='brief' SELECT * FROM t WHERE a <= 5; +--sorted_result +SELECT * FROM t WHERE a <= 5; +explain format='brief' SELECT * FROM t WHERE a >= 5; +--sorted_result +SELECT * FROM t WHERE a >= 5; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 5; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 5; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 5; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 5; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 5); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 5); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 5); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 5); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 5; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 5; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 5; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 5; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 5); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 5); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 5); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 5); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 5; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 5; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 5; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 5; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 5); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 5); +explain format='brief' SELECT * FROM t WHERE 
NOT (a > 2 AND a <= 5); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 5); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 5; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 5; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 5; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 5; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 5); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 5); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 5); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 5); +explain format='brief' SELECT * FROM t WHERE a < 6; +--sorted_result +SELECT * FROM t WHERE a < 6; +explain format='brief' SELECT * FROM t WHERE a > 6; +--sorted_result +SELECT * FROM t WHERE a > 6; +explain format='brief' SELECT * FROM t WHERE a <= 6; +--sorted_result +SELECT * FROM t WHERE a <= 6; +explain format='brief' SELECT * FROM t WHERE a >= 6; +--sorted_result +SELECT * FROM t WHERE a >= 6; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 6; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 6; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 6; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 6; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 6); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 6); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 6); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 6); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 6; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 6; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 6; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 6; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 6); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 6); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 6); +--sorted_result +SELECT * FROM t WHERE NOT (a 
>= 2 AND a < 6); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 6; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 6; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 6; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 6; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 6); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 6); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 6); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 6); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 6; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 6; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 6; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 6; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 6); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 6); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 6); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 6); +explain format='brief' SELECT * FROM t WHERE a < 7; +--sorted_result +SELECT * FROM t WHERE a < 7; +explain format='brief' SELECT * FROM t WHERE a > 7; +--sorted_result +SELECT * FROM t WHERE a > 7; +explain format='brief' SELECT * FROM t WHERE a <= 7; +--sorted_result +SELECT * FROM t WHERE a <= 7; +explain format='brief' SELECT * FROM t WHERE a >= 7; +--sorted_result +SELECT * FROM t WHERE a >= 7; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 7; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 7; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 7; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 7; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 7); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 7); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 7); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 7); +explain format='brief' SELECT * FROM t WHERE a < 2 OR 
a >= 7; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 7; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 7; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 7; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 7); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 7); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 7); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 7); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 7; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 7; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 7; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 7; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 7); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 7); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 7); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 7); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 7; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 7; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 7; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 7; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 7); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 7); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 7); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 7); +set @@tidb_partition_prune_mode = default; + diff --git a/tests/integrationtest/t/executor/partition/partition_with_expression.test b/tests/integrationtest/t/executor/partition/partition_with_expression.test new file mode 100644 index 0000000000000..dc8efe0b37369 --- /dev/null +++ b/tests/integrationtest/t/executor/partition/partition_with_expression.test @@ -0,0 +1,454 @@ +# TestDateColWithUnequalExpression +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a datetime, b int) 
partition by range columns (a) (partition p0 values less than("2012-12-10 00:00:00"), partition p1 values less than("2022-12-30 00:00:00"), partition p2 values less than("2025-12-12 00:00:00")); +create table t(a datetime, b int) partition by range columns (a) (partition p0 values less than("2012-12-10 00:00:00"), partition p1 values less than("2022-12-30 00:00:00"), partition p2 values less than("2025-12-12 00:00:00")); +insert into tp values("2015-09-09 00:00:00", 1), ("2020-08-08 19:00:01", 2), ("2024-01-01 01:01:01", 3); +insert into t values("2015-09-09 00:00:00", 1), ("2020-08-08 19:00:01", 2), ("2024-01-01 01:01:01", 3); +analyze table tp; +analyze table t; +explain format='brief' select * from tp where a != '2024-01-01 01:01:01'; +--sorted_result +select * from tp where a != '2024-01-01 01:01:01'; +--sorted_result +select * from t where a != '2024-01-01 01:01:01'; +explain format='brief' select * from tp where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00'; +--sorted_result +select * from tp where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00'; +--sorted_result +select * from t where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00'; +set tidb_partition_prune_mode=default; + +# TestWeekdayWithExpression +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a datetime, b int) partition by range(weekday(a)) (partition p0 values less than(3), partition p1 values less than(5), partition p2 values less than(8)); +create table t(a datetime, b int); +insert into tp values("2020-08-17 00:00:00", 1), ("2020-08-18 00:00:00", 2), ("2020-08-19 00:00:00", 4), ("2020-08-20 00:00:00", 5), ("2020-08-21 00:00:00", 6), ("2020-08-22 00:00:00", 0); +insert into t values("2020-08-17 00:00:00", 1), ("2020-08-18 00:00:00", 2), ("2020-08-19 00:00:00", 4), ("2020-08-20 00:00:00", 5), ("2020-08-21 00:00:00", 6), ("2020-08-22 00:00:00", 0); +analyze table tp; +analyze table t; +explain format='brief' select * from tp where 
a = '2020-08-17 00:00:00'; +--sorted_result +select * from tp where a = '2020-08-17 00:00:00'; +--sorted_result +select * from t where a = '2020-08-17 00:00:00'; +explain format='brief' select * from tp where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00'; +--sorted_result +select * from tp where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00'; +--sorted_result +select * from t where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00'; +explain format='brief' select * from tp where a < '2020-08-19 00:00:00'; +--sorted_result +select * from tp where a < '2020-08-19 00:00:00'; +--sorted_result +select * from t where a < '2020-08-19 00:00:00'; +set tidb_partition_prune_mode=default; + +# TestFloorUnixTimestampAndIntColWithExpression +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a timestamp, b int) partition by range(floor(unix_timestamp(a))) (partition p0 values less than(1580670000), partition p1 values less than(1597622400), partition p2 values less than(1629158400)); +create table t(a timestamp, b int); +insert into tp values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3); +insert into t values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3); +analyze table tp; +analyze table t; +explain select * from tp where a > '2020-09-11 00:00:00'; +--sorted_result +select * from tp where a > '2020-09-11 00:00:00'; +--sorted_result +select * from t where a > '2020-09-11 00:00:00'; +explain select * from tp where a < '2020-07-07 01:00:00'; +--sorted_result +select * from tp where a < '2020-07-07 01:00:00'; +--sorted_result +select * from t where a < '2020-07-07 01:00:00'; +set tidb_partition_prune_mode=default; + +# TestUnixTimestampAndIntColWithExpression +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a timestamp, b int) partition by 
range(unix_timestamp(a)) (partition p0 values less than(1580670000), partition p1 values less than(1597622400), partition p2 values less than(1629158400)); +create table t(a timestamp, b int); +insert into tp values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3); +insert into t values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3); +analyze table tp; +analyze table t; +explain select * from tp where a > '2020-09-11 00:00:00'; +--sorted_result +select * from tp where a > '2020-09-11 00:00:00'; +--sorted_result +select * from t where a > '2020-09-11 00:00:00'; +explain select * from tp where a < '2020-07-07 01:00:00'; +--sorted_result +select * from tp where a < '2020-07-07 01:00:00'; +--sorted_result +select * from t where a < '2020-07-07 01:00:00'; +set tidb_partition_prune_mode=default; + +# TestDatetimeColAndIntColWithExpression +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a datetime, b int) partition by range columns(a) (partition p0 values less than('2020-02-02 00:00:00'), partition p1 values less than('2020-09-01 00:00:00'), partition p2 values less than('2020-12-20 00:00:00')); +create table t(a datetime, b int); +insert into tp values('2020-01-01 12:00:00', 1), ('2020-08-22 10:00:00', 2), ('2020-09-09 11:00:00', 3), ('2020-10-01 00:00:00', 4); +insert into t values('2020-01-01 12:00:00', 1), ('2020-08-22 10:00:00', 2), ('2020-09-09 11:00:00', 3), ('2020-10-01 00:00:00', 4); +analyze table tp; +analyze table t; +explain select * from tp where a < '2020-09-01 00:00:00'; +--sorted_result +select * from tp where a < '2020-09-01 00:00:00'; +--sorted_result +select * from t where a < '2020-09-01 00:00:00'; +explain select * from tp where a > '2020-07-07 01:00:00'; +--sorted_result +select * from tp where a > '2020-07-07 01:00:00'; +--sorted_result +select * from t where a > '2020-07-07 01:00:00'; 
+set tidb_partition_prune_mode=default; + +# TestVarcharColAndIntColWithExpression +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a varchar(255), b int) partition by range columns(a) (partition p0 values less than('ddd'), partition p1 values less than('ggggg'), partition p2 values less than('mmmmmm')); +create table t(a varchar(255), b int); +insert into tp values('aaa', 1), ('bbbb', 2), ('ccc', 3), ('dfg', 4), ('kkkk', 5), ('10', 6); +insert into t values('aaa', 1), ('bbbb', 2), ('ccc', 3), ('dfg', 4), ('kkkk', 5), ('10', 6); +analyze table tp; +analyze table t; +explain select * from tp where a < '10'; +--sorted_result +select * from tp where a < '10'; +--sorted_result +select * from t where a < '10'; +explain select * from tp where a > 0; +--sorted_result +select * from tp where a > 0; +--sorted_result +select * from t where a > 0; +explain select * from tp where a < 0; +--sorted_result +select * from tp where a < 0; +--sorted_result +select * from t where a < 0; +set tidb_partition_prune_mode=default; + +# TestDynamicPruneModeWithExpression +drop table if exists trange, thash, t; +create table trange(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11)); +create table thash(a int, b int) partition by hash(a) partitions 4; +create table t(a int, b int); +insert into trange values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1); +insert into thash values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1); +insert into t values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1); +set session tidb_partition_prune_mode='dynamic'; +analyze table trange; +analyze table thash; +analyze table t; +--sorted_result +SELECT * from t where 
a = 2; +explain format='brief' select * from trange where a = 2; +--sorted_result +SELECT * from trange where a = 2; +explain format='brief' select * from thash where a = 2; +--sorted_result +SELECT * from thash where a = 2; +--sorted_result +SELECT * from t where a = 4 or a = 1; +explain format='brief' select * from trange where a = 4 or a = 1; +--sorted_result +SELECT * from trange where a = 4 or a = 1; +explain format='brief' select * from thash where a = 4 or a = 1; +--sorted_result +SELECT * from thash where a = 4 or a = 1; +--sorted_result +SELECT * from t where a = -1; +explain format='brief' select * from trange where a = -1; +--sorted_result +SELECT * from trange where a = -1; +explain format='brief' select * from thash where a = -1; +--sorted_result +SELECT * from thash where a = -1; +--sorted_result +SELECT * from t where a is NULL; +explain format='brief' select * from trange where a is NULL; +--sorted_result +SELECT * from trange where a is NULL; +explain format='brief' select * from thash where a is NULL; +--sorted_result +SELECT * from thash where a is NULL; +--sorted_result +SELECT * from t where b is NULL; +explain format='brief' select * from trange where b is NULL; +--sorted_result +SELECT * from trange where b is NULL; +explain format='brief' select * from thash where b is NULL; +--sorted_result +SELECT * from thash where b is NULL; +--sorted_result +SELECT * from t where a > -1; +explain format='brief' select * from trange where a > -1; +--sorted_result +SELECT * from trange where a > -1; +explain format='brief' select * from thash where a > -1; +--sorted_result +SELECT * from thash where a > -1; +--sorted_result +SELECT * from t where a >= 4 and a <= 5; +explain format='brief' select * from trange where a >= 4 and a <= 5; +--sorted_result +SELECT * from trange where a >= 4 and a <= 5; +explain format='brief' select * from thash where a >= 4 and a <= 5; +--sorted_result +SELECT * from thash where a >= 4 and a <= 5; +--sorted_result +SELECT * 
from t where a > 10; +explain format='brief' select * from trange where a > 10; +--sorted_result +SELECT * from trange where a > 10; +explain format='brief' select * from thash where a > 10; +--sorted_result +SELECT * from thash where a > 10; +--sorted_result +SELECT * from t where a >=2 and a <= 3; +explain format='brief' select * from trange where a >=2 and a <= 3; +--sorted_result +SELECT * from trange where a >=2 and a <= 3; +explain format='brief' select * from thash where a >=2 and a <= 3; +--sorted_result +SELECT * from thash where a >=2 and a <= 3; +--sorted_result +SELECT * from t where a between 2 and 3; +explain format='brief' select * from trange where a between 2 and 3; +--sorted_result +SELECT * from trange where a between 2 and 3; +explain format='brief' select * from thash where a between 2 and 3; +--sorted_result +SELECT * from thash where a between 2 and 3; +--sorted_result +SELECT * from t where a < 2; +explain format='brief' select * from trange where a < 2; +--sorted_result +SELECT * from trange where a < 2; +explain format='brief' select * from thash where a < 2; +--sorted_result +SELECT * from thash where a < 2; +--sorted_result +SELECT * from t where a <= 3; +explain format='brief' select * from trange where a <= 3; +--sorted_result +SELECT * from trange where a <= 3; +explain format='brief' select * from thash where a <= 3; +--sorted_result +SELECT * from thash where a <= 3; +--sorted_result +SELECT * from t where a in (2, 3); +explain format='brief' select * from trange where a in (2, 3); +--sorted_result +SELECT * from trange where a in (2, 3); +explain format='brief' select * from thash where a in (2, 3); +--sorted_result +SELECT * from thash where a in (2, 3); +--sorted_result +SELECT * from t where a in (1, 5); +explain format='brief' select * from trange where a in (1, 5); +--sorted_result +SELECT * from trange where a in (1, 5); +explain format='brief' select * from thash where a in (1, 5); +--sorted_result +SELECT * from thash where 
a in (1, 5); +--sorted_result +SELECT * from t where a not in (1, 5); +explain format='brief' select * from trange where a not in (1, 5); +--sorted_result +SELECT * from trange where a not in (1, 5); +explain format='brief' select * from thash where a not in (1, 5); +--sorted_result +SELECT * from thash where a not in (1, 5); +--sorted_result +SELECT * from t where a = 2 and a = 2; +explain format='brief' select * from trange where a = 2 and a = 2; +--sorted_result +SELECT * from trange where a = 2 and a = 2; +explain format='brief' select * from thash where a = 2 and a = 2; +--sorted_result +SELECT * from thash where a = 2 and a = 2; +--sorted_result +SELECT * from t where a = 2 and a = 3; +explain format='brief' select * from trange where a = 2 and a = 3; +--sorted_result +SELECT * from trange where a = 2 and a = 3; +explain format='brief' select * from thash where a = 2 and a = 3; +--sorted_result +SELECT * from thash where a = 2 and a = 3; +--sorted_result +SELECT * from t where a < 2 and a > 0; +explain format='brief' select * from trange where a < 2 and a > 0; +--sorted_result +SELECT * from trange where a < 2 and a > 0; +explain format='brief' select * from thash where a < 2 and a > 0; +--sorted_result +SELECT * from thash where a < 2 and a > 0; +--sorted_result +SELECT * from t where a < 2 and a < 3; +explain format='brief' select * from trange where a < 2 and a < 3; +--sorted_result +SELECT * from trange where a < 2 and a < 3; +explain format='brief' select * from thash where a < 2 and a < 3; +--sorted_result +SELECT * from thash where a < 2 and a < 3; +--sorted_result +SELECT * from t where a > 1 and a > 2; +explain format='brief' select * from trange where a > 1 and a > 2; +--sorted_result +SELECT * from trange where a > 1 and a > 2; +explain format='brief' select * from thash where a > 1 and a > 2; +--sorted_result +SELECT * from thash where a > 1 and a > 2; +--sorted_result +SELECT * from t where a = 2 or a = 3; +explain format='brief' select * from 
trange where a = 2 or a = 3; +--sorted_result +SELECT * from trange where a = 2 or a = 3; +explain format='brief' select * from thash where a = 2 or a = 3; +--sorted_result +SELECT * from thash where a = 2 or a = 3; +--sorted_result +SELECT * from t where a = 2 or a in (3); +explain format='brief' select * from trange where a = 2 or a in (3); +--sorted_result +SELECT * from trange where a = 2 or a in (3); +explain format='brief' select * from thash where a = 2 or a in (3); +--sorted_result +SELECT * from thash where a = 2 or a in (3); +--sorted_result +SELECT * from t where a = 2 or a > 3; +explain format='brief' select * from trange where a = 2 or a > 3; +--sorted_result +SELECT * from trange where a = 2 or a > 3; +explain format='brief' select * from thash where a = 2 or a > 3; +--sorted_result +SELECT * from thash where a = 2 or a > 3; +--sorted_result +SELECT * from t where a = 2 or a <= 1; +explain format='brief' select * from trange where a = 2 or a <= 1; +--sorted_result +SELECT * from trange where a = 2 or a <= 1; +explain format='brief' select * from thash where a = 2 or a <= 1; +--sorted_result +SELECT * from thash where a = 2 or a <= 1; +--sorted_result +SELECT * from t where a = 2 or a between 2 and 2; +explain format='brief' select * from trange where a = 2 or a between 2 and 2; +--sorted_result +SELECT * from trange where a = 2 or a between 2 and 2; +explain format='brief' select * from thash where a = 2 or a between 2 and 2; +--sorted_result +SELECT * from thash where a = 2 or a between 2 and 2; +--sorted_result +SELECT * from t where a != 2; +explain format='brief' select * from trange where a != 2; +--sorted_result +SELECT * from trange where a != 2; +explain format='brief' select * from thash where a != 2; +--sorted_result +SELECT * from thash where a != 2; +--sorted_result +SELECT * from t where a != 2 and a > 4; +explain format='brief' select * from trange where a != 2 and a > 4; +--sorted_result +SELECT * from trange where a != 2 and a > 4; 
+explain format='brief' select * from thash where a != 2 and a > 4; +--sorted_result +SELECT * from thash where a != 2 and a > 4; +--sorted_result +SELECT * from t where a != 2 and a != 3; +explain format='brief' select * from trange where a != 2 and a != 3; +--sorted_result +SELECT * from trange where a != 2 and a != 3; +explain format='brief' select * from thash where a != 2 and a != 3; +--sorted_result +SELECT * from thash where a != 2 and a != 3; +--sorted_result +SELECT * from t where a != 2 and a = 3; +explain format='brief' select * from trange where a != 2 and a = 3; +--sorted_result +SELECT * from trange where a != 2 and a = 3; +explain format='brief' select * from thash where a != 2 and a = 3; +--sorted_result +SELECT * from thash where a != 2 and a = 3; +--sorted_result +SELECT * from t where not (a = 2); +explain format='brief' select * from trange where not (a = 2); +--sorted_result +SELECT * from trange where not (a = 2); +explain format='brief' select * from thash where not (a = 2); +--sorted_result +SELECT * from thash where not (a = 2); +--sorted_result +SELECT * from t where not (a > 2); +explain format='brief' select * from trange where not (a > 2); +--sorted_result +SELECT * from trange where not (a > 2); +explain format='brief' select * from thash where not (a > 2); +--sorted_result +SELECT * from thash where not (a > 2); +--sorted_result +SELECT * from t where not (a < 2); +explain format='brief' select * from trange where not (a < 2); +--sorted_result +SELECT * from trange where not (a < 2); +explain format='brief' select * from thash where not (a < 2); +--sorted_result +SELECT * from thash where not (a < 2); +--sorted_result +SELECT * from t where a + 1 > 4; +explain format='brief' select * from trange where a + 1 > 4; +--sorted_result +SELECT * from trange where a + 1 > 4; +explain format='brief' select * from thash where a + 1 > 4; +--sorted_result +SELECT * from thash where a + 1 > 4; +--sorted_result +SELECT * from t where a - 1 > 0; 
+explain format='brief' select * from trange where a - 1 > 0; +--sorted_result +SELECT * from trange where a - 1 > 0; +explain format='brief' select * from thash where a - 1 > 0; +--sorted_result +SELECT * from thash where a - 1 > 0; +--sorted_result +SELECT * from t where a * 2 < 0; +explain format='brief' select * from trange where a * 2 < 0; +--sorted_result +SELECT * from trange where a * 2 < 0; +explain format='brief' select * from thash where a * 2 < 0; +--sorted_result +SELECT * from thash where a * 2 < 0; +--sorted_result +SELECT * from t where a << 1 < 0; +explain format='brief' select * from trange where a << 1 < 0; +--sorted_result +SELECT * from trange where a << 1 < 0; +explain format='brief' select * from thash where a << 1 < 0; +--sorted_result +SELECT * from thash where a << 1 < 0; +--sorted_result +SELECT * from t where a > '10'; +explain format='brief' select * from trange where a > '10'; +--sorted_result +SELECT * from trange where a > '10'; +explain format='brief' select * from thash where a > '10'; +--sorted_result +SELECT * from thash where a > '10'; +--sorted_result +SELECT * from t where a > '10ab'; +explain format='brief' select * from trange where a > '10ab'; +--sorted_result +SELECT * from trange where a > '10ab'; +explain format='brief' select * from thash where a > '10ab'; +--sorted_result +SELECT * from thash where a > '10ab'; +set tidb_partition_prune_mode=default; + diff --git a/tests/integrationtest/t/executor/partition/table.test b/tests/integrationtest/t/executor/partition/table.test new file mode 100644 index 0000000000000..d3b8b7645cfbd --- /dev/null +++ b/tests/integrationtest/t/executor/partition/table.test @@ -0,0 +1,358 @@ +# TestSetPartitionPruneMode +set @@session.tidb_partition_prune_mode = DEFAULT; +show warnings; +set @@global.tidb_partition_prune_mode = DEFAULT; +show warnings; + +connect (conn1, localhost, root,,); +select @@global.tidb_partition_prune_mode; +select @@session.tidb_partition_prune_mode; +set 
@@session.tidb_partition_prune_mode = "static"; +show warnings; +set @@global.tidb_partition_prune_mode = "static"; +show warnings; +connection default; +disconnect conn1; + +connect (conn1, localhost, root,,); +select @@session.tidb_partition_prune_mode; +show warnings; +select @@global.tidb_partition_prune_mode; +set @@session.tidb_partition_prune_mode = "dynamic"; +show warnings; +set @@global.tidb_partition_prune_mode = "dynamic"; +show warnings; +connection default; +disconnect conn1; + +connect (conn1, localhost, root,,); +select @@global.tidb_partition_prune_mode; +select @@session.tidb_partition_prune_mode; +connection default; +disconnect conn1; + +set @@session.tidb_partition_prune_mode = DEFAULT; +set @@global.tidb_partition_prune_mode = DEFAULT; + +# TestFourReader +drop table if exists pt; +create table pt (id int, c int, key i_id(id), key i_c(c)) partition by range (c) ( +partition p0 values less than (4), +partition p1 values less than (7), +partition p2 values less than (10)); +analyze table pt; +insert into pt values (0, 0), (2, 2), (4, 4), (6, 6), (7, 7), (9, 9), (null, null); +--sorted_result +select * from pt; +select * from pt where c > 10; +select * from pt where c > 8; +--sorted_result +select * from pt where c < 2 or c >= 9; +--sorted_result +select c from pt; +select c from pt where c > 10; +select c from pt where c > 8; +--sorted_result +select c from pt where c < 2 or c >= 9; +--sorted_result +select /*+ use_index(pt, i_id) */ * from pt; +select /*+ use_index(pt, i_id) */ * from pt where id < 4 and c > 10; +select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c > 8; +--sorted_result +select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c < 2 or c >= 9; +set @@tidb_enable_index_merge = 1; +--sorted_result +select /*+ use_index(i_c, i_id) */ * from pt where id = 4 or c < 7; +set @@tidb_enable_index_merge = DEFAULT; + +# TestPartitionIndexJoin +drop table if exists p, t; +create table p (id int, c int, key i_id(id), key 
i_c(c)) partition by range (c) ( + partition p0 values less than (4), + partition p1 values less than (7), + partition p2 values less than (10)); +create table t (id int); +insert into p values (3,3), (4,4), (6,6), (9,9); +insert into t values (4), (9); +--sorted_result +select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id; +--sorted_result +select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id; +drop table if exists p, t; +create table p (id int, c int, key i_id(id), key i_c(c)) partition by list (c) ( + partition p0 values in (1,2,3,4), + partition p1 values in (5,6,7), + partition p2 values in (8, 9,10)); +create table t (id int); +insert into p values (3,3), (4,4), (6,6), (9,9); +insert into t values (4), (9); +--sorted_result +select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id; +--sorted_result +select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id; +drop table if exists p, t; +create table p (id int, c int, key i_id(id), key i_c(c)) partition by hash(c) partitions 5; +create table t (id int); +insert into p values (3,3), (4,4), (6,6), (9,9); +insert into t values (4), (9); +--sorted_result +select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id; +--sorted_result +select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id; + +# TestPartitionUnionScanIndexJoin +# For issue https://github.com/pingcap/tidb/issues/19152 +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), primary key (c_int)) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue); +create table t2 (c_int int, c_str varchar(40), primary key (c_int, c_str)) partition by hash (c_int) partitions 4; +insert into t1 values (10, 'interesting neumann'); +insert into t2 select * from t1; +begin; +insert into t2 values (11, 'hopeful hoover'); +select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11); +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join 
t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11); +commit; + +# TestPartitionReaderUnderApply +## For issue 19458. +drop table if exists t; +create table t(c_int int); +insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9); +DROP TABLE IF EXISTS `t1`; +CREATE TABLE t1 ( + c_int int NOT NULL, + c_str varchar(40) NOT NULL, + c_datetime datetime NOT NULL, + c_timestamp timestamp NULL DEFAULT NULL, + c_double double DEFAULT NULL, + c_decimal decimal(12,6) DEFAULT NULL, + PRIMARY KEY (c_int,c_str,c_datetime) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci + PARTITION BY RANGE (c_int) +(PARTITION p0 VALUES LESS THAN (2) ENGINE = InnoDB, + PARTITION p1 VALUES LESS THAN (4) ENGINE = InnoDB, + PARTITION p2 VALUES LESS THAN (6) ENGINE = InnoDB, + PARTITION p3 VALUES LESS THAN (8) ENGINE = InnoDB, + PARTITION p4 VALUES LESS THAN (10) ENGINE = InnoDB, + PARTITION p5 VALUES LESS THAN (20) ENGINE = InnoDB, + PARTITION p6 VALUES LESS THAN (50) ENGINE = InnoDB, + PARTITION p7 VALUES LESS THAN (1000000000) ENGINE = InnoDB); +INSERT INTO `t1` VALUES (19,'nifty feistel','2020-02-28 04:01:28','2020-02-04 06:11:57',32.430079,1.284000),(20,'objective snyder','2020-04-15 17:55:04','2020-05-30 22:04:13',37.690874,9.372000); +begin; +insert into t1 values (22, 'wizardly saha', '2020-05-03 16:35:22', '2020-05-03 02:18:42', 96.534810, 0.088); +select c_int from t where (select min(t1.c_int) from t1 where t1.c_int > t.c_int) > (select count(*) from t1 where t1.c_int > t.c_int) order by c_int; +rollback; +## For issue 19450. 
+drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int)); +create table t2 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int)) partition by hash (c_int) partitions 4; +insert into t1 values (1, 'romantic robinson', 4.436), (2, 'stoic chaplygin', 9.826), (3, 'vibrant shamir', 6.300), (4, 'hungry wilson', 4.900), (5, 'naughty swartz', 9.524); +insert into t2 select * from t1; +--sorted_result +select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str); +# For issue 19450 release-4.0 +set @@tidb_partition_prune_mode='static'; +--sorted_result +select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str); +set @@tidb_partition_prune_mode=default; + +# TestImproveCoverage +drop table if exists coverage_rr, coverage_dt; +create table coverage_rr ( +pk1 varchar(35) NOT NULL, +pk2 int NOT NULL, +c int, +PRIMARY KEY (pk1,pk2)) partition by hash(pk2) partitions 4; +create table coverage_dt (pk1 varchar(35), pk2 int); +insert into coverage_rr values ('ios', 3, 2),('android', 4, 7),('linux',5,1); +insert into coverage_dt values ('apple',3),('ios',3),('linux',5); +set @@tidb_partition_prune_mode = 'dynamic'; +--sorted_result +select /*+ INL_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2); +--sorted_result +select /*+ INL_MERGE_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2); +set @@tidb_partition_prune_mode = default; + +# TestOrderByOnUnsignedPk +drop table if exists tunsigned_hash; +create table tunsigned_hash(a bigint unsigned primary key) partition by hash(a) partitions 6; +insert into tunsigned_hash values(25), (9279808998424041135); +select min(a) from tunsigned_hash; +select max(a) from tunsigned_hash; + +# 
TestPartitionHandleWithKeepOrder +# https://github.com/pingcap/tidb/issues/44312 +drop table if exists t, t1; +create table t (id int not null, store_id int not null )partition by range (store_id)(partition p0 values less than (6),partition p1 values less than (11),partition p2 values less than (16),partition p3 values less than (21)); +create table t1(id int not null, store_id int not null); +insert into t values (1, 1); +insert into t values (2, 17); +insert into t1 values (0, 18); +alter table t exchange partition p3 with table t1; +alter table t add index idx(id); +analyze table t; +--sorted_result +select *,_tidb_rowid from t use index(idx) order by id limit 2; +drop table t, t1; +create table t (a int, b int, c int, key `idx_ac`(a, c), key `idx_bc`(b, c))partition by range (b)(partition p0 values less than (6),partition p1 values less than (11),partition p2 values less than (16),partition p3 values less than (21)); +create table t1 (a int, b int, c int, key `idx_ac`(a, c), key `idx_bc`(b, c)); +insert into t values (1,2,3), (2,3,4), (3,4,5); +insert into t1 values (1,18,3); +alter table t exchange partition p3 with table t1; +analyze table t; +--sorted_result +select * from t where a = 1 or b = 5 order by c limit 2; + +# TestOrderByOnHandle +## indexLookUp + _tidb_rowid +drop table if exists t; +CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,KEY `idx_b` (`b`)) PARTITION BY HASH (`a`) PARTITIONS 2; +insert into t values (2,-1,3), (3,2,2), (1,1,1); +select * from t use index(idx_b) order by b, _tidb_rowid limit 10; +analyze table t; +select * from t use index(idx_b) order by b, _tidb_rowid limit 10; +## indexLookUp + pkIsHandle +drop table if exists t; +CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,primary key(`a`),KEY `idx_b` (`b`)) PARTITION BY HASH (`a`) PARTITIONS 2; +insert into t values (2,-1,3), (3,2,2), (1,1,1); +select * from t use index(idx_b) order by b, a limit 10; 
+analyze table t; +select * from t use index(idx_b) order by b, a limit 10; +## indexMerge + _tidb_rowid +drop table if exists t; +CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,KEY `idx_b` (`b`),KEY `idx_c` (`c`)) PARTITION BY HASH (`a`) PARTITIONS 2; +insert into t values (2,-1,3), (3,2,2), (1,1,1); +select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by _tidb_rowid limit 10; +analyze table t; +select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by _tidb_rowid limit 10; +## indexMerge + pkIsHandle +drop table if exists t; +CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,KEY `idx_b` (`b`),KEY `idx_c` (`c`),PRIMARY KEY (`a`)) PARTITION BY HASH (`a`) PARTITIONS 2; +insert into t values (2,-1,3), (3,2,2), (1,1,1); +select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by a limit 10; +analyze table t; +select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by a limit 10; + +# TestDynamicModeByDefault +drop table if exists trange, thash; +create table trange(a int, b int, primary key(a) clustered, index idx_b(b)) partition by range(a) ( + partition p0 values less than(300), + partition p1 values less than(500), + partition p2 values less than(1100)); +create table thash(a int, b int, primary key(a) clustered, index idx_b(b)) partition by hash(a) partitions 4; +analyze table thash, trange; +explain format='brief' select * from trange where a>400; +explain format='brief' select * from thash where a>=100; + +# TestAddDropPartitions +drop table if exists t; +set @@tidb_partition_prune_mode = 'dynamic'; +create table t(a int) partition by range(a) ( + partition p0 values less than (5), + partition p1 values less than (10), + partition p2 values less than (15)); +insert into t values (2), (7), (12); +analyze table t; +explain format='brief' select * from t where a < 3; +--sorted_result +select * from t where a < 3; +explain format='brief' 
select * from t where a < 8; +--sorted_result +select * from t where a < 8; +explain format='brief' select * from t where a < 20; +--sorted_result +select * from t where a < 20; +alter table t drop partition p0; +explain format='brief' select * from t where a < 3; +--sorted_result +select * from t where a < 3; +explain format='brief' select * from t where a < 8; +--sorted_result +select * from t where a < 8; +explain format='brief' select * from t where a < 20; +--sorted_result +select * from t where a < 20; +alter table t add partition (partition p3 values less than (20)); +alter table t add partition (partition p4 values less than (40)); +insert into t values (15), (25); +explain format='brief' select * from t where a < 3; +--sorted_result +select * from t where a < 3; +explain format='brief' select * from t where a < 8; +--sorted_result +select * from t where a < 8; +explain format='brief' select * from t where a < 20; +--sorted_result +select * from t where a < 20; + +# TestPartitionPruningInTransaction +drop table if exists t; +create table t(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11)); +analyze table t; +set @@tidb_partition_prune_mode = 'static'; +begin; +explain format='brief' select * from t; +--sorted_result +select * from t; +explain format='brief' select * from t where a > 3; +--sorted_result +select * from t where a > 3; +explain format='brief' select * from t where a > 7; +--sorted_result +select * from t where a > 7; +rollback; +set @@tidb_partition_prune_mode = 'dynamic'; +begin; +explain format='brief' select * from t; +--sorted_result +select * from t; +explain format='brief' select * from t where a > 3; +--sorted_result +select * from t where a > 3; +explain format='brief' select * from t where a > 7; +--sorted_result +select * from t where a > 7; +rollback; +set @@tidb_partition_prune_mode = default; + +# TestPartitionOnMissing +drop table if exists 
tt1, tt2; +set global tidb_partition_prune_mode='dynamic'; +set session tidb_partition_prune_mode='dynamic'; +CREATE TABLE tt1 ( + id INT NOT NULL, + listid INT, + name varchar(10), + primary key (listid) clustered +) +PARTITION BY LIST (listid) ( + PARTITION p1 VALUES IN (1), + PARTITION p2 VALUES IN (2), + PARTITION p3 VALUES IN (3), + PARTITION p4 VALUES IN (4) +); +CREATE TABLE tt2 ( + id INT NOT NULL, + listid INT +); +create index idx_listid on tt1(id,listid); +create index idx_listid on tt2(listid); +insert into tt1 values(1,1,1); +insert into tt1 values(2,2,2); +insert into tt1 values(3,3,3); +insert into tt1 values(4,4,4); +insert into tt2 values(1,1); +insert into tt2 values(2,2); +insert into tt2 values(3,3); +insert into tt2 values(4,4); +insert into tt2 values(5,5); +analyze table tt1; +analyze table tt2; +select /*+ inl_join(tt1)*/ count(*) from tt2 + left join tt1 on tt1.listid=tt2.listid and tt1.id=tt2.id; +select /*+ inl_join(tt1)*/ count(*) from tt2 + left join tt1 on tt1.listid=tt2.listid; +explain format = 'brief' select /*+ inl_join(tt1)*/ count(*) from tt2 + left join tt1 on tt1.listid=tt2.listid; +set global tidb_partition_prune_mode=default; +set session tidb_partition_prune_mode=default; From b9b336843bcbc3772ef1454e0d33fbf2722657a9 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Wed, 25 Oct 2023 16:14:03 +0800 Subject: [PATCH 02/33] lightning: every HTTP retry should use its own request (#47959) close pingcap/tidb#47930 --- br/pkg/pdutil/pd.go | 13 ++++++++----- br/pkg/pdutil/pd_serial_test.go | 11 ++++++++++- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/br/pkg/pdutil/pd.go b/br/pkg/pdutil/pd.go index 9d403017c69d6..851009e7aeeb0 100644 --- a/br/pkg/pdutil/pd.go +++ b/br/pkg/pdutil/pd.go @@ -164,13 +164,16 @@ func pdRequestWithCode( return 0, nil, errors.Trace(err) } reqURL := fmt.Sprintf("%s/%s", u, prefix) - req, err := http.NewRequestWithContext(ctx, method, reqURL, body) - if err != nil { - return 0, nil, 
errors.Trace(err) - } - var resp *http.Response + var ( + req *http.Request + resp *http.Response + ) count := 0 for { + req, err = http.NewRequestWithContext(ctx, method, reqURL, body) + if err != nil { + return 0, nil, errors.Trace(err) + } resp, err = cli.Do(req) //nolint:bodyclose count++ failpoint.Inject("InjectClosed", func(v failpoint.Value) { diff --git a/br/pkg/pdutil/pd_serial_test.go b/br/pkg/pdutil/pd_serial_test.go index 32f69106b2139..329c34c9ed810 100644 --- a/br/pkg/pdutil/pd_serial_test.go +++ b/br/pkg/pdutil/pd_serial_test.go @@ -3,6 +3,7 @@ package pdutil import ( + "bytes" "context" "encoding/hex" "encoding/json" @@ -186,8 +187,16 @@ func TestPDRequestRetry(t *testing.T) { w.WriteHeader(http.StatusOK) })) cli := http.DefaultClient + cli.Transport = http.DefaultTransport.(*http.Transport).Clone() + // although the real code doesn't disable keep alive, we need to disable it + // in test to avoid the connection being reused and #47930 can't appear. The + // real code will only meet #47930 when go's internal http client just dropped + // all idle connections. 
+ cli.Transport.(*http.Transport).DisableKeepAlives = true + taddr := ts.URL - _, reqErr := pdRequest(ctx, taddr, "", cli, http.MethodGet, nil) + body := bytes.NewBuffer([]byte("test")) + _, reqErr := pdRequest(ctx, taddr, "", cli, http.MethodPost, body) require.NoError(t, reqErr) ts.Close() count = 0 From 97310ad5dbd4bcf7234c9f54077c925dfae7820e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E8=B6=85?= Date: Wed, 25 Oct 2023 18:15:33 +0800 Subject: [PATCH 03/33] types: fix issue that we can insert negative value to unsinged float column sometimes (#47946) close pingcap/tidb#47945 --- pkg/executor/insert_test.go | 13 +++++++++++++ pkg/expression/builtin_cast.go | 2 +- pkg/expression/builtin_cast_vec.go | 2 +- pkg/types/datum.go | 14 +++++++++----- 4 files changed, 24 insertions(+), 7 deletions(-) diff --git a/pkg/executor/insert_test.go b/pkg/executor/insert_test.go index 69376dc386999..6cfa7fb258ad8 100644 --- a/pkg/executor/insert_test.go +++ b/pkg/executor/insert_test.go @@ -1615,3 +1615,16 @@ func TestInsertBigScientificNotation(t *testing.T) { tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1264 Out of range value for column 'a' at row 1")) tk.MustQuery("select id, a from t1 order by id asc").Check(testkit.Rows("1 2147483647", "2 -2147483648")) } + +// see issue: https://github.com/pingcap/tidb/issues/47945 +func TestUnsignedDecimalFloatInsertNegative(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec(`use test`) + tk.MustExec("create table tf(a float(1, 0) unsigned)") + err := tk.ExecToErr("insert into tf values('-100')") + require.EqualError(t, err, "[types:1264]Out of range value for column 'a' at row 1") + tk.MustExec("set @@sql_mode=''") + tk.MustExec("insert into tf values('-100')") + tk.MustQuery("select * from tf").Check(testkit.Rows("0")) +} diff --git a/pkg/expression/builtin_cast.go b/pkg/expression/builtin_cast.go index 5e989bfe76be4..656cc0c4f5780 100644 --- 
a/pkg/expression/builtin_cast.go +++ b/pkg/expression/builtin_cast.go @@ -1418,7 +1418,7 @@ func (b *builtinCastStringAsRealSig) evalReal(row chunk.Row) (res float64, isNul if b.inUnion && mysql.HasUnsignedFlag(b.tp.GetFlag()) && res < 0 { res = 0 } - res, err = types.ProduceFloatWithSpecifiedTp(res, b.tp, sc) + res, err = types.ProduceFloatWithSpecifiedTp(res, b.tp) return res, false, err } diff --git a/pkg/expression/builtin_cast_vec.go b/pkg/expression/builtin_cast_vec.go index d06e9d09985cd..f6e839a5776f6 100644 --- a/pkg/expression/builtin_cast_vec.go +++ b/pkg/expression/builtin_cast_vec.go @@ -1692,7 +1692,7 @@ func (b *builtinCastStringAsRealSig) vecEvalReal(input *chunk.Chunk, result *chu if b.inUnion && mysql.HasUnsignedFlag(b.tp.GetFlag()) && res < 0 { res = 0 } - res, err = types.ProduceFloatWithSpecifiedTp(res, b.tp, sc) + res, err = types.ProduceFloatWithSpecifiedTp(res, b.tp) if err != nil { return err } diff --git a/pkg/types/datum.go b/pkg/types/datum.go index b6c9594fd9a7d..6d4cec1f944a2 100644 --- a/pkg/types/datum.go +++ b/pkg/types/datum.go @@ -988,7 +988,7 @@ func (d *Datum) convertToFloat(sc *stmtctx.StatementContext, target *FieldType) default: return invalidConv(d, target.GetType()) } - f, err1 := ProduceFloatWithSpecifiedTp(f, target, sc) + f, err1 := ProduceFloatWithSpecifiedTp(f, target) if err == nil && err1 != nil { err = err1 } @@ -1001,7 +1001,7 @@ func (d *Datum) convertToFloat(sc *stmtctx.StatementContext, target *FieldType) } // ProduceFloatWithSpecifiedTp produces a new float64 according to `flen` and `decimal`. 
-func ProduceFloatWithSpecifiedTp(f float64, target *FieldType, sc *stmtctx.StatementContext) (_ float64, err error) { +func ProduceFloatWithSpecifiedTp(f float64, target *FieldType) (_ float64, err error) { if math.IsNaN(f) { return 0, overflow(f, target.GetType()) } @@ -1012,13 +1012,17 @@ func ProduceFloatWithSpecifiedTp(f float64, target *FieldType, sc *stmtctx.State // If no D is set, we will handle it like origin float whether M is set or not. if target.GetFlen() != UnspecifiedLength && target.GetDecimal() != UnspecifiedLength { f, err = TruncateFloat(f, target.GetFlen(), target.GetDecimal()) - if err = sc.HandleOverflow(err, err); err != nil { - return f, errors.Trace(err) - } } if mysql.HasUnsignedFlag(target.GetFlag()) && f < 0 { return 0, overflow(f, target.GetType()) } + + if err != nil { + // We must return the error got from TruncateFloat after checking whether the target is unsigned to make sure + // the returned float is not negative when the target type is unsigned. + return f, errors.Trace(err) + } + if target.GetType() == mysql.TypeFloat && (f > math.MaxFloat32 || f < -math.MaxFloat32) { if f > 0 { return math.MaxFloat32, overflow(f, target.GetType()) From cd0587eb75d70c35f01153ac5b3b6bcafaff1bee Mon Sep 17 00:00:00 2001 From: Weizhen Wang Date: Wed, 25 Oct 2023 19:18:33 +0800 Subject: [PATCH 04/33] *: replace mathutil.Max/Min with built-in max/min (#47939) ref pingcap/tidb#45933 --- pkg/ddl/backfilling_scheduler.go | 9 +++-- pkg/ddl/partition.go | 2 +- pkg/ddl/sequence.go | 2 +- pkg/distsql/BUILD.bazel | 1 - pkg/distsql/distsql_test.go | 5 ++- pkg/infoschema/BUILD.bazel | 1 - pkg/infoschema/builder.go | 7 ++-- pkg/meta/autoid/BUILD.bazel | 1 - pkg/meta/autoid/autoid.go | 25 +++++++------- pkg/metrics/BUILD.bazel | 1 - pkg/metrics/telemetry.go | 5 ++- pkg/privilege/privileges/privileges.go | 4 +-- pkg/resourcemanager/pool/spool/BUILD.bazel | 1 - pkg/resourcemanager/pool/spool/spool.go | 3 +- pkg/session/BUILD.bazel | 1 - 
pkg/session/nontransactional.go | 3 +- pkg/session/session.go | 3 +- pkg/store/copr/BUILD.bazel | 1 - pkg/store/copr/coprocessor.go | 3 +- pkg/store/copr/mpp.go | 3 +- pkg/store/copr/region_cache.go | 3 +- pkg/store/mockstore/unistore/tikv/BUILD.bazel | 1 - pkg/store/mockstore/unistore/tikv/write.go | 3 +- pkg/testkit/testenv/BUILD.bazel | 1 - pkg/testkit/testenv/testenv.go | 4 +-- pkg/ttl/cache/BUILD.bazel | 1 - pkg/ttl/cache/table.go | 3 +- pkg/types/datum_eval.go | 3 +- pkg/types/mydecimal.go | 33 +++++++++---------- pkg/types/time.go | 2 +- pkg/util/chunk/BUILD.bazel | 2 -- pkg/util/chunk/alloc.go | 3 +- pkg/util/chunk/chunk.go | 5 ++- pkg/util/chunk/chunk_test.go | 3 +- pkg/util/chunk/codec.go | 3 +- pkg/util/chunk/list_test.go | 3 +- pkg/util/chunk/row_in_disk_test.go | 3 +- pkg/util/memory/BUILD.bazel | 2 -- pkg/util/memory/meminfo.go | 5 ++- pkg/util/memory/tracker_test.go | 3 +- pkg/util/ranger/BUILD.bazel | 1 - pkg/util/ranger/detacher.go | 5 ++- 42 files changed, 66 insertions(+), 107 deletions(-) diff --git a/pkg/ddl/backfilling_scheduler.go b/pkg/ddl/backfilling_scheduler.go index cda50fd6be8cc..eca0b9a7c71e8 100644 --- a/pkg/ddl/backfilling_scheduler.go +++ b/pkg/ddl/backfilling_scheduler.go @@ -37,7 +37,6 @@ import ( "github.com/pingcap/tidb/pkg/util/dbterror" "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" decoder "github.com/pingcap/tidb/pkg/util/rowDecoder" "go.uber.org/zap" ) @@ -175,7 +174,7 @@ func initSessCtx( func (*txnBackfillScheduler) expectedWorkerSize() (size int) { workerCnt := int(variable.GetDDLReorgWorkerCounter()) - return mathutil.Min(workerCnt, maxBackfillWorkerSize) + return min(workerCnt, maxBackfillWorkerSize) } func (b *txnBackfillScheduler) currentWorkerSize() int { @@ -465,9 +464,9 @@ func (*ingestBackfillScheduler) expectedWorkerSize() (readerSize int, writerSize func expectedIngestWorkerCnt() (readerCnt, writerCnt int) { workerCnt := 
int(variable.GetDDLReorgWorkerCounter()) - readerCnt = mathutil.Min(workerCnt/2, maxBackfillWorkerSize) - readerCnt = mathutil.Max(readerCnt, 1) - writerCnt = mathutil.Min(workerCnt/2+2, maxBackfillWorkerSize) + readerCnt = min(workerCnt/2, maxBackfillWorkerSize) + readerCnt = max(readerCnt, 1) + writerCnt = min(workerCnt/2+2, maxBackfillWorkerSize) return readerCnt, writerCnt } diff --git a/pkg/ddl/partition.go b/pkg/ddl/partition.go index ac141e49054ba..721d257c03798 100644 --- a/pkg/ddl/partition.go +++ b/pkg/ddl/partition.go @@ -866,7 +866,7 @@ func getLowerBoundInt(partCols ...*model.ColumnInfo) int64 { if mysql.HasUnsignedFlag(col.FieldType.GetFlag()) { return 0 } - ret = mathutil.Min(ret, types.IntergerSignedLowerBound(col.GetType())) + ret = min(ret, types.IntergerSignedLowerBound(col.GetType())) } return ret } diff --git a/pkg/ddl/sequence.go b/pkg/ddl/sequence.go index 9772322d90f06..e60043be6efe1 100644 --- a/pkg/ddl/sequence.go +++ b/pkg/ddl/sequence.go @@ -132,7 +132,7 @@ func handleSequenceOptions(seqOptions []*ast.SequenceOption, sequenceInfo *model sequenceInfo.MaxValue = model.DefaultNegativeSequenceMaxValue } if !startSetFlag { - sequenceInfo.Start = mathutil.Min(sequenceInfo.MaxValue, model.DefaultNegativeSequenceStartValue) + sequenceInfo.Start = min(sequenceInfo.MaxValue, model.DefaultNegativeSequenceStartValue) } if !minSetFlag { sequenceInfo.MinValue = model.DefaultNegativeSequenceMinValue diff --git a/pkg/distsql/BUILD.bazel b/pkg/distsql/BUILD.bazel index e6acd81c960b6..64a0f10eb10e7 100644 --- a/pkg/distsql/BUILD.bazel +++ b/pkg/distsql/BUILD.bazel @@ -83,7 +83,6 @@ go_test( "//pkg/util/collate", "//pkg/util/disk", "//pkg/util/execdetails", - "//pkg/util/mathutil", "//pkg/util/memory", "//pkg/util/mock", "//pkg/util/paging", diff --git a/pkg/distsql/distsql_test.go b/pkg/distsql/distsql_test.go index b49a99accd2f6..461b29285d11d 100644 --- a/pkg/distsql/distsql_test.go +++ b/pkg/distsql/distsql_test.go @@ -31,7 +31,6 @@ import ( 
"github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/disk" "github.com/pingcap/tidb/pkg/util/execdetails" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/mock" "github.com/pingcap/tipb/go-tipb" @@ -228,7 +227,7 @@ func (resp *mockResponse) Next(context.Context) (kv.ResultSubset, error) { if resp.count >= resp.total { return nil, nil } - numRows := mathutil.Min(resp.batch, resp.total-resp.count) + numRows := min(resp.batch, resp.total-resp.count) resp.count += numRows var chunks []tipb.Chunk @@ -245,7 +244,7 @@ func (resp *mockResponse) Next(context.Context) (kv.ResultSubset, error) { } else { chunks = make([]tipb.Chunk, 0) for numRows > 0 { - rows := mathutil.Min(numRows, 1024) + rows := min(numRows, 1024) numRows -= rows colTypes := make([]*types.FieldType, 4) diff --git a/pkg/infoschema/BUILD.bazel b/pkg/infoschema/BUILD.bazel index a1984931d77b8..0e62ee3953ca7 100644 --- a/pkg/infoschema/BUILD.bazel +++ b/pkg/infoschema/BUILD.bazel @@ -42,7 +42,6 @@ go_library( "//pkg/util/domainutil", "//pkg/util/execdetails", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/mock", "//pkg/util/pdapi", "//pkg/util/sem", diff --git a/pkg/infoschema/builder.go b/pkg/infoschema/builder.go index 3b030b5b1ad25..54f8d8a85dc58 100644 --- a/pkg/infoschema/builder.go +++ b/pkg/infoschema/builder.go @@ -35,7 +35,6 @@ import ( "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/util/domainutil" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/sqlexec" "go.uber.org/zap" ) @@ -420,9 +419,9 @@ func updateAutoIDForExchangePartition(store kv.Storage, ptSchemaID, ptID, ntSche // Set both tables to the maximum auto IDs between normal table and partitioned table. 
newAutoIDs := meta.AutoIDGroup{ - RowID: mathutil.Max(ptAutoIDs.RowID, ntAutoIDs.RowID), - IncrementID: mathutil.Max(ptAutoIDs.IncrementID, ntAutoIDs.IncrementID), - RandomID: mathutil.Max(ptAutoIDs.RandomID, ntAutoIDs.RandomID), + RowID: max(ptAutoIDs.RowID, ntAutoIDs.RowID), + IncrementID: max(ptAutoIDs.IncrementID, ntAutoIDs.IncrementID), + RandomID: max(ptAutoIDs.RandomID, ntAutoIDs.RandomID), } err = t.GetAutoIDAccessors(ptSchemaID, ptID).Put(newAutoIDs) if err != nil { diff --git a/pkg/meta/autoid/BUILD.bazel b/pkg/meta/autoid/BUILD.bazel index 107f9f46cf768..523258d50b831 100644 --- a/pkg/meta/autoid/BUILD.bazel +++ b/pkg/meta/autoid/BUILD.bazel @@ -24,7 +24,6 @@ go_library( "//pkg/util/etcd", "//pkg/util/execdetails", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/tracing", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", diff --git a/pkg/meta/autoid/autoid.go b/pkg/meta/autoid/autoid.go index 0555868a52cb7..6a3d9f94bc27d 100644 --- a/pkg/meta/autoid/autoid.go +++ b/pkg/meta/autoid/autoid.go @@ -37,7 +37,6 @@ import ( "github.com/pingcap/tidb/pkg/util/etcd" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/tracing" "github.com/tikv/client-go/v2/txnkv/txnsnapshot" tikvutil "github.com/tikv/client-go/v2/util" @@ -353,8 +352,8 @@ func (alloc *allocator) rebase4Unsigned(ctx context.Context, requiredBase uint64 } uCurrentEnd := uint64(currentEnd) if allocIDs { - newBase = mathutil.Max(uCurrentEnd, requiredBase) - newEnd = mathutil.Min(math.MaxUint64-uint64(alloc.step), newBase) + uint64(alloc.step) + newBase = max(uCurrentEnd, requiredBase) + newEnd = min(math.MaxUint64-uint64(alloc.step), newBase) + uint64(alloc.step) } else { if uCurrentEnd >= requiredBase { newBase = uCurrentEnd @@ -412,8 +411,8 @@ func (alloc *allocator) rebase4Signed(ctx context.Context, requiredBase int64, a return err1 } if 
allocIDs { - newBase = mathutil.Max(currentEnd, requiredBase) - newEnd = mathutil.Min(math.MaxInt64-alloc.step, newBase) + alloc.step + newBase = max(currentEnd, requiredBase) + newEnd = min(math.MaxInt64-alloc.step, newBase) + alloc.step } else { if currentEnd >= requiredBase { newBase = currentEnd @@ -872,7 +871,7 @@ func SeekToFirstAutoIDUnSigned(base, increment, offset uint64) uint64 { return nr } -func (alloc *allocator) alloc4Signed(ctx context.Context, n uint64, increment, offset int64) (min int64, max int64, err error) { +func (alloc *allocator) alloc4Signed(ctx context.Context, n uint64, increment, offset int64) (mini int64, max int64, err error) { // Check offset rebase if necessary. if offset-1 > alloc.base { if err := alloc.rebase4Signed(ctx, offset-1, true); err != nil { @@ -926,7 +925,7 @@ func (alloc *allocator) alloc4Signed(ctx context.Context, n uint64, increment, o if nextStep < n1 { nextStep = n1 } - tmpStep := mathutil.Min(math.MaxInt64-newBase, nextStep) + tmpStep := min(math.MaxInt64-newBase, nextStep) // The global rest is not enough for alloc. if tmpStep < n1 { return ErrAutoincReadFailed @@ -953,12 +952,12 @@ func (alloc *allocator) alloc4Signed(ctx context.Context, n uint64, increment, o zap.Uint64("to ID", uint64(alloc.base+n1)), zap.Int64("table ID", alloc.tbID), zap.Int64("database ID", alloc.dbID)) - min = alloc.base + mini = alloc.base alloc.base += n1 - return min, alloc.base, nil + return mini, alloc.base, nil } -func (alloc *allocator) alloc4Unsigned(ctx context.Context, n uint64, increment, offset int64) (min int64, max int64, err error) { +func (alloc *allocator) alloc4Unsigned(ctx context.Context, n uint64, increment, offset int64) (mini int64, max int64, err error) { // Check offset rebase if necessary. 
if uint64(offset-1) > uint64(alloc.base) { if err := alloc.rebase4Unsigned(ctx, uint64(offset-1), true); err != nil { @@ -1017,7 +1016,7 @@ func (alloc *allocator) alloc4Unsigned(ctx context.Context, n uint64, increment, if nextStep < n1 { nextStep = n1 } - tmpStep := int64(mathutil.Min(math.MaxUint64-uint64(newBase), uint64(nextStep))) + tmpStep := int64(min(math.MaxUint64-uint64(newBase), uint64(nextStep))) // The global rest is not enough for alloc. if tmpStep < n1 { return ErrAutoincReadFailed @@ -1044,10 +1043,10 @@ func (alloc *allocator) alloc4Unsigned(ctx context.Context, n uint64, increment, zap.Uint64("to ID", uint64(alloc.base+n1)), zap.Int64("table ID", alloc.tbID), zap.Int64("database ID", alloc.dbID)) - min = alloc.base + mini = alloc.base // Use uint64 n directly. alloc.base = int64(uint64(alloc.base) + uint64(n1)) - return min, alloc.base, nil + return mini, alloc.base, nil } func getAllocatorStatsFromCtx(ctx context.Context) (context.Context, *AllocatorRuntimeStats, **tikvutil.CommitDetails) { diff --git a/pkg/metrics/BUILD.bazel b/pkg/metrics/BUILD.bazel index d302f1bbd7ea4..135275718bd93 100644 --- a/pkg/metrics/BUILD.bazel +++ b/pkg/metrics/BUILD.bazel @@ -35,7 +35,6 @@ go_library( "//pkg/parser/terror", "//pkg/timer/metrics", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/promutil", "@com_github_pingcap_errors//:errors", "@com_github_prometheus_client_golang//prometheus", diff --git a/pkg/metrics/telemetry.go b/pkg/metrics/telemetry.go index 0cb6439053eec..ea07b9b83e520 100644 --- a/pkg/metrics/telemetry.go +++ b/pkg/metrics/telemetry.go @@ -15,7 +15,6 @@ package metrics import ( - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" ) @@ -402,7 +401,7 @@ func (c TablePartitionUsageCounter) Cal(rhs TablePartitionUsageCounter) TablePar TablePartitionRangeColumnsGt2Cnt: c.TablePartitionRangeColumnsGt2Cnt - rhs.TablePartitionRangeColumnsGt2Cnt, 
TablePartitionRangeColumnsGt3Cnt: c.TablePartitionRangeColumnsGt3Cnt - rhs.TablePartitionRangeColumnsGt3Cnt, TablePartitionListColumnsCnt: c.TablePartitionListColumnsCnt - rhs.TablePartitionListColumnsCnt, - TablePartitionMaxPartitionsCnt: mathutil.Max(c.TablePartitionMaxPartitionsCnt-rhs.TablePartitionMaxPartitionsCnt, rhs.TablePartitionMaxPartitionsCnt), + TablePartitionMaxPartitionsCnt: max(c.TablePartitionMaxPartitionsCnt-rhs.TablePartitionMaxPartitionsCnt, rhs.TablePartitionMaxPartitionsCnt), TablePartitionCreateIntervalPartitionsCnt: c.TablePartitionCreateIntervalPartitionsCnt - rhs.TablePartitionCreateIntervalPartitionsCnt, TablePartitionAddIntervalPartitionsCnt: c.TablePartitionAddIntervalPartitionsCnt - rhs.TablePartitionAddIntervalPartitionsCnt, TablePartitionDropIntervalPartitionsCnt: c.TablePartitionDropIntervalPartitionsCnt - rhs.TablePartitionDropIntervalPartitionsCnt, @@ -423,7 +422,7 @@ func ResetTablePartitionCounter(pre TablePartitionUsageCounter) TablePartitionUs TablePartitionRangeColumnsGt2Cnt: readCounter(TelemetryTablePartitionRangeColumnsGt2Cnt), TablePartitionRangeColumnsGt3Cnt: readCounter(TelemetryTablePartitionRangeColumnsGt3Cnt), TablePartitionListColumnsCnt: readCounter(TelemetryTablePartitionListColumnsCnt), - TablePartitionMaxPartitionsCnt: mathutil.Max(readCounter(TelemetryTablePartitionMaxPartitionsCnt)-pre.TablePartitionMaxPartitionsCnt, pre.TablePartitionMaxPartitionsCnt), + TablePartitionMaxPartitionsCnt: max(readCounter(TelemetryTablePartitionMaxPartitionsCnt)-pre.TablePartitionMaxPartitionsCnt, pre.TablePartitionMaxPartitionsCnt), TablePartitionReorganizePartitionCnt: readCounter(TelemetryReorganizePartitionCnt), } } diff --git a/pkg/privilege/privileges/privileges.go b/pkg/privilege/privileges/privileges.go index d394bacdb69ae..1fe786399f0dd 100644 --- a/pkg/privilege/privileges/privileges.go +++ b/pkg/privilege/privileges/privileges.go @@ -984,7 +984,7 @@ func (passwordLocking *PasswordLocking) ParseJSON(passwordLockingJSON 
types.Bina if err != nil { return err } - passwordLocking.FailedLoginAttempts = mathutil.Min(passwordLocking.FailedLoginAttempts, math.MaxInt16) + passwordLocking.FailedLoginAttempts = min(passwordLocking.FailedLoginAttempts, math.MaxInt16) passwordLocking.FailedLoginAttempts = mathutil.Max(passwordLocking.FailedLoginAttempts, 0) passwordLocking.PasswordLockTimeDays, err = @@ -992,7 +992,7 @@ func (passwordLocking *PasswordLocking) ParseJSON(passwordLockingJSON types.Bina if err != nil { return err } - passwordLocking.PasswordLockTimeDays = mathutil.Min(passwordLocking.PasswordLockTimeDays, math.MaxInt16) + passwordLocking.PasswordLockTimeDays = min(passwordLocking.PasswordLockTimeDays, math.MaxInt16) passwordLocking.PasswordLockTimeDays = mathutil.Max(passwordLocking.PasswordLockTimeDays, -1) passwordLocking.FailedLoginCount, err = diff --git a/pkg/resourcemanager/pool/spool/BUILD.bazel b/pkg/resourcemanager/pool/spool/BUILD.bazel index 03bec05e729c0..6fa13aa8492ad 100644 --- a/pkg/resourcemanager/pool/spool/BUILD.bazel +++ b/pkg/resourcemanager/pool/spool/BUILD.bazel @@ -15,7 +15,6 @@ go_library( "//pkg/resourcemanager/poolmanager", "//pkg/resourcemanager/util", "//pkg/util/logutil", - "//pkg/util/mathutil", "@com_github_prometheus_client_golang//prometheus", "@com_github_sasha_s_go_deadlock//:go-deadlock", "@org_uber_go_zap//:zap", diff --git a/pkg/resourcemanager/pool/spool/spool.go b/pkg/resourcemanager/pool/spool/spool.go index 0982d1f85893f..699860de5db54 100644 --- a/pkg/resourcemanager/pool/spool/spool.go +++ b/pkg/resourcemanager/pool/spool/spool.go @@ -25,7 +25,6 @@ import ( "github.com/pingcap/tidb/pkg/resourcemanager/poolmanager" "github.com/pingcap/tidb/pkg/resourcemanager/util" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/prometheus/client_golang/prometheus" "github.com/sasha-s/go-deadlock" "go.uber.org/zap" @@ -197,7 +196,7 @@ func (p *Pool) checkAndAddRunningInternal(concurrency int32) (conc 
int32, run bo } // if concurrency is 1 , we must return a goroutine // if concurrency is more than 1, we must return at least one goroutine. - result := mathutil.Min(n, concurrency) + result := min(n, concurrency) p.running.Add(result) return result, true } diff --git a/pkg/session/BUILD.bazel b/pkg/session/BUILD.bazel index 2454f7028edfb..d653018239d85 100644 --- a/pkg/session/BUILD.bazel +++ b/pkg/session/BUILD.bazel @@ -85,7 +85,6 @@ go_library( "//pkg/util/kvcache", "//pkg/util/logutil", "//pkg/util/logutil/consistency", - "//pkg/util/mathutil", "//pkg/util/memory", "//pkg/util/parser", "//pkg/util/sem", diff --git a/pkg/session/nontransactional.go b/pkg/session/nontransactional.go index 05cedbb096f34..b9f150529ce39 100644 --- a/pkg/session/nontransactional.go +++ b/pkg/session/nontransactional.go @@ -40,7 +40,6 @@ import ( "github.com/pingcap/tidb/pkg/util/collate" "github.com/pingcap/tidb/pkg/util/dbterror" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/sqlexec" "go.uber.org/zap" @@ -850,5 +849,5 @@ func buildExecuteResults(ctx context.Context, jobs []job, maxChunkSize int, reda zap.Int("num_failed_jobs", len(failedJobs)), zap.String("failed_jobs", errStr)) return nil, fmt.Errorf("%d/%d jobs failed in the non-transactional DML: %s, ...(more in logs)", - len(failedJobs), len(jobs), errStr[:mathutil.Min(500, len(errStr)-1)]) + len(failedJobs), len(jobs), errStr[:min(500, len(errStr)-1)]) } diff --git a/pkg/session/session.go b/pkg/session/session.go index 4fc30d8786929..45c9673b862c0 100644 --- a/pkg/session/session.go +++ b/pkg/session/session.go @@ -94,7 +94,6 @@ import ( "github.com/pingcap/tidb/pkg/util/kvcache" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/logutil/consistency" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/sem" 
"github.com/pingcap/tidb/pkg/util/sli" @@ -1742,7 +1741,7 @@ func (s *session) ParseWithParams(ctx context.Context, sql string, args ...inter } if err != nil { s.rollbackOnError(ctx) - logSQL := sql[:mathutil.Min(500, len(sql))] + logSQL := sql[:min(500, len(sql))] if s.sessionVars.EnableRedactLog { logutil.Logger(ctx).Debug("parse SQL failed", zap.Error(err), zap.String("SQL", logSQL)) } else { diff --git a/pkg/store/copr/BUILD.bazel b/pkg/store/copr/BUILD.bazel index 9ad1a7250ccac..673cbffce8ade 100644 --- a/pkg/store/copr/BUILD.bazel +++ b/pkg/store/copr/BUILD.bazel @@ -33,7 +33,6 @@ go_library( "//pkg/util/execdetails", "//pkg/util/intest", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/memory", "//pkg/util/paging", "//pkg/util/tiflash", diff --git a/pkg/store/copr/coprocessor.go b/pkg/store/copr/coprocessor.go index 3b3456be0f8e7..8f5f55444897c 100644 --- a/pkg/store/copr/coprocessor.go +++ b/pkg/store/copr/coprocessor.go @@ -48,7 +48,6 @@ import ( util2 "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/paging" "github.com/pingcap/tidb/pkg/util/tracing" @@ -377,7 +376,7 @@ func buildCopTasks(bo *Backoffer, ranges *KeyRanges, opt *buildCopTaskOpt) ([]*c pagingSize = req.Paging.MinPagingSize } for i := 0; i < rLen; { - nextI := mathutil.Min(i+rangesPerTaskLimit, rLen) + nextI := min(i+rangesPerTaskLimit, rLen) hint := -1 // calculate the row count hint if hints != nil { diff --git a/pkg/store/copr/mpp.go b/pkg/store/copr/mpp.go index cd0695a3e0d9d..8998a570f67d6 100644 --- a/pkg/store/copr/mpp.go +++ b/pkg/store/copr/mpp.go @@ -31,7 +31,6 @@ import ( "github.com/pingcap/tidb/pkg/store/driver/backoff" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" 
"github.com/pingcap/tidb/pkg/util/tiflash" "github.com/pingcap/tidb/pkg/util/tiflashcompute" "github.com/tikv/client-go/v2/tikv" @@ -174,7 +173,7 @@ func (c *MPPClient) DispatchMPPTask(param kv.DispatchMPPTaskParam) (resp *mpp.Di } if len(realResp.RetryRegions) > 0 { - logutil.BgLogger().Info("TiFlash found " + strconv.Itoa(len(realResp.RetryRegions)) + " stale regions. Only first " + strconv.Itoa(mathutil.Min(10, len(realResp.RetryRegions))) + " regions will be logged if the log level is higher than Debug") + logutil.BgLogger().Info("TiFlash found " + strconv.Itoa(len(realResp.RetryRegions)) + " stale regions. Only first " + strconv.Itoa(min(10, len(realResp.RetryRegions))) + " regions will be logged if the log level is higher than Debug") for index, retry := range realResp.RetryRegions { id := tikv.NewRegionVerID(retry.Id, retry.RegionEpoch.ConfVer, retry.RegionEpoch.Version) if index < 10 || log.GetLevel() <= zap.DebugLevel { diff --git a/pkg/store/copr/region_cache.go b/pkg/store/copr/region_cache.go index 14ee9fa8357b6..cf962d9abcd96 100644 --- a/pkg/store/copr/region_cache.go +++ b/pkg/store/copr/region_cache.go @@ -27,7 +27,6 @@ import ( derr "github.com/pingcap/tidb/pkg/store/driver/error" "github.com/pingcap/tidb/pkg/store/driver/options" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/tikv/client-go/v2/metrics" "github.com/tikv/client-go/v2/tikv" "go.uber.org/zap" @@ -202,7 +201,7 @@ func (c *RegionCache) OnSendFailForBatchRegions(bo *Backoffer, store *tikv.Store logutil.Logger(bo.GetCtx()).Info("Should not reach here, OnSendFailForBatchRegions only support TiFlash") return } - logutil.Logger(bo.GetCtx()).Info("Send fail for " + strconv.Itoa(len(regionInfos)) + " regions, will switch region peer for these regions. 
Only first " + strconv.Itoa(mathutil.Min(10, len(regionInfos))) + " regions will be logged if the log level is higher than Debug") + logutil.Logger(bo.GetCtx()).Info("Send fail for " + strconv.Itoa(len(regionInfos)) + " regions, will switch region peer for these regions. Only first " + strconv.Itoa(min(10, len(regionInfos))) + " regions will be logged if the log level is higher than Debug") for index, ri := range regionInfos { if ri.Meta == nil { continue diff --git a/pkg/store/mockstore/unistore/tikv/BUILD.bazel b/pkg/store/mockstore/unistore/tikv/BUILD.bazel index 793f41ef45357..978b52db23886 100644 --- a/pkg/store/mockstore/unistore/tikv/BUILD.bazel +++ b/pkg/store/mockstore/unistore/tikv/BUILD.bazel @@ -33,7 +33,6 @@ go_library( "//pkg/tablecodec", "//pkg/types", "//pkg/util/codec", - "//pkg/util/mathutil", "//pkg/util/rowcodec", "@com_github_dgryski_go_farm//:go-farm", "@com_github_gogo_protobuf//proto", diff --git a/pkg/store/mockstore/unistore/tikv/write.go b/pkg/store/mockstore/unistore/tikv/write.go index 0d40dd4bb1c80..722eb6fa870d8 100644 --- a/pkg/store/mockstore/unistore/tikv/write.go +++ b/pkg/store/mockstore/unistore/tikv/write.go @@ -27,7 +27,6 @@ import ( "github.com/pingcap/tidb/pkg/store/mockstore/unistore/lockstore" "github.com/pingcap/tidb/pkg/store/mockstore/unistore/tikv/dbreader" "github.com/pingcap/tidb/pkg/store/mockstore/unistore/tikv/mvcc" - "github.com/pingcap/tidb/pkg/util/mathutil" "go.uber.org/zap" ) @@ -332,7 +331,7 @@ func (writer *dbWriter) collectRangeKeys(it *badger.Iterator, startKey, endKey [ func (writer *dbWriter) deleteKeysInBatch(latchHandle mvcc.LatchHandle, keys []y.Key, batchSize int) error { for len(keys) > 0 { - batchSize := mathutil.Min(len(keys), batchSize) + batchSize := min(len(keys), batchSize) batchKeys := keys[:batchSize] keys = keys[batchSize:] hashVals := userKeysToHashVals(batchKeys...) 
diff --git a/pkg/testkit/testenv/BUILD.bazel b/pkg/testkit/testenv/BUILD.bazel index 970c503ebb732..abf9ab5e003f5 100644 --- a/pkg/testkit/testenv/BUILD.bazel +++ b/pkg/testkit/testenv/BUILD.bazel @@ -5,5 +5,4 @@ go_library( srcs = ["testenv.go"], importpath = "github.com/pingcap/tidb/pkg/testkit/testenv", visibility = ["//visibility:public"], - deps = ["//pkg/util/mathutil"], ) diff --git a/pkg/testkit/testenv/testenv.go b/pkg/testkit/testenv/testenv.go index f75668fd739cf..e0d2da4e856fb 100644 --- a/pkg/testkit/testenv/testenv.go +++ b/pkg/testkit/testenv/testenv.go @@ -16,11 +16,9 @@ package testenv import ( "runtime" - - "github.com/pingcap/tidb/pkg/util/mathutil" ) // SetGOMAXPROCSForTest sets GOMAXPROCS to 16 if it is greater than 16. func SetGOMAXPROCSForTest() { - runtime.GOMAXPROCS(mathutil.Min(16, runtime.GOMAXPROCS(0))) + runtime.GOMAXPROCS(min(16, runtime.GOMAXPROCS(0))) } diff --git a/pkg/ttl/cache/BUILD.bazel b/pkg/ttl/cache/BUILD.bazel index 839c53f1822b2..a9e6b97887d9f 100644 --- a/pkg/ttl/cache/BUILD.bazel +++ b/pkg/ttl/cache/BUILD.bazel @@ -26,7 +26,6 @@ go_library( "//pkg/util/chunk", "//pkg/util/codec", "//pkg/util/logutil", - "//pkg/util/mathutil", "@com_github_pingcap_errors//:errors", "@com_github_tikv_client_go_v2//tikv", "@org_uber_go_zap//:zap", diff --git a/pkg/ttl/cache/table.go b/pkg/ttl/cache/table.go index 1e69faef9d34b..8be33494900b0 100644 --- a/pkg/ttl/cache/table.go +++ b/pkg/ttl/cache/table.go @@ -33,7 +33,6 @@ import ( "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/codec" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/tikv/client-go/v2/tikv" ) @@ -341,7 +340,7 @@ func (t *PhysicalTable) splitRawKeyRanges(ctx context.Context, store tikv.Storag regionsPerRange := len(regionIDs) / splitCnt oversizeCnt := len(regionIDs) % splitCnt - ranges := make([]kv.KeyRange, 0, mathutil.Min(len(regionIDs), splitCnt)) + ranges := make([]kv.KeyRange, 0, min(len(regionIDs), 
splitCnt)) for len(regionIDs) > 0 { startRegion, err := regionCache.LocateRegionByID(tikv.NewBackofferWithVars(ctx, 20000, nil), regionIDs[0]) diff --git a/pkg/types/datum_eval.go b/pkg/types/datum_eval.go index cb3c0a87148ed..204a873e0e0ad 100644 --- a/pkg/types/datum_eval.go +++ b/pkg/types/datum_eval.go @@ -17,7 +17,6 @@ package types import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/parser/opcode" - "github.com/pingcap/tidb/pkg/util/mathutil" ) // ComputePlus computes the result of a+b. @@ -56,7 +55,7 @@ func ComputePlus(a, b Datum) (d Datum, err error) { r := new(MyDecimal) err = DecimalAdd(a.GetMysqlDecimal(), b.GetMysqlDecimal(), r) d.SetMysqlDecimal(r) - d.SetFrac(mathutil.Max(a.Frac(), b.Frac())) + d.SetFrac(max(a.Frac(), b.Frac())) return d, err } } diff --git a/pkg/types/mydecimal.go b/pkg/types/mydecimal.go index baca38fef4443..f2127bf4fbfe2 100644 --- a/pkg/types/mydecimal.go +++ b/pkg/types/mydecimal.go @@ -24,7 +24,6 @@ import ( "github.com/pingcap/log" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" - "github.com/pingcap/tidb/pkg/util/mathutil" "go.uber.org/zap" ) @@ -354,7 +353,7 @@ func (d *MyDecimal) ToString() (str []byte) { for ; digitsFrac > 0; digitsFrac -= digitsPerWord { x := d.wordBuf[wordIdx] wordIdx++ - for i := mathutil.Min(digitsFrac, digitsPerWord); i > 0; i-- { + for i := min(digitsFrac, digitsPerWord); i > 0; i-- { y := x / digMask str[fracIdx] = byte(y) + '0' fracIdx++ @@ -381,7 +380,7 @@ func (d *MyDecimal) ToString() (str []byte) { for ; digitsInt > 0; digitsInt -= digitsPerWord { wordIdx-- x := d.wordBuf[wordIdx] - for i := mathutil.Min(digitsInt, digitsPerWord); i > 0; i-- { + for i := min(digitsInt, digitsPerWord); i > 0; i-- { y := x / 10 strIdx-- str[strIdx] = '0' + byte(x-y*10) @@ -841,7 +840,7 @@ func (d *MyDecimal) Round(to *MyDecimal, frac int, roundMode RoundMode) (err err if to != d { copy(to.wordBuf[:], d.wordBuf[:]) to.negative = d.negative - to.digitsInt = 
int8(mathutil.Min(wordsInt, wordBufLen) * digitsPerWord) + to.digitsInt = int8(min(wordsInt, wordBufLen) * digitsPerWord) } if wordsFracTo > wordsFrac { idx := wordsInt + wordsFrac @@ -942,7 +941,7 @@ func (d *MyDecimal) Round(to *MyDecimal, frac int, roundMode RoundMode) (err err frac = wordsFracTo * digitsPerWord err = ErrTruncated } - for toIdx = wordsInt + mathutil.Max(wordsFracTo, 0); toIdx > 0; toIdx-- { + for toIdx = wordsInt + max(wordsFracTo, 0); toIdx > 0; toIdx-- { if toIdx < wordBufLen { to.wordBuf[toIdx] = to.wordBuf[toIdx-1] } else { @@ -966,7 +965,7 @@ func (d *MyDecimal) Round(to *MyDecimal, frac int, roundMode RoundMode) (err err /* making 'zero' with the proper scale */ idx := wordsFracTo + 1 to.digitsInt = 1 - to.digitsFrac = int8(mathutil.Max(frac, 0)) + to.digitsFrac = int8(max(frac, 0)) to.negative = false for toIdx < idx { to.wordBuf[toIdx] = 0 @@ -1603,7 +1602,7 @@ func DecimalNeg(from *MyDecimal) *MyDecimal { // of `to` may be changed during evaluating. func DecimalAdd(from1, from2, to *MyDecimal) error { from1, from2, to = validateArgs(from1, from2, to) - to.resultFrac = mathutil.Max(from1.resultFrac, from2.resultFrac) + to.resultFrac = max(from1.resultFrac, from2.resultFrac) if from1.negative == from2.negative { return doAdd(from1, from2, to) } @@ -1614,7 +1613,7 @@ func DecimalAdd(from1, from2, to *MyDecimal) error { // DecimalSub subs one decimal from another, sets the result to 'to'. 
func DecimalSub(from1, from2, to *MyDecimal) error { from1, from2, to = validateArgs(from1, from2, to) - to.resultFrac = mathutil.Max(from1.resultFrac, from2.resultFrac) + to.resultFrac = max(from1.resultFrac, from2.resultFrac) if from1.negative == from2.negative { _, err := doSub(from1, from2, to) return err @@ -1650,7 +1649,7 @@ func doSub(from1, from2, to *MyDecimal) (cmp int, err error) { wordsFrac1 = digitsToWords(int(from1.digitsFrac)) wordsInt2 = digitsToWords(int(from2.digitsInt)) wordsFrac2 = digitsToWords(int(from2.digitsFrac)) - wordsFracTo = mathutil.Max(wordsFrac1, wordsFrac2) + wordsFracTo = max(wordsFrac1, wordsFrac2) start1 = 0 stop1 = wordsInt1 @@ -1815,8 +1814,8 @@ func doAdd(from1, from2, to *MyDecimal) error { wordsFrac1 = digitsToWords(int(from1.digitsFrac)) wordsInt2 = digitsToWords(int(from2.digitsInt)) wordsFrac2 = digitsToWords(int(from2.digitsFrac)) - wordsIntTo = mathutil.Max(wordsInt1, wordsInt2) - wordsFracTo = mathutil.Max(wordsFrac1, wordsFrac2) + wordsIntTo = max(wordsInt1, wordsInt2) + wordsFracTo = max(wordsFrac1, wordsFrac2) ) var x int32 @@ -1840,7 +1839,7 @@ func doAdd(from1, from2, to *MyDecimal) error { idxTo := wordsIntTo + wordsFracTo to.negative = from1.negative to.digitsInt = int8(wordsIntTo * digitsPerWord) - to.digitsFrac = mathutil.Max(from1.digitsFrac, from2.digitsFrac) + to.digitsFrac = max(from1.digitsFrac, from2.digitsFrac) if err != nil { if to.digitsFrac > int8(wordsFracTo*digitsPerWord) { @@ -1978,7 +1977,7 @@ func DecimalMul(from1, from2, to *MyDecimal) error { tmp1 = wordsIntTo tmp2 = wordsFracTo ) - to.resultFrac = mathutil.Min(from1.resultFrac+from2.resultFrac, mysql.MaxDecimalScale) + to.resultFrac = min(from1.resultFrac+from2.resultFrac, mysql.MaxDecimalScale) wordsIntTo, wordsFracTo, err = fixWordCntError(wordsIntTo, wordsFracTo) to.negative = from1.negative != from2.negative to.digitsFrac = from1.digitsFrac + from2.digitsFrac @@ -2093,7 +2092,7 @@ func DecimalMul(from1, from2, to *MyDecimal) error { // 
fracIncr - increment of fraction func DecimalDiv(from1, from2, to *MyDecimal, fracIncr int) error { from1, from2, to = validateArgs(from1, from2, to) - to.resultFrac = mathutil.Min(from1.resultFrac+int8(fracIncr), mysql.MaxDecimalScale) + to.resultFrac = min(from1.resultFrac+int8(fracIncr), mysql.MaxDecimalScale) return doDivMod(from1, from2, to, nil, fracIncr) } @@ -2123,7 +2122,7 @@ DecimalMod does modulus of two decimals. */ func DecimalMod(from1, from2, to *MyDecimal) error { from1, from2, to = validateArgs(from1, from2, to) - to.resultFrac = mathutil.Max(from1.resultFrac, from2.resultFrac) + to.resultFrac = max(from1.resultFrac, from2.resultFrac) return doDivMod(from1, from2, nil, to, 0) } @@ -2191,7 +2190,7 @@ func doDivMod(from1, from2, to, mod *MyDecimal, fracIncr int) error { // digitsFrac=max(frac1, frac2), as for subtraction // digitsInt=from2.digitsInt to.negative = from1.negative - to.digitsFrac = mathutil.Max(from1.digitsFrac, from2.digitsFrac) + to.digitsFrac = max(from1.digitsFrac, from2.digitsFrac) } else { wordsFracTo = digitsToWords(frac1 + frac2 + fracIncr) wordsIntTo, wordsFracTo, err = fixWordCntError(wordsIntTo, wordsFracTo) @@ -2356,7 +2355,7 @@ func doDivMod(from1, from2, to, mod *MyDecimal, fracIncr int) error { return ErrOverflow } stop1 = start1 + wordsIntTo + wordsFracTo - to.digitsInt = int8(mathutil.Min(wordsIntTo*digitsPerWord, int(from2.digitsInt))) + to.digitsInt = int8(min(wordsIntTo*digitsPerWord, int(from2.digitsInt))) } if wordsIntTo+wordsFracTo > wordBufLen { stop1 -= wordsIntTo + wordsFracTo - wordBufLen diff --git a/pkg/types/time.go b/pkg/types/time.go index 012f3c4e27df0..35e11aacacc1c 100644 --- a/pkg/types/time.go +++ b/pkg/types/time.go @@ -2681,7 +2681,7 @@ func ParseTimeFromDecimal(sc *stmtctx.StatementContext, dec *MyDecimal) (t Time, if err != nil && !terror.ErrorEqual(err, ErrTruncated) { return ZeroTime, err } - fsp := mathutil.Min(MaxFsp, int(dec.GetDigitsFrac())) + fsp := min(MaxFsp, int(dec.GetDigitsFrac())) t, 
err = parseDateTimeFromNum(sc, intPart) if err != nil { return ZeroTime, err diff --git a/pkg/util/chunk/BUILD.bazel b/pkg/util/chunk/BUILD.bazel index e8ee18cba64ea..8aa1fa4f47f36 100644 --- a/pkg/util/chunk/BUILD.bazel +++ b/pkg/util/chunk/BUILD.bazel @@ -30,7 +30,6 @@ go_library( "//pkg/util/encrypt", "//pkg/util/hack", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/memory", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", @@ -67,7 +66,6 @@ go_test( "//pkg/testkit/testsetup", "//pkg/types", "//pkg/util/collate", - "//pkg/util/mathutil", "//pkg/util/memory", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", diff --git a/pkg/util/chunk/alloc.go b/pkg/util/chunk/alloc.go index 306ca6283897a..ac40309a88c7c 100644 --- a/pkg/util/chunk/alloc.go +++ b/pkg/util/chunk/alloc.go @@ -18,7 +18,6 @@ import ( "math" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" ) // Allocator is an interface defined to reduce object allocation. @@ -101,7 +100,7 @@ func (a *allocator) Alloc(fields []*types.FieldType, capacity, maxChunkSize int) } // Init the chunk fields. - chk.capacity = mathutil.Min(capacity, maxChunkSize) + chk.capacity = min(capacity, maxChunkSize) chk.requiredRows = maxChunkSize // Allocate the chunk columns from the pool column allocator. for _, f := range fields { diff --git a/pkg/util/chunk/chunk.go b/pkg/util/chunk/chunk.go index db1539ed187af..6242de8450633 100644 --- a/pkg/util/chunk/chunk.go +++ b/pkg/util/chunk/chunk.go @@ -19,7 +19,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" ) var msgErrSelNotNil = "The selection vector of Chunk is not nil. 
Please file a bug to the TiDB Team" @@ -67,7 +66,7 @@ func NewChunkWithCapacity(fields []*types.FieldType, capacity int) *Chunk { func New(fields []*types.FieldType, capacity, maxChunkSize int) *Chunk { chk := &Chunk{ columns: make([]*Column, 0, len(fields)), - capacity: mathutil.Min(capacity, maxChunkSize), + capacity: min(capacity, maxChunkSize), // set the default value of requiredRows to maxChunkSize to let chk.IsFull() behave // like how we judge whether a chunk is full now, then the statement // "chk.NumRows() < maxChunkSize" @@ -325,7 +324,7 @@ func reCalcCapacity(c *Chunk, maxChunkSize int) int { if newCapacity == 0 { newCapacity = InitialCapacity } - return mathutil.Min(newCapacity, maxChunkSize) + return min(newCapacity, maxChunkSize) } // Capacity returns the capacity of the Chunk. diff --git a/pkg/util/chunk/chunk_test.go b/pkg/util/chunk/chunk_test.go index 2484f751b472a..4bbb3bc723ed0 100644 --- a/pkg/util/chunk/chunk_test.go +++ b/pkg/util/chunk/chunk_test.go @@ -26,7 +26,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/stretchr/testify/require" ) @@ -284,7 +283,7 @@ func TestChunkSizeControl(t *testing.T) { chk.Reset() for i := 1; i < maxChunkSize*2; i++ { chk.SetRequiredRows(i, maxChunkSize) - require.Equal(t, mathutil.Min(maxChunkSize, i), chk.RequiredRows()) + require.Equal(t, min(maxChunkSize, i), chk.RequiredRows()) } chk.SetRequiredRows(1, maxChunkSize). 
diff --git a/pkg/util/chunk/codec.go b/pkg/util/chunk/codec.go index 3a64e48476e6f..324a90c255a25 100644 --- a/pkg/util/chunk/codec.go +++ b/pkg/util/chunk/codec.go @@ -21,7 +21,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" ) // Codec is used to: @@ -152,7 +151,7 @@ func (*Codec) setAllNotNull(col *Column) { numNullBitmapBytes := (col.length + 7) / 8 col.nullBitmap = col.nullBitmap[:0] for i := 0; i < numNullBitmapBytes; { - numAppendBytes := mathutil.Min(numNullBitmapBytes-i, cap(allNotNullBitmap)) + numAppendBytes := min(numNullBitmapBytes-i, cap(allNotNullBitmap)) col.nullBitmap = append(col.nullBitmap, allNotNullBitmap[:numAppendBytes]...) i += numAppendBytes } diff --git a/pkg/util/chunk/list_test.go b/pkg/util/chunk/list_test.go index df6c29fc3591b..106b9ece6f438 100644 --- a/pkg/util/chunk/list_test.go +++ b/pkg/util/chunk/list_test.go @@ -22,7 +22,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/stretchr/testify/require" ) @@ -164,7 +163,7 @@ func BenchmarkListGetRow(b *testing.B) { } rand.Seed(0) ptrs := make([]RowPtr, 0, b.N) - for i := 0; i < mathutil.Min(b.N, 10000); i++ { + for i := 0; i < min(b.N, 10000); i++ { ptrs = append(ptrs, RowPtr{ ChkIdx: rand.Uint32() % uint32(numChk), RowIdx: rand.Uint32() % uint32(numRow), diff --git a/pkg/util/chunk/row_in_disk_test.go b/pkg/util/chunk/row_in_disk_test.go index 933a30834a779..1c27456a94ea3 100644 --- a/pkg/util/chunk/row_in_disk_test.go +++ b/pkg/util/chunk/row_in_disk_test.go @@ -30,7 +30,6 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -121,7 +120,7 @@ func BenchmarkDataInDiskByRowsGetRow(b *testing.B) { } 
rand.Seed(0) ptrs := make([]RowPtr, 0, b.N) - for i := 0; i < mathutil.Min(b.N, 10000); i++ { + for i := 0; i < min(b.N, 10000); i++ { ptrs = append(ptrs, RowPtr{ ChkIdx: rand.Uint32() % uint32(numChk), RowIdx: rand.Uint32() % uint32(numRow), diff --git a/pkg/util/memory/BUILD.bazel b/pkg/util/memory/BUILD.bazel index 53f33e50054ec..565b1a6567c74 100644 --- a/pkg/util/memory/BUILD.bazel +++ b/pkg/util/memory/BUILD.bazel @@ -17,7 +17,6 @@ go_library( "//pkg/util/cgroup", "//pkg/util/dbterror", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/sqlkiller", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_sysutil//:sysutil", @@ -41,7 +40,6 @@ go_test( "//pkg/errno", "//pkg/parser/terror", "//pkg/testkit/testsetup", - "//pkg/util/mathutil", "@com_github_stretchr_testify//require", "@org_uber_go_goleak//:goleak", ], diff --git a/pkg/util/memory/meminfo.go b/pkg/util/memory/meminfo.go index 8d0e25bcb759e..a9d6aa3733d1b 100644 --- a/pkg/util/memory/meminfo.go +++ b/pkg/util/memory/meminfo.go @@ -22,7 +22,6 @@ import ( "github.com/pingcap/sysutil" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/util/cgroup" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/shirou/gopsutil/v3/mem" ) @@ -116,7 +115,7 @@ func MemTotalCGroup() (uint64, error) { if err != nil { return 0, err } - memo = mathutil.Min(v.Total, memo) + memo = min(v.Total, memo) memLimit.set(memo, time.Now()) return memo, nil } @@ -135,7 +134,7 @@ func MemUsedCGroup() (uint64, error) { if err != nil { return 0, err } - memo = mathutil.Min(v.Used, memo) + memo = min(v.Used, memo) memUsage.set(memo, time.Now()) return memo, nil } diff --git a/pkg/util/memory/tracker_test.go b/pkg/util/memory/tracker_test.go index ce6d3e3856031..c244b20ad28d2 100644 --- a/pkg/util/memory/tracker_test.go +++ b/pkg/util/memory/tracker_test.go @@ -27,7 +27,6 @@ import ( "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/parser/terror" - 
"github.com/pingcap/tidb/pkg/util/mathutil" "github.com/stretchr/testify/require" ) @@ -401,7 +400,7 @@ func TestMaxConsumed(t *testing.T) { } consumed += b tracker.Consume(b) - maxConsumed = mathutil.Max(maxConsumed, consumed) + maxConsumed = max(maxConsumed, consumed) require.Equal(t, consumed, r.BytesConsumed()) require.Equal(t, maxConsumed, r.MaxConsumed()) diff --git a/pkg/util/ranger/BUILD.bazel b/pkg/util/ranger/BUILD.bazel index b0e45e7cfca1d..935274ab75cf0 100644 --- a/pkg/util/ranger/BUILD.bazel +++ b/pkg/util/ranger/BUILD.bazel @@ -30,7 +30,6 @@ go_library( "//pkg/util/codec", "//pkg/util/collate", "//pkg/util/dbterror", - "//pkg/util/mathutil", "@com_github_pingcap_errors//:errors", ], ) diff --git a/pkg/util/ranger/detacher.go b/pkg/util/ranger/detacher.go index 954bbcee5254e..071f78a8925c3 100644 --- a/pkg/util/ranger/detacher.go +++ b/pkg/util/ranger/detacher.go @@ -28,7 +28,6 @@ import ( "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/collate" - "github.com/pingcap/tidb/pkg/util/mathutil" ) // detachColumnCNFConditions detaches the condition for calculating range from the other conditions. 
@@ -207,8 +206,8 @@ func getCNFItemRangeResult(sctx sessionctx.Context, rangeResult *DetachRangeResu maxColNum = len(ran.LowVal) minColNum = len(ran.LowVal) } else { - maxColNum = mathutil.Max(maxColNum, len(ran.LowVal)) - minColNum = mathutil.Min(minColNum, len(ran.LowVal)) + maxColNum = max(maxColNum, len(ran.LowVal)) + minColNum = min(minColNum, len(ran.LowVal)) } } if minColNum != maxColNum { From d9d5b9c3972ceaeac2e670e5b5ce407fe5e8ed95 Mon Sep 17 00:00:00 2001 From: Rustin Liu Date: Wed, 25 Oct 2023 21:03:34 +0800 Subject: [PATCH 05/33] statistic: fix panic when building topN (#47928) close pingcap/tidb#35948 --- pkg/statistics/builder.go | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/pkg/statistics/builder.go b/pkg/statistics/builder.go index 6812015018ef7..363c023a579a9 100644 --- a/pkg/statistics/builder.go +++ b/pkg/statistics/builder.go @@ -24,7 +24,9 @@ import ( "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/collate" + "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/memory" + "go.uber.org/zap" ) // SortedBuilder is used to build histograms for PK and index. @@ -373,12 +375,46 @@ func BuildHistAndTopN( if err != nil { return nil, nil, errors.Trace(err) } + // For debugging invalid sample data. + var ( + foundTwice bool + firstTimeSample types.Datum + ) for j := 0; j < len(topNList); j++ { if bytes.Equal(sampleBytes, topNList[j].Encoded) { - // find the same value in topn: need to skip over this value in samples + // This should never happen, but we met this panic before, so we add this check here. 
+ // See: https://github.com/pingcap/tidb/issues/35948 + if foundTwice { + datumString, err := firstTimeSample.ToString() + if err != nil { + logutil.BgLogger().With( + zap.String("category", "stats"), + ).Error("try to convert datum to string failed", zap.Error(err)) + } + + logutil.BgLogger().With( + zap.String("category", "stats"), + ).Warn( + "invalid sample data", + zap.Bool("isColumn", isColumn), + zap.Int64("columnID", id), + zap.String("datum", datumString), + zap.Binary("sampleBytes", sampleBytes), + zap.Binary("topNBytes", topNList[j].Encoded), + ) + // NOTE: if we don't return here, we may meet panic in the following code. + // The i may decrease to a negative value. + // We haven't fix the issue here, because we don't know how to + // remove the invalid sample data from the samples. + break + } + // First time to find the same value in topN: need to record the sample data for debugging. + firstTimeSample = samples[i].Value + // Found the same value in topn: need to skip over this value in samples. 
copy(samples[i:], samples[uint64(i)+topNList[j].Count:]) samples = samples[:uint64(len(samples))-topNList[j].Count] i-- + foundTwice = true continue } } From 3a3a7d1c3c221e59542f18871851e6fb58703d56 Mon Sep 17 00:00:00 2001 From: bagechengzi <89894288+bagechengzi@users.noreply.github.com> Date: Thu, 26 Oct 2023 08:57:34 +0800 Subject: [PATCH 06/33] enhancement: Modify the BuildLogicalPlanForTest function (#47988) close pingcap/tidb#47974 --- pkg/executor/brie_test.go | 2 +- .../integration_test/integration_test.go | 2 +- pkg/expression/typeinfer_test.go | 2 +- pkg/planner/cardinality/selectivity_test.go | 6 +-- pkg/planner/cardinality/trace_test.go | 2 +- pkg/planner/cascades/optimize_test.go | 10 ++-- pkg/planner/cascades/stringer_test.go | 2 +- .../cascades/transformation_rules_test.go | 4 +- pkg/planner/core/logical_plans_test.go | 54 +++++++++---------- pkg/planner/core/optimizer.go | 6 +-- .../rule_generate_column_substitute_test.go | 6 +-- pkg/planner/memo/group_test.go | 6 +-- pkg/util/ranger/bench_test.go | 2 +- pkg/util/ranger/ranger_test.go | 22 ++++---- 14 files changed, 63 insertions(+), 63 deletions(-) diff --git a/pkg/executor/brie_test.go b/pkg/executor/brie_test.go index 0fbe566d02422..c63bb33ea414e 100644 --- a/pkg/executor/brie_test.go +++ b/pkg/executor/brie_test.go @@ -78,7 +78,7 @@ func TestFetchShowBRIE(t *testing.T) { p.SetParserConfig(parser.ParserConfig{EnableWindowFunction: true, EnableStrictDoubleTypeCheck: true}) stmt, err := p.ParseOneStmt("show backups", "", "") require.NoError(t, err) - plan, _, err := core.BuildLogicalPlanForTest(ctx, sctx, stmt, infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable(), core.MockView()})) + plan, err := core.BuildLogicalPlanForTest(ctx, sctx, stmt, infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable(), core.MockView()})) require.NoError(t, err) schema := plan.Schema() diff --git 
a/pkg/expression/integration_test/integration_test.go b/pkg/expression/integration_test/integration_test.go index fa40790a7b78d..2c78919fdc2ef 100644 --- a/pkg/expression/integration_test/integration_test.go +++ b/pkg/expression/integration_test/integration_test.go @@ -422,7 +422,7 @@ func TestFilterExtractFromDNF(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err, "error %v, for resolve name, expr %s", err, tt.exprStr) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err, "error %v, for build plan, expr %s", err, tt.exprStr) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) conds := make([]expression.Expression, len(selection.Conditions)) diff --git a/pkg/expression/typeinfer_test.go b/pkg/expression/typeinfer_test.go index f2cff3c08f9fc..8166f7488ddd4 100644 --- a/pkg/expression/typeinfer_test.go +++ b/pkg/expression/typeinfer_test.go @@ -129,7 +129,7 @@ func TestInferType(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmt, plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err, comment) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmt, ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmt, ret.InfoSchema) require.NoError(t, err, comment) tp := p.Schema().Columns[0].RetType require.Equal(t, tt.tp, tp.GetType(), comment) diff --git a/pkg/planner/cardinality/selectivity_test.go b/pkg/planner/cardinality/selectivity_test.go index 2706e1d89e6da..9c254512bc275 100644 --- a/pkg/planner/cardinality/selectivity_test.go +++ b/pkg/planner/cardinality/selectivity_test.go @@ -88,7 +88,7 @@ func BenchmarkSelectivity(b *testing.B) { ret := 
&plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoErrorf(b, err, "for %s", exprs) - p, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(context.Background(), sctx, stmts[0], ret.InfoSchema) require.NoErrorf(b, err, "error %v, for building plan, expr %s", err, exprs) file, err := os.Create("cpu.profile") @@ -446,7 +446,7 @@ func TestSelectivity(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoErrorf(t, err, "for expr %s", tt.exprs) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoErrorf(t, err, "for building plan, expr %s", err, tt.exprs) sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) @@ -504,7 +504,7 @@ func TestDNFCondSelectivity(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoErrorf(t, err, "error %v, for sql %s", err, tt) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoErrorf(t, err, "error %v, for building plan, sql %s", err, tt) sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) diff --git a/pkg/planner/cardinality/trace_test.go b/pkg/planner/cardinality/trace_test.go index 07b8cbbe8149c..186aae1a45202 100644 --- a/pkg/planner/cardinality/trace_test.go +++ b/pkg/planner/cardinality/trace_test.go @@ -205,7 +205,7 @@ func TestTraceDebugSelectivity(t *testing.T) { ret := 
&plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmt, plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), sctx, stmt, ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(context.Background(), sctx, stmt, ret.InfoSchema) require.NoError(t, err) sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) diff --git a/pkg/planner/cascades/optimize_test.go b/pkg/planner/cascades/optimize_test.go index 940cc2542727e..977405b4f2540 100644 --- a/pkg/planner/cascades/optimize_test.go +++ b/pkg/planner/cascades/optimize_test.go @@ -42,7 +42,7 @@ func TestImplGroupZeroCost(t *testing.T) { stmt, err := p.ParseOneStmt("select t1.a, t2.a from t as t1 left join t as t2 on t1.a = t2.a where t1.a < 1.0", "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) @@ -69,7 +69,7 @@ func TestInitGroupSchema(t *testing.T) { stmt, err := p.ParseOneStmt("select a from t", "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) @@ -94,7 +94,7 @@ func TestFillGroupStats(t *testing.T) { stmt, err := p.ParseOneStmt("select * from t t1 join t t2 on t1.a = t2.a", "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) @@ -128,7 +128,7 @@ func TestPreparePossibleProperties(t 
*testing.T) { stmt, err := p.ParseOneStmt("select f, sum(a) from t group by f", "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) @@ -225,7 +225,7 @@ func TestAppliedRuleSet(t *testing.T) { stmt, err := p.ParseOneStmt("select 1", "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) diff --git a/pkg/planner/cascades/stringer_test.go b/pkg/planner/cascades/stringer_test.go index 1356984d54e92..14359e6de8cf1 100644 --- a/pkg/planner/cascades/stringer_test.go +++ b/pkg/planner/cascades/stringer_test.go @@ -61,7 +61,7 @@ func TestGroupStringer(t *testing.T) { stmt, err := p.ParseOneStmt(sql, "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) diff --git a/pkg/planner/cascades/transformation_rules_test.go b/pkg/planner/cascades/transformation_rules_test.go index a6669629ab25d..44af39a46d000 100644 --- a/pkg/planner/cascades/transformation_rules_test.go +++ b/pkg/planner/cascades/transformation_rules_test.go @@ -44,7 +44,7 @@ func testGroupToString(t *testing.T, input []string, output []struct { stmt, err := p.ParseOneStmt(sql, "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) 
@@ -96,7 +96,7 @@ func TestAggPushDownGather(t *testing.T) { stmt, err := p.ParseOneStmt(sql, "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) diff --git a/pkg/planner/core/logical_plans_test.go b/pkg/planner/core/logical_plans_test.go index 82283419f6012..87c6ca25ea925 100644 --- a/pkg/planner/core/logical_plans_test.go +++ b/pkg/planner/core/logical_plans_test.go @@ -115,7 +115,7 @@ func TestPredicatePushDown(t *testing.T) { comment := fmt.Sprintf("for %s", ca) stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagDecorrelate|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err) @@ -135,7 +135,7 @@ func TestImplicitCastNotNullFlag(t *testing.T) { defer s.Close() stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagJoinReOrder|flagPrunColumns|flagEliminateProjection, p.(LogicalPlan)) require.NoError(t, err) @@ -153,7 +153,7 @@ func TestEliminateProjectionUnderUnion(t *testing.T) { defer s.Close() stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagJoinReOrder|flagPrunColumns|flagEliminateProjection, p.(LogicalPlan)) 
require.NoError(t, err) @@ -180,7 +180,7 @@ func TestJoinPredicatePushDown(t *testing.T) { comment := fmt.Sprintf("for %s", ca) stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagDecorrelate|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -220,7 +220,7 @@ func TestOuterWherePredicatePushDown(t *testing.T) { comment := fmt.Sprintf("for %s", ca) stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagDecorrelate|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -266,7 +266,7 @@ func TestSimplifyOuterJoin(t *testing.T) { comment := fmt.Sprintf("for %s", ca) stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -307,7 +307,7 @@ func TestAntiSemiJoinConstFalse(t *testing.T) { comment := fmt.Sprintf("for %s", ca.sql) stmt, err := s.p.ParseOneStmt(ca.sql, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagDecorrelate|flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) 
require.NoError(t, err, comment) @@ -335,7 +335,7 @@ func TestDeriveNotNullConds(t *testing.T) { comment := fmt.Sprintf("for %s", ca) stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain|flagDecorrelate, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -364,7 +364,7 @@ func TestExtraPKNotNullFlag(t *testing.T) { comment := fmt.Sprintf("for %s", sql) stmt, err := s.p.ParseOneStmt(sql, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) ds := p.(*LogicalProjection).children[0].(*LogicalAggregation).children[0].(*DataSource) require.Equal(t, "_tidb_rowid", ds.Columns[2].Name.L) @@ -385,7 +385,7 @@ func buildLogicPlan4GroupBy(s *plannerSuite, t *testing.T, sql string) (Plan, er stmt.(*ast.SelectStmt).From.TableRefs.Left.(*ast.TableSource).Source.(*ast.TableName).TableInfo = mockedTableInfo - p, _, err := BuildLogicalPlanForTest(context.Background(), s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(context.Background(), s.ctx, stmt, s.is) return p, err } @@ -447,7 +447,7 @@ func TestDupRandJoinCondsPushDown(t *testing.T) { defer s.Close() stmt, err := s.p.ParseOneStmt(sql, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(context.Background(), s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(context.Background(), s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -517,7 +517,7 @@ func TestTablePartition(t *testing.T) { testdata.OnRecord(func() { }) - p, _, err := BuildLogicalPlanForTest(ctx, 
s.ctx, stmt, isChoices[ca.IsIdx]) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, isChoices[ca.IsIdx]) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagDecorrelate|flagPrunColumns|flagPrunColumnsAgain|flagPredicatePushDown|flagPartitionProcessor, p.(LogicalPlan)) require.NoError(t, err) @@ -543,7 +543,7 @@ func TestSubquery(t *testing.T) { err = Preprocess(context.Background(), s.ctx, stmt, WithPreprocessorReturn(&PreprocessorReturn{InfoSchema: s.is})) require.NoError(t, err) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) if lp, ok := p.(LogicalPlan); ok { p, err = logicalOptimize(context.TODO(), flagBuildKeyInfo|flagDecorrelate|flagPrunColumns|flagPrunColumnsAgain|flagSemiJoinRewrite, lp) @@ -572,7 +572,7 @@ func TestPlanBuilder(t *testing.T) { s.ctx.GetSessionVars().SetHashJoinConcurrency(1) err = Preprocess(context.Background(), s.ctx, stmt, WithPreprocessorReturn(&PreprocessorReturn{InfoSchema: s.is})) require.NoError(t, err) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) if lp, ok := p.(LogicalPlan); ok { p, err = logicalOptimize(context.TODO(), flagPrunColumns|flagPrunColumnsAgain, lp) @@ -597,7 +597,7 @@ func TestJoinReOrder(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagJoinReOrder, p.(LogicalPlan)) require.NoError(t, err) @@ -626,7 +626,7 @@ func TestEagerAggregation(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) 
require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagBuildKeyInfo|flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain|flagPushDownAgg, p.(LogicalPlan)) require.NoError(t, err) @@ -652,7 +652,7 @@ func TestColumnPruning(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) lp, err := logicalOptimize(ctx, flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err) @@ -681,7 +681,7 @@ func TestSortByItemsPruning(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) lp, err := logicalOptimize(ctx, flagEliminateProjection|flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err) @@ -711,7 +711,7 @@ func TestProjectionEliminator(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt.sql, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagBuildKeyInfo|flagPrunColumns|flagPrunColumnsAgain|flagEliminateProjection, p.(LogicalPlan)) require.NoError(t, err) @@ -725,7 +725,7 @@ func TestCS3389(t *testing.T) { ctx := context.Background() stmt, err := s.p.ParseOneStmt("select count(*) from t where a in (select b from t2 where a is null);", "", "") require.NoError(t, err) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), 
flagBuildKeyInfo|flagPrunColumns|flagPrunColumnsAgain|flagEliminateProjection|flagJoinReOrder, p.(LogicalPlan)) require.NoError(t, err) @@ -979,7 +979,7 @@ func TestValidate(t *testing.T) { require.NoError(t, err, comment) err = Preprocess(context.Background(), s.ctx, stmt, WithPreprocessorReturn(&PreprocessorReturn{InfoSchema: s.is})) require.NoError(t, err) - _, _, err = BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + _, err = BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) if tt.err == nil { require.NoError(t, err, comment) } else { @@ -1031,7 +1031,7 @@ func TestUniqueKeyInfo(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) lp, err := logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns|flagBuildKeyInfo, p.(LogicalPlan)) require.NoError(t, err) @@ -1054,7 +1054,7 @@ func TestAggPrune(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt, "", "") require.NoError(t, err, comment) domain.GetDomain(s.ctx).MockInfoCacheAndLoadInfoSchema(s.is) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain|flagBuildKeyInfo|flagEliminateAgg|flagEliminateProjection, p.(LogicalPlan)) @@ -1666,7 +1666,7 @@ func TestNameResolver(t *testing.T) { require.NoError(t, err, comment) s.ctx.GetSessionVars().SetHashJoinConcurrency(1) - _, _, err = BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + _, err = BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) if test.err == "" { require.NoError(t, err) } else { @@ -2158,7 +2158,7 @@ func TestResolvingCorrelatedAggregate(t *testing.T) { require.NoError(t, err, comment) err = Preprocess(context.Background(), s.ctx, stmt, 
WithPreprocessorReturn(&PreprocessorReturn{InfoSchema: s.is})) require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagBuildKeyInfo|flagEliminateProjection|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -2241,7 +2241,7 @@ func TestWindowLogicalPlanAmbiguous(t *testing.T) { for i := 0; i < iterations; i++ { stmt, err := s.p.ParseOneStmt(sql, "", "") require.NoError(t, err) - p, _, err := BuildLogicalPlanForTest(context.Background(), s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(context.Background(), s.ctx, stmt, s.is) require.NoError(t, err) if planString == "" { planString = ToString(p) @@ -2282,7 +2282,7 @@ func TestRemoveOrderbyInSubquery(t *testing.T) { comment := fmt.Sprintf("case:%v sql:%s", i, tt.sql) stmt, err := s.p.ParseOneStmt(tt.sql, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) require.Equal(t, tt.best, ToString(p), comment) } diff --git a/pkg/planner/core/optimizer.go b/pkg/planner/core/optimizer.go index 74786afb6b166..684ed39b616b2 100644 --- a/pkg/planner/core/optimizer.go +++ b/pkg/planner/core/optimizer.go @@ -172,15 +172,15 @@ type logicalOptRule interface { } // BuildLogicalPlanForTest builds a logical plan for testing purpose from ast.Node. 
-func BuildLogicalPlanForTest(ctx context.Context, sctx sessionctx.Context, node ast.Node, infoSchema infoschema.InfoSchema) (Plan, types.NameSlice, error) { +func BuildLogicalPlanForTest(ctx context.Context, sctx sessionctx.Context, node ast.Node, infoSchema infoschema.InfoSchema) (Plan, error) { sctx.GetSessionVars().PlanID.Store(0) sctx.GetSessionVars().PlanColumnID.Store(0) builder, _ := NewPlanBuilder().Init(sctx, infoSchema, &utilhint.BlockHintProcessor{}) p, err := builder.Build(ctx, node) if err != nil { - return nil, nil, err + return nil, err } - return p, p.OutputNames(), err + return p, err } // CheckPrivilege checks the privilege for a user. diff --git a/pkg/planner/core/rule_generate_column_substitute_test.go b/pkg/planner/core/rule_generate_column_substitute_test.go index 385c8fb463f8f..bf6c322c684ca 100644 --- a/pkg/planner/core/rule_generate_column_substitute_test.go +++ b/pkg/planner/core/rule_generate_column_substitute_test.go @@ -97,7 +97,7 @@ import ( // . . 86: fmt.Println(sql) // . . 87: stmt, err := s.GetParser().ParseOneStmt(sql, "", "") // . . 88: require.NoError(b, err, sql) -// . 512.01kB 89: p, _, err := core.BuildLogicalPlanForTest(ctx, s.GetCtx(), stmt, s.GetIS()) +// . 512.01kB 89: p, err := core.BuildLogicalPlanForTest(ctx, s.GetCtx(), stmt, s.GetIS()) // . . 90: require.NoError(b, err) // . . 91: selection := p.(core.LogicalPlan).Children()[0] // . . 92: m := make(core.ExprColumnMap, len(selection.Schema().Columns)) @@ -187,7 +187,7 @@ import ( // . . 86: fmt.Println(sql) // . . 87: stmt, err := s.GetParser().ParseOneStmt(sql, "", "") // . . 88: require.NoError(b, err, sql) -// . 512.07kB 89: p, _, err := core.BuildLogicalPlanForTest(ctx, s.GetCtx(), stmt, s.GetIS()) +// . 512.07kB 89: p, err := core.BuildLogicalPlanForTest(ctx, s.GetCtx(), stmt, s.GetIS()) // . . 90: require.NoError(b, err) // . . 91: selection := p.(core.LogicalPlan).Children()[0] // . . 
92: m := make(core.ExprColumnMap, len(selection.Schema().Columns)) @@ -262,7 +262,7 @@ func BenchmarkSubstituteExpression(b *testing.B) { fmt.Println(sql) stmt, err := s.GetParser().ParseOneStmt(sql, "", "") require.NoError(b, err, sql) - p, _, err := core.BuildLogicalPlanForTest(ctx, s.GetCtx(), stmt, s.GetIS()) + p, err := core.BuildLogicalPlanForTest(ctx, s.GetCtx(), stmt, s.GetIS()) require.NoError(b, err) selection := p.(core.LogicalPlan).Children()[0] m := make(core.ExprColumnMap, len(selection.Schema().Columns)) diff --git a/pkg/planner/memo/group_test.go b/pkg/planner/memo/group_test.go index 12dd4ca913192..69958e8ec0398 100644 --- a/pkg/planner/memo/group_test.go +++ b/pkg/planner/memo/group_test.go @@ -104,7 +104,7 @@ func TestGroupFingerPrint(t *testing.T) { do := domain.GetDomain(ctx) do.StatsHandle().Close() }() - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt1, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt1, is) require.NoError(t, err) logic1, ok := plan.(plannercore.LogicalPlan) require.True(t, ok) @@ -259,7 +259,7 @@ func TestBuildKeyInfo(t *testing.T) { // case 1: primary key has constant constraint stmt1, err := p.ParseOneStmt("select a from t where a = 10", "", "") require.NoError(t, err) - p1, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt1, is) + p1, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt1, is) require.NoError(t, err) logic1, ok := p1.(plannercore.LogicalPlan) require.True(t, ok) @@ -271,7 +271,7 @@ func TestBuildKeyInfo(t *testing.T) { // case 2: group by column is key stmt2, err := p.ParseOneStmt("select b, sum(a) from t group by b", "", "") require.NoError(t, err) - p2, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt2, is) + p2, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt2, is) require.NoError(t, err) logic2, ok := 
p2.(plannercore.LogicalPlan) require.True(t, ok) diff --git a/pkg/util/ranger/bench_test.go b/pkg/util/ranger/bench_test.go index 69deaab02ddbb..3d5f2470fbc7c 100644 --- a/pkg/util/ranger/bench_test.go +++ b/pkg/util/ranger/bench_test.go @@ -112,7 +112,7 @@ WHERE err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(b, err) ctx := context.Background() - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(b, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() diff --git a/pkg/util/ranger/ranger_test.go b/pkg/util/ranger/ranger_test.go index 71a4552ae491c..2453cbe9263ed 100644 --- a/pkg/util/ranger/ranger_test.go +++ b/pkg/util/ranger/ranger_test.go @@ -265,7 +265,7 @@ func TestTableRange(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) conds := make([]expression.Expression, len(selection.Conditions)) @@ -454,7 +454,7 @@ create table t( ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection := 
p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() @@ -815,7 +815,7 @@ func TestColumnRange(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) ds, ok := sel.Children()[0].(*plannercore.DataSource) @@ -972,7 +972,7 @@ func TestIndexRangeForYear(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() @@ -1040,7 +1040,7 @@ func TestPrefixIndexRangeScan(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() @@ -1387,7 +1387,7 @@ create table t( ret := &plannercore.PreprocessorReturn{} err = 
plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() @@ -1628,7 +1628,7 @@ func TestTableShardIndex(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) conds := make([]expression.Expression, len(selection.Conditions)) @@ -1656,7 +1656,7 @@ func TestTableShardIndex(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection, ok := p.(*plannercore.Update).SelectPlan.(*plannercore.PhysicalSelection) require.True(t, ok) @@ -1674,7 +1674,7 @@ func TestTableShardIndex(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], 
ret.InfoSchema) require.NoError(t, err) selection, ok := p.(*plannercore.Delete).SelectPlan.(*plannercore.PhysicalSelection) require.True(t, ok) @@ -1819,7 +1819,7 @@ func getSelectionFromQuery(t *testing.T, sctx sessionctx.Context, sql string) *p ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection, isSelection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) require.True(t, isSelection) @@ -2255,7 +2255,7 @@ create table t( ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err, fmt.Sprintf("error %v, for resolve name, expr %s", err, tt.exprStr)) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err, fmt.Sprintf("error %v, for build plan, expr %s", err, tt.exprStr)) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() From a7366f936d72eaf6513aec6236670312ecf3b2ed Mon Sep 17 00:00:00 2001 From: D3Hunter Date: Thu, 26 Oct 2023 10:23:03 +0800 Subject: [PATCH 07/33] importinto: fix data race in test (#47965) close pingcap/tidb#47776 --- br/pkg/lightning/mydump/loader.go | 3 ++- pkg/disttask/framework/dispatcher/dispatcher_manager.go | 3 +++ pkg/disttask/importinto/dispatcher_testkit_test.go | 6 ++++++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/br/pkg/lightning/mydump/loader.go b/br/pkg/lightning/mydump/loader.go index 
091567463cbf7..630a40e015a24 100644 --- a/br/pkg/lightning/mydump/loader.go +++ b/br/pkg/lightning/mydump/loader.go @@ -705,7 +705,8 @@ func calculateFileBytes(ctx context.Context, } defer reader.Close() - compressReader, err := storage.NewLimitedInterceptReader(reader, compressType, storage.DecompressConfig{}, offset) + decompressConfig := storage.DecompressConfig{ZStdDecodeConcurrency: 1} + compressReader, err := storage.NewLimitedInterceptReader(reader, compressType, decompressConfig, offset) if err != nil { return 0, 0, errors.Trace(err) } diff --git a/pkg/disttask/framework/dispatcher/dispatcher_manager.go b/pkg/disttask/framework/dispatcher/dispatcher_manager.go index 76e8af9f4eec9..96cc551874452 100644 --- a/pkg/disttask/framework/dispatcher/dispatcher_manager.go +++ b/pkg/disttask/framework/dispatcher/dispatcher_manager.go @@ -127,6 +127,9 @@ func NewManager(ctx context.Context, taskMgr TaskManager, serverID string) (*Man // Start the dispatcherManager, start the dispatchTaskLoop to start multiple dispatchers. func (dm *Manager) Start() { + failpoint.Inject("disableDispatcherManager", func() { + failpoint.Return() + }) dm.wg.Run(dm.dispatchTaskLoop) dm.wg.Run(dm.gcSubtaskHistoryTableLoop) dm.wg.Run(dm.cleanUpLoop) diff --git a/pkg/disttask/importinto/dispatcher_testkit_test.go b/pkg/disttask/importinto/dispatcher_testkit_test.go index 0efc05da77ed4..269cbee988979 100644 --- a/pkg/disttask/importinto/dispatcher_testkit_test.go +++ b/pkg/disttask/importinto/dispatcher_testkit_test.go @@ -150,6 +150,12 @@ func TestDispatcherExtLocalSort(t *testing.T) { } func TestDispatcherExtGlobalSort(t *testing.T) { + // Domain start dispatcher manager automatically, we need to disable it as + // we test import task management in this case. 
+ require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/disttask/framework/dispatcher/disableDispatcherManager", "return(true)")) + t.Cleanup(func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/disttask/framework/dispatcher/disableDispatcherManager")) + }) store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) pool := pools.NewResourcePool(func() (pools.Resource, error) { From 6dd6a5e50cd3faf6a0dbc3b26551ba3fe1f8b16e Mon Sep 17 00:00:00 2001 From: Weizhen Wang Date: Thu, 26 Oct 2023 14:27:34 +0800 Subject: [PATCH 08/33] executor: improve channel length for analyze (#47960) ref pingcap/tidb#47275 --- pkg/executor/analyze.go | 3 +-- pkg/executor/analyze_col_v2.go | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/executor/analyze.go b/pkg/executor/analyze.go index ddfff2f77519c..26acb9ce56bca 100644 --- a/pkg/executor/analyze.go +++ b/pkg/executor/analyze.go @@ -108,8 +108,7 @@ func (e *AnalyzeExec) Next(ctx context.Context, _ *chunk.Chunk) error { // Start workers with channel to collect results. taskCh := make(chan *analyzeTask, concurrency) - resultChLen := min(concurrency*2, len(tasks)) - resultsCh := make(chan *statistics.AnalyzeResults, resultChLen) + resultsCh := make(chan *statistics.AnalyzeResults, 1) for i := 0; i < concurrency; i++ { e.wg.Run(func() { e.analyzeWorker(taskCh, resultsCh) }) } diff --git a/pkg/executor/analyze_col_v2.go b/pkg/executor/analyze_col_v2.go index 40a96efec1afa..6faa8e0f529a4 100644 --- a/pkg/executor/analyze_col_v2.go +++ b/pkg/executor/analyze_col_v2.go @@ -275,8 +275,8 @@ func (e *AnalyzeColumnsExecV2) buildSamplingStats( sc := e.ctx.GetSessionVars().StmtCtx // Start workers to merge the result from collectors. 
- mergeResultCh := make(chan *samplingMergeResult, samplingStatsConcurrency) - mergeTaskCh := make(chan []byte, samplingStatsConcurrency) + mergeResultCh := make(chan *samplingMergeResult, 1) + mergeTaskCh := make(chan []byte, 1) var taskEg errgroup.Group // Start read data from resultHandler and send them to mergeTaskCh. taskEg.Go(func() (err error) { From d4b81de2dc90d973939570f3b6ef6d1912d18eea Mon Sep 17 00:00:00 2001 From: Hangjie Mo Date: Thu, 26 Oct 2023 14:59:04 +0800 Subject: [PATCH 09/33] tests: move IT in `executor` to `tests/integrationtest` (PART 6) (#47998) ref pingcap/tidb#47076 --- pkg/executor/BUILD.bazel | 2 - pkg/executor/executor_txn_test.go | 91 --- pkg/executor/import_into_test.go | 149 ---- pkg/executor/index_advise_test.go | 132 ---- pkg/executor/index_lookup_merge_join_test.go | 164 ----- pkg/executor/infoschema_reader_test.go | 318 -------- pkg/executor/inspection_common_test.go | 63 -- pkg/executor/merge_join_test.go | 650 ---------------- .../r/executor/executor_txn.result | 169 +++++ .../r/executor/import_into.result | 170 +++++ .../r/executor/index_advise.result | 198 +++++ .../r/executor/index_lookup_merge_join.result | 229 ++++++ .../r/executor/infoschema_reader.result | 285 +++++++ .../r/executor/inspection_common.result | 12 + .../r/executor/merge_join.result | 697 ++++++++++++++++++ .../t/executor/executor_txn.test | 133 ++++ .../t/executor/import_into.test | 174 +++++ .../t/executor/index_advise.test | 166 +++++ .../t/executor/index_lookup_merge_join.test | 108 +++ .../t/executor/infoschema_reader.test | 241 ++++++ .../t/executor/inspection_common.test | 6 + .../t/executor/merge_join.test | 287 ++++++++ 22 files changed, 2875 insertions(+), 1569 deletions(-) delete mode 100644 pkg/executor/inspection_common_test.go create mode 100644 tests/integrationtest/r/executor/executor_txn.result create mode 100644 tests/integrationtest/r/executor/import_into.result create mode 100644 tests/integrationtest/r/executor/index_advise.result 
create mode 100644 tests/integrationtest/r/executor/index_lookup_merge_join.result create mode 100644 tests/integrationtest/r/executor/infoschema_reader.result create mode 100644 tests/integrationtest/r/executor/inspection_common.result create mode 100644 tests/integrationtest/r/executor/merge_join.result create mode 100644 tests/integrationtest/t/executor/executor_txn.test create mode 100644 tests/integrationtest/t/executor/import_into.test create mode 100644 tests/integrationtest/t/executor/index_advise.test create mode 100644 tests/integrationtest/t/executor/index_lookup_merge_join.test create mode 100644 tests/integrationtest/t/executor/infoschema_reader.test create mode 100644 tests/integrationtest/t/executor/inspection_common.test create mode 100644 tests/integrationtest/t/executor/merge_join.test diff --git a/pkg/executor/BUILD.bazel b/pkg/executor/BUILD.bazel index a9829f6f83c6b..390431ce741eb 100644 --- a/pkg/executor/BUILD.bazel +++ b/pkg/executor/BUILD.bazel @@ -321,7 +321,6 @@ go_test( "infoschema_reader_internal_test.go", "infoschema_reader_test.go", "insert_test.go", - "inspection_common_test.go", "inspection_result_test.go", "inspection_summary_test.go", "join_pkg_test.go", @@ -441,7 +440,6 @@ go_test( "//pkg/util/mock", "//pkg/util/paging", "//pkg/util/pdapi", - "//pkg/util/plancodec", "//pkg/util/ranger", "//pkg/util/sem", "//pkg/util/set", diff --git a/pkg/executor/executor_txn_test.go b/pkg/executor/executor_txn_test.go index d20da73198b8e..bf63b8d072f8c 100644 --- a/pkg/executor/executor_txn_test.go +++ b/pkg/executor/executor_txn_test.go @@ -462,36 +462,6 @@ func TestTxnSavepoint1(t *testing.T) { } } -func TestRollbackToSavepoint(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t(id int, a int, unique index idx(id))") - - tk.MustExec("begin pessimistic") - tk.MustExec("insert into t values (1,1)") - tk.MustExec("savepoint s1") - tk.MustExec("insert 
into t values (2,2)") - tk.MustExec("rollback to s1") - tk.MustExec("insert into t values (2,2)") - tk.MustQuery("select * from t").Check(testkit.Rows("1 1", "2 2")) - tk.MustExec("rollback to s1") - tk.MustQuery("select * from t").Check(testkit.Rows("1 1")) - tk.MustExec("commit") - tk.MustQuery("select * from t").Check(testkit.Rows("1 1")) - - tk.MustExec("delete from t") - tk.MustExec("insert into t values (1,1)") - tk.MustExec("begin pessimistic") - tk.MustExec("delete from t where id = 1") - tk.MustExec("savepoint s1") - tk.MustExec("insert into t values (1,2)") - tk.MustExec("rollback to s1") - tk.MustQuery("select * from t").Check(testkit.Rows()) - tk.MustExec("commit") - tk.MustQuery("select * from t").Check(testkit.Rows()) -} - func TestRollbackToSavepointReleasePessimisticLock(t *testing.T) { store := testkit.CreateMockStore(t) tk1 := testkit.NewTestKit(t, store) @@ -660,67 +630,6 @@ func TestSavepointInBigTxn(t *testing.T) { tk1.MustQuery("select * from t order by id").Check(testkit.Rows("0 0", "1 1")) } -func TestSavepointRandTestIssue0(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("CREATE TABLE t (a enum('B','C') NOT NULL,UNIQUE KEY idx_1 (a),KEY idx_2 (a));") - tk.MustExec("begin pessimistic") - tk.MustExec("savepoint sp0;") - tk.MustExec("insert ignore into t values ( 'B' ),( 'C' );") - err := tk.ExecToErr("update t set a = 'C' where a = 'B';") - require.Error(t, err) - tk.MustExec("select * from t where a = 'B' for update;") - tk.MustExec("rollback to sp0;") - tk.MustExec("delete from t where a = 'B' ;") -} - -func TestSavepointWithTemporaryTable(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - // Test for local temporary table. 
- txnModes := []string{"optimistic", "pessimistic", ""} - for _, txnMode := range txnModes { - tk.MustExec(fmt.Sprintf("set session tidb_txn_mode='%v';", txnMode)) - tk.MustExec("drop table if exists tmp1") - tk.MustExec("create temporary table tmp1 (id int primary key auto_increment, u int unique, v int)") - tk.MustExec("insert into tmp1 values(1, 11, 101)") - tk.MustExec("begin") - tk.MustExec("savepoint sp0;") - tk.MustExec("insert into tmp1 values(2, 22, 202)") - tk.MustExec("savepoint sp1;") - tk.MustExec("insert into tmp1 values(3, 33, 303)") - tk.MustExec("rollback to sp1;") - tk.MustQuery("select * from tmp1 order by id").Check(testkit.Rows("1 11 101", "2 22 202")) - tk.MustExec("commit") - tk.MustQuery("select * from tmp1 order by id").Check(testkit.Rows("1 11 101", "2 22 202")) - } - - // Test for global temporary table. - for _, txnMode := range txnModes { - tk.MustExec(fmt.Sprintf("set session tidb_txn_mode='%v';", txnMode)) - tk.MustExec("drop table if exists tmp1") - tk.MustExec("create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows") - tk.MustExec("begin") - tk.MustExec("savepoint sp0;") - tk.MustExec("insert into tmp1 values(2, 22, 202)") - tk.MustExec("savepoint sp1;") - tk.MustExec("insert into tmp1 values(3, 33, 303)") - tk.MustExec("savepoint sp2;") - tk.MustExec("insert into tmp1 values(4, 44, 404)") - tk.MustExec("rollback to sp2;") - tk.MustQuery("select * from tmp1 order by id").Check(testkit.Rows("2 22 202", "3 33 303")) - tk.MustExec("rollback to sp1;") - tk.MustQuery("select * from tmp1 order by id").Check(testkit.Rows("2 22 202")) - tk.MustExec("commit") - tk.MustQuery("select * from tmp1 order by id").Check(testkit.Rows()) - } -} - func TestSavepointWithCacheTable(t *testing.T) { store := testkit.CreateMockStore(t) diff --git a/pkg/executor/import_into_test.go b/pkg/executor/import_into_test.go index f048fe7fc7391..284da6c67974c 100644 --- a/pkg/executor/import_into_test.go +++ 
b/pkg/executor/import_into_test.go @@ -15,28 +15,12 @@ package executor_test import ( - "fmt" "testing" - "github.com/pingcap/tidb/pkg/executor/importer" "github.com/pingcap/tidb/pkg/testkit" - "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/sem" - "github.com/stretchr/testify/require" ) -func TestImportIntoExplicitTransaction(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t (id int);") - tk.MustExec(`BEGIN`) - err := tk.ExecToErr("IMPORT INTO t FROM '/file.csv'") - require.Error(t, err) - require.Regexp(t, "cannot run IMPORT INTO in explicit transaction", err.Error()) - tk.MustExec("commit") -} - func TestSecurityEnhancedMode(t *testing.T) { store := testkit.CreateMockStore(t) @@ -49,136 +33,3 @@ func TestSecurityEnhancedMode(t *testing.T) { // regardless of what privileges they have available. tk.MustGetErrMsg("IMPORT INTO test.t FROM '/file.csv'", "[planner:8132]Feature 'IMPORT INTO from server disk' is not supported when security enhanced mode is enabled") } - -func TestImportIntoOptionsNegativeCase(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t (id int);") - - cases := []struct { - OptionStr string - Err error - }{ - {OptionStr: "xx=1", Err: exeerrors.ErrUnknownOption}, - {OptionStr: "detached=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "character_set", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "detached, detached", Err: exeerrors.ErrDuplicateOption}, - - {OptionStr: "character_set=true", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "character_set=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "character_set=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "character_set=true", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "character_set=''", Err: exeerrors.ErrInvalidOptionVal}, 
- {OptionStr: "character_set='aa'", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "fields_terminated_by=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_terminated_by=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_terminated_by=true", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_terminated_by=''", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "fields_enclosed_by=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_enclosed_by='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_enclosed_by=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_enclosed_by=true", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "fields_escaped_by=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_escaped_by='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_escaped_by=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_escaped_by=true", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "fields_defined_null_by=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_defined_null_by=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_defined_null_by=true", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "lines_terminated_by=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "lines_terminated_by=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "lines_terminated_by=true", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "lines_terminated_by=''", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "skip_rows=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "skip_rows=''", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "skip_rows=-1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "skip_rows=true", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "split_file='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "split_file, skip_rows=2", Err: exeerrors.ErrInvalidOptionVal}, - - 
{OptionStr: "disk_quota='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "disk_quota='220MiBxxx'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "disk_quota=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "disk_quota=false", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "disk_quota=null", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "thread='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "thread=0", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "thread=false", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "thread=-100", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "thread=null", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "max_write_speed='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "max_write_speed='11aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "max_write_speed=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "max_write_speed=-1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "max_write_speed=false", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "checksum_table=''", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "checksum_table=123", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "checksum_table=false", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "checksum_table=null", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "record_errors='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "record_errors='111aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "record_errors=-123", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "record_errors=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "record_errors=true", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "cloud_storage_uri=123", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "cloud_storage_uri=':'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "cloud_storage_uri='sdsd'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: 
"cloud_storage_uri='http://sdsd'", Err: exeerrors.ErrInvalidOptionVal}, - } - - sqlTemplate := "import into t from '/file.csv' with %s" - for _, c := range cases { - sql := fmt.Sprintf(sqlTemplate, c.OptionStr) - err := tk.ExecToErr(sql) - require.ErrorIs(t, err, c.Err, sql) - } - - nonCSVCases := []struct { - OptionStr string - Err error - }{ - {OptionStr: "character_set='utf8'", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "fields_terminated_by='a'", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "fields_enclosed_by='a'", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "fields_escaped_by='a'", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "fields_defined_null_by='a'", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "lines_terminated_by='a'", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "skip_rows=1", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "split_file", Err: exeerrors.ErrLoadDataUnsupportedOption}, - } - - sqlTemplate = "import into t from '/file.csv' format '%s' with %s" - for _, c := range nonCSVCases { - for _, format := range []string{importer.DataFormatParquet, importer.DataFormatSQL} { - sql := fmt.Sprintf(sqlTemplate, format, c.OptionStr) - err := tk.ExecToErr(sql) - require.ErrorIs(t, err, c.Err, sql) - } - } - - parameterCheck := []struct { - sql string - Err error - }{ - {sql: "import into t from ''", Err: exeerrors.ErrLoadDataEmptyPath}, - {sql: "import into t from '/a.csv' format 'xx'", Err: exeerrors.ErrLoadDataUnsupportedFormat}, - } - - for _, c := range parameterCheck { - err := tk.ExecToErr(c.sql) - require.ErrorIs(t, err, c.Err, c.sql) - } -} diff --git a/pkg/executor/index_advise_test.go b/pkg/executor/index_advise_test.go index 20d5b94bf0fc0..a4cbc6a65dc38 100644 --- a/pkg/executor/index_advise_test.go +++ b/pkg/executor/index_advise_test.go @@ -65,135 +65,3 @@ func TestIndexAdvise(t *testing.T) { require.Equal(t, uint64(4), 
ia.MaxIndexNum.PerTable) require.Equal(t, uint64(5), ia.MaxIndexNum.PerDB) } - -func TestIndexJoinProjPattern(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_opt_advanced_join_hint=0;") - tk.MustExec(`create table t1( -pnbrn_cnaps varchar(5) not null, -new_accno varchar(18) not null, -primary key(pnbrn_cnaps,new_accno) nonclustered -);`) - tk.MustExec(`create table t2( -pnbrn_cnaps varchar(5) not null, -txn_accno varchar(18) not null, -txn_dt date not null, -yn_frz varchar(1) default null -);`) - tk.MustExec(`insert into t1(pnbrn_cnaps,new_accno) values ("40001","123")`) - tk.MustExec(`insert into t2(pnbrn_cnaps, txn_accno, txn_dt, yn_frz) values ("40001","123","20221201","0");`) - - sql := `update -/*+ inl_join(a) */ -t2 b, -( -select t1.pnbrn_cnaps, -t1.new_accno -from t1 -where t1.pnbrn_cnaps = '40001' -) a -set b.yn_frz = '1' -where b.txn_dt = str_to_date('20221201', '%Y%m%d') -and b.pnbrn_cnaps = a.pnbrn_cnaps -and b.txn_accno = a.new_accno;` - rows := [][]interface{}{ - {"Update_8"}, - {"└─IndexJoin_14"}, - {" ├─TableReader_23(Build)"}, - {" │ └─Selection_22"}, - {" │ └─TableFullScan_21"}, - {" └─IndexReader_12(Probe)"}, - {" └─IndexRangeScan_11"}, - } - tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'") - tk.MustQuery("explain "+sql).CheckAt([]int{0}, rows) - rows = [][]interface{}{ - {"Update_8"}, - {"└─HashJoin_12"}, - {" ├─TableReader_15(Build)"}, - {" │ └─Selection_14"}, - {" │ └─TableFullScan_13"}, - {" └─IndexReader_18(Probe)"}, - {" └─IndexRangeScan_17"}, - } - tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'") - tk.MustQuery("explain "+sql).CheckAt([]int{0}, rows) - - tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'") - tk.MustExec(sql) - tk.MustQuery("select yn_frz from t2").Check(testkit.Rows("1")) -} - -func TestIndexJoinSelPattern(t *testing.T) { - store := 
testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`set @@tidb_opt_advanced_join_hint=0`) - tk.MustExec(` create table tbl_miss( -id bigint(20) unsigned not null -,txn_dt date default null -,perip_sys_uuid varchar(32) not null -,rvrs_idr varchar(1) not null -,primary key(id) clustered -,key idx1 (txn_dt, perip_sys_uuid, rvrs_idr) -); -`) - tk.MustExec(`insert into tbl_miss (id,txn_dt,perip_sys_uuid,rvrs_idr) values (1,"20221201","123","1");`) - tk.MustExec(`create table tbl_src( -txn_dt date default null -,uuid varchar(32) not null -,rvrs_idr char(1) -,expd_inf varchar(5000) -,primary key(uuid,rvrs_idr) nonclustered -); -`) - tk.MustExec(`insert into tbl_src (txn_dt,uuid,rvrs_idr) values ("20221201","123","1");`) - sql := `select /*+ use_index(mis,) inl_join(src) */ - * - from tbl_miss mis - ,tbl_src src - where src.txn_dt >= str_to_date('20221201', '%Y%m%d') - and mis.id between 1 and 10000 - and mis.perip_sys_uuid = src.uuid - and mis.rvrs_idr = src.rvrs_idr - and mis.txn_dt = src.txn_dt - and ( - case when isnull(src.expd_inf) = 1 then '' - else - substr(concat_ws('',src.expd_inf,'~~'), - instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, - instr(substr(concat_ws('',src.expd_inf,'~~'), - instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) - end - ) != '01';` - rows := [][]interface{}{ - {"HashJoin_9"}, - {"├─TableReader_12(Build)"}, - {"│ └─Selection_11"}, - {"│ └─TableRangeScan_10"}, - {"└─Selection_13(Probe)"}, - {" └─TableReader_16"}, - {" └─Selection_15"}, - {" └─TableFullScan_14"}, - } - tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'") - tk.MustQuery("explain "+sql).CheckAt([]int{0}, rows) - rows = [][]interface{}{ - {"IndexJoin_13"}, - {"├─TableReader_25(Build)"}, - {"│ └─Selection_24"}, - {"│ └─TableRangeScan_23"}, - {"└─Selection_12(Probe)"}, - {" └─IndexLookUp_11"}, - {" ├─IndexRangeScan_8(Build)"}, - {" 
└─Selection_10(Probe)"}, - {" └─TableRowIDScan_9"}, - } - tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'") - tk.MustQuery("explain "+sql).CheckAt([]int{0}, rows) - tk.MustQuery(sql).Check(testkit.Rows("1 2022-12-01 123 1 2022-12-01 123 1 ")) - tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'") - tk.MustQuery(sql).Check(testkit.Rows("1 2022-12-01 123 1 2022-12-01 123 1 ")) -} diff --git a/pkg/executor/index_lookup_merge_join_test.go b/pkg/executor/index_lookup_merge_join_test.go index a7a8d5ce7e0c5..502064642c2f6 100644 --- a/pkg/executor/index_lookup_merge_join_test.go +++ b/pkg/executor/index_lookup_merge_join_test.go @@ -15,14 +15,10 @@ package executor_test import ( - "strings" "testing" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" - "github.com/pingcap/tidb/pkg/testkit/testdata" - "github.com/pingcap/tidb/pkg/util/plancodec" "github.com/stretchr/testify/require" ) @@ -45,23 +41,6 @@ func TestIndexLookupMergeJoinHang(t *testing.T) { require.Equal(t, "OOM test index merge join doesn't hang here.", err.Error()) } -func TestIssue28052(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("CREATE TABLE `t` (" + - "`col_tinyint_key_signed` tinyint(4) DEFAULT NULL," + - "`col_year_key_signed` year(4) DEFAULT NULL," + - "KEY `col_tinyint_key_signed` (`col_tinyint_key_signed`)," + - "KEY `col_year_key_signed` (`col_year_key_signed`)" + - " ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin") - - tk.MustExec("insert into t values(-100,NULL);") - tk.MustQuery("select /*+ inl_merge_join(t1, t2) */ count(*) from t t1 right join t t2 on t1. `col_year_key_signed` = t2. 
`col_tinyint_key_signed`").Check(testkit.Rows("1")) -} - func TestIssue18068(t *testing.T) { require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/executor/testIssue18068", `return(true)`)) defer func() { @@ -85,146 +64,3 @@ func TestIssue18068(t *testing.T) { tk.MustExec("select /*+ inl_merge_join(s)*/ 1 from t join s on t.a = s.a limit 1") tk.MustExec("select /*+ inl_merge_join(s)*/ 1 from t join s on t.a = s.a limit 1") } - -func TestIssue18631(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a int, b int, c int, d int, primary key(a,b,c))") - tk.MustExec("create table t2(a int, b int, c int, d int, primary key(a,b,c))") - tk.MustExec("insert into t1 values(1,1,1,1),(2,2,2,2),(3,3,3,3)") - tk.MustExec("insert into t2 values(1,1,1,1),(2,2,2,2)") - firstOperator := tk.MustQuery("explain format = 'brief' select /*+ inl_merge_join(t1,t2) */ * from t1 left join t2 on t1.a = t2.a and t1.c = t2.c and t1.b = t2.b order by t1.a desc").Rows()[0][0].(string) - require.Equal(t, 0, strings.Index(firstOperator, plancodec.TypeIndexMergeJoin)) - tk.MustQuery("select /*+ inl_merge_join(t1,t2) */ * from t1 left join t2 on t1.a = t2.a and t1.c = t2.c and t1.b = t2.b order by t1.a desc").Check(testkit.Rows( - "3 3 3 3 ", - "2 2 2 2 2 2 2 2", - "1 1 1 1 1 1 1 1")) -} - -func TestIssue19408(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1 (c_int int, primary key(c_int))") - tk.MustExec("create table t2 (c_int int, unique key (c_int)) partition by hash (c_int) partitions 4") - tk.MustExec("insert into t1 values (1), (2), (3), (4), (5)") - tk.MustExec("insert into t2 select * from t1") - tk.MustExec("begin") - tk.MustExec("delete from t1 where c_int = 1") - tk.MustQuery("select /*+ 
INL_MERGE_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int").Sort().Check(testkit.Rows( - "2 2", - "3 3", - "4 4", - "5 5")) - tk.MustQuery("select /*+ INL_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int").Sort().Check(testkit.Rows( - "2 2", - "3 3", - "4 4", - "5 5")) - tk.MustQuery("select /*+ INL_HASH_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int").Sort().Check(testkit.Rows( - "2 2", - "3 3", - "4 4", - "5 5")) - tk.MustExec("commit") -} - -func TestIssue20137(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1 (id bigint(20) unsigned, primary key(id))") - tk.MustExec("create table t2 (id bigint(20) unsigned)") - tk.MustExec("insert into t1 values (8738875760185212610)") - tk.MustExec("insert into t1 values (9814441339970117597)") - tk.MustExec("insert into t2 values (8738875760185212610)") - tk.MustExec("insert into t2 values (9814441339970117597)") - tk.MustQuery("select /*+ INL_MERGE_JOIN(t1, t2) */ * from t2 left join t1 on t1.id = t2.id order by t1.id").Check( - testkit.Rows("8738875760185212610 8738875760185212610", "9814441339970117597 9814441339970117597")) -} - -func TestIndexJoinOnSinglePartitionTable(t *testing.T) { - // For issue 19145 - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`set @@tidb_opt_advanced_join_hint=0`) - for _, val := range []string{string(variable.Static), string(variable.Dynamic)} { - tk.MustExec("set @@tidb_partition_prune_mode= '" + val + "'") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue )") - tk.MustExec("create table t2 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 
values less than (10), partition p1 values less than maxvalue )") - tk.MustExec("insert into t1 values (1, 'Alice')") - tk.MustExec("insert into t2 values (1, 'Bob')") - tk.MustExec("analyze table t1, t2") - sql := "select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str" - tk.MustQuery(sql).Check(testkit.Rows("1 Alice 1 Bob")) - rows := testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + sql).Rows()) - // Partition table can't be inner side of index merge join, because it can't keep order. - require.Equal(t, -1, strings.Index(rows[0], "IndexMergeJoin")) - require.Equal(t, true, len(tk.MustQuery("show warnings").Rows()) > 0) - - sql = "select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str" - tk.MustQuery(sql).Check(testkit.Rows("1 Alice 1 Bob")) - rows = testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + sql).Rows()) - require.Equal(t, 0, strings.Index(rows[0], "IndexHashJoin")) - - sql = "select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str" - tk.MustQuery(sql).Check(testkit.Rows("1 Alice 1 Bob")) - rows = testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + sql).Rows()) - require.Equal(t, 0, strings.Index(rows[0], "IndexJoin")) - } -} - -func TestIssue20400(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t, s") - tk.MustExec("create table s(a int, index(a))") - tk.MustExec("create table t(a int)") - tk.MustExec("insert into t values(1)") - tk.MustQuery("select /*+ hash_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1").Check( - testkit.Rows("1 ")) - tk.MustQuery("select /*+ inl_merge_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1").Check( - testkit.Rows("1 ")) -} - -func TestIssue20549(t *testing.T) { - store := 
testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("CREATE TABLE `t1` (`id` bigint(20) NOT NULL AUTO_INCREMENT, `t2id` bigint(20) DEFAULT NULL, PRIMARY KEY (`id`), KEY `t2id` (`t2id`));") - tk.MustExec("INSERT INTO `t1` VALUES (1,NULL);") - tk.MustExec("CREATE TABLE `t2` (`id` bigint(20) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`));") - tk.MustQuery("SELECT /*+ INL_MERGE_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id;").Check( - testkit.Rows("1")) - tk.MustQuery("SELECT /*+ HASH_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id;\n").Check( - testkit.Rows("1")) -} - -func TestIssue24473AndIssue25669(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists x, t2, t3") - tk.MustExec("CREATE TABLE `x` ( `a` enum('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`));") - tk.MustExec("insert into x values(\"x\"),(\"x\"),(\"b\"),(\"y\");") - tk.MustQuery("SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check( - testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y")) - tk.MustQuery("SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check( - testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y")) - - tk.MustExec("drop table if exists x, t2, t3") - tk.MustExec("CREATE TABLE `x` ( `a` set('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`));") - tk.MustExec("insert into x values(\"x\"),(\"x\"),(\"b\"),(\"y\");") - tk.MustQuery("SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check( - testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y")) - tk.MustQuery("SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check( - testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y")) -} 
diff --git a/pkg/executor/infoschema_reader_test.go b/pkg/executor/infoschema_reader_test.go index de8480dfac90a..4412db2021a52 100644 --- a/pkg/executor/infoschema_reader_test.go +++ b/pkg/executor/infoschema_reader_test.go @@ -81,217 +81,6 @@ func TestInspectionTables(t *testing.T) { tk.Session().GetSessionVars().InspectionTableCache = nil } -func TestProfiling(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("select * from information_schema.profiling").Check(testkit.Rows()) - tk.MustExec("set @@profiling=1") - tk.MustQuery("select * from information_schema.profiling").Check(testkit.Rows("0 0 0 0 0 0 0 0 0 0 0 0 0 0 0")) -} - -func TestSchemataTables(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustQuery("select * from information_schema.SCHEMATA where schema_name='mysql';").Check( - testkit.Rows("def mysql utf8mb4 utf8mb4_bin ")) - - // Test the privilege of new user for information_schema.schemata. - tk.MustExec("create user schemata_tester") - schemataTester := testkit.NewTestKit(t, store) - schemataTester.MustExec("use information_schema") - require.NoError(t, schemataTester.Session().Auth(&auth.UserIdentity{ - Username: "schemata_tester", - Hostname: "127.0.0.1", - }, nil, nil, nil)) - schemataTester.MustQuery("select count(*) from information_schema.SCHEMATA;").Check(testkit.Rows("1")) - schemataTester.MustQuery("select * from information_schema.SCHEMATA where schema_name='mysql';").Check( - [][]interface{}{}) - schemataTester.MustQuery("select * from information_schema.SCHEMATA where schema_name='INFORMATION_SCHEMA';").Check( - testkit.Rows("def INFORMATION_SCHEMA utf8mb4 utf8mb4_bin ")) - - // Test the privilege of user with privilege of mysql for information_schema.schemata. 
- tk.MustExec("CREATE ROLE r_mysql_priv;") - tk.MustExec("GRANT ALL PRIVILEGES ON mysql.* TO r_mysql_priv;") - tk.MustExec("GRANT r_mysql_priv TO schemata_tester;") - schemataTester.MustExec("set role r_mysql_priv") - schemataTester.MustQuery("select count(*) from information_schema.SCHEMATA;").Check(testkit.Rows("2")) - schemataTester.MustQuery("select * from information_schema.SCHEMATA;").Check( - testkit.Rows("def INFORMATION_SCHEMA utf8mb4 utf8mb4_bin ", "def mysql utf8mb4 utf8mb4_bin ")) -} - -func TestTableIDAndIndexID(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("drop table if exists test.t") - tk.MustExec("create table test.t (a int, b int, primary key(a), key k1(b))") - tk.MustQuery("select index_id from information_schema.tidb_indexes where table_schema = 'test' and table_name = 't'").Check(testkit.Rows("0", "1")) - tblID, err := strconv.Atoi(tk.MustQuery("select tidb_table_id from information_schema.tables where table_schema = 'test' and table_name = 't'").Rows()[0][0].(string)) - require.NoError(t, err) - require.Greater(t, tblID, 0) -} - -func TestSchemataCharacterSet(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("CREATE DATABASE `foo` DEFAULT CHARACTER SET = 'utf8mb4'") - tk.MustQuery("select default_character_set_name, default_collation_name FROM information_schema.SCHEMATA WHERE schema_name = 'foo'").Check( - testkit.Rows("utf8mb4 utf8mb4_bin")) - tk.MustExec("drop database `foo`") -} - -func TestViews(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("CREATE DEFINER='root'@'localhost' VIEW test.v1 AS SELECT 1") - tk.MustQuery("select TABLE_COLLATION is null from INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='VIEW'").Check(testkit.Rows("1", "1")) - tk.MustQuery("SELECT * FROM information_schema.views WHERE table_schema='test' AND table_name='v1'").Check(testkit.Rows("def test v1 
SELECT 1 AS `1` CASCADED NO root@localhost DEFINER utf8mb4 utf8mb4_bin")) - tk.MustQuery("SELECT table_catalog, table_schema, table_name, table_type, engine, version, row_format, table_rows, avg_row_length, data_length, max_data_length, index_length, data_free, auto_increment, update_time, check_time, table_collation, checksum, create_options, table_comment FROM information_schema.tables WHERE table_schema='test' AND table_name='v1'").Check(testkit.Rows("def test v1 VIEW VIEW")) -} - -func TestColumnsTables(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (bit bit(10) DEFAULT b'100')") - tk.MustQuery("SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't'").Check(testkit.Rows( - "def test t bit 1 b'100' YES bit 10 0 bit(10) select,insert,update,references ")) - tk.MustExec("drop table if exists t") - - tk.MustExec("set time_zone='+08:00'") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (b timestamp(3) NOT NULL DEFAULT '1970-01-01 08:00:01.000')") - tk.MustQuery("select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='test';").Check(testkit.Rows("1970-01-01 08:00:01.000")) - tk.MustExec("set time_zone='+04:00'") - tk.MustQuery("select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='test';").Check(testkit.Rows("1970-01-01 04:00:01.000")) - tk.MustExec("set time_zone=default") - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a bit DEFAULT (rand()))") - tk.MustQuery("select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='test';").Check(testkit.Rows("rand()")) - - tk.MustExec("drop table if exists t") - tk.MustExec("CREATE TABLE t (`COL3` bit(1) NOT NULL,b year) ;") - tk.MustQuery("select column_type from information_schema.columns where TABLE_SCHEMA = 'test' and 
TABLE_NAME = 't';"). - Check(testkit.Rows("bit(1)", "year(4)")) - - // For issue: https://github.com/pingcap/tidb/issues/43379 - tk.MustQuery("select ordinal_position from information_schema.columns where table_schema=database() and table_name='t' and column_name='b'"). - Check(testkit.Rows("2")) -} - -func TestEngines(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("select * from information_schema.ENGINES;").Check(testkit.Rows("InnoDB DEFAULT Supports transactions, row-level locking, and foreign keys YES YES YES")) -} - -// https://github.com/pingcap/tidb/issues/25467. -func TestDataTypesMaxLengthAndOctLength(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("drop database if exists test_oct_length;") - tk.MustExec("create database test_oct_length;") - tk.MustExec("use test_oct_length;") - - testCases := []struct { - colTp string - maxLen int - octLen int - }{ - {"varchar(255) collate ascii_bin", 255, 255}, - {"varchar(255) collate utf8mb4_bin", 255, 255 * 4}, - {"varchar(255) collate utf8_bin", 255, 255 * 3}, - {"char(10) collate ascii_bin", 10, 10}, - {"char(10) collate utf8mb4_bin", 10, 10 * 4}, - {"set('a', 'b', 'cccc') collate ascii_bin", 8, 8}, - {"set('a', 'b', 'cccc') collate utf8mb4_bin", 8, 8 * 4}, - {"enum('a', 'b', 'cccc') collate ascii_bin", 4, 4}, - {"enum('a', 'b', 'cccc') collate utf8mb4_bin", 4, 4 * 4}, - } - for _, tc := range testCases { - createSQL := fmt.Sprintf("create table t (a %s);", tc.colTp) - tk.MustExec(createSQL) - result := tk.MustQuery("select character_maximum_length, character_octet_length " + - "from information_schema.columns " + - "where table_schema=(select database()) and table_name='t';") - expectedRows := testkit.Rows(fmt.Sprintf("%d %d", tc.maxLen, tc.octLen)) - result.Check(expectedRows) - tk.MustExec("drop table t;") - } -} - -func TestDDLJobs(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := 
testkit.NewTestKit(t, store) - tk.MustExec("create database if not exists test_ddl_jobs") - tk.MustQuery("select db_name, job_type from information_schema.DDL_JOBS limit 1").Check( - testkit.Rows("test_ddl_jobs create schema")) - - tk.MustExec("use test_ddl_jobs") - tk.MustExec("create table t (a int);") - tk.MustQuery("select db_name, table_name, job_type from information_schema.DDL_JOBS where table_name = 't'").Check( - testkit.Rows("test_ddl_jobs t create table")) - - tk.MustQuery("select job_type from information_schema.DDL_JOBS group by job_type having job_type = 'create table'").Check( - testkit.Rows("create table")) - - // Test the START_TIME and END_TIME field. - tk.MustQuery("select distinct job_type from information_schema.DDL_JOBS where job_type = 'create table' and start_time > str_to_date('20190101','%Y%m%d%H%i%s')").Check( - testkit.Rows("create table")) - - // Test the privilege of new user for information_schema.DDL_JOBS. - tk.MustExec("create user DDL_JOBS_tester") - DDLJobsTester := testkit.NewTestKit(t, store) - DDLJobsTester.MustExec("use information_schema") - require.NoError(t, DDLJobsTester.Session().Auth(&auth.UserIdentity{ - Username: "DDL_JOBS_tester", - Hostname: "127.0.0.1", - }, nil, nil, nil)) - - // Test the privilege of user for information_schema.ddl_jobs. 
- DDLJobsTester.MustQuery("select DB_NAME, TABLE_NAME from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and TABLE_NAME = 't';").Check( - [][]interface{}{}) - tk.MustExec("CREATE ROLE r_priv;") - tk.MustExec("GRANT ALL PRIVILEGES ON test_ddl_jobs.* TO r_priv;") - tk.MustExec("GRANT r_priv TO DDL_JOBS_tester;") - DDLJobsTester.MustExec("set role r_priv") - DDLJobsTester.MustQuery("select DB_NAME, TABLE_NAME from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and TABLE_NAME = 't';").Check( - testkit.Rows("test_ddl_jobs t")) - - tk.MustExec("create table tt (a int);") - tk.MustExec("alter table tt add index t(a), add column b int") - tk.MustQuery("select db_name, table_name, job_type from information_schema.DDL_JOBS limit 3").Check( - testkit.Rows("test_ddl_jobs tt alter table multi-schema change", "test_ddl_jobs tt add column /* subjob */", "test_ddl_jobs tt add index /* subjob */ /* txn-merge */")) -} - -func TestKeyColumnUsage(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustQuery("select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME='stats_meta' and COLUMN_NAME='table_id';").Check( - testkit.Rows("def mysql tbl def mysql stats_meta table_id 1 ")) - - // test the privilege of new user for information_schema.table_constraints - tk.MustExec("create user key_column_tester") - keyColumnTester := testkit.NewTestKit(t, store) - keyColumnTester.MustExec("use information_schema") - require.NoError(t, keyColumnTester.Session().Auth(&auth.UserIdentity{ - Username: "key_column_tester", - Hostname: "127.0.0.1", - }, nil, nil, nil)) - keyColumnTester.MustQuery("select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME != 'CLUSTER_SLOW_QUERY';").Check([][]interface{}{}) - - // test the privilege of user with privilege of mysql.gc_delete_range for information_schema.table_constraints - tk.MustExec("CREATE ROLE r_stats_meta ;") - tk.MustExec("GRANT ALL PRIVILEGES ON 
mysql.stats_meta TO r_stats_meta;") - tk.MustExec("GRANT r_stats_meta TO key_column_tester;") - keyColumnTester.MustExec("set role r_stats_meta") - rows := keyColumnTester.MustQuery("select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME='stats_meta';").Rows() - require.Greater(t, len(rows), 0) -} - func TestUserPrivileges(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -359,29 +148,6 @@ func TestUserPrivileges(t *testing.T) { require.Greater(t, len(rows), 0) } -func TestUserPrivilegesTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk1 := testkit.NewTestKit(t, store) - - // test the privilege of new user for information_schema.user_privileges - tk.MustExec("create user usageuser") - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{ - Username: "usageuser", - Hostname: "127.0.0.1", - }, nil, nil, nil)) - tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"`).Check(testkit.Rows("'usageuser'@'%' def USAGE NO")) - // the usage row disappears when there is a non-dynamic privilege added - tk1.MustExec("GRANT SELECT ON *.* to usageuser") - tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"`).Check(testkit.Rows("'usageuser'@'%' def SELECT NO")) - // test grant privilege - tk1.MustExec("GRANT SELECT ON *.* to usageuser WITH GRANT OPTION") - tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"`).Check(testkit.Rows("'usageuser'@'%' def SELECT YES")) - // test DYNAMIC privs - tk1.MustExec("GRANT BACKUP_ADMIN ON *.* to usageuser") - tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'" ORDER BY privilege_type`).Check(testkit.Rows("'usageuser'@'%' def BACKUP_ADMIN NO", "'usageuser'@'%' def SELECT YES")) -} - func TestDataForTableStatsField(t *testing.T) { store, dom := 
testkit.CreateMockStoreAndDomain(t) h := dom.StatsHandle() @@ -504,52 +270,6 @@ func TestPartitionsTable(t *testing.T) { tk.MustExec("drop table test_partitions") } -// https://github.com/pingcap/tidb/issues/32693. -func TestPartitionTablesStatsCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - tk.MustExec(` -CREATE TABLE e ( id INT NOT NULL, fname VARCHAR(30), lname VARCHAR(30)) PARTITION BY RANGE (id) ( - PARTITION p0 VALUES LESS THAN (50), - PARTITION p1 VALUES LESS THAN (100), - PARTITION p2 VALUES LESS THAN (150), - PARTITION p3 VALUES LESS THAN (MAXVALUE));`) - tk.MustExec(`CREATE TABLE e2 ( id INT NOT NULL, fname VARCHAR(30), lname VARCHAR(30));`) - // Load the stats cache. - tk.MustQuery(`SELECT PARTITION_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 'e';`) - // p0: 1 row, p3: 3 rows - tk.MustExec(`INSERT INTO e VALUES (1669, "Jim", "Smith"), (337, "Mary", "Jones"), (16, "Frank", "White"), (2005, "Linda", "Black");`) - tk.MustExec(`set tidb_enable_exchange_partition='on';`) - tk.MustExec(`ALTER TABLE e EXCHANGE PARTITION p0 WITH TABLE e2;`) - // p0: 1 rows, p3: 3 rows - tk.MustExec(`INSERT INTO e VALUES (41, "Michael", "Green");`) - tk.MustExec(`analyze table e;`) // The stats_meta should be effective immediately. - tk.MustQuery(`SELECT PARTITION_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 'e';`). - Check(testkit.Rows("p0 1", "p1 0", "p2 0", "p3 3")) -} - -func TestMetricTables(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use information_schema") - tk.MustQuery("select count(*) > 0 from `METRICS_TABLES`").Check(testkit.Rows("1")) - tk.MustQuery("select * from `METRICS_TABLES` where table_name='tidb_qps'"). 
- Check(testkit.RowsWithSep("|", "tidb_qps|sum(rate(tidb_server_query_total{$LABEL_CONDITIONS}[$RANGE_DURATION])) by (result,type,instance)|instance,type,result|0|TiDB query processing numbers per second")) -} - -func TestTableConstraintsTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("select * from information_schema.TABLE_CONSTRAINTS where TABLE_NAME='gc_delete_range';").Check(testkit.Rows("def mysql delete_range_index mysql gc_delete_range UNIQUE")) -} - -func TestTableSessionVar(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("select * from information_schema.SESSION_VARIABLES where VARIABLE_NAME='tidb_retry_limit';").Check(testkit.Rows("tidb_retry_limit 10")) -} - func TestForAnalyzeStatus(t *testing.T) { store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) @@ -640,19 +360,6 @@ func TestForServersInfo(t *testing.T) { require.Equal(t, stringutil.BuildStringFromLabels(info.Labels), rows[0][8]) } -func TestSequences(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("CREATE SEQUENCE test.seq maxvalue 10000000") - tk.MustQuery("SELECT * FROM information_schema.sequences WHERE sequence_schema='test' AND sequence_name='seq'").Check(testkit.Rows("def test seq 1 1000 0 1 10000000 1 1 ")) - tk.MustExec("DROP SEQUENCE test.seq") - tk.MustExec("CREATE SEQUENCE test.seq start = -1 minvalue -1 maxvalue 10 increment 1 cache 10") - tk.MustQuery("SELECT * FROM information_schema.sequences WHERE sequence_schema='test' AND sequence_name='seq'").Check(testkit.Rows("def test seq 1 10 0 1 10 -1 -1 ")) - tk.MustExec("CREATE SEQUENCE test.seq2 start = -9 minvalue -10 maxvalue 10 increment -1 cache 15") - tk.MustQuery("SELECT * FROM information_schema.sequences WHERE sequence_schema='test' AND sequence_name='seq2'").Check(testkit.Rows("def test seq2 1 15 0 -1 10 -10 -9 ")) - 
tk.MustQuery("SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME , TABLE_TYPE, ENGINE, TABLE_ROWS FROM information_schema.tables WHERE TABLE_TYPE='SEQUENCE' AND TABLE_NAME='seq2'").Check(testkit.Rows("def test seq2 SEQUENCE InnoDB 1")) -} - func TestTiFlashSystemTableWithTiFlashV620(t *testing.T) { instances := []string{ "tiflash,127.0.0.1:3933,127.0.0.1:7777,,", @@ -770,21 +477,6 @@ func TestTiFlashSystemTableWithTiFlashV640(t *testing.T) { tk.MustQuery("show warnings").Check(testkit.Rows()) } -func TestTablesPKType(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t_int (a int primary key, b int)") - tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'test' and table_name = 't_int'").Check(testkit.Rows("CLUSTERED")) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly - tk.MustExec("create table t_implicit (a varchar(64) primary key, b int)") - tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'test' and table_name = 't_implicit'").Check(testkit.Rows("NONCLUSTERED")) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.MustExec("create table t_common (a varchar(64) primary key, b int)") - tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'test' and table_name = 't_common'").Check(testkit.Rows("CLUSTERED")) - tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'INFORMATION_SCHEMA' and table_name = 'TABLES'").Check(testkit.Rows("NONCLUSTERED")) -} - // https://github.com/pingcap/tidb/issues/32459. 
func TestJoinSystemTableContainsView(t *testing.T) { store := testkit.CreateMockStore(t) @@ -860,16 +552,6 @@ func TestShowColumnsWithSubQueryView(t *testing.T) { require.NoError(t, failpoint.Disable("tikvclient/tikvStoreSendReqResult")) } -func TestNullColumns(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("CREATE TABLE t ( id int DEFAULT NULL);") - tk.MustExec("CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`1.1.1.1` SQL SECURITY DEFINER VIEW `v_test` (`type`) AS SELECT NULL AS `type` FROM `t` AS `f`;") - tk.MustQuery("select * from information_schema.columns where TABLE_SCHEMA = 'test' and TABLE_NAME = 'v_test';"). - Check(testkit.Rows("def test v_test type 1 YES binary 0 0 binary(0) select,insert,update,references ")) -} - // Code below are helper utilities for the test cases. type getTiFlashSystemTableRequestMocker struct { diff --git a/pkg/executor/inspection_common_test.go b/pkg/executor/inspection_common_test.go deleted file mode 100644 index 9a63a1ed9f9fc..0000000000000 --- a/pkg/executor/inspection_common_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package executor_test - -import ( - "context" - "testing" - - "github.com/pingcap/tidb/pkg/executor" - "github.com/pingcap/tidb/pkg/session" - "github.com/pingcap/tidb/pkg/testkit" - "github.com/stretchr/testify/require" -) - -func TestInspectionRules(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - inspectionCount := len(executor.InspectionRules) - summaryCount := len(executor.InspectionSummaryRules) - var cases = []struct { - sql string - ruleCount int - }{ - { - sql: "select * from information_schema.inspection_rules", - ruleCount: inspectionCount + summaryCount, - }, - { - sql: "select * from information_schema.inspection_rules where type='inspection'", - ruleCount: inspectionCount, - }, - { - sql: "select * from information_schema.inspection_rules where type='summary'", - ruleCount: summaryCount, - }, - { - sql: "select * from information_schema.inspection_rules where type='inspection' and type='summary'", - ruleCount: 0, - }, - } - - for _, ca := range cases { - rs, err := tk.Exec(ca.sql) - require.NoError(t, err) - rules, err := session.ResultSetToStringSlice(context.Background(), tk.Session(), rs) - require.NoError(t, err) - require.Len(t, rules, ca.ruleCount) - require.NoError(t, rs.Close()) - } -} diff --git a/pkg/executor/merge_join_test.go b/pkg/executor/merge_join_test.go index 02b7725715ead..c0168c2cd2c24 100644 --- a/pkg/executor/merge_join_test.go +++ b/pkg/executor/merge_join_test.go @@ -30,198 +30,6 @@ import ( "github.com/stretchr/testify/require" ) -const plan1 = `[[TableScan_12 { - "db": "test", - "table": "t1", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_17] [TableScan_15 { - "db": "test", - "table": "t2", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table 
filter conditions": null - } -} MergeJoin_17] [MergeJoin_17 { - "eqCond": [ - "eq(test.t1.c1, test.t2.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": [], - "leftPlan": "TableScan_12", - "rightPlan": "TableScan_15", - "desc": "false" -} MergeJoin_8] [TableScan_22 { - "db": "test", - "table": "t3", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_8] [MergeJoin_8 { - "eqCond": [ - "eq(test.t2.c1, test.t3.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": [], - "leftPlan": "MergeJoin_17", - "rightPlan": "TableScan_22", - "desc": "false" -} Sort_23] [Sort_23 { - "exprs": [ - { - "Expr": "test.t1.c1", - "Desc": false - } - ], - "limit": null, - "child": "MergeJoin_8" -} ]]` - -const plan2 = `[[TableScan_12 { - "db": "test", - "table": "t1", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_17] [TableScan_15 { - "db": "test", - "table": "t2", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_17] [MergeJoin_17 { - "eqCond": [ - "eq(test.t1.c1, test.t2.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": [], - "leftPlan": "TableScan_12", - "rightPlan": "TableScan_15", - "desc": "false" -} MergeJoin_8] [TableScan_22 { - "db": "test", - "table": "t3", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_8] [MergeJoin_8 { - "eqCond": [ - "eq(test.t2.c1, test.t3.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": [], - "leftPlan": "MergeJoin_17", - "rightPlan": 
"TableScan_22", - "desc": "false" -} Sort_23] [Sort_23 { - "exprs": [ - { - "Expr": "test.t1.c1", - "Desc": false - } - ], - "limit": null, - "child": "MergeJoin_8" -} ]]` - -const plan3 = `[[TableScan_12 { - "db": "test", - "table": "t1", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_9] [TableScan_15 { - "db": "test", - "table": "t2", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_9] [MergeJoin_9 { - "eqCond": [ - "eq(test.t1.c1, test.t2.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": [], - "leftPlan": "TableScan_12", - "rightPlan": "TableScan_15", - "desc": "false" -} Sort_16] [Sort_16 { - "exprs": [ - { - "Expr": "test.t1.c1", - "Desc": false - } - ], - "limit": null, - "child": "MergeJoin_9" -} MergeJoin_8] [TableScan_23 { - "db": "test", - "table": "t3", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_8] [MergeJoin_8 { - "eqCond": [ - "eq(test.t1.c1, test.t3.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": [], - "leftPlan": "Sort_16", - "rightPlan": "TableScan_23", - "desc": "false" -} ]]` - func checkMergeAndRun(tk *testkit.TestKit, t *testing.T, sql string) *testkit.Result { explainedSQL := "explain format = 'brief' " + sql result := tk.MustQuery(explainedSQL) @@ -230,15 +38,6 @@ func checkMergeAndRun(tk *testkit.TestKit, t *testing.T, sql string) *testkit.Re return tk.MustQuery(sql) } -func checkPlanAndRun(tk *testkit.TestKit, t *testing.T, plan string, sql string) *testkit.Result { - explainedSQL := "explain format = 'brief' " + sql - /* result := */ tk.MustQuery(explainedSQL) - // TODO: Reopen it 
after refactoring explain. - // resultStr := fmt.Sprintf("%v", result.Rows()) - // require.Equal(t, resultStr, plan) - return tk.MustQuery(sql) -} - func TestShuffleMergeJoinInDisk(t *testing.T) { require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/executor/testMergeJoinRowContainerSpill", "return(true)")) defer func() { @@ -318,418 +117,6 @@ func TestMergeJoinInDisk(t *testing.T) { require.Greater(t, tk.Session().GetSessionVars().StmtCtx.DiskTracker.MaxConsumed(), int64(0)) } -func TestMergeJoin(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(c1 int, c2 int)") - tk.MustExec("create table t1(c1 int, c2 int)") - tk.MustExec("insert into t values(1,1),(2,2)") - tk.MustExec("insert into t1 values(2,3),(4,4)") - - result := checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") - result.Check(testkit.Rows("1 1 ")) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") - result.Check(testkit.Rows(" 1 1")) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") - result.Check(testkit.Rows()) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false") - result.Check(testkit.Rows()) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1") - result.Check(testkit.Rows("1 1 ", "2 2 2 3")) - - tk.MustExec("drop table if exists t1") - tk.MustExec("drop table if exists t2") - tk.MustExec("drop table if exists t3") - - tk.MustExec("create table t1 (c1 int, c2 int)") - tk.MustExec("create table t2 (c1 int, 
c2 int)") - tk.MustExec("create table t3 (c1 int, c2 int)") - - tk.MustExec("insert into t1 values (1,1), (2,2), (3,3)") - tk.MustExec("insert into t2 values (1,1), (3,3), (5,5)") - tk.MustExec("insert into t3 values (1,1), (5,5), (9,9)") - - result = tk.MustQuery("select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2;") - result.Check(testkit.Rows(" 5 5", " 9 9", "1 1 1 1 1 1")) - - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1 (c1 int)") - tk.MustExec("insert into t1 values (1), (1), (1)") - result = tk.MustQuery("select/*+ TIDB_SMJ(t) */ * from t1 a join t1 b on a.c1 = b.c1;") - result.Check(testkit.Rows("1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1")) - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(c1 int, index k(c1))") - tk.MustExec("create table t1(c1 int)") - tk.MustExec("insert into t values (1),(2),(3),(4),(5),(6),(7)") - tk.MustExec("insert into t1 values (1),(2),(3),(4),(5),(6),(7)") - result = tk.MustQuery("select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1;") - result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1;") - result.Check(testkit.Rows("1", "2", "3")) - // Test LogicalSelection under LogicalJoin. - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1;") - result.Check(testkit.Rows("1", "2", "3")) - tk.MustExec("begin;") - // Test LogicalLock under LogicalJoin. 
- result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 for update) b where a.c1 = b.c1 order by a.c1;") - result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) - // Test LogicalUnionScan under LogicalJoin. - tk.MustExec("insert into t1 values(8);") - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1;") - result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) - tk.MustExec("rollback;") - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(c1 int)") - tk.MustExec("create table t1(c1 int unsigned)") - tk.MustExec("insert into t values (1)") - tk.MustExec("insert into t1 values (1)") - result = tk.MustQuery("select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1") - result.Check(testkit.Rows("1")) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, b int, index a(a), index b(b))") - tk.MustExec("insert into t values(1, 2)") - tk.MustQuery("select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a").Check(testkit.Rows(" 2")) - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists s") - tk.MustExec("create table t(a int, b int, primary key(a, b))") - tk.MustExec("insert into t value(1,1),(1,2),(1,3),(1,4)") - tk.MustExec("create table s(a int, primary key(a))") - tk.MustExec("insert into s value(1)") - tk.MustQuery("select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a").Check(testkit.Rows("4")) - - // Test TIDB_SMJ for cartesian product. 
- tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int)") - tk.MustExec("insert into t value(1),(2)") - tk.MustQuery("explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a").Check(testkit.Rows( - "Sort 100000000.00 root test.t.a, test.t.a", - "└─MergeJoin 100000000.00 root inner join", - " ├─TableReader(Build) 10000.00 root data:TableFullScan", - " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - " └─TableReader(Probe) 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", - )) - tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a").Check(testkit.Rows( - "1 1", - "1 2", - "2 1", - "2 2", - )) - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists s") - tk.MustExec("create table t(a int, b int)") - tk.MustExec("insert into t values(1,1),(1,2)") - tk.MustExec("create table s(a int, b int)") - tk.MustExec("insert into s values(1,1)") - tk.MustQuery("explain format = 'brief' select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t").Check(testkit.Rows( - "MergeJoin 10000.00 root left outer semi join, other cond:eq(test.t.a, test.s.a), ge(test.s.b, test.t.b)", - "├─TableReader(Build) 10000.00 root data:TableFullScan", - "│ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo", - "└─TableReader(Probe) 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo", - )) - tk.MustQuery("select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t").Check(testkit.Rows( - "1", - "0", - )) - - // Test TIDB_SMJ for join with order by desc, see https://github.com/pingcap/tidb/issues/14483 - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t (a int, key(a))") - tk.MustExec("create table t1 (a int, 
key(a))") - tk.MustExec("insert into t values (1), (2), (3)") - tk.MustExec("insert into t1 values (1), (2), (3)") - tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ t.a from t, t1 where t.a = t1.a order by t1.a desc").Check(testkit.Rows( - "3", "2", "1")) - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int, key(a), key(b))") - tk.MustExec("insert into t values (1,1),(1,2),(1,3),(2,1),(2,2),(3,1),(3,2),(3,3)") - tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ t1.a from t t1, t t2 where t1.a = t2.b order by t1.a desc").Check(testkit.Rows( - "3", "3", "3", "3", "3", "3", - "2", "2", "2", "2", "2", "2", - "1", "1", "1", "1", "1", "1", "1", "1", "1")) - - tk.MustExec("drop table if exists s") - tk.MustExec("create table s (a int)") - tk.MustExec("insert into s values (4), (1), (3), (2)") - tk.MustQuery("explain format = 'brief' select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc").Check(testkit.Rows( - "Sort 12487.50 root test.s.a:desc", - "└─HashJoin 12487.50 root inner join, equal:[eq(test.s.a, test.s.a)]", - " ├─TableReader(Build) 9990.00 root data:Selection", - " │ └─Selection 9990.00 cop[tikv] not(isnull(test.s.a))", - " │ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo", - " └─TableReader(Probe) 9990.00 root data:Selection", - " └─Selection 9990.00 cop[tikv] not(isnull(test.s.a))", - " └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo", - )) - tk.MustQuery("select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc").Check(testkit.Rows( - "4", "3", "2", "1")) -} - -func TestShuffleMergeJoin(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_merge_join_concurrency = 4;") - - tk.MustExec("drop 
table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(c1 int, c2 int)") - tk.MustExec("create table t1(c1 int, c2 int)") - tk.MustExec("insert into t values(1,1),(2,2)") - tk.MustExec("insert into t1 values(2,3),(4,4)") - - result := checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") - result.Check(testkit.Rows("1 1 ")) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") - result.Check(testkit.Rows(" 1 1")) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") - result.Check(testkit.Rows()) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false") - result.Check(testkit.Rows()) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1") - result.Check(testkit.Rows("1 1 ", "2 2 2 3")) - - tk.MustExec("drop table if exists t1") - tk.MustExec("drop table if exists t2") - tk.MustExec("drop table if exists t3") - - tk.MustExec("create table t1 (c1 int, c2 int)") - tk.MustExec("create table t2 (c1 int, c2 int)") - tk.MustExec("create table t3 (c1 int, c2 int)") - - tk.MustExec("insert into t1 values (1,1), (2,2), (3,3)") - tk.MustExec("insert into t2 values (1,1), (3,3), (5,5)") - tk.MustExec("insert into t3 values (1,1), (5,5), (9,9)") - - result = tk.MustQuery("select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2;") - result.Check(testkit.Rows(" 5 5", " 9 9", "1 1 1 1 1 1")) - - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1 (c1 int)") - tk.MustExec("insert into t1 values (1), (1), (1)") - result = tk.MustQuery("select/*+ 
TIDB_SMJ(t) */ * from t1 a join t1 b on a.c1 = b.c1;") - result.Check(testkit.Rows("1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1")) - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(c1 int, index k(c1))") - tk.MustExec("create table t1(c1 int)") - tk.MustExec("insert into t values (1),(2),(3),(4),(5),(6),(7)") - tk.MustExec("insert into t1 values (1),(2),(3),(4),(5),(6),(7)") - result = tk.MustQuery("select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1;") - result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1;") - result.Check(testkit.Rows("1", "2", "3")) - // Test LogicalSelection under LogicalJoin. - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1;") - result.Check(testkit.Rows("1", "2", "3")) - tk.MustExec("begin;") - // Test LogicalLock under LogicalJoin. - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 for update) b where a.c1 = b.c1 order by a.c1;") - result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) - // Test LogicalUnionScan under LogicalJoin. 
- tk.MustExec("insert into t1 values(8);") - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1;") - result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) - tk.MustExec("rollback;") - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(c1 int)") - tk.MustExec("create table t1(c1 int unsigned)") - tk.MustExec("insert into t values (1)") - tk.MustExec("insert into t1 values (1)") - result = tk.MustQuery("select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1") - result.Check(testkit.Rows("1")) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, b int, index a(a), index b(b))") - tk.MustExec("insert into t values(1, 2)") - tk.MustQuery("select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a").Check(testkit.Rows(" 2")) - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists s") - tk.MustExec("create table t(a int, b int, primary key(a, b))") - tk.MustExec("insert into t value(1,1),(1,2),(1,3),(1,4)") - tk.MustExec("create table s(a int, primary key(a))") - tk.MustExec("insert into s value(1)") - tk.MustQuery("select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a").Check(testkit.Rows("4")) - - // Test TIDB_SMJ for cartesian product. 
- tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int)") - tk.MustExec("insert into t value(1),(2)") - tk.MustQuery("explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a").Check(testkit.Rows( - "Sort 100000000.00 root test.t.a, test.t.a", - "└─MergeJoin 100000000.00 root inner join", - " ├─TableReader(Build) 10000.00 root data:TableFullScan", - " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - " └─TableReader(Probe) 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", - )) - tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a").Check(testkit.Rows( - "1 1", - "1 2", - "2 1", - "2 2", - )) - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists s") - tk.MustExec("create table t(a int, b int)") - tk.MustExec("insert into t values(1,1),(1,2)") - tk.MustExec("create table s(a int, b int)") - tk.MustExec("insert into s values(1,1)") - tk.MustQuery("explain format = 'brief' select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t").Check(testkit.Rows( - "MergeJoin 10000.00 root left outer semi join, other cond:eq(test.t.a, test.s.a), ge(test.s.b, test.t.b)", - "├─TableReader(Build) 10000.00 root data:TableFullScan", - "│ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo", - "└─TableReader(Probe) 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo", - )) - tk.MustQuery("select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t").Check(testkit.Rows( - "1", - "0", - )) - - // Test TIDB_SMJ for join with order by desc, see https://github.com/pingcap/tidb/issues/14483 - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t (a int, key(a))") - tk.MustExec("create table t1 (a int, 
key(a))") - tk.MustExec("insert into t values (1), (2), (3)") - tk.MustExec("insert into t1 values (1), (2), (3)") - tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ t.a from t, t1 where t.a = t1.a order by t1.a desc").Check(testkit.Rows( - "3", "2", "1")) - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int, key(a), key(b))") - tk.MustExec("insert into t values (1,1),(1,2),(1,3),(2,1),(2,2),(3,1),(3,2),(3,3)") - tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ t1.a from t t1, t t2 where t1.a = t2.b order by t1.a desc").Check(testkit.Rows( - "3", "3", "3", "3", "3", "3", - "2", "2", "2", "2", "2", "2", - "1", "1", "1", "1", "1", "1", "1", "1", "1")) - - tk.MustExec("drop table if exists s") - tk.MustExec("create table s (a int)") - tk.MustExec("insert into s values (4), (1), (3), (2)") - tk.MustQuery("explain format = 'brief' select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc").Check(testkit.Rows( - "Sort 12487.50 root test.s.a:desc", - "└─HashJoin 12487.50 root inner join, equal:[eq(test.s.a, test.s.a)]", - " ├─TableReader(Build) 9990.00 root data:Selection", - " │ └─Selection 9990.00 cop[tikv] not(isnull(test.s.a))", - " │ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo", - " └─TableReader(Probe) 9990.00 root data:Selection", - " └─Selection 9990.00 cop[tikv] not(isnull(test.s.a))", - " └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo", - )) - tk.MustQuery("select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc").Check(testkit.Rows( - "4", "3", "2", "1")) -} - -func Test3WaysMergeJoin(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - tk.MustExec("drop table if exists t1") - tk.MustExec("drop table if exists t2") - 
tk.MustExec("drop table if exists t3") - tk.MustExec("create table t1(c1 int, c2 int, PRIMARY KEY (c1))") - tk.MustExec("create table t2(c1 int, c2 int, PRIMARY KEY (c1))") - tk.MustExec("create table t3(c1 int, c2 int, PRIMARY KEY (c1))") - tk.MustExec("insert into t1 values(1,1),(2,2),(3,3)") - tk.MustExec("insert into t2 values(2,3),(3,4),(4,5)") - tk.MustExec("insert into t3 values(1,2),(2,4),(3,10)") - result := checkPlanAndRun(tk, t, plan1, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1") - result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) - - result = checkPlanAndRun(tk, t, plan2, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1") - result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) - - // In below case, t1 side filled with null when no matched join, so that order is not kept and sort appended - // On the other hand, t1 order kept so no final sort appended - result = checkPlanAndRun(tk, t, plan3, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1") - result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) -} - -func Test3WaysShuffleMergeJoin(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_merge_join_concurrency = 4;") - - tk.MustExec("drop table if exists t1") - tk.MustExec("drop table if exists t2") - tk.MustExec("drop table if exists t3") - tk.MustExec("create table t1(c1 int, c2 int, PRIMARY KEY (c1))") - tk.MustExec("create table t2(c1 int, c2 int, PRIMARY KEY (c1))") - tk.MustExec("create table t3(c1 int, c2 int, PRIMARY KEY (c1))") - tk.MustExec("insert into t1 values(1,1),(2,2),(3,3)") - tk.MustExec("insert into t2 values(2,3),(3,4),(4,5)") - tk.MustExec("insert into t3 values(1,2),(2,4),(3,10)") - result := checkPlanAndRun(tk, t, plan1, "select 
/*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1") - result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) - - result = checkPlanAndRun(tk, t, plan2, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1") - result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) - - // In below case, t1 side filled with null when no matched join, so that order is not kept and sort appended - // On the other hand, t1 order kept so no final sort appended - result = checkPlanAndRun(tk, t, plan3, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1") - result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) -} - -func TestMergeJoinDifferentTypes(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("set @@session.tidb_executor_concurrency = 4;") - tk.MustExec("set @@session.tidb_hash_join_concurrency = 5;") - tk.MustExec("set @@session.tidb_distsql_scan_concurrency = 15;") - - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`drop table if exists t2;`) - tk.MustExec(`create table t1(a bigint, b bit(1), index idx_a(a));`) - tk.MustExec(`create table t2(a bit(1) not null, b bit(1), index idx_a(a));`) - tk.MustExec(`insert into t1 values(1, 1);`) - tk.MustExec(`insert into t2 values(1, 1);`) - tk.MustQuery(`select hex(t1.a), hex(t2.a) from t1 inner join t2 on t1.a=t2.a;`).Check(testkit.Rows(`1 1`)) - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`drop table if exists t2;`) - tk.MustExec(`create table t1(a float, b double, index idx_a(a));`) - tk.MustExec(`create table t2(a double not null, b double, index idx_a(a));`) - tk.MustExec(`insert into t1 values(1, 1);`) - tk.MustExec(`insert into t2 values(1, 1);`) - tk.MustQuery(`select t1.a, t2.a from t1 inner join t2 on t1.a=t2.a;`).Check(testkit.Rows(`1 1`)) - - 
tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`drop table if exists t2;`) - tk.MustExec(`create table t1(a bigint signed, b bigint, index idx_a(a));`) - tk.MustExec(`create table t2(a bigint unsigned, b bigint, index idx_a(a));`) - tk.MustExec(`insert into t1 values(-1, 0), (-1, 0), (0, 0), (0, 0), (pow(2, 63), 0), (pow(2, 63), 0);`) - tk.MustExec(`insert into t2 values(18446744073709551615, 0), (18446744073709551615, 0), (0, 0), (0, 0), (pow(2, 63), 0), (pow(2, 63), 0);`) - tk.MustQuery(`select t1.a, t2.a from t1 join t2 on t1.a=t2.a order by t1.a;`).Check(testkit.Rows( - `0 0`, - `0 0`, - `0 0`, - `0 0`, - )) -} - // TestVectorizedMergeJoin is used to test vectorized merge join with some corner cases. // //nolint:gosimple // generates false positive fmt.Sprintf warnings which keep aligned @@ -965,40 +352,3 @@ func TestVectorizedShuffleMergeJoin(t *testing.T) { runTest(ca.t2, ca.t1) } } - -func TestMergeJoinWithOtherConditions(t *testing.T) { - // more than one inner tuple should be filtered on other conditions - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists R;`) - tk.MustExec(`drop table if exists Y;`) - tk.MustExec(`create table Y (a int primary key, b int, index id_b(b));`) - tk.MustExec(`insert into Y values (0,2),(2,2);`) - tk.MustExec(`create table R (a int primary key, b int);`) - tk.MustExec(`insert into R values (2,2);`) - // the max() limits the required rows at most one - // TODO(fangzhuhe): specify Y as the build side using hints - tk.MustQuery(`select /*+tidb_smj(R)*/ max(Y.a) from R join Y on R.a=Y.b where R.b <= Y.a;`).Check(testkit.Rows( - `2`, - )) -} - -func TestShuffleMergeJoinWithOtherConditions(t *testing.T) { - // more than one inner tuple should be filtered on other conditions - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec("set @@session.tidb_merge_join_concurrency = 4;") - 
tk.MustExec(`drop table if exists R;`) - tk.MustExec(`drop table if exists Y;`) - tk.MustExec(`create table Y (a int primary key, b int, index id_b(b));`) - tk.MustExec(`insert into Y values (0,2),(2,2);`) - tk.MustExec(`create table R (a int primary key, b int);`) - tk.MustExec(`insert into R values (2,2);`) - // the max() limits the required rows at most one - // TODO(fangzhuhe): specify Y as the build side using hints - tk.MustQuery(`select /*+tidb_smj(R)*/ max(Y.a) from R join Y on R.a=Y.b where R.b <= Y.a;`).Check(testkit.Rows( - `2`, - )) -} diff --git a/tests/integrationtest/r/executor/executor_txn.result b/tests/integrationtest/r/executor/executor_txn.result new file mode 100644 index 0000000000000..d1083e9cd6351 --- /dev/null +++ b/tests/integrationtest/r/executor/executor_txn.result @@ -0,0 +1,169 @@ +drop table if exists t; +create table t(id int, a int, unique index idx(id)); +begin pessimistic; +insert into t values (1,1); +savepoint s1; +insert into t values (2,2); +rollback to s1; +insert into t values (2,2); +select * from t; +id a +1 1 +2 2 +rollback to s1; +select * from t; +id a +1 1 +commit; +select * from t; +id a +1 1 +delete from t; +insert into t values (1,1); +begin pessimistic; +delete from t where id = 1; +savepoint s1; +insert into t values (1,2); +rollback to s1; +select * from t; +id a +commit; +select * from t; +id a +drop table if exists t; +CREATE TABLE t (a enum('B','C') NOT NULL,UNIQUE KEY idx_1 (a),KEY idx_2 (a)); +begin pessimistic; +savepoint sp0; +insert ignore into t values ( 'B' ),( 'C' ); +update t set a = 'C' where a = 'B'; +Error 1062 (23000): Duplicate entry 'C' for key 't.idx_1' +select * from t where a = 'B' for update; +a +B +rollback to sp0; +delete from t where a = 'B' ; +rollback; +set session tidb_txn_mode='optimistic'; +drop table if exists tmp1; +create temporary table tmp1 (id int primary key auto_increment, u int unique, v int); +insert into tmp1 values(1, 11, 101); +begin; +savepoint sp0; +insert into tmp1 
values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +rollback to sp1; +select * from tmp1 order by id; +id u v +1 11 101 +2 22 202 +commit; +select * from tmp1 order by id; +id u v +1 11 101 +2 22 202 +set session tidb_txn_mode='pessimistic'; +drop table if exists tmp1; +create temporary table tmp1 (id int primary key auto_increment, u int unique, v int); +insert into tmp1 values(1, 11, 101); +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +rollback to sp1; +select * from tmp1 order by id; +id u v +1 11 101 +2 22 202 +commit; +select * from tmp1 order by id; +id u v +1 11 101 +2 22 202 +set session tidb_txn_mode=''; +drop table if exists tmp1; +create temporary table tmp1 (id int primary key auto_increment, u int unique, v int); +insert into tmp1 values(1, 11, 101); +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +rollback to sp1; +select * from tmp1 order by id; +id u v +1 11 101 +2 22 202 +commit; +select * from tmp1 order by id; +id u v +1 11 101 +2 22 202 +set session tidb_txn_mode='optimistic'; +drop table if exists tmp1; +create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows; +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +savepoint sp2; +insert into tmp1 values(4, 44, 404); +rollback to sp2; +select * from tmp1 order by id; +id u v +2 22 202 +3 33 303 +rollback to sp1; +select * from tmp1 order by id; +id u v +2 22 202 +commit; +select * from tmp1 order by id; +id u v +set session tidb_txn_mode='pessimistic'; +drop table if exists tmp1; +create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows; +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +savepoint 
sp2; +insert into tmp1 values(4, 44, 404); +rollback to sp2; +select * from tmp1 order by id; +id u v +2 22 202 +3 33 303 +rollback to sp1; +select * from tmp1 order by id; +id u v +2 22 202 +commit; +select * from tmp1 order by id; +id u v +set session tidb_txn_mode=''; +drop table if exists tmp1; +create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows; +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +savepoint sp2; +insert into tmp1 values(4, 44, 404); +rollback to sp2; +select * from tmp1 order by id; +id u v +2 22 202 +3 33 303 +rollback to sp1; +select * from tmp1 order by id; +id u v +2 22 202 +commit; +select * from tmp1 order by id; +id u v +set session tidb_txn_mode=default; diff --git a/tests/integrationtest/r/executor/import_into.result b/tests/integrationtest/r/executor/import_into.result new file mode 100644 index 0000000000000..ce7896e9cd4fb --- /dev/null +++ b/tests/integrationtest/r/executor/import_into.result @@ -0,0 +1,170 @@ +drop table if exists t; +create table t (id int); +BEGIN; +IMPORT INTO t FROM '/file.csv'; +Error 1105 (HY000): cannot run IMPORT INTO in explicit transaction +commit; +drop table if exists t; +create table t (id int); +import into t from '/file.csv' with xx=1; +Error 8163 (HY000): Unknown option xx +import into t from '/file.csv' with detached=1; +Error 8164 (HY000): Invalid option value for detached +import into t from '/file.csv' with character_set; +Error 8164 (HY000): Invalid option value for character_set +import into t from '/file.csv' with detached, detached; +Error 8165 (HY000): Option detached specified more than once +import into t from '/file.csv' with character_set=true; +Error 8164 (HY000): Invalid option value for character_set +import into t from '/file.csv' with character_set=null; +Error 8164 (HY000): Invalid option value for character_set +import into t from '/file.csv' with 
character_set=1; +Error 8164 (HY000): Invalid option value for character_set +import into t from '/file.csv' with character_set=true; +Error 8164 (HY000): Invalid option value for character_set +import into t from '/file.csv' with character_set=''; +Error 8164 (HY000): Invalid option value for character_set +import into t from '/file.csv' with character_set='aa'; +Error 8164 (HY000): Invalid option value for character_set +import into t from '/file.csv' with fields_terminated_by=null; +Error 8164 (HY000): Invalid option value for fields_terminated_by +import into t from '/file.csv' with fields_terminated_by=1; +Error 8164 (HY000): Invalid option value for fields_terminated_by +import into t from '/file.csv' with fields_terminated_by=true; +Error 8164 (HY000): Invalid option value for fields_terminated_by +import into t from '/file.csv' with fields_terminated_by=''; +Error 8164 (HY000): Invalid option value for fields_terminated_by +import into t from '/file.csv' with fields_enclosed_by=null; +Error 8164 (HY000): Invalid option value for fields_enclosed_by +import into t from '/file.csv' with fields_enclosed_by='aa'; +Error 8164 (HY000): Invalid option value for fields_enclosed_by +import into t from '/file.csv' with fields_enclosed_by=1; +Error 8164 (HY000): Invalid option value for fields_enclosed_by +import into t from '/file.csv' with fields_enclosed_by=true; +Error 8164 (HY000): Invalid option value for fields_enclosed_by +import into t from '/file.csv' with fields_escaped_by=null; +Error 8164 (HY000): Invalid option value for fields_escaped_by +import into t from '/file.csv' with fields_escaped_by='aa'; +Error 8164 (HY000): Invalid option value for fields_escaped_by +import into t from '/file.csv' with fields_escaped_by=1; +Error 8164 (HY000): Invalid option value for fields_escaped_by +import into t from '/file.csv' with fields_escaped_by=true; +Error 8164 (HY000): Invalid option value for fields_escaped_by +import into t from '/file.csv' with 
fields_defined_null_by=null; +Error 8164 (HY000): Invalid option value for fields_defined_null_by +import into t from '/file.csv' with fields_defined_null_by=1; +Error 8164 (HY000): Invalid option value for fields_defined_null_by +import into t from '/file.csv' with fields_defined_null_by=true; +Error 8164 (HY000): Invalid option value for fields_defined_null_by +import into t from '/file.csv' with lines_terminated_by=null; +Error 8164 (HY000): Invalid option value for lines_terminated_by +import into t from '/file.csv' with lines_terminated_by=1; +Error 8164 (HY000): Invalid option value for lines_terminated_by +import into t from '/file.csv' with lines_terminated_by=true; +Error 8164 (HY000): Invalid option value for lines_terminated_by +import into t from '/file.csv' with lines_terminated_by=''; +Error 8164 (HY000): Invalid option value for lines_terminated_by +import into t from '/file.csv' with skip_rows=null; +Error 8164 (HY000): Invalid option value for skip_rows +import into t from '/file.csv' with skip_rows=''; +Error 8164 (HY000): Invalid option value for skip_rows +import into t from '/file.csv' with skip_rows=-1; +Error 8164 (HY000): Invalid option value for skip_rows +import into t from '/file.csv' with skip_rows=true; +Error 8164 (HY000): Invalid option value for skip_rows +import into t from '/file.csv' with split_file='aa'; +Error 8164 (HY000): Invalid option value for split_file +import into t from '/file.csv' with split_file, skip_rows=2; +Error 8164 (HY000): Invalid option value for skip_rows, should be <= 1 when split-file is enabled +import into t from '/file.csv' with disk_quota='aa'; +Error 8164 (HY000): Invalid option value for disk_quota +import into t from '/file.csv' with disk_quota='220MiBxxx'; +Error 8164 (HY000): Invalid option value for disk_quota +import into t from '/file.csv' with disk_quota=1; +Error 8164 (HY000): Invalid option value for disk_quota +import into t from '/file.csv' with disk_quota=false; +Error 8164 (HY000): 
Invalid option value for disk_quota +import into t from '/file.csv' with disk_quota=null; +Error 8164 (HY000): Invalid option value for disk_quota +import into t from '/file.csv' with thread='aa'; +Error 8164 (HY000): Invalid option value for thread +import into t from '/file.csv' with thread=0; +Error 8164 (HY000): Invalid option value for thread +import into t from '/file.csv' with thread=false; +Error 8164 (HY000): Invalid option value for thread +import into t from '/file.csv' with thread=-100; +Error 8164 (HY000): Invalid option value for thread +import into t from '/file.csv' with thread=null; +Error 8164 (HY000): Invalid option value for thread +import into t from '/file.csv' with max_write_speed='aa'; +Error 8164 (HY000): Invalid option value for max_write_speed +import into t from '/file.csv' with max_write_speed='11aa'; +Error 8164 (HY000): Invalid option value for max_write_speed +import into t from '/file.csv' with max_write_speed=null; +Error 8164 (HY000): Invalid option value for max_write_speed +import into t from '/file.csv' with max_write_speed=-1; +Error 8164 (HY000): Invalid option value for max_write_speed +import into t from '/file.csv' with max_write_speed=false; +Error 8164 (HY000): Invalid option value for max_write_speed +import into t from '/file.csv' with checksum_table=''; +Error 8164 (HY000): Invalid option value for checksum_table +import into t from '/file.csv' with checksum_table=123; +Error 8164 (HY000): Invalid option value for checksum_table +import into t from '/file.csv' with checksum_table=false; +Error 8164 (HY000): Invalid option value for checksum_table +import into t from '/file.csv' with checksum_table=null; +Error 8164 (HY000): Invalid option value for checksum_table +import into t from '/file.csv' with record_errors='aa'; +Error 8164 (HY000): Invalid option value for record_errors +import into t from '/file.csv' with record_errors='111aa'; +Error 8164 (HY000): Invalid option value for record_errors +import into t from 
'/file.csv' with record_errors=-123; +Error 8164 (HY000): Invalid option value for record_errors +import into t from '/file.csv' with record_errors=null; +Error 8164 (HY000): Invalid option value for record_errors +import into t from '/file.csv' with record_errors=true; +Error 8164 (HY000): Invalid option value for record_errors +import into t from '/file.csv' with cloud_storage_uri=123; +Error 8164 (HY000): Invalid option value for cloud_storage_uri +import into t from '/file.csv' with cloud_storage_uri=':'; +Error 8164 (HY000): Invalid option value for cloud_storage_uri +import into t from '/file.csv' with cloud_storage_uri='sdsd'; +Error 8164 (HY000): Invalid option value for cloud_storage_uri +import into t from '/file.csv' with cloud_storage_uri='http://sdsd'; +Error 8164 (HY000): Invalid option value for cloud_storage_uri +import into t from '/file.csv' format 'parquet' with character_set='utf8'; +Error 8166 (HY000): Unsupported option character_set for non-CSV format +import into t from '/file.csv' format 'sql' with character_set='utf8'; +Error 8166 (HY000): Unsupported option character_set for non-CSV format +import into t from '/file.csv' format 'parquet' with fields_terminated_by='a'; +Error 8166 (HY000): Unsupported option fields_terminated_by for non-CSV format +import into t from '/file.csv' format 'sql' with fields_terminated_by='a'; +Error 8166 (HY000): Unsupported option fields_terminated_by for non-CSV format +import into t from '/file.csv' format 'parquet' with fields_enclosed_by='a'; +Error 8166 (HY000): Unsupported option fields_enclosed_by for non-CSV format +import into t from '/file.csv' format 'sql' with fields_enclosed_by='a'; +Error 8166 (HY000): Unsupported option fields_enclosed_by for non-CSV format +import into t from '/file.csv' format 'parquet' with fields_escaped_by='a'; +Error 8166 (HY000): Unsupported option fields_escaped_by for non-CSV format +import into t from '/file.csv' format 'sql' with fields_escaped_by='a'; +Error 8166 
(HY000): Unsupported option fields_escaped_by for non-CSV format +import into t from '/file.csv' format 'parquet' with fields_defined_null_by='a'; +Error 8166 (HY000): Unsupported option fields_defined_null_by for non-CSV format +import into t from '/file.csv' format 'sql' with fields_defined_null_by='a'; +Error 8166 (HY000): Unsupported option fields_defined_null_by for non-CSV format +import into t from '/file.csv' format 'parquet' with lines_terminated_by='a'; +Error 8166 (HY000): Unsupported option lines_terminated_by for non-CSV format +import into t from '/file.csv' format 'sql' with lines_terminated_by='a'; +Error 8166 (HY000): Unsupported option lines_terminated_by for non-CSV format +import into t from '/file.csv' format 'parquet' with skip_rows=1; +Error 8166 (HY000): Unsupported option skip_rows for non-CSV format +import into t from '/file.csv' format 'sql' with skip_rows=1; +Error 8166 (HY000): Unsupported option skip_rows for non-CSV format +import into t from '/file.csv' format 'parquet' with split_file; +Error 8166 (HY000): Unsupported option split_file for non-CSV format +import into t from '/file.csv' format 'sql' with split_file; +Error 8166 (HY000): Unsupported option split_file for non-CSV format +import into t from ''; +Error 8156 (HY000): The value of INFILE must not be empty when LOAD DATA from LOCAL +import into t from '/a.csv' format 'xx'; +Error 8157 (HY000): The FORMAT 'xx' is not supported diff --git a/tests/integrationtest/r/executor/index_advise.result b/tests/integrationtest/r/executor/index_advise.result new file mode 100644 index 0000000000000..01b62af4a386c --- /dev/null +++ b/tests/integrationtest/r/executor/index_advise.result @@ -0,0 +1,198 @@ +set @@session.tidb_opt_advanced_join_hint=0; +drop table if exists t1, t2; +create table t1( +pnbrn_cnaps varchar(5) not null, +new_accno varchar(18) not null, +primary key(pnbrn_cnaps,new_accno) nonclustered +); +create table t2( +pnbrn_cnaps varchar(5) not null, +txn_accno varchar(18) 
not null, +txn_dt date not null, +yn_frz varchar(1) default null +); +insert into t1(pnbrn_cnaps,new_accno) values ("40001","123"); +insert into t2(pnbrn_cnaps, txn_accno, txn_dt, yn_frz) values ("40001","123","20221201","0"); +set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'; +explain format='brief' update +/*+ inl_join(a) */ +t2 b, +( +select t1.pnbrn_cnaps, +t1.new_accno +from t1 +where t1.pnbrn_cnaps = '40001' +) a +set b.yn_frz = '1' +where b.txn_dt = str_to_date('20221201', '%Y%m%d') +and b.pnbrn_cnaps = a.pnbrn_cnaps +and b.txn_accno = a.new_accno; +id estRows task access object operator info +Update N/A root N/A +└─IndexJoin 12.50 root inner join, inner:IndexReader, outer key:executor__index_advise.t2.pnbrn_cnaps, executor__index_advise.t2.txn_accno, inner key:executor__index_advise.t1.pnbrn_cnaps, executor__index_advise.t1.new_accno, equal cond:eq(executor__index_advise.t2.pnbrn_cnaps, executor__index_advise.t1.pnbrn_cnaps), eq(executor__index_advise.t2.txn_accno, executor__index_advise.t1.new_accno) + ├─TableReader(Build) 10.00 root data:Selection + │ └─Selection 10.00 cop[tikv] eq(executor__index_advise.t2.txn_dt, 2022-12-01) + │ └─TableFullScan 10000.00 cop[tikv] table:b keep order:false, stats:pseudo + └─IndexReader(Probe) 10.00 root index:Selection + └─Selection 10.00 cop[tikv] eq(executor__index_advise.t1.pnbrn_cnaps, "40001") + └─IndexRangeScan 10.00 cop[tikv] table:t1, index:PRIMARY(pnbrn_cnaps, new_accno) range: decided by [eq(executor__index_advise.t1.pnbrn_cnaps, executor__index_advise.t2.pnbrn_cnaps) eq(executor__index_advise.t1.new_accno, executor__index_advise.t2.txn_accno)], keep order:false, stats:pseudo +set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'; +explain format='brief' update +/*+ inl_join(a) */ +t2 b, +( +select t1.pnbrn_cnaps, +t1.new_accno +from t1 +where t1.pnbrn_cnaps = '40001' +) a +set b.yn_frz = '1' +where b.txn_dt = str_to_date('20221201', '%Y%m%d') +and b.pnbrn_cnaps = a.pnbrn_cnaps +and b.txn_accno 
= a.new_accno; +id estRows task access object operator info +Update N/A root N/A +└─HashJoin 12.50 root inner join, equal:[eq(executor__index_advise.t2.pnbrn_cnaps, executor__index_advise.t1.pnbrn_cnaps) eq(executor__index_advise.t2.txn_accno, executor__index_advise.t1.new_accno)] + ├─IndexReader(Build) 10.00 root index:IndexRangeScan + │ └─IndexRangeScan 10.00 cop[tikv] table:t1, index:PRIMARY(pnbrn_cnaps, new_accno) range:["40001","40001"], keep order:false, stats:pseudo + └─TableReader(Probe) 10.00 root data:Selection + └─Selection 10.00 cop[tikv] eq(executor__index_advise.t2.txn_dt, 2022-12-01) + └─TableFullScan 10000.00 cop[tikv] table:b keep order:false, stats:pseudo +set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'; +update +/*+ inl_join(a) */ +t2 b, +( +select t1.pnbrn_cnaps, +t1.new_accno +from t1 +where t1.pnbrn_cnaps = '40001' +) a +set b.yn_frz = '1' +where b.txn_dt = str_to_date('20221201', '%Y%m%d') +and b.pnbrn_cnaps = a.pnbrn_cnaps +and b.txn_accno = a.new_accno; +select yn_frz from t2; +yn_frz +1 +set @@session.tidb_opt_advanced_join_hint=default; +set @@session.tidb_enable_inl_join_inner_multi_pattern=default; +set @@tidb_opt_advanced_join_hint=0; +drop table if exists tbl_miss, tbl_src; +create table tbl_miss( +id bigint(20) unsigned not null, +txn_dt date default null, +perip_sys_uuid varchar(32) not null, +rvrs_idr varchar(1) not null, +primary key(id) clustered, +key idx1 (txn_dt, perip_sys_uuid, rvrs_idr) +); +insert into tbl_miss (id,txn_dt,perip_sys_uuid,rvrs_idr) values (1,"20221201","123","1"); +create table tbl_src( +txn_dt date default null, +uuid varchar(32) not null, +rvrs_idr char(1), +expd_inf varchar(5000), +primary key(uuid,rvrs_idr) nonclustered +); +insert into tbl_src (txn_dt,uuid,rvrs_idr) values ("20221201","123","1"); +set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'; +explain format='brief' select /*+ use_index(mis,) inl_join(src) */ +* +from tbl_miss mis +,tbl_src src +where src.txn_dt >= 
str_to_date('20221201', '%Y%m%d') +and mis.id between 1 and 10000 +and mis.perip_sys_uuid = src.uuid +and mis.rvrs_idr = src.rvrs_idr +and mis.txn_dt = src.txn_dt +and ( +case when isnull(src.expd_inf) = 1 then '' +else +substr(concat_ws('',src.expd_inf,'~~'), +instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, +instr(substr(concat_ws('',src.expd_inf,'~~'), +instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) +end +) != '01'; +id estRows task access object operator info +HashJoin 104.17 root inner join, equal:[eq(executor__index_advise.tbl_miss.perip_sys_uuid, executor__index_advise.tbl_src.uuid) eq(executor__index_advise.tbl_miss.rvrs_idr, executor__index_advise.tbl_src.rvrs_idr) eq(executor__index_advise.tbl_miss.txn_dt, executor__index_advise.tbl_src.txn_dt)] +├─TableReader(Build) 83.33 root data:Selection +│ └─Selection 83.33 cop[tikv] ge(executor__index_advise.tbl_miss.txn_dt, 2022-12-01), not(isnull(executor__index_advise.tbl_miss.txn_dt)) +│ └─TableRangeScan 250.00 cop[tikv] table:mis range:[1,10000], keep order:false, stats:pseudo +└─Selection(Probe) 2666.67 root ne(case(eq(isnull(executor__index_advise.tbl_src.expd_inf), 1), "", substr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), plus(instr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), "~~a4"), 4), minus(instr(substr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), plus(instr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), "~~a4"), 4), length(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"))), "~~"), 1))), "01") + └─TableReader 3333.33 root data:Selection + └─Selection 3333.33 cop[tikv] ge(executor__index_advise.tbl_src.txn_dt, 2022-12-01), not(isnull(executor__index_advise.tbl_src.txn_dt)) + └─TableFullScan 10000.00 cop[tikv] table:src keep order:false, stats:pseudo +set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'; +explain format='brief' select /*+ use_index(mis,) 
inl_join(src) */ +* +from tbl_miss mis +,tbl_src src +where src.txn_dt >= str_to_date('20221201', '%Y%m%d') +and mis.id between 1 and 10000 +and mis.perip_sys_uuid = src.uuid +and mis.rvrs_idr = src.rvrs_idr +and mis.txn_dt = src.txn_dt +and ( +case when isnull(src.expd_inf) = 1 then '' +else +substr(concat_ws('',src.expd_inf,'~~'), +instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, +instr(substr(concat_ws('',src.expd_inf,'~~'), +instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) +end +) != '01'; +id estRows task access object operator info +IndexJoin 104.17 root inner join, inner:Selection, outer key:executor__index_advise.tbl_miss.perip_sys_uuid, executor__index_advise.tbl_miss.rvrs_idr, inner key:executor__index_advise.tbl_src.uuid, executor__index_advise.tbl_src.rvrs_idr, equal cond:eq(executor__index_advise.tbl_miss.perip_sys_uuid, executor__index_advise.tbl_src.uuid), eq(executor__index_advise.tbl_miss.rvrs_idr, executor__index_advise.tbl_src.rvrs_idr), eq(executor__index_advise.tbl_miss.txn_dt, executor__index_advise.tbl_src.txn_dt) +├─TableReader(Build) 83.33 root data:Selection +│ └─Selection 83.33 cop[tikv] ge(executor__index_advise.tbl_miss.txn_dt, 2022-12-01), not(isnull(executor__index_advise.tbl_miss.txn_dt)) +│ └─TableRangeScan 250.00 cop[tikv] table:mis range:[1,10000], keep order:false, stats:pseudo +└─Selection(Probe) 222222.22 root ne(case(eq(isnull(executor__index_advise.tbl_src.expd_inf), 1), "", substr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), plus(instr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), "~~a4"), 4), minus(instr(substr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), plus(instr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), "~~a4"), 4), length(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"))), "~~"), 1))), "01") + └─IndexLookUp 83.33 root + ├─IndexRangeScan(Build) 83.33 cop[tikv] table:src, 
index:PRIMARY(uuid, rvrs_idr) range: decided by [eq(executor__index_advise.tbl_src.uuid, executor__index_advise.tbl_miss.perip_sys_uuid) eq(executor__index_advise.tbl_src.rvrs_idr, executor__index_advise.tbl_miss.rvrs_idr)], keep order:false, stats:pseudo + └─Selection(Probe) 83.33 cop[tikv] ge(executor__index_advise.tbl_src.txn_dt, 2022-12-01), not(isnull(executor__index_advise.tbl_src.txn_dt)) + └─TableRowIDScan 83.33 cop[tikv] table:src keep order:false, stats:pseudo +select /*+ use_index(mis,) inl_join(src) */ +* +from tbl_miss mis +,tbl_src src +where src.txn_dt >= str_to_date('20221201', '%Y%m%d') +and mis.id between 1 and 10000 +and mis.perip_sys_uuid = src.uuid +and mis.rvrs_idr = src.rvrs_idr +and mis.txn_dt = src.txn_dt +and ( +case when isnull(src.expd_inf) = 1 then '' +else +substr(concat_ws('',src.expd_inf,'~~'), +instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, +instr(substr(concat_ws('',src.expd_inf,'~~'), +instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) +end +) != '01'; +id txn_dt perip_sys_uuid rvrs_idr txn_dt uuid rvrs_idr expd_inf +1 2022-12-01 123 1 2022-12-01 123 1 NULL +set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'; +select /*+ use_index(mis,) inl_join(src) */ +* +from tbl_miss mis +,tbl_src src +where src.txn_dt >= str_to_date('20221201', '%Y%m%d') +and mis.id between 1 and 10000 +and mis.perip_sys_uuid = src.uuid +and mis.rvrs_idr = src.rvrs_idr +and mis.txn_dt = src.txn_dt +and ( +case when isnull(src.expd_inf) = 1 then '' +else +substr(concat_ws('',src.expd_inf,'~~'), +instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, +instr(substr(concat_ws('',src.expd_inf,'~~'), +instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) +end +) != '01'; +id txn_dt perip_sys_uuid rvrs_idr txn_dt uuid rvrs_idr expd_inf +1 2022-12-01 123 1 2022-12-01 123 1 NULL +set @@tidb_opt_advanced_join_hint=default; +set 
@@session.tidb_enable_inl_join_inner_multi_pattern=default; diff --git a/tests/integrationtest/r/executor/index_lookup_merge_join.result b/tests/integrationtest/r/executor/index_lookup_merge_join.result new file mode 100644 index 0000000000000..9e472f04fe0be --- /dev/null +++ b/tests/integrationtest/r/executor/index_lookup_merge_join.result @@ -0,0 +1,229 @@ +drop table if exists t; +CREATE TABLE `t` (`col_tinyint_key_signed` tinyint(4) DEFAULT NULL,`col_year_key_signed` year(4) DEFAULT NULL,KEY `col_tinyint_key_signed` (`col_tinyint_key_signed`),KEY `col_year_key_signed` (`col_year_key_signed`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t values(-100,NULL); +select /*+ inl_merge_join(t1, t2) */ count(*) from t t1 right join t t2 on t1. `col_year_key_signed` = t2. `col_tinyint_key_signed`; +count(*) +1 +drop table if exists t1, t2; +create table t1(a int, b int, c int, d int, primary key(a,b,c)); +create table t2(a int, b int, c int, d int, primary key(a,b,c)); +insert into t1 values(1,1,1,1),(2,2,2,2),(3,3,3,3); +insert into t2 values(1,1,1,1),(2,2,2,2); +explain format = 'brief' select /*+ inl_merge_join(t1,t2) */ * from t1 left join t2 on t1.a = t2.a and t1.c = t2.c and t1.b = t2.b order by t1.a desc; +id estRows task access object operator info +IndexMergeJoin 100000000.00 root left outer join, inner:Projection, outer key:executor__index_lookup_merge_join.t1.a, executor__index_lookup_merge_join.t1.c, executor__index_lookup_merge_join.t1.b, inner key:executor__index_lookup_merge_join.t2.a, executor__index_lookup_merge_join.t2.c, executor__index_lookup_merge_join.t2.b +├─Projection(Build) 10000.00 root executor__index_lookup_merge_join.t1.a, executor__index_lookup_merge_join.t1.b, executor__index_lookup_merge_join.t1.c, executor__index_lookup_merge_join.t1.d +│ └─IndexLookUp 10000.00 root +│ ├─IndexFullScan(Build) 10000.00 cop[tikv] table:t1, index:PRIMARY(a, b, c) keep order:true, desc, stats:pseudo +│ └─TableRowIDScan(Probe) 
10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +└─Projection(Probe) 10000.00 root executor__index_lookup_merge_join.t2.a, executor__index_lookup_merge_join.t2.b, executor__index_lookup_merge_join.t2.c, executor__index_lookup_merge_join.t2.d + └─IndexLookUp 10000.00 root + ├─IndexRangeScan(Build) 10000.00 cop[tikv] table:t2, index:PRIMARY(a, b, c) range: decided by [eq(executor__index_lookup_merge_join.t2.a, executor__index_lookup_merge_join.t1.a) eq(executor__index_lookup_merge_join.t2.b, executor__index_lookup_merge_join.t1.b) eq(executor__index_lookup_merge_join.t2.c, executor__index_lookup_merge_join.t1.c)], keep order:true, desc, stats:pseudo + └─TableRowIDScan(Probe) 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo +select /*+ inl_merge_join(t1,t2) */ * from t1 left join t2 on t1.a = t2.a and t1.c = t2.c and t1.b = t2.b order by t1.a desc; +a b c d a b c d +3 3 3 3 NULL NULL NULL NULL +2 2 2 2 2 2 2 2 +1 1 1 1 1 1 1 1 +drop table if exists t1, t2; +create table t1 (c_int int, primary key(c_int)); +create table t2 (c_int int, unique key (c_int)) partition by hash (c_int) partitions 4; +insert into t1 values (1), (2), (3), (4), (5); +insert into t2 select * from t1; +begin; +delete from t1 where c_int = 1; +select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int; +c_int c_int +2 2 +3 3 +4 4 +5 5 +select /*+ INL_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int; +c_int c_int +2 2 +3 3 +4 4 +5 5 +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int; +c_int c_int +2 2 +3 3 +4 4 +5 5 +commit; +drop table if exists t1, t2; +create table t1 (id bigint(20) unsigned, primary key(id)); +create table t2 (id bigint(20) unsigned); +insert into t1 values (8738875760185212610); +insert into t1 values (9814441339970117597); +insert into t2 values (8738875760185212610); +insert into t2 values (9814441339970117597); +select /*+ INL_MERGE_JOIN(t1, t2) */ * from t2 left join t1 on t1.id = t2.id order by t1.id; 
+id id +8738875760185212610 8738875760185212610 +9814441339970117597 9814441339970117597 +set @@tidb_opt_advanced_join_hint=0; +set @@tidb_partition_prune_mode= 'static'; +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +create table t2 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +insert into t1 values (1, 'Alice'); +insert into t2 values (1, 'Bob'); +analyze table t1, t2; +select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +c_int c_str c_int c_str +1 Alice 1 Bob +explain format = 'brief' select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +id estRows task access object operator info +HashJoin 1.25 root inner join, equal:[eq(executor__index_lookup_merge_join.t1.c_int, executor__index_lookup_merge_join.t2.c_int)], other cond:lt(executor__index_lookup_merge_join.t1.c_str, executor__index_lookup_merge_join.t2.c_str) +├─TableReader(Build) 1.00 root data:Selection +│ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t2.c_str)) +│ └─TableFullScan 1.00 cop[tikv] table:t2, partition:p0 keep order:false +└─PartitionUnion(Probe) 9991.00 root + ├─TableReader 1.00 root data:Selection + │ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) + │ └─TableFullScan 1.00 cop[tikv] table:t1, partition:p0 keep order:false + └─TableReader 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) + └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p1 keep order:false, stats:pseudo +show warnings; +Level Code Message +Warning 1815 Optimizer Hint /*+ INL_MERGE_JOIN(t1, t2) */ is 
inapplicable +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +c_int c_str c_int c_str +1 Alice 1 Bob +explain format = 'brief' select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +id estRows task access object operator info +IndexHashJoin 1.25 root inner join, inner:TableReader, outer key:executor__index_lookup_merge_join.t1.c_int, inner key:executor__index_lookup_merge_join.t2.c_int, equal cond:eq(executor__index_lookup_merge_join.t1.c_int, executor__index_lookup_merge_join.t2.c_int), other cond:lt(executor__index_lookup_merge_join.t1.c_str, executor__index_lookup_merge_join.t2.c_str) +├─PartitionUnion(Build) 9991.00 root +│ ├─TableReader 1.00 root data:Selection +│ │ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) +│ │ └─TableFullScan 1.00 cop[tikv] table:t1, partition:p0 keep order:false +│ └─TableReader 9990.00 root data:Selection +│ └─Selection 9990.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) +│ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p1 keep order:false, stats:pseudo +└─TableReader(Probe) 1.25 root data:Selection + └─Selection 1.25 cop[tikv] not(isnull(executor__index_lookup_merge_join.t2.c_str)) + └─TableRangeScan 1.25 cop[tikv] table:t2, partition:p0 range: decided by [executor__index_lookup_merge_join.t1.c_int], keep order:false +select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +c_int c_str c_int c_str +1 Alice 1 Bob +explain format = 'brief' select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +id estRows task access object operator info +IndexJoin 1.25 root inner join, inner:TableReader, outer key:executor__index_lookup_merge_join.t1.c_int, inner key:executor__index_lookup_merge_join.t2.c_int, equal 
cond:eq(executor__index_lookup_merge_join.t1.c_int, executor__index_lookup_merge_join.t2.c_int), other cond:lt(executor__index_lookup_merge_join.t1.c_str, executor__index_lookup_merge_join.t2.c_str) +├─PartitionUnion(Build) 9991.00 root +│ ├─TableReader 1.00 root data:Selection +│ │ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) +│ │ └─TableFullScan 1.00 cop[tikv] table:t1, partition:p0 keep order:false +│ └─TableReader 9990.00 root data:Selection +│ └─Selection 9990.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) +│ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p1 keep order:false, stats:pseudo +└─TableReader(Probe) 1.25 root data:Selection + └─Selection 1.25 cop[tikv] not(isnull(executor__index_lookup_merge_join.t2.c_str)) + └─TableRangeScan 1.25 cop[tikv] table:t2, partition:p0 range: decided by [executor__index_lookup_merge_join.t1.c_int], keep order:false +set @@tidb_partition_prune_mode= 'dynamic'; +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +create table t2 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +insert into t1 values (1, 'Alice'); +insert into t2 values (1, 'Bob'); +analyze table t1, t2; +select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +c_int c_str c_int c_str +1 Alice 1 Bob +explain format = 'brief' select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +id estRows task access object operator info +MergeJoin 1.00 root inner join, left key:executor__index_lookup_merge_join.t1.c_int, right key:executor__index_lookup_merge_join.t2.c_int, other 
cond:lt(executor__index_lookup_merge_join.t1.c_str, executor__index_lookup_merge_join.t2.c_str) +├─TableReader(Build) 1.00 root partition:p0 data:Selection +│ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t2.c_str)) +│ └─TableFullScan 1.00 cop[tikv] table:t2 keep order:true +└─TableReader(Probe) 1.00 root partition:all data:Selection + └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) + └─TableFullScan 1.00 cop[tikv] table:t1 keep order:true +show warnings; +Level Code Message +Warning 1815 Optimizer Hint /*+ INL_MERGE_JOIN(t1, t2) */ is inapplicable +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +c_int c_str c_int c_str +1 Alice 1 Bob +explain format = 'brief' select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +id estRows task access object operator info +IndexHashJoin 1.00 root inner join, inner:TableReader, outer key:executor__index_lookup_merge_join.t2.c_int, inner key:executor__index_lookup_merge_join.t1.c_int, equal cond:eq(executor__index_lookup_merge_join.t2.c_int, executor__index_lookup_merge_join.t1.c_int), other cond:lt(executor__index_lookup_merge_join.t1.c_str, executor__index_lookup_merge_join.t2.c_str) +├─TableReader(Build) 1.00 root partition:p0 data:Selection +│ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t2.c_str)) +│ └─TableFullScan 1.00 cop[tikv] table:t2 keep order:false +└─TableReader(Probe) 1.00 root partition:all data:Selection + └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) + └─TableRangeScan 1.00 cop[tikv] table:t1 range: decided by [executor__index_lookup_merge_join.t2.c_int], keep order:false +select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +c_int c_str c_int c_str +1 Alice 1 Bob +explain format = 'brief' select /*+ 
INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +id estRows task access object operator info +IndexJoin 1.00 root inner join, inner:TableReader, outer key:executor__index_lookup_merge_join.t2.c_int, inner key:executor__index_lookup_merge_join.t1.c_int, equal cond:eq(executor__index_lookup_merge_join.t2.c_int, executor__index_lookup_merge_join.t1.c_int), other cond:lt(executor__index_lookup_merge_join.t1.c_str, executor__index_lookup_merge_join.t2.c_str) +├─TableReader(Build) 1.00 root partition:p0 data:Selection +│ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t2.c_str)) +│ └─TableFullScan 1.00 cop[tikv] table:t2 keep order:false +└─TableReader(Probe) 1.00 root partition:all data:Selection + └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) + └─TableRangeScan 1.00 cop[tikv] table:t1 range: decided by [executor__index_lookup_merge_join.t2.c_int], keep order:false +set @@tidb_opt_advanced_join_hint=DEFAULT; +set @@tidb_partition_prune_mode= DEFAULT; +drop table if exists t, s; +create table s(a int, index(a)); +create table t(a int); +insert into t values(1); +select /*+ hash_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1; +a a +1 NULL +select /*+ inl_merge_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1; +a a +1 NULL +drop table if exists t1, t2; +CREATE TABLE `t1` (`id` bigint(20) NOT NULL AUTO_INCREMENT, `t2id` bigint(20) DEFAULT NULL, PRIMARY KEY (`id`), KEY `t2id` (`t2id`)); +INSERT INTO `t1` VALUES (1,NULL); +CREATE TABLE `t2` (`id` bigint(20) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`)); +SELECT /*+ INL_MERGE_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id; +1 +1 +SELECT /*+ HASH_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id; +1 +1 +drop table if exists x; +CREATE TABLE `x` ( `a` enum('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`)); +insert into x values("x"),("x"),("b"),("y"); +SELECT /*+ merge_join 
(t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; +a a +b b +x x +x x +x x +x x +y y +SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; +a a +b b +x x +x x +x x +x x +y y +drop table if exists x; +CREATE TABLE `x` ( `a` set('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`)); +insert into x values("x"),("x"),("b"),("y"); +SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; +a a +b b +x x +x x +x x +x x +y y +SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; +a a +b b +x x +x x +x x +x x +y y diff --git a/tests/integrationtest/r/executor/infoschema_reader.result b/tests/integrationtest/r/executor/infoschema_reader.result new file mode 100644 index 0000000000000..15ab1dfb7ce46 --- /dev/null +++ b/tests/integrationtest/r/executor/infoschema_reader.result @@ -0,0 +1,285 @@ +select * from information_schema.profiling; +QUERY_ID SEQ STATE DURATION CPU_USER CPU_SYSTEM CONTEXT_VOLUNTARY CONTEXT_INVOLUNTARY BLOCK_OPS_IN BLOCK_OPS_OUT MESSAGES_SENT MESSAGES_RECEIVED PAGE_FAULTS_MAJOR PAGE_FAULTS_MINOR SWAPS SOURCE_FUNCTION SOURCE_FILE SOURCE_LINE +set @@profiling=1; +select * from information_schema.profiling; +QUERY_ID SEQ STATE DURATION CPU_USER CPU_SYSTEM CONTEXT_VOLUNTARY CONTEXT_INVOLUNTARY BLOCK_OPS_IN BLOCK_OPS_OUT MESSAGES_SENT MESSAGES_RECEIVED PAGE_FAULTS_MAJOR PAGE_FAULTS_MINOR SWAPS SOURCE_FUNCTION SOURCE_FILE SOURCE_LINE +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +select * from information_schema.SCHEMATA where schema_name='mysql'; +CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME SQL_PATH TIDB_PLACEMENT_POLICY_NAME +def mysql utf8mb4 utf8mb4_bin NULL NULL +drop user if exists schemata_tester; +create user schemata_tester; +select count(*) from information_schema.SCHEMATA; +count(*) +1 +select * from information_schema.SCHEMATA where schema_name='mysql'; +CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME 
DEFAULT_COLLATION_NAME SQL_PATH TIDB_PLACEMENT_POLICY_NAME +select * from information_schema.SCHEMATA where schema_name='INFORMATION_SCHEMA'; +CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME SQL_PATH TIDB_PLACEMENT_POLICY_NAME +def INFORMATION_SCHEMA utf8mb4 utf8mb4_bin NULL NULL +CREATE ROLE r_mysql_priv; +GRANT ALL PRIVILEGES ON mysql.* TO r_mysql_priv; +GRANT r_mysql_priv TO schemata_tester; +set role r_mysql_priv; +select count(*) from information_schema.SCHEMATA; +count(*) +2 +select * from information_schema.SCHEMATA; +CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME SQL_PATH TIDB_PLACEMENT_POLICY_NAME +def INFORMATION_SCHEMA utf8mb4 utf8mb4_bin NULL NULL +def mysql utf8mb4 utf8mb4_bin NULL NULL +drop table if exists executor__infoschema_reader.t; +create table executor__infoschema_reader.t (a int, b int, primary key(a), key k1(b)); +select index_id from information_schema.tidb_indexes where table_schema = 'executor__infoschema_reader' and table_name = 't'; +index_id +0 +1 +select tidb_table_id > 0 from information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't'; +tidb_table_id > 0 +1 +drop database if exists `foo`; +CREATE DATABASE `foo` DEFAULT CHARACTER SET = 'utf8mb4'; +select default_character_set_name, default_collation_name FROM information_schema.SCHEMATA WHERE schema_name = 'foo'; +default_character_set_name default_collation_name +utf8mb4 utf8mb4_bin +drop database `foo`; +drop view if exists executor__infoschema_reader.v1; +CREATE DEFINER='root'@'localhost' VIEW executor__infoschema_reader.v1 AS SELECT 1; +select TABLE_COLLATION is null from INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='VIEW'; +TABLE_COLLATION is null +1 +1 +SELECT * FROM information_schema.views WHERE table_schema='executor__infoschema_reader' AND table_name='v1'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE CHARACTER_SET_CLIENT 
COLLATION_CONNECTION +def executor__infoschema_reader v1 SELECT 1 AS `1` CASCADED NO root@localhost DEFINER utf8mb4 utf8mb4_general_ci +SELECT table_catalog, table_schema, table_name, table_type, engine, version, row_format, table_rows, avg_row_length, data_length, max_data_length, index_length, data_free, auto_increment, update_time, check_time, table_collation, checksum, create_options, table_comment FROM information_schema.tables WHERE table_schema='executor__infoschema_reader' AND table_name='v1'; +table_catalog table_schema table_name table_type engine version row_format table_rows avg_row_length data_length max_data_length index_length data_free auto_increment update_time check_time table_collation checksum create_options table_comment +def executor__infoschema_reader v1 VIEW NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL VIEW +drop table if exists t; +create table t (bit bit(10) DEFAULT b'100'); +SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'executor__infoschema_reader' AND TABLE_NAME = 't'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT GENERATION_EXPRESSION +def executor__infoschema_reader t bit 1 b'100' YES bit NULL NULL 10 0 NULL NULL NULL bit(10) select,insert,update,references +drop table if exists t; +set time_zone='+08:00'; +drop table if exists t; +create table t (b timestamp(3) NOT NULL DEFAULT '1970-01-01 08:00:01.000'); +select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='executor__infoschema_reader'; +column_default +1970-01-01 08:00:01.000 +set time_zone='+04:00'; +select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='executor__infoschema_reader'; +column_default +1970-01-01 
04:00:01.000 +set time_zone=default; +drop table if exists t; +create table t (a bit DEFAULT (rand())); +select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='executor__infoschema_reader'; +column_default +rand() +drop table if exists t; +CREATE TABLE t (`COL3` bit(1) NOT NULL,b year) ; +select column_type from information_schema.columns where TABLE_SCHEMA = 'executor__infoschema_reader' and TABLE_NAME = 't'; +column_type +bit(1) +year(4) +select ordinal_position from information_schema.columns where table_schema=database() and table_name='t' and column_name='b'; +ordinal_position +2 +select * from information_schema.ENGINES; +ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +InnoDB DEFAULT Supports transactions, row-level locking, and foreign keys YES YES YES +drop table if exists t; +create table t (a varchar(255) collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +255 255 +drop table t; +create table t (a varchar(255) collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +255 1020 +drop table t; +create table t (a varchar(255) collate utf8_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +255 765 +drop table t; +create table t (a char(10) collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +10 10 +drop table t; +create table t (a char(10) collate utf8mb4_bin); +select 
character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +10 40 +drop table t; +create table t (a set('a', 'b', 'cccc') collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +8 8 +drop table t; +create table t (a set('a', 'b', 'cccc') collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +8 32 +drop table t; +create table t (a enum('a', 'b', 'cccc') collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +4 4 +drop table t; +create table t (a enum('a', 'b', 'cccc') collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +4 16 +drop table t; +set global tidb_ddl_enable_fast_reorg = false; +drop database if exists test_ddl_jobs; +create database test_ddl_jobs; +select db_name, job_type from information_schema.DDL_JOBS limit 1; +db_name job_type +test_ddl_jobs create schema +use test_ddl_jobs; +create table t (a int); +select db_name, table_name, job_type from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and table_name = 't'; +db_name table_name job_type +test_ddl_jobs t create table +select job_type from information_schema.DDL_JOBS group by job_type having job_type = 'create table'; +job_type +create table +select distinct job_type from information_schema.DDL_JOBS where job_type = 
'create table' and start_time > str_to_date('20190101','%Y%m%d%H%i%s'); +job_type +create table +drop user if exists DDL_JOBS_tester; +create user DDL_JOBS_tester; +select DB_NAME, TABLE_NAME from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and TABLE_NAME = 't'; +DB_NAME TABLE_NAME +CREATE ROLE r_priv; +GRANT ALL PRIVILEGES ON test_ddl_jobs.* TO r_priv; +GRANT r_priv TO DDL_JOBS_tester; +set role r_priv; +select DB_NAME, TABLE_NAME from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and TABLE_NAME = 't'; +DB_NAME TABLE_NAME +test_ddl_jobs t +create table tt (a int); +alter table tt add index t(a), add column b int; +select db_name, table_name, job_type from information_schema.DDL_JOBS limit 3; +db_name table_name job_type +test_ddl_jobs tt alter table multi-schema change +test_ddl_jobs tt add column /* subjob */ +test_ddl_jobs tt add index /* subjob */ /* txn */ +drop database test_ddl_jobs; +use executor__infoschema_reader; +set global tidb_ddl_enable_fast_reorg = default; +select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME='stats_meta' and COLUMN_NAME='table_id'; +CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME +def mysql tbl def mysql stats_meta table_id 1 NULL NULL NULL NULL +create user key_column_tester; +select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME != 'CLUSTER_SLOW_QUERY'; +CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME +CREATE ROLE r_stats_meta ; +GRANT ALL PRIVILEGES ON mysql.stats_meta TO r_stats_meta; +GRANT r_stats_meta TO key_column_tester; +set role r_stats_meta; +select count(*)>0 from information_schema.KEY_COLUMN_USAGE where 
TABLE_NAME='stats_meta'; +count(*)>0 +1 +drop table if exists e, e2; +CREATE TABLE e ( id INT NOT NULL, fname VARCHAR(30), lname VARCHAR(30)) PARTITION BY RANGE (id) ( +PARTITION p0 VALUES LESS THAN (50), +PARTITION p1 VALUES LESS THAN (100), +PARTITION p2 VALUES LESS THAN (150), +PARTITION p3 VALUES LESS THAN (MAXVALUE)); +CREATE TABLE e2 ( id INT NOT NULL, fname VARCHAR(30), lname VARCHAR(30)); +SELECT PARTITION_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 'e' and table_schema=(select database()); +PARTITION_NAME TABLE_ROWS +p0 0 +p1 0 +p2 0 +p3 0 +INSERT INTO e VALUES (1669, "Jim", "Smith"), (337, "Mary", "Jones"), (16, "Frank", "White"), (2005, "Linda", "Black"); +set tidb_enable_exchange_partition='on'; +ALTER TABLE e EXCHANGE PARTITION p0 WITH TABLE e2; +INSERT INTO e VALUES (41, "Michael", "Green"); +analyze table e; +SELECT PARTITION_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 'e'; +PARTITION_NAME TABLE_ROWS +p0 1 +p1 0 +p2 0 +p3 3 +set tidb_enable_exchange_partition=default; +select count(*) > 0 from information_schema.`METRICS_TABLES`; +count(*) > 0 +1 +select * from information_schema.`METRICS_TABLES` where table_name='tidb_qps'; +TABLE_NAME PROMQL LABELS QUANTILE COMMENT +tidb_qps sum(rate(tidb_server_query_total{$LABEL_CONDITIONS}[$RANGE_DURATION])) by (result,type,instance) instance,type,result 0 TiDB query processing numbers per second +select * from information_schema.TABLE_CONSTRAINTS where TABLE_NAME='gc_delete_range'; +CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_SCHEMA TABLE_NAME CONSTRAINT_TYPE +def mysql delete_range_index mysql gc_delete_range UNIQUE +select * from information_schema.SESSION_VARIABLES where VARIABLE_NAME='tidb_retry_limit'; +VARIABLE_NAME VARIABLE_VALUE +tidb_retry_limit 10 +drop sequence if exists seq, seq2; +CREATE SEQUENCE seq maxvalue 10000000; +SELECT * FROM information_schema.sequences WHERE sequence_schema='executor__infoschema_reader' AND 
sequence_name='seq'; +TABLE_CATALOG SEQUENCE_SCHEMA SEQUENCE_NAME CACHE CACHE_VALUE CYCLE INCREMENT MAX_VALUE MIN_VALUE START COMMENT +def executor__infoschema_reader seq 1 1000 0 1 10000000 1 1 +DROP SEQUENCE seq; +CREATE SEQUENCE seq start = -1 minvalue -1 maxvalue 10 increment 1 cache 10; +SELECT * FROM information_schema.sequences WHERE sequence_schema='executor__infoschema_reader' AND sequence_name='seq'; +TABLE_CATALOG SEQUENCE_SCHEMA SEQUENCE_NAME CACHE CACHE_VALUE CYCLE INCREMENT MAX_VALUE MIN_VALUE START COMMENT +def executor__infoschema_reader seq 1 10 0 1 10 -1 -1 +CREATE SEQUENCE seq2 start = -9 minvalue -10 maxvalue 10 increment -1 cache 15; +SELECT * FROM information_schema.sequences WHERE sequence_schema='executor__infoschema_reader' AND sequence_name='seq2'; +TABLE_CATALOG SEQUENCE_SCHEMA SEQUENCE_NAME CACHE CACHE_VALUE CYCLE INCREMENT MAX_VALUE MIN_VALUE START COMMENT +def executor__infoschema_reader seq2 1 15 0 -1 10 -10 -9 +SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME , TABLE_TYPE, ENGINE, TABLE_ROWS FROM information_schema.tables WHERE TABLE_TYPE='SEQUENCE' AND TABLE_NAME='seq2' and table_schema='executor__infoschema_reader'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME TABLE_TYPE ENGINE TABLE_ROWS +def executor__infoschema_reader seq2 SEQUENCE InnoDB 1 +drop table if exists t_int, t_implicit, t_common; +create table t_int (a int primary key, b int); +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't_int'; +TIDB_PK_TYPE +CLUSTERED +set tidb_enable_clustered_index=int_only; +create table t_implicit (a varchar(64) primary key, b int); +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't_implicit'; +TIDB_PK_TYPE +NONCLUSTERED +set tidb_enable_clustered_index=on; +create table t_common (a varchar(64) primary key, b int); +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 
'executor__infoschema_reader' and table_name = 't_common'; +TIDB_PK_TYPE +CLUSTERED +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'INFORMATION_SCHEMA' and table_name = 'TABLES'; +TIDB_PK_TYPE +NONCLUSTERED +set tidb_enable_clustered_index=default; +drop table if exists t; +CREATE TABLE t ( id int DEFAULT NULL); +CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`1.1.1.1` SQL SECURITY DEFINER VIEW `v_test` (`type`) AS SELECT NULL AS `type` FROM `t` AS `f`; +select * from information_schema.columns where TABLE_SCHEMA = 'executor__infoschema_reader' and TABLE_NAME = 'v_test'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT GENERATION_EXPRESSION +def executor__infoschema_reader v_test type 1 NULL YES binary 0 0 NULL NULL NULL NULL NULL binary(0) select,insert,update,references +drop user if exists usageuser; +create user usageuser; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"; +GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE +'usageuser'@'%' def USAGE NO +GRANT SELECT ON *.* to usageuser; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"; +GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE +'usageuser'@'%' def SELECT NO +GRANT SELECT ON *.* to usageuser WITH GRANT OPTION; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"; +GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE +'usageuser'@'%' def SELECT YES +GRANT BACKUP_ADMIN ON *.* to usageuser; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'" ORDER BY privilege_type; +GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE +'usageuser'@'%' def BACKUP_ADMIN NO +'usageuser'@'%' def SELECT YES diff --git 
a/tests/integrationtest/r/executor/inspection_common.result b/tests/integrationtest/r/executor/inspection_common.result new file mode 100644 index 0000000000000..cb36c9d1dd0fd --- /dev/null +++ b/tests/integrationtest/r/executor/inspection_common.result @@ -0,0 +1,12 @@ +select count(*) from information_schema.inspection_rules; +count(*) +15 +select count(*) from information_schema.inspection_rules where type='inspection'; +count(*) +5 +select count(*) from information_schema.inspection_rules where type='summary'; +count(*) +10 +select count(*) from information_schema.inspection_rules where type='inspection' and type='summary'; +count(*) +0 diff --git a/tests/integrationtest/r/executor/merge_join.result b/tests/integrationtest/r/executor/merge_join.result new file mode 100644 index 0000000000000..bdd50b38aef8d --- /dev/null +++ b/tests/integrationtest/r/executor/merge_join.result @@ -0,0 +1,697 @@ +drop table if exists t; +drop table if exists t1; +create table t(c1 int, c2 int); +create table t1(c1 int, c2 int); +insert into t values(1,1),(2,2); +insert into t1 values(2,3),(4,4); +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +id estRows task access object operator info +Selection 8000.00 root or(eq(executor__merge_join.t.c1, 1), gt(executor__merge_join.t1.c2, 20)) +└─MergeJoin 10000.00 root left outer join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1 + ├─Sort(Build) 3336.66 root executor__merge_join.t1.c1 + │ └─TableReader 3336.66 root data:Selection + │ └─Selection 3336.66 cop[tikv] not(isnull(executor__merge_join.t1.c1)), or(eq(executor__merge_join.t1.c1, 1), gt(executor__merge_join.t1.c2, 20)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo 
+select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +c1 c2 c1 c2 +1 1 NULL NULL +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +id estRows task access object operator info +Selection 8000.00 root or(eq(executor__merge_join.t.c1, 1), gt(executor__merge_join.t1.c2, 20)) +└─MergeJoin 10000.00 root right outer join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t.c1 + ├─Sort(Build) 3336.66 root executor__merge_join.t1.c1 + │ └─TableReader 3336.66 root data:Selection + │ └─Selection 3336.66 cop[tikv] not(isnull(executor__merge_join.t1.c1)), or(eq(executor__merge_join.t1.c1, 1), gt(executor__merge_join.t1.c2, 20)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +c1 c2 c1 c2 +NULL NULL 1 1 +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +id estRows task access object operator info +Selection 9990.00 root or(eq(executor__merge_join.t.c1, 1), gt(executor__merge_join.t1.c2, 20)) +└─MergeJoin 12487.50 root right outer join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1 + ├─Sort(Build) 9990.00 root executor__merge_join.t.c1 + │ └─TableReader 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(executor__merge_join.t.c1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t1.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ 
TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +c1 c2 c1 c2 +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +id estRows task access object operator info +MergeJoin 12.50 root inner join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1 +├─Sort(Build) 10.00 root executor__merge_join.t1.c1 +│ └─TableReader 10.00 root data:Selection +│ └─Selection 10.00 cop[tikv] not(isnull(executor__merge_join.t1.c1)), or(eq(executor__merge_join.t1.c1, 3), 0) +│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +└─Sort(Probe) 10.00 root executor__merge_join.t.c1 + └─TableReader 10.00 root data:Selection + └─Selection 10.00 cop[tikv] not(isnull(executor__merge_join.t.c1)), or(eq(executor__merge_join.t.c1, 3), 0) + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +c1 c2 c1 c2 +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +id estRows task access object operator info +Sort 10000.00 root executor__merge_join.t1.c1 +└─MergeJoin 10000.00 root left outer join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1, left cond:[ne(executor__merge_join.t.c1, 1)] + ├─Sort(Build) 6656.67 root executor__merge_join.t1.c1 + │ └─TableReader 6656.67 root data:Selection + │ └─Selection 6656.67 cop[tikv] ne(executor__merge_join.t1.c1, 1), not(isnull(executor__merge_join.t1.c1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by 
t1.c1; +c1 c2 c1 c2 +1 1 NULL NULL +2 2 2 3 +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1 (c1 int, c2 int); +create table t2 (c1 int, c2 int); +create table t3 (c1 int, c2 int); +insert into t1 values (1,1), (2,2), (3,3); +insert into t2 values (1,1), (3,3), (5,5); +insert into t3 values (1,1), (5,5), (9,9); +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2; +c1 c2 c1 c2 c1 c2 +NULL NULL NULL NULL 5 5 +NULL NULL NULL NULL 9 9 +1 1 1 1 1 1 +drop table if exists t1; +create table t1 (c1 int); +insert into t1 values (1), (1), (1); +select/*+ TIDB_SMJ(t) */ * from t1 a join t1 b on a.c1 = b.c1; +c1 c1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +drop table if exists t; +drop table if exists t1; +create table t(c1 int, index k(c1)); +create table t1(c1 int); +insert into t values (1),(2),(3),(4),(5),(6),(7); +insert into t1 values (1),(2),(3),(4),(5),(6),(7); +select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1; +c1 +1 +2 +3 +4 +5 +6 +7 +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1; +c1 +1 +2 +3 +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1; +c1 +1 +2 +3 +begin; +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 for update) b where a.c1 = b.c1 order by a.c1; +c1 +1 +2 +3 +4 +5 +6 +7 +insert into t1 values(8); +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1; +c1 +1 +2 +3 +4 +5 +6 +7 +rollback; +drop table if exists t; +drop table if exists t1; +create table t(c1 int); +create table t1(c1 int unsigned); +insert into t values (1); +insert into t1 values (1); +select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1; +c1 +1 +drop table if exists t; +create table t(a int, b int, index a(a), index b(b)); +insert 
into t values(1, 2); +select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a; +a b +NULL 2 +drop table if exists t; +drop table if exists s; +create table t(a int, b int, primary key(a, b)); +insert into t value(1,1),(1,2),(1,3),(1,4); +create table s(a int, primary key(a)); +insert into s value(1); +select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a; +count(*) +4 +drop table if exists t; +create table t(a int); +insert into t value(1),(2); +explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +id estRows task access object operator info +Sort 100000000.00 root executor__merge_join.t.a, executor__merge_join.t.a +└─MergeJoin 100000000.00 root inner join + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +a a +1 1 +1 2 +2 1 +2 2 +drop table if exists t; +drop table if exists s; +create table t(a int, b int); +insert into t values(1,1),(1,2); +create table s(a int, b int); +insert into s values(1,1); +explain format = 'brief' select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +id estRows task access object operator info +MergeJoin 10000.00 root left outer semi join, other cond:eq(executor__merge_join.t.a, executor__merge_join.s.a), ge(executor__merge_join.s.b, executor__merge_join.t.b) +├─TableReader(Build) 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo +└─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +a in (select a from s where s.b >= t.b) +1 
+0 +drop table if exists t; +drop table if exists t1; +create table t (a int, key(a)); +create table t1 (a int, key(a)); +insert into t values (1), (2), (3); +insert into t1 values (1), (2), (3); +select /*+ TIDB_SMJ(t1, t2) */ t.a from t, t1 where t.a = t1.a order by t1.a desc; +a +3 +2 +1 +drop table if exists t; +create table t (a int, b int, key(a), key(b)); +insert into t values (1,1),(1,2),(1,3),(2,1),(2,2),(3,1),(3,2),(3,3); +select /*+ TIDB_SMJ(t1, t2) */ t1.a from t t1, t t2 where t1.a = t2.b order by t1.a desc; +a +3 +3 +3 +3 +3 +3 +2 +2 +2 +2 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +drop table if exists s; +create table s (a int); +insert into s values (4), (1), (3), (2); +explain format = 'brief' select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; +id estRows task access object operator info +Sort 12487.50 root executor__merge_join.s.a:desc +└─HashJoin 12487.50 root inner join, equal:[eq(executor__merge_join.s.a, executor__merge_join.s.a)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(executor__merge_join.s.a)) + │ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(executor__merge_join.s.a)) + └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo +select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; +a1 +4 +3 +2 +1 +set @@session.tidb_merge_join_concurrency = 4; +drop table if exists t; +drop table if exists t1; +create table t(c1 int, c2 int); +create table t1(c1 int, c2 int); +insert into t values(1,1),(2,2); +insert into t1 values(2,3),(4,4); +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; 
+id estRows task access object operator info +Selection 8000.00 root or(eq(executor__merge_join.t.c1, 1), gt(executor__merge_join.t1.c2, 20)) +└─Shuffle 10000.00 root execution info: concurrency:4, data sources:[TableReader TableReader] + └─MergeJoin 10000.00 root left outer join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1 + ├─Sort(Build) 3336.66 root executor__merge_join.t1.c1 + │ └─TableReader 3336.66 root data:Selection + │ └─Selection 3336.66 cop[tikv] not(isnull(executor__merge_join.t1.c1)), or(eq(executor__merge_join.t1.c1, 1), gt(executor__merge_join.t1.c2, 20)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +c1 c2 c1 c2 +1 1 NULL NULL +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +id estRows task access object operator info +Selection 8000.00 root or(eq(executor__merge_join.t.c1, 1), gt(executor__merge_join.t1.c2, 20)) +└─Shuffle 10000.00 root execution info: concurrency:4, data sources:[TableReader TableReader] + └─MergeJoin 10000.00 root right outer join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t.c1 + ├─Sort(Build) 3336.66 root executor__merge_join.t1.c1 + │ └─TableReader 3336.66 root data:Selection + │ └─Selection 3336.66 cop[tikv] not(isnull(executor__merge_join.t1.c1)), or(eq(executor__merge_join.t1.c1, 1), gt(executor__merge_join.t1.c2, 20)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select 
/*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +c1 c2 c1 c2 +NULL NULL 1 1 +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +id estRows task access object operator info +Selection 9990.00 root or(eq(executor__merge_join.t.c1, 1), gt(executor__merge_join.t1.c2, 20)) +└─Shuffle 12487.50 root execution info: concurrency:4, data sources:[TableReader TableReader] + └─MergeJoin 12487.50 root right outer join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1 + ├─Sort(Build) 9990.00 root executor__merge_join.t.c1 + │ └─TableReader 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(executor__merge_join.t.c1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t1.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +c1 c2 c1 c2 +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +id estRows task access object operator info +Shuffle 12.50 root execution info: concurrency:4, data sources:[TableReader TableReader] +└─MergeJoin 12.50 root inner join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1 + ├─Sort(Build) 10.00 root executor__merge_join.t1.c1 + │ └─TableReader 10.00 root data:Selection + │ └─Selection 10.00 cop[tikv] not(isnull(executor__merge_join.t1.c1)), or(eq(executor__merge_join.t1.c1, 3), 0) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10.00 root executor__merge_join.t.c1 + └─TableReader 10.00 root data:Selection + └─Selection 10.00 cop[tikv] not(isnull(executor__merge_join.t.c1)), 
or(eq(executor__merge_join.t.c1, 3), 0) + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +c1 c2 c1 c2 +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +id estRows task access object operator info +Sort 10000.00 root executor__merge_join.t1.c1 +└─Shuffle 10000.00 root execution info: concurrency:4, data sources:[TableReader TableReader] + └─MergeJoin 10000.00 root left outer join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1, left cond:[ne(executor__merge_join.t.c1, 1)] + ├─Sort(Build) 6656.67 root executor__merge_join.t1.c1 + │ └─TableReader 6656.67 root data:Selection + │ └─Selection 6656.67 cop[tikv] ne(executor__merge_join.t1.c1, 1), not(isnull(executor__merge_join.t1.c1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +c1 c2 c1 c2 +1 1 NULL NULL +2 2 2 3 +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1 (c1 int, c2 int); +create table t2 (c1 int, c2 int); +create table t3 (c1 int, c2 int); +insert into t1 values (1,1), (2,2), (3,3); +insert into t2 values (1,1), (3,3), (5,5); +insert into t3 values (1,1), (5,5), (9,9); +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2; +c1 c2 c1 c2 c1 c2 +NULL NULL NULL NULL 5 5 +NULL NULL NULL NULL 9 9 +1 1 1 1 1 1 +drop table if exists t1; +create table t1 (c1 int); +insert into t1 values (1), (1), (1); +select/*+ TIDB_SMJ(t) */ * from t1 a join 
t1 b on a.c1 = b.c1; +c1 c1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +drop table if exists t; +drop table if exists t1; +create table t(c1 int, index k(c1)); +create table t1(c1 int); +insert into t values (1),(2),(3),(4),(5),(6),(7); +insert into t1 values (1),(2),(3),(4),(5),(6),(7); +select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1; +c1 +1 +2 +3 +4 +5 +6 +7 +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1; +c1 +1 +2 +3 +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1; +c1 +1 +2 +3 +begin; +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 for update) b where a.c1 = b.c1 order by a.c1; +c1 +1 +2 +3 +4 +5 +6 +7 +insert into t1 values(8); +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1; +c1 +1 +2 +3 +4 +5 +6 +7 +rollback; +drop table if exists t; +drop table if exists t1; +create table t(c1 int); +create table t1(c1 int unsigned); +insert into t values (1); +insert into t1 values (1); +select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1; +c1 +1 +drop table if exists t; +create table t(a int, b int, index a(a), index b(b)); +insert into t values(1, 2); +select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a; +a b +NULL 2 +drop table if exists t; +drop table if exists s; +create table t(a int, b int, primary key(a, b)); +insert into t value(1,1),(1,2),(1,3),(1,4); +create table s(a int, primary key(a)); +insert into s value(1); +select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a; +count(*) +4 +drop table if exists t; +create table t(a int); +insert into t value(1),(2); +explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +id estRows task access object operator info +Sort 100000000.00 root executor__merge_join.t.a, executor__merge_join.t.a +└─MergeJoin 
100000000.00 root inner join + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +a a +1 1 +1 2 +2 1 +2 2 +drop table if exists t; +drop table if exists s; +create table t(a int, b int); +insert into t values(1,1),(1,2); +create table s(a int, b int); +insert into s values(1,1); +explain format = 'brief' select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +id estRows task access object operator info +MergeJoin 10000.00 root left outer semi join, other cond:eq(executor__merge_join.t.a, executor__merge_join.s.a), ge(executor__merge_join.s.b, executor__merge_join.t.b) +├─TableReader(Build) 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo +└─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +a in (select a from s where s.b >= t.b) +1 +0 +drop table if exists t; +drop table if exists t1; +create table t (a int, key(a)); +create table t1 (a int, key(a)); +insert into t values (1), (2), (3); +insert into t1 values (1), (2), (3); +select /*+ TIDB_SMJ(t1, t2) */ t.a from t, t1 where t.a = t1.a order by t1.a desc; +a +3 +2 +1 +drop table if exists t; +create table t (a int, b int, key(a), key(b)); +insert into t values (1,1),(1,2),(1,3),(2,1),(2,2),(3,1),(3,2),(3,3); +select /*+ TIDB_SMJ(t1, t2) */ t1.a from t t1, t t2 where t1.a = t2.b order by t1.a desc; +a +3 +3 +3 +3 +3 +3 +2 +2 +2 +2 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +drop table if exists s; +create table s (a int); +insert into s values (4), (1), (3), (2); +explain format = 'brief' select s1.a1 
from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; +id estRows task access object operator info +Sort 12487.50 root executor__merge_join.s.a:desc +└─HashJoin 12487.50 root inner join, equal:[eq(executor__merge_join.s.a, executor__merge_join.s.a)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(executor__merge_join.s.a)) + │ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(executor__merge_join.s.a)) + └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo +select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; +a1 +4 +3 +2 +1 +set @@session.tidb_merge_join_concurrency = default; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1(c1 int, c2 int, PRIMARY KEY (c1)); +create table t2(c1 int, c2 int, PRIMARY KEY (c1)); +create table t3(c1 int, c2 int, PRIMARY KEY (c1)); +insert into t1 values(1,1),(2,2),(3,3); +insert into t2 values(2,3),(3,4),(4,5); +insert into t3 values(1,2),(2,4),(3,10); +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +id estRows task access object operator info +Sort 15625.00 root executor__merge_join.t1.c1 +└─MergeJoin 15625.00 root inner join, left key:executor__merge_join.t2.c1, right key:executor__merge_join.t3.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:true, stats:pseudo + └─MergeJoin(Probe) 12500.00 root inner join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t2.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 
cop[tikv] table:t2 keep order:true, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +c1 c2 c1 c2 c1 c2 +2 2 2 3 2 4 +3 3 3 4 3 10 +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +id estRows task access object operator info +Sort 15625.00 root executor__merge_join.t1.c1 +└─MergeJoin 15625.00 root inner join, left key:executor__merge_join.t2.c1, right key:executor__merge_join.t3.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:true, stats:pseudo + └─MergeJoin(Probe) 12500.00 root right outer join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t2.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +c1 c2 c1 c2 c1 c2 +2 2 2 3 2 4 +3 3 3 4 3 10 +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1; +id estRows task access object operator info +MergeJoin 15625.00 root inner join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t3.c1 +├─TableReader(Build) 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:true, stats:pseudo +└─MergeJoin(Probe) 12500.00 root inner join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t2.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 
10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1; +c1 c2 c1 c2 c1 c2 +2 2 2 3 2 4 +3 3 3 4 3 10 +set @@session.tidb_merge_join_concurrency = 4; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1(c1 int, c2 int, PRIMARY KEY (c1)); +create table t2(c1 int, c2 int, PRIMARY KEY (c1)); +create table t3(c1 int, c2 int, PRIMARY KEY (c1)); +insert into t1 values(1,1),(2,2),(3,3); +insert into t2 values(2,3),(3,4),(4,5); +insert into t3 values(1,2),(2,4),(3,10); +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +id estRows task access object operator info +Sort 15625.00 root executor__merge_join.t1.c1 +└─MergeJoin 15625.00 root inner join, left key:executor__merge_join.t2.c1, right key:executor__merge_join.t3.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:true, stats:pseudo + └─MergeJoin(Probe) 12500.00 root inner join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t2.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +c1 c2 c1 c2 c1 c2 +2 2 2 3 2 4 +3 3 3 4 3 10 +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +id estRows task access object operator info +Sort 15625.00 root 
executor__merge_join.t1.c1 +└─MergeJoin 15625.00 root inner join, left key:executor__merge_join.t2.c1, right key:executor__merge_join.t3.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:true, stats:pseudo + └─MergeJoin(Probe) 12500.00 root right outer join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t2.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +c1 c2 c1 c2 c1 c2 +2 2 2 3 2 4 +3 3 3 4 3 10 +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1; +id estRows task access object operator info +MergeJoin 15625.00 root inner join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t3.c1 +├─TableReader(Build) 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:true, stats:pseudo +└─MergeJoin(Probe) 12500.00 root inner join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t2.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1; +c1 c2 c1 c2 c1 c2 +2 2 2 3 2 4 +3 3 3 4 3 10 +set @@session.tidb_merge_join_concurrency = default; +set @@session.tidb_executor_concurrency = 4; +set @@session.tidb_hash_join_concurrency = 5; +set 
@@session.tidb_distsql_scan_concurrency = 15; +drop table if exists t1; +drop table if exists t2; +create table t1(a bigint, b bit(1), index idx_a(a)); +create table t2(a bit(1) not null, b bit(1), index idx_a(a)); +insert into t1 values(1, 1); +insert into t2 values(1, 1); +select hex(t1.a), hex(t2.a) from t1 inner join t2 on t1.a=t2.a; +hex(t1.a) hex(t2.a) +1 1 +drop table if exists t1; +drop table if exists t2; +create table t1(a float, b double, index idx_a(a)); +create table t2(a double not null, b double, index idx_a(a)); +insert into t1 values(1, 1); +insert into t2 values(1, 1); +select t1.a, t2.a from t1 inner join t2 on t1.a=t2.a; +a a +1 1 +drop table if exists t1; +drop table if exists t2; +create table t1(a bigint signed, b bigint, index idx_a(a)); +create table t2(a bigint unsigned, b bigint, index idx_a(a)); +insert into t1 values(-1, 0), (-1, 0), (0, 0), (0, 0), (pow(2, 63), 0), (pow(2, 63), 0); +insert into t2 values(18446744073709551615, 0), (18446744073709551615, 0), (0, 0), (0, 0), (pow(2, 63), 0), (pow(2, 63), 0); +select t1.a, t2.a from t1 join t2 on t1.a=t2.a order by t1.a; +a a +0 0 +0 0 +0 0 +0 0 +set @@session.tidb_executor_concurrency = default; +set @@session.tidb_hash_join_concurrency = default; +set @@session.tidb_distsql_scan_concurrency = default; +drop table if exists R; +drop table if exists Y; +create table Y (a int primary key, b int, index id_b(b)); +insert into Y values (0,2),(2,2); +create table R (a int primary key, b int); +insert into R values (2,2); +select /*+tidb_smj(R)*/ max(Y.a) from R join Y on R.a=Y.b where R.b <= Y.a; +max(Y.a) +2 +set @@session.tidb_merge_join_concurrency = 4; +drop table if exists R; +drop table if exists Y; +create table Y (a int primary key, b int, index id_b(b)); +insert into Y values (0,2),(2,2); +create table R (a int primary key, b int); +insert into R values (2,2); +select /*+tidb_smj(R)*/ max(Y.a) from R join Y on R.a=Y.b where R.b <= Y.a; +max(Y.a) +2 +set 
@@session.tidb_merge_join_concurrency = default; diff --git a/tests/integrationtest/t/executor/executor_txn.test b/tests/integrationtest/t/executor/executor_txn.test new file mode 100644 index 0000000000000..557c6f1b041fc --- /dev/null +++ b/tests/integrationtest/t/executor/executor_txn.test @@ -0,0 +1,133 @@ +# TestRollbackToSavepoint +drop table if exists t; +create table t(id int, a int, unique index idx(id)); +begin pessimistic; +insert into t values (1,1); +savepoint s1; +insert into t values (2,2); +rollback to s1; +insert into t values (2,2); +select * from t; +rollback to s1; +select * from t; +commit; +select * from t; +delete from t; +insert into t values (1,1); +begin pessimistic; +delete from t where id = 1; +savepoint s1; +insert into t values (1,2); +rollback to s1; +select * from t; +commit; +select * from t; + +# TestSavepointRandTestIssue0 +drop table if exists t; +CREATE TABLE t (a enum('B','C') NOT NULL,UNIQUE KEY idx_1 (a),KEY idx_2 (a)); +begin pessimistic; +savepoint sp0; +insert ignore into t values ( 'B' ),( 'C' ); +-- error 1062 +update t set a = 'C' where a = 'B'; +select * from t where a = 'B' for update; +rollback to sp0; +delete from t where a = 'B' ; +rollback; + +# TestSavepointWithTemporaryTable +set session tidb_txn_mode='optimistic'; +drop table if exists tmp1; +create temporary table tmp1 (id int primary key auto_increment, u int unique, v int); +insert into tmp1 values(1, 11, 101); +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +rollback to sp1; +select * from tmp1 order by id; +commit; +select * from tmp1 order by id; + +set session tidb_txn_mode='pessimistic'; +drop table if exists tmp1; +create temporary table tmp1 (id int primary key auto_increment, u int unique, v int); +insert into tmp1 values(1, 11, 101); +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +rollback to sp1; +select * from 
tmp1 order by id; +commit; +select * from tmp1 order by id; + +set session tidb_txn_mode=''; +drop table if exists tmp1; +create temporary table tmp1 (id int primary key auto_increment, u int unique, v int); +insert into tmp1 values(1, 11, 101); +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +rollback to sp1; +select * from tmp1 order by id; +commit; +select * from tmp1 order by id; + +set session tidb_txn_mode='optimistic'; +drop table if exists tmp1; +create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows; +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +savepoint sp2; +insert into tmp1 values(4, 44, 404); +rollback to sp2; +select * from tmp1 order by id; +rollback to sp1; +select * from tmp1 order by id; +commit; +select * from tmp1 order by id; + +set session tidb_txn_mode='pessimistic'; +drop table if exists tmp1; +create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows; +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +savepoint sp2; +insert into tmp1 values(4, 44, 404); +rollback to sp2; +select * from tmp1 order by id; +rollback to sp1; +select * from tmp1 order by id; +commit; +select * from tmp1 order by id; + +set session tidb_txn_mode=''; +drop table if exists tmp1; +create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows; +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +savepoint sp2; +insert into tmp1 values(4, 44, 404); +rollback to sp2; +select * from tmp1 order by id; +rollback to sp1; +select * from tmp1 order by id; +commit; +select * from tmp1 order by id; + +set session tidb_txn_mode=default; diff --git 
a/tests/integrationtest/t/executor/import_into.test b/tests/integrationtest/t/executor/import_into.test new file mode 100644 index 0000000000000..6c1dec4ae4e21 --- /dev/null +++ b/tests/integrationtest/t/executor/import_into.test @@ -0,0 +1,174 @@ +# TestImportIntoExplicitTransaction +drop table if exists t; +create table t (id int); +BEGIN; +-- error 1105 +IMPORT INTO t FROM '/file.csv'; +commit; + +# TestImportIntoOptionsNegativeCase +drop table if exists t; +create table t (id int); +-- error 8163 +import into t from '/file.csv' with xx=1; +-- error 8164 +import into t from '/file.csv' with detached=1; +-- error 8164 +import into t from '/file.csv' with character_set; +-- error 8165 +import into t from '/file.csv' with detached, detached; +-- error 8164 +import into t from '/file.csv' with character_set=true; +-- error 8164 +import into t from '/file.csv' with character_set=null; +-- error 8164 +import into t from '/file.csv' with character_set=1; +-- error 8164 +import into t from '/file.csv' with character_set=true; +-- error 8164 +import into t from '/file.csv' with character_set=''; +-- error 8164 +import into t from '/file.csv' with character_set='aa'; +-- error 8164 +import into t from '/file.csv' with fields_terminated_by=null; +-- error 8164 +import into t from '/file.csv' with fields_terminated_by=1; +-- error 8164 +import into t from '/file.csv' with fields_terminated_by=true; +-- error 8164 +import into t from '/file.csv' with fields_terminated_by=''; +-- error 8164 +import into t from '/file.csv' with fields_enclosed_by=null; +-- error 8164 +import into t from '/file.csv' with fields_enclosed_by='aa'; +-- error 8164 +import into t from '/file.csv' with fields_enclosed_by=1; +-- error 8164 +import into t from '/file.csv' with fields_enclosed_by=true; +-- error 8164 +import into t from '/file.csv' with fields_escaped_by=null; +-- error 8164 +import into t from '/file.csv' with fields_escaped_by='aa'; +-- error 8164 +import into t from '/file.csv' with 
fields_escaped_by=1; +-- error 8164 +import into t from '/file.csv' with fields_escaped_by=true; +-- error 8164 +import into t from '/file.csv' with fields_defined_null_by=null; +-- error 8164 +import into t from '/file.csv' with fields_defined_null_by=1; +-- error 8164 +import into t from '/file.csv' with fields_defined_null_by=true; +-- error 8164 +import into t from '/file.csv' with lines_terminated_by=null; +-- error 8164 +import into t from '/file.csv' with lines_terminated_by=1; +-- error 8164 +import into t from '/file.csv' with lines_terminated_by=true; +-- error 8164 +import into t from '/file.csv' with lines_terminated_by=''; +-- error 8164 +import into t from '/file.csv' with skip_rows=null; +-- error 8164 +import into t from '/file.csv' with skip_rows=''; +-- error 8164 +import into t from '/file.csv' with skip_rows=-1; +-- error 8164 +import into t from '/file.csv' with skip_rows=true; +-- error 8164 +import into t from '/file.csv' with split_file='aa'; +-- error 8164 +import into t from '/file.csv' with split_file, skip_rows=2; +-- error 8164 +import into t from '/file.csv' with disk_quota='aa'; +-- error 8164 +import into t from '/file.csv' with disk_quota='220MiBxxx'; +-- error 8164 +import into t from '/file.csv' with disk_quota=1; +-- error 8164 +import into t from '/file.csv' with disk_quota=false; +-- error 8164 +import into t from '/file.csv' with disk_quota=null; +-- error 8164 +import into t from '/file.csv' with thread='aa'; +-- error 8164 +import into t from '/file.csv' with thread=0; +-- error 8164 +import into t from '/file.csv' with thread=false; +-- error 8164 +import into t from '/file.csv' with thread=-100; +-- error 8164 +import into t from '/file.csv' with thread=null; +-- error 8164 +import into t from '/file.csv' with max_write_speed='aa'; +-- error 8164 +import into t from '/file.csv' with max_write_speed='11aa'; +-- error 8164 +import into t from '/file.csv' with max_write_speed=null; +-- error 8164 +import into t from 
'/file.csv' with max_write_speed=-1; +-- error 8164 +import into t from '/file.csv' with max_write_speed=false; +-- error 8164 +import into t from '/file.csv' with checksum_table=''; +-- error 8164 +import into t from '/file.csv' with checksum_table=123; +-- error 8164 +import into t from '/file.csv' with checksum_table=false; +-- error 8164 +import into t from '/file.csv' with checksum_table=null; +-- error 8164 +import into t from '/file.csv' with record_errors='aa'; +-- error 8164 +import into t from '/file.csv' with record_errors='111aa'; +-- error 8164 +import into t from '/file.csv' with record_errors=-123; +-- error 8164 +import into t from '/file.csv' with record_errors=null; +-- error 8164 +import into t from '/file.csv' with record_errors=true; +-- error 8164 +import into t from '/file.csv' with cloud_storage_uri=123; +-- error 8164 +import into t from '/file.csv' with cloud_storage_uri=':'; +-- error 8164 +import into t from '/file.csv' with cloud_storage_uri='sdsd'; +-- error 8164 +import into t from '/file.csv' with cloud_storage_uri='http://sdsd'; +-- error 8166 +import into t from '/file.csv' format 'parquet' with character_set='utf8'; +-- error 8166 +import into t from '/file.csv' format 'sql' with character_set='utf8'; +-- error 8166 +import into t from '/file.csv' format 'parquet' with fields_terminated_by='a'; +-- error 8166 +import into t from '/file.csv' format 'sql' with fields_terminated_by='a'; +-- error 8166 +import into t from '/file.csv' format 'parquet' with fields_enclosed_by='a'; +-- error 8166 +import into t from '/file.csv' format 'sql' with fields_enclosed_by='a'; +-- error 8166 +import into t from '/file.csv' format 'parquet' with fields_escaped_by='a'; +-- error 8166 +import into t from '/file.csv' format 'sql' with fields_escaped_by='a'; +-- error 8166 +import into t from '/file.csv' format 'parquet' with fields_defined_null_by='a'; +-- error 8166 +import into t from '/file.csv' format 'sql' with fields_defined_null_by='a'; +-- 
error 8166 +import into t from '/file.csv' format 'parquet' with lines_terminated_by='a'; +-- error 8166 +import into t from '/file.csv' format 'sql' with lines_terminated_by='a'; +-- error 8166 +import into t from '/file.csv' format 'parquet' with skip_rows=1; +-- error 8166 +import into t from '/file.csv' format 'sql' with skip_rows=1; +-- error 8166 +import into t from '/file.csv' format 'parquet' with split_file; +-- error 8166 +import into t from '/file.csv' format 'sql' with split_file; +-- error 8156 +import into t from ''; +-- error 8157 +import into t from '/a.csv' format 'xx'; + diff --git a/tests/integrationtest/t/executor/index_advise.test b/tests/integrationtest/t/executor/index_advise.test new file mode 100644 index 0000000000000..17f11ae545ac9 --- /dev/null +++ b/tests/integrationtest/t/executor/index_advise.test @@ -0,0 +1,166 @@ +# TestIndexJoinProjPattern +set @@session.tidb_opt_advanced_join_hint=0; +drop table if exists t1, t2; +create table t1( + pnbrn_cnaps varchar(5) not null, + new_accno varchar(18) not null, + primary key(pnbrn_cnaps,new_accno) nonclustered +); +create table t2( + pnbrn_cnaps varchar(5) not null, + txn_accno varchar(18) not null, + txn_dt date not null, + yn_frz varchar(1) default null +); +insert into t1(pnbrn_cnaps,new_accno) values ("40001","123"); +insert into t2(pnbrn_cnaps, txn_accno, txn_dt, yn_frz) values ("40001","123","20221201","0"); +set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'; +explain format='brief' update +/*+ inl_join(a) */ +t2 b, +( + select t1.pnbrn_cnaps, + t1.new_accno + from t1 + where t1.pnbrn_cnaps = '40001' +) a +set b.yn_frz = '1' +where b.txn_dt = str_to_date('20221201', '%Y%m%d') +and b.pnbrn_cnaps = a.pnbrn_cnaps +and b.txn_accno = a.new_accno; + +set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'; +explain format='brief' update +/*+ inl_join(a) */ +t2 b, +( + select t1.pnbrn_cnaps, + t1.new_accno + from t1 + where t1.pnbrn_cnaps = '40001' +) a +set b.yn_frz = '1' +where 
b.txn_dt = str_to_date('20221201', '%Y%m%d') +and b.pnbrn_cnaps = a.pnbrn_cnaps +and b.txn_accno = a.new_accno; + +set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'; +update +/*+ inl_join(a) */ +t2 b, +( + select t1.pnbrn_cnaps, + t1.new_accno + from t1 + where t1.pnbrn_cnaps = '40001' +) a +set b.yn_frz = '1' +where b.txn_dt = str_to_date('20221201', '%Y%m%d') +and b.pnbrn_cnaps = a.pnbrn_cnaps +and b.txn_accno = a.new_accno; +select yn_frz from t2; + +set @@session.tidb_opt_advanced_join_hint=default; +set @@session.tidb_enable_inl_join_inner_multi_pattern=default; + +# TestIndexJoinSelPattern +set @@tidb_opt_advanced_join_hint=0; +drop table if exists tbl_miss, tbl_src; +create table tbl_miss( + id bigint(20) unsigned not null, + txn_dt date default null, + perip_sys_uuid varchar(32) not null, + rvrs_idr varchar(1) not null, + primary key(id) clustered, + key idx1 (txn_dt, perip_sys_uuid, rvrs_idr) +); +insert into tbl_miss (id,txn_dt,perip_sys_uuid,rvrs_idr) values (1,"20221201","123","1"); +create table tbl_src( + txn_dt date default null, + uuid varchar(32) not null, + rvrs_idr char(1), + expd_inf varchar(5000), + primary key(uuid,rvrs_idr) nonclustered +); +insert into tbl_src (txn_dt,uuid,rvrs_idr) values ("20221201","123","1"); + +set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'; +explain format='brief' select /*+ use_index(mis,) inl_join(src) */ + * +from tbl_miss mis + ,tbl_src src +where src.txn_dt >= str_to_date('20221201', '%Y%m%d') +and mis.id between 1 and 10000 +and mis.perip_sys_uuid = src.uuid +and mis.rvrs_idr = src.rvrs_idr +and mis.txn_dt = src.txn_dt +and ( + case when isnull(src.expd_inf) = 1 then '' + else + substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, + instr(substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) + end +) != '01'; + +set 
@@session.tidb_enable_inl_join_inner_multi_pattern='ON'; +explain format='brief' select /*+ use_index(mis,) inl_join(src) */ + * +from tbl_miss mis + ,tbl_src src +where src.txn_dt >= str_to_date('20221201', '%Y%m%d') +and mis.id between 1 and 10000 +and mis.perip_sys_uuid = src.uuid +and mis.rvrs_idr = src.rvrs_idr +and mis.txn_dt = src.txn_dt +and ( + case when isnull(src.expd_inf) = 1 then '' + else + substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, + instr(substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) + end +) != '01'; +select /*+ use_index(mis,) inl_join(src) */ + * +from tbl_miss mis + ,tbl_src src +where src.txn_dt >= str_to_date('20221201', '%Y%m%d') +and mis.id between 1 and 10000 +and mis.perip_sys_uuid = src.uuid +and mis.rvrs_idr = src.rvrs_idr +and mis.txn_dt = src.txn_dt +and ( + case when isnull(src.expd_inf) = 1 then '' + else + substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, + instr(substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) + end +) != '01'; + +set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'; +select /*+ use_index(mis,) inl_join(src) */ + * +from tbl_miss mis + ,tbl_src src +where src.txn_dt >= str_to_date('20221201', '%Y%m%d') +and mis.id between 1 and 10000 +and mis.perip_sys_uuid = src.uuid +and mis.rvrs_idr = src.rvrs_idr +and mis.txn_dt = src.txn_dt +and ( + case when isnull(src.expd_inf) = 1 then '' + else + substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, + instr(substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) + end +) != '01'; + +set @@tidb_opt_advanced_join_hint=default; +set 
@@session.tidb_enable_inl_join_inner_multi_pattern=default; diff --git a/tests/integrationtest/t/executor/index_lookup_merge_join.test b/tests/integrationtest/t/executor/index_lookup_merge_join.test new file mode 100644 index 0000000000000..13b6a97f99133 --- /dev/null +++ b/tests/integrationtest/t/executor/index_lookup_merge_join.test @@ -0,0 +1,108 @@ +# TestIssue28052 +drop table if exists t; +CREATE TABLE `t` (`col_tinyint_key_signed` tinyint(4) DEFAULT NULL,`col_year_key_signed` year(4) DEFAULT NULL,KEY `col_tinyint_key_signed` (`col_tinyint_key_signed`),KEY `col_year_key_signed` (`col_year_key_signed`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t values(-100,NULL); +select /*+ inl_merge_join(t1, t2) */ count(*) from t t1 right join t t2 on t1. `col_year_key_signed` = t2. `col_tinyint_key_signed`; + +# TestIssue18631 +drop table if exists t1, t2; +create table t1(a int, b int, c int, d int, primary key(a,b,c)); +create table t2(a int, b int, c int, d int, primary key(a,b,c)); +insert into t1 values(1,1,1,1),(2,2,2,2),(3,3,3,3); +insert into t2 values(1,1,1,1),(2,2,2,2); +explain format = 'brief' select /*+ inl_merge_join(t1,t2) */ * from t1 left join t2 on t1.a = t2.a and t1.c = t2.c and t1.b = t2.b order by t1.a desc; +select /*+ inl_merge_join(t1,t2) */ * from t1 left join t2 on t1.a = t2.a and t1.c = t2.c and t1.b = t2.b order by t1.a desc; + +# TestIssue19408 +drop table if exists t1, t2; +create table t1 (c_int int, primary key(c_int)); +create table t2 (c_int int, unique key (c_int)) partition by hash (c_int) partitions 4; +insert into t1 values (1), (2), (3), (4), (5); +insert into t2 select * from t1; +begin; +delete from t1 where c_int = 1; +--sorted_result +select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int; +--sorted_result +select /*+ INL_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int; +--sorted_result +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int; +commit; 
+ +# TestIssue20137 +drop table if exists t1, t2; +create table t1 (id bigint(20) unsigned, primary key(id)); +create table t2 (id bigint(20) unsigned); +insert into t1 values (8738875760185212610); +insert into t1 values (9814441339970117597); +insert into t2 values (8738875760185212610); +insert into t2 values (9814441339970117597); +select /*+ INL_MERGE_JOIN(t1, t2) */ * from t2 left join t1 on t1.id = t2.id order by t1.id; + +# TestIndexJoinOnSinglePartitionTable +set @@tidb_opt_advanced_join_hint=0; +set @@tidb_partition_prune_mode= 'static'; +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +create table t2 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +insert into t1 values (1, 'Alice'); +insert into t2 values (1, 'Bob'); +analyze table t1, t2; +select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +explain format = 'brief' select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +show warnings; +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +explain format = 'brief' select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +explain format = 'brief' select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +set @@tidb_partition_prune_mode= 'dynamic'; +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 
values less than (10), partition p1 values less than maxvalue ); +create table t2 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +insert into t1 values (1, 'Alice'); +insert into t2 values (1, 'Bob'); +analyze table t1, t2; +select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +explain format = 'brief' select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +show warnings; +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +explain format = 'brief' select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +explain format = 'brief' select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +set @@tidb_opt_advanced_join_hint=DEFAULT; +set @@tidb_partition_prune_mode= DEFAULT; + +# TestIssue20400 +drop table if exists t, s; +create table s(a int, index(a)); +create table t(a int); +insert into t values(1); +select /*+ hash_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1; +select /*+ inl_merge_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1; + +# TestIssue20549 +drop table if exists t1, t2; +CREATE TABLE `t1` (`id` bigint(20) NOT NULL AUTO_INCREMENT, `t2id` bigint(20) DEFAULT NULL, PRIMARY KEY (`id`), KEY `t2id` (`t2id`)); +INSERT INTO `t1` VALUES (1,NULL); +CREATE TABLE `t2` (`id` bigint(20) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`)); +SELECT /*+ INL_MERGE_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id; +SELECT /*+ HASH_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id; + +# TestIssue24473 +drop table if exists x; 
+CREATE TABLE `x` ( `a` enum('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`)); +insert into x values("x"),("x"),("b"),("y"); +--sorted_result +SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; +--sorted_result +SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; + +# TestIssue25669 +drop table if exists x; +CREATE TABLE `x` ( `a` set('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`)); +insert into x values("x"),("x"),("b"),("y"); +--sorted_result +SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; +--sorted_result +SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; + diff --git a/tests/integrationtest/t/executor/infoschema_reader.test b/tests/integrationtest/t/executor/infoschema_reader.test new file mode 100644 index 0000000000000..b25bb80e93da6 --- /dev/null +++ b/tests/integrationtest/t/executor/infoschema_reader.test @@ -0,0 +1,241 @@ +# TestProfiling +select * from information_schema.profiling; +set @@profiling=1; +select * from information_schema.profiling; + +# TestSchemataTables +select * from information_schema.SCHEMATA where schema_name='mysql'; +drop user if exists schemata_tester; +create user schemata_tester; + +connect (conn1, localhost, schemata_tester,, information_schema); +select count(*) from information_schema.SCHEMATA; +select * from information_schema.SCHEMATA where schema_name='mysql'; +select * from information_schema.SCHEMATA where schema_name='INFORMATION_SCHEMA'; + +connection default; +CREATE ROLE r_mysql_priv; +GRANT ALL PRIVILEGES ON mysql.* TO r_mysql_priv; +GRANT r_mysql_priv TO schemata_tester; + +connection conn1; +set role r_mysql_priv; +select count(*) from information_schema.SCHEMATA; +select * from information_schema.SCHEMATA; + +connection default; +disconnect conn1; + +# TestTableIDAndIndexID +drop table if exists executor__infoschema_reader.t; +create table 
executor__infoschema_reader.t (a int, b int, primary key(a), key k1(b)); +select index_id from information_schema.tidb_indexes where table_schema = 'executor__infoschema_reader' and table_name = 't'; +select tidb_table_id > 0 from information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't'; + +# TestSchemataCharacterSet +drop database if exists `foo`; +CREATE DATABASE `foo` DEFAULT CHARACTER SET = 'utf8mb4'; +select default_character_set_name, default_collation_name FROM information_schema.SCHEMATA WHERE schema_name = 'foo'; +drop database `foo`; + +# TestViews +drop view if exists executor__infoschema_reader.v1; +CREATE DEFINER='root'@'localhost' VIEW executor__infoschema_reader.v1 AS SELECT 1; +select TABLE_COLLATION is null from INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='VIEW'; +SELECT * FROM information_schema.views WHERE table_schema='executor__infoschema_reader' AND table_name='v1'; +SELECT table_catalog, table_schema, table_name, table_type, engine, version, row_format, table_rows, avg_row_length, data_length, max_data_length, index_length, data_free, auto_increment, update_time, check_time, table_collation, checksum, create_options, table_comment FROM information_schema.tables WHERE table_schema='executor__infoschema_reader' AND table_name='v1'; + +# TestColumnsTables +drop table if exists t; +create table t (bit bit(10) DEFAULT b'100'); +SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'executor__infoschema_reader' AND TABLE_NAME = 't'; +drop table if exists t; +set time_zone='+08:00'; +drop table if exists t; +create table t (b timestamp(3) NOT NULL DEFAULT '1970-01-01 08:00:01.000'); +select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='executor__infoschema_reader'; +set time_zone='+04:00'; +select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='executor__infoschema_reader'; +set time_zone=default; +drop table if exists t; 
+create table t (a bit DEFAULT (rand())); +select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='executor__infoschema_reader'; +drop table if exists t; +CREATE TABLE t (`COL3` bit(1) NOT NULL,b year) ; +select column_type from information_schema.columns where TABLE_SCHEMA = 'executor__infoschema_reader' and TABLE_NAME = 't'; +## For issue: https://github.com/pingcap/tidb/issues/43379 +select ordinal_position from information_schema.columns where table_schema=database() and table_name='t' and column_name='b'; + +# TestEngines +select * from information_schema.ENGINES; + +# TestDataTypesMaxLengthAndOctLength +# https://github.com/pingcap/tidb/issues/25467 +drop table if exists t; +create table t (a varchar(255) collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a varchar(255) collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a varchar(255) collate utf8_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a char(10) collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a char(10) collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a set('a', 'b', 'cccc') collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and 
table_name='t'; +drop table t; +create table t (a set('a', 'b', 'cccc') collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a enum('a', 'b', 'cccc') collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a enum('a', 'b', 'cccc') collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; + +# TestDDLJobs +set global tidb_ddl_enable_fast_reorg = false; +drop database if exists test_ddl_jobs; +create database test_ddl_jobs; +select db_name, job_type from information_schema.DDL_JOBS limit 1; +use test_ddl_jobs; +create table t (a int); +select db_name, table_name, job_type from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and table_name = 't'; +select job_type from information_schema.DDL_JOBS group by job_type having job_type = 'create table'; +select distinct job_type from information_schema.DDL_JOBS where job_type = 'create table' and start_time > str_to_date('20190101','%Y%m%d%H%i%s'); +drop user if exists DDL_JOBS_tester; +create user DDL_JOBS_tester; + +connect(conn1, localhost, DDL_JOBS_tester,, information_schema); +select DB_NAME, TABLE_NAME from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and TABLE_NAME = 't'; + +connection default; +CREATE ROLE r_priv; +GRANT ALL PRIVILEGES ON test_ddl_jobs.* TO r_priv; +GRANT r_priv TO DDL_JOBS_tester; + +connection conn1; +set role r_priv; +select DB_NAME, TABLE_NAME from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and TABLE_NAME = 't'; + +connection default; +create table tt (a int); +alter table tt add index t(a), add column b int; +select db_name, table_name, 
job_type from information_schema.DDL_JOBS limit 3; + +disconnect conn1; +drop database test_ddl_jobs; +use executor__infoschema_reader; +set global tidb_ddl_enable_fast_reorg = default; + +# TestKeyColumnUsage +select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME='stats_meta' and COLUMN_NAME='table_id'; +create user key_column_tester; + +connect (conn1, localhost, key_column_tester,, information_schema); +select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME != 'CLUSTER_SLOW_QUERY'; + +connection default; +CREATE ROLE r_stats_meta ; +GRANT ALL PRIVILEGES ON mysql.stats_meta TO r_stats_meta; +GRANT r_stats_meta TO key_column_tester; + +connection conn1; +set role r_stats_meta; +select count(*)>0 from information_schema.KEY_COLUMN_USAGE where TABLE_NAME='stats_meta'; + +connection default; +disconnect conn1; + +# TestPartitionTablesStatsCache +# https://github.com/pingcap/tidb/issues/32693 +drop table if exists e, e2; +CREATE TABLE e ( id INT NOT NULL, fname VARCHAR(30), lname VARCHAR(30)) PARTITION BY RANGE (id) ( + PARTITION p0 VALUES LESS THAN (50), + PARTITION p1 VALUES LESS THAN (100), + PARTITION p2 VALUES LESS THAN (150), + PARTITION p3 VALUES LESS THAN (MAXVALUE)); +CREATE TABLE e2 ( id INT NOT NULL, fname VARCHAR(30), lname VARCHAR(30)); +SELECT PARTITION_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 'e' and table_schema=(select database()); +INSERT INTO e VALUES (1669, "Jim", "Smith"), (337, "Mary", "Jones"), (16, "Frank", "White"), (2005, "Linda", "Black"); +set tidb_enable_exchange_partition='on'; +ALTER TABLE e EXCHANGE PARTITION p0 WITH TABLE e2; +INSERT INTO e VALUES (41, "Michael", "Green"); +analyze table e; +SELECT PARTITION_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 'e'; +set tidb_enable_exchange_partition=default; + +# TestMetricTables +select count(*) > 0 from information_schema.`METRICS_TABLES`; +select * from information_schema.`METRICS_TABLES` where 
table_name='tidb_qps'; + +# TestTableConstraintsTable +select * from information_schema.TABLE_CONSTRAINTS where TABLE_NAME='gc_delete_range'; + +# TestTableSessionVar +select * from information_schema.SESSION_VARIABLES where VARIABLE_NAME='tidb_retry_limit'; + +# TestSequences +drop sequence if exists seq, seq2; +CREATE SEQUENCE seq maxvalue 10000000; +SELECT * FROM information_schema.sequences WHERE sequence_schema='executor__infoschema_reader' AND sequence_name='seq'; +DROP SEQUENCE seq; +CREATE SEQUENCE seq start = -1 minvalue -1 maxvalue 10 increment 1 cache 10; +SELECT * FROM information_schema.sequences WHERE sequence_schema='executor__infoschema_reader' AND sequence_name='seq'; +CREATE SEQUENCE seq2 start = -9 minvalue -10 maxvalue 10 increment -1 cache 15; +SELECT * FROM information_schema.sequences WHERE sequence_schema='executor__infoschema_reader' AND sequence_name='seq2'; +SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME , TABLE_TYPE, ENGINE, TABLE_ROWS FROM information_schema.tables WHERE TABLE_TYPE='SEQUENCE' AND TABLE_NAME='seq2' and table_schema='executor__infoschema_reader'; + +# TestTablesPKType +drop table if exists t_int, t_implicit, t_common; +create table t_int (a int primary key, b int); +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't_int'; +set tidb_enable_clustered_index=int_only; +create table t_implicit (a varchar(64) primary key, b int); +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't_implicit'; +set tidb_enable_clustered_index=on; +create table t_common (a varchar(64) primary key, b int); +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't_common'; +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'INFORMATION_SCHEMA' and table_name = 'TABLES'; +set tidb_enable_clustered_index=default; + +# 
TestNullColumns +drop table if exists t; +CREATE TABLE t ( id int DEFAULT NULL); +CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`1.1.1.1` SQL SECURITY DEFINER VIEW `v_test` (`type`) AS SELECT NULL AS `type` FROM `t` AS `f`; +select * from information_schema.columns where TABLE_SCHEMA = 'executor__infoschema_reader' and TABLE_NAME = 'v_test'; + +# TestUserPrivilegesTable +drop user if exists usageuser; +create user usageuser; + +connect (conn1, localhost, usageuser,, information_schema); +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"; + +connection default; +GRANT SELECT ON *.* to usageuser; + +connection conn1; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"; + +connection default; +GRANT SELECT ON *.* to usageuser WITH GRANT OPTION; + +connection conn1; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"; + +connection default; +GRANT BACKUP_ADMIN ON *.* to usageuser; + +connection conn1; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'" ORDER BY privilege_type; + +connection default; +disconnect conn1; diff --git a/tests/integrationtest/t/executor/inspection_common.test b/tests/integrationtest/t/executor/inspection_common.test new file mode 100644 index 0000000000000..5b70a3a8f1fae --- /dev/null +++ b/tests/integrationtest/t/executor/inspection_common.test @@ -0,0 +1,6 @@ +# TestInspectionRules +select count(*) from information_schema.inspection_rules; +select count(*) from information_schema.inspection_rules where type='inspection'; +select count(*) from information_schema.inspection_rules where type='summary'; +select count(*) from information_schema.inspection_rules where type='inspection' and type='summary'; + diff --git a/tests/integrationtest/t/executor/merge_join.test b/tests/integrationtest/t/executor/merge_join.test new file mode 100644 index 0000000000000..7e20d21501823 --- /dev/null +++ 
b/tests/integrationtest/t/executor/merge_join.test @@ -0,0 +1,287 @@ +# TestMergeJoin +drop table if exists t; +drop table if exists t1; +create table t(c1 int, c2 int); +create table t1(c1 int, c2 int); +insert into t values(1,1),(2,2); +insert into t1 values(2,3),(4,4); +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1 (c1 int, c2 int); +create table t2 (c1 int, c2 int); +create table t3 (c1 int, c2 int); +insert into t1 values (1,1), (2,2), (3,3); +insert into t2 values (1,1), (3,3), (5,5); +insert into t3 values (1,1), (5,5), (9,9); +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2; +drop table if exists t1; +create table t1 (c1 int); +insert into t1 values (1), (1), (1); +select/*+ TIDB_SMJ(t) */ * from 
t1 a join t1 b on a.c1 = b.c1; +drop table if exists t; +drop table if exists t1; +create table t(c1 int, index k(c1)); +create table t1(c1 int); +insert into t values (1),(2),(3),(4),(5),(6),(7); +insert into t1 values (1),(2),(3),(4),(5),(6),(7); +select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1; +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1; +## Test LogicalSelection under LogicalJoin. +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1; +begin; +## Test LogicalLock under LogicalJoin. +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 for update) b where a.c1 = b.c1 order by a.c1; +## Test LogicalUnionScan under LogicalJoin. +insert into t1 values(8); +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1; +rollback; +drop table if exists t; +drop table if exists t1; +create table t(c1 int); +create table t1(c1 int unsigned); +insert into t values (1); +insert into t1 values (1); +select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1; +drop table if exists t; +create table t(a int, b int, index a(a), index b(b)); +insert into t values(1, 2); +select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a; +drop table if exists t; +drop table if exists s; +create table t(a int, b int, primary key(a, b)); +insert into t value(1,1),(1,2),(1,3),(1,4); +create table s(a int, primary key(a)); +insert into s value(1); +select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a; +drop table if exists t; +create table t(a int); +insert into t value(1),(2); +## Test TIDB_SMJ for cartesian product. 
+explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +drop table if exists t; +drop table if exists s; +create table t(a int, b int); +insert into t values(1,1),(1,2); +create table s(a int, b int); +insert into s values(1,1); +explain format = 'brief' select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +## Test TIDB_SMJ for join with order by desc, see https://github.com/pingcap/tidb/issues/14483 +drop table if exists t; +drop table if exists t1; +create table t (a int, key(a)); +create table t1 (a int, key(a)); +insert into t values (1), (2), (3); +insert into t1 values (1), (2), (3); +select /*+ TIDB_SMJ(t1, t2) */ t.a from t, t1 where t.a = t1.a order by t1.a desc; +drop table if exists t; +create table t (a int, b int, key(a), key(b)); +insert into t values (1,1),(1,2),(1,3),(2,1),(2,2),(3,1),(3,2),(3,3); +select /*+ TIDB_SMJ(t1, t2) */ t1.a from t t1, t t2 where t1.a = t2.b order by t1.a desc; +drop table if exists s; +create table s (a int); +insert into s values (4), (1), (3), (2); +explain format = 'brief' select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; +select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; + +# TestShuffleMergeJoin +## Same as TestMergeJoin except `tidb_merge_join_concurrency = 4;` +set @@session.tidb_merge_join_concurrency = 4; +drop table if exists t; +drop table if exists t1; +create table t(c1 int, c2 int); +create table t1(c1 int, c2 int); +insert into t values(1,1),(2,2); +insert into t1 values(2,3),(4,4); +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 
where t.c1 = 1 or t1.c2 > 20; +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1 (c1 int, c2 int); +create table t2 (c1 int, c2 int); +create table t3 (c1 int, c2 int); +insert into t1 values (1,1), (2,2), (3,3); +insert into t2 values (1,1), (3,3), (5,5); +insert into t3 values (1,1), (5,5), (9,9); +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2; +drop table if exists t1; +create table t1 (c1 int); +insert into t1 values (1), (1), (1); +select/*+ TIDB_SMJ(t) */ * from t1 a join t1 b on a.c1 = b.c1; +drop table if exists t; +drop table if exists t1; +create table t(c1 int, index k(c1)); +create table t1(c1 int); +insert into t values (1),(2),(3),(4),(5),(6),(7); +insert into t1 values (1),(2),(3),(4),(5),(6),(7); +select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1; +select /*+ TIDB_SMJ(a, b) */ a.c1 
from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1; +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1; +begin; +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 for update) b where a.c1 = b.c1 order by a.c1; +insert into t1 values(8); +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1; +rollback; +drop table if exists t; +drop table if exists t1; +create table t(c1 int); +create table t1(c1 int unsigned); +insert into t values (1); +insert into t1 values (1); +select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1; +drop table if exists t; +create table t(a int, b int, index a(a), index b(b)); +insert into t values(1, 2); +select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a; +drop table if exists t; +drop table if exists s; +create table t(a int, b int, primary key(a, b)); +insert into t value(1,1),(1,2),(1,3),(1,4); +create table s(a int, primary key(a)); +insert into s value(1); +select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a; +drop table if exists t; +create table t(a int); +insert into t value(1),(2); +explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +drop table if exists t; +drop table if exists s; +create table t(a int, b int); +insert into t values(1,1),(1,2); +create table s(a int, b int); +insert into s values(1,1); +explain format = 'brief' select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +drop table if exists t; +drop table if exists t1; +create table t (a int, key(a)); +create table t1 (a int, key(a)); +insert into t values (1), (2), (3); +insert into t1 values (1), (2), (3); +select /*+ TIDB_SMJ(t1, t2) */ t.a from t, t1 where t.a = 
t1.a order by t1.a desc; +drop table if exists t; +create table t (a int, b int, key(a), key(b)); +insert into t values (1,1),(1,2),(1,3),(2,1),(2,2),(3,1),(3,2),(3,3); +select /*+ TIDB_SMJ(t1, t2) */ t1.a from t t1, t t2 where t1.a = t2.b order by t1.a desc; +drop table if exists s; +create table s (a int); +insert into s values (4), (1), (3), (2); +explain format = 'brief' select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; +select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; +set @@session.tidb_merge_join_concurrency = default; + +# Test3WaysMergeJoin +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1(c1 int, c2 int, PRIMARY KEY (c1)); +create table t2(c1 int, c2 int, PRIMARY KEY (c1)); +create table t3(c1 int, c2 int, PRIMARY KEY (c1)); +insert into t1 values(1,1),(2,2),(3,3); +insert into t2 values(2,3),(3,4),(4,5); +insert into t3 values(1,2),(2,4),(3,10); +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +# In below case, t1 side filled with null when no matched join, so that order is not kept and sort appended +# On the other hand, t1 order kept so no final sort appended +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1; +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer 
join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1; + +# Test3WaysShuffleMergeJoin +set @@session.tidb_merge_join_concurrency = 4; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1(c1 int, c2 int, PRIMARY KEY (c1)); +create table t2(c1 int, c2 int, PRIMARY KEY (c1)); +create table t3(c1 int, c2 int, PRIMARY KEY (c1)); +insert into t1 values(1,1),(2,2),(3,3); +insert into t2 values(2,3),(3,4),(4,5); +insert into t3 values(1,2),(2,4),(3,10); +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +# In below case, t1 side filled with null when no matched join, so that order is not kept and sort appended +# On the other hand, t1 order kept so no final sort appended +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1; +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1; +set @@session.tidb_merge_join_concurrency = default; + +# TestMergeJoinDifferentTypes +set @@session.tidb_executor_concurrency = 4; +set @@session.tidb_hash_join_concurrency = 5; +set @@session.tidb_distsql_scan_concurrency = 15; +drop table if exists t1; +drop table if exists t2; +create table t1(a bigint, b bit(1), index idx_a(a)); +create table t2(a bit(1) not null, b bit(1), index idx_a(a)); +insert into t1 values(1, 1); +insert into t2 values(1, 1); +select hex(t1.a), hex(t2.a) from t1 inner join t2 on t1.a=t2.a; +drop table if exists t1; +drop table if 
exists t2; +create table t1(a float, b double, index idx_a(a)); +create table t2(a double not null, b double, index idx_a(a)); +insert into t1 values(1, 1); +insert into t2 values(1, 1); +select t1.a, t2.a from t1 inner join t2 on t1.a=t2.a; +drop table if exists t1; +drop table if exists t2; +create table t1(a bigint signed, b bigint, index idx_a(a)); +create table t2(a bigint unsigned, b bigint, index idx_a(a)); +insert into t1 values(-1, 0), (-1, 0), (0, 0), (0, 0), (pow(2, 63), 0), (pow(2, 63), 0); +insert into t2 values(18446744073709551615, 0), (18446744073709551615, 0), (0, 0), (0, 0), (pow(2, 63), 0), (pow(2, 63), 0); +select t1.a, t2.a from t1 join t2 on t1.a=t2.a order by t1.a; +set @@session.tidb_executor_concurrency = default; +set @@session.tidb_hash_join_concurrency = default; +set @@session.tidb_distsql_scan_concurrency = default; + +# TestMergeJoinWithOtherConditions +drop table if exists R; +drop table if exists Y; +create table Y (a int primary key, b int, index id_b(b)); +insert into Y values (0,2),(2,2); +create table R (a int primary key, b int); +insert into R values (2,2); +# the max() limits the required rows at most one +# TODO(fangzhuhe): specify Y as the build side using hints +select /*+tidb_smj(R)*/ max(Y.a) from R join Y on R.a=Y.b where R.b <= Y.a; + +# TestShuffleMergeJoinWithOtherConditions +set @@session.tidb_merge_join_concurrency = 4; +drop table if exists R; +drop table if exists Y; +create table Y (a int primary key, b int, index id_b(b)); +insert into Y values (0,2),(2,2); +create table R (a int primary key, b int); +insert into R values (2,2); +# the max() limits the required rows at most one +# TODO(fangzhuhe): specify Y as the build side using hints +select /*+tidb_smj(R)*/ max(Y.a) from R join Y on R.a=Y.b where R.b <= Y.a; +set @@session.tidb_merge_join_concurrency = default; + From fd08ae3ebb1cf37e6ac0ea6bc8ef3c35cc3e331d Mon Sep 17 00:00:00 2001 From: Shenghui Wu <793703860@qq.com> Date: Thu, 26 Oct 2023 16:02:04 +0800 
Subject: [PATCH 10/33] executor: fix wrong ConnID for query out of memory quota (#47980) close pingcap/tidb#47979 --- pkg/executor/executor.go | 1 + pkg/sessionctx/variable/session.go | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go index 17c5d70435fc2..b660343f59c38 100644 --- a/pkg/executor/executor.go +++ b/pkg/executor/executor.go @@ -1996,6 +1996,7 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { vars.MemTracker.Killer = &vars.SQLKiller vars.DiskTracker.Killer = &vars.SQLKiller vars.SQLKiller.Reset() + vars.SQLKiller.ConnID = vars.ConnectionID vars.StmtCtx.TableStats = make(map[int64]interface{}) isAnalyze := false diff --git a/pkg/sessionctx/variable/session.go b/pkg/sessionctx/variable/session.go index 5df93fa0e0acc..23e7df757cbbd 100644 --- a/pkg/sessionctx/variable/session.go +++ b/pkg/sessionctx/variable/session.go @@ -2066,7 +2066,6 @@ func NewSessionVars(hctx HookContext) *SessionVars { vars.DiskTracker = disk.NewTracker(memory.LabelForSession, -1) vars.MemTracker = memory.NewTracker(memory.LabelForSession, vars.MemQuotaQuery) vars.MemTracker.IsRootTrackerOfSess = true - vars.SQLKiller.ConnID = vars.ConnectionID vars.MemTracker.Killer = &vars.SQLKiller for _, engine := range config.GetGlobalConfig().IsolationRead.Engines { From a565692dab68be58b12c582470e241e1169f2022 Mon Sep 17 00:00:00 2001 From: Lakshmi Narayanan Sreethar <1407970+lkshminarayanan@users.noreply.github.com> Date: Thu, 26 Oct 2023 15:32:06 +0530 Subject: [PATCH 11/33] session: display mDDLTableVersion in mysql.tidb table (#47900) close pingcap/tidb#39778 --- pkg/session/bootstrap.go | 37 ++++++++++++++++++- pkg/session/bootstrap_test.go | 68 +++++++++++++++++++++++++++++++++++ 2 files changed, 104 insertions(+), 1 deletion(-) diff --git a/pkg/session/bootstrap.go b/pkg/session/bootstrap.go index 01b19a458eb86..27799f7b1a700 100644 --- a/pkg/session/bootstrap.go +++ 
b/pkg/session/bootstrap.go @@ -751,6 +751,8 @@ const ( // The variable name in mysql.tidb table and it records the default value of // oom-action when upgrade from v3.0.x to v4.0.11+. tidbDefOOMAction = "default_oom_action" + // The variable name in mysql.tidb table and it records the current DDLTableVersion + tidbDDLTableVersion = "ddl_table_version" // Const for TiDB server version 2. version2 = 2 version3 = 3 @@ -1014,11 +1016,15 @@ const ( // version 177 // add `mysql.dist_framework_meta` version177 = 177 + + // version 178 + // write mDDLTableVersion into `mysql.tidb` table + version178 = 178 ) // currentBootstrapVersion is defined as a variable, so we can modify its value for testing. // please make sure this is the largest version -var currentBootstrapVersion int64 = version177 +var currentBootstrapVersion int64 = version178 // DDL owner key's expired time is ManagerSessionTTL seconds, we should wait the time and give more time to have a chance to finish it. var internalSQLTimeout = owner.ManagerSessionTTL + 15 @@ -1171,6 +1177,7 @@ var ( upgradeToVer175, upgradeToVer176, upgradeToVer177, + upgradeToVer178, } ) @@ -2846,6 +2853,32 @@ func upgradeToVer177(s Session, ver int64) { } } +// writeDDLTableVersion writes mDDLTableVersion into mysql.tidb +func writeDDLTableVersion(s Session) { + var err error + var ddlTableVersion meta.DDLTableVersion + err = kv.RunInNewTxn(kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap), s.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + t := meta.NewMeta(txn) + ddlTableVersion, err = t.CheckDDLTableVersion() + return err + }) + terror.MustNil(err) + mustExecute(s, `INSERT HIGH_PRIORITY INTO %n.%n VALUES (%?, %?, "DDL Table Version. 
Do not delete.") ON DUPLICATE KEY UPDATE VARIABLE_VALUE= %?`, + mysql.SystemDB, + mysql.TiDBTable, + tidbDDLTableVersion, + ddlTableVersion, + ddlTableVersion, + ) +} + +func upgradeToVer178(s Session, ver int64) { + if ver >= version178 { + return + } + writeDDLTableVersion(s) +} + func writeOOMAction(s Session) { comment := "oom-action is `log` by default in v3.0.x, `cancel` by default in v4.0.11+" mustExecute(s, `INSERT HIGH_PRIORITY INTO %n.%n VALUES (%?, %?, %?) ON DUPLICATE KEY UPDATE VARIABLE_VALUE= %?`, @@ -3102,6 +3135,8 @@ func doDMLWorks(s Session) { writeStmtSummaryVars(s) + writeDDLTableVersion(s) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) _, err := s.ExecuteInternal(ctx, "COMMIT") if err != nil { diff --git a/pkg/session/bootstrap_test.go b/pkg/session/bootstrap_test.go index 6b37120bec02b..a7c9965001eac 100644 --- a/pkg/session/bootstrap_test.go +++ b/pkg/session/bootstrap_test.go @@ -2118,3 +2118,71 @@ func TestTiDBUpgradeToVer177(t *testing.T) { MustExec(t, seV176, "SELECT * from mysql.dist_framework_meta") dom.Close() } + +func TestWriteDDLTableVersionToMySQLTiDB(t *testing.T) { + ctx := context.Background() + store, dom := CreateStoreAndBootstrap(t) + defer func() { require.NoError(t, store.Close()) }() + + txn, err := store.Begin() + require.NoError(t, err) + m := meta.NewMeta(txn) + ddlTableVer, err := m.CheckDDLTableVersion() + require.NoError(t, err) + + // Verify that 'ddl_table_version' has been set to the correct value + se := CreateSessionAndSetID(t, store) + r := MustExecToRecodeSet(t, se, fmt.Sprintf(`SELECT VARIABLE_VALUE from mysql.TiDB where VARIABLE_NAME='%s'`, tidbDDLTableVersion)) + req := r.NewChunk(nil) + err = r.Next(ctx, req) + require.NoError(t, err) + require.Equal(t, 1, req.NumRows()) + require.Equal(t, []byte(fmt.Sprintf("%d", ddlTableVer)), req.GetRow(0).GetBytes(0)) + require.NoError(t, r.Close()) + dom.Close() +} + +func TestWriteDDLTableVersionToMySQLTiDBWhenUpgradingTo178(t 
*testing.T) { + ctx := context.Background() + store, _ := CreateStoreAndBootstrap(t) + defer func() { require.NoError(t, store.Close()) }() + + txn, err := store.Begin() + require.NoError(t, err) + m := meta.NewMeta(txn) + ddlTableVer, err := m.CheckDDLTableVersion() + require.NoError(t, err) + + // bootstrap as version177 + ver177 := version177 + seV177 := CreateSessionAndSetID(t, store) + err = m.FinishBootstrap(int64(ver177)) + require.NoError(t, err) + MustExec(t, seV177, fmt.Sprintf("update mysql.tidb set variable_value=%d where variable_name='tidb_server_version'", ver177)) + // remove the ddl_table_version entry from mysql.tidb table + MustExec(t, seV177, fmt.Sprintf("delete from mysql.tidb where VARIABLE_NAME='%s'", tidbDDLTableVersion)) + err = txn.Commit(ctx) + require.NoError(t, err) + unsetStoreBootstrapped(store.UUID()) + ver, err := getBootstrapVersion(seV177) + require.NoError(t, err) + require.Equal(t, int64(ver177), ver) + + // upgrade to current version + domCurVer, err := BootstrapSession(store) + require.NoError(t, err) + defer domCurVer.Close() + seCurVer := CreateSessionAndSetID(t, store) + ver, err = getBootstrapVersion(seCurVer) + require.NoError(t, err) + require.Equal(t, currentBootstrapVersion, ver) + + // check if the DDLTableVersion has been set in the `mysql.tidb` table during upgrade + r := MustExecToRecodeSet(t, seCurVer, fmt.Sprintf(`SELECT VARIABLE_VALUE from mysql.TiDB where VARIABLE_NAME='%s'`, tidbDDLTableVersion)) + req := r.NewChunk(nil) + err = r.Next(ctx, req) + require.NoError(t, err) + require.Equal(t, 1, req.NumRows()) + require.Equal(t, []byte(fmt.Sprintf("%d", ddlTableVer)), req.GetRow(0).GetBytes(0)) + require.NoError(t, r.Close()) +} From e13113a14ae8db940c397966eef1ea69046bed12 Mon Sep 17 00:00:00 2001 From: YangKeao Date: Thu, 26 Oct 2023 20:12:34 +0800 Subject: [PATCH 12/33] types: fix the behavior of casting json string to integers (#48010) close pingcap/tidb#47864 --- pkg/types/convert.go | 15 ++++++++++----- 
tests/integrationtest/r/expression/json.result | 15 +++++++++++++++ tests/integrationtest/t/expression/json.test | 10 ++++++++++ 3 files changed, 35 insertions(+), 5 deletions(-) diff --git a/pkg/types/convert.go b/pkg/types/convert.go index fc98f1fb5a1e2..d1de90bdb1000 100644 --- a/pkg/types/convert.go +++ b/pkg/types/convert.go @@ -630,12 +630,17 @@ func ConvertJSONToInt(sc *stmtctx.StatementContext, j BinaryJSON, unsigned bool, return int64(u), sc.HandleOverflow(err, err) case JSONTypeCodeString: str := string(hack.String(j.GetString())) - if !unsigned { - r, e := StrToInt(sc.TypeCtxOrDefault(), str, false) - return r, sc.HandleOverflow(e, e) + // The behavior of casting json string as an integer is consistent with casting a string as an integer. + // See the `builtinCastStringAsIntSig` in `expression` pkg. The only difference is that this function + // doesn't append any warning. This behavior is compatible with MySQL. + isNegative := len(str) > 1 && str[0] == '-' + if !isNegative { + r, err := StrToUint(sc.TypeCtxOrDefault(), str, false) + return int64(r), sc.HandleOverflow(err, err) } - u, err := StrToUint(sc.TypeCtxOrDefault(), str, false) - return int64(u), sc.HandleOverflow(err, err) + + r, err := StrToInt(sc.TypeCtxOrDefault(), str, false) + return r, sc.HandleOverflow(err, err) } return 0, errors.New("Unknown type code in JSON") } diff --git a/tests/integrationtest/r/expression/json.result b/tests/integrationtest/r/expression/json.result index 8a1472456f31d..243a8388b0ab9 100644 --- a/tests/integrationtest/r/expression/json.result +++ b/tests/integrationtest/r/expression/json.result @@ -602,3 +602,18 @@ json_extract('[{"a": [1,2,3,4]}]', '$[0].a[0 to last]') select json_extract('[{"a": [1,2,3,4]}]', '$[0].a[0 to 2]'); json_extract('[{"a": [1,2,3,4]}]', '$[0].a[0 to 2]') [1, 2, 3] +drop table if exists t; +create table t (a json); +insert into t values ('"-1"'); +insert into t values ('"18446744073709551615"'); +insert into t values 
('"18446744073709552000"'); +select a, cast(a as unsigned) from t; +a cast(a as unsigned) +"-1" 18446744073709551615 +"18446744073709551615" 18446744073709551615 +"18446744073709552000" 18446744073709551615 +select a, cast(a as signed) from t; +a cast(a as signed) +"-1" -1 +"18446744073709551615" -1 +"18446744073709552000" -1 diff --git a/tests/integrationtest/t/expression/json.test b/tests/integrationtest/t/expression/json.test index ca41e0b701fea..142b46288ce18 100644 --- a/tests/integrationtest/t/expression/json.test +++ b/tests/integrationtest/t/expression/json.test @@ -359,3 +359,13 @@ select json_extract('[{"a": [1,2,3,4]}]', '$[0].a[1 to 100]'); select json_extract('[{"a": [1,2,3,4]}]', '$[0].a[0 to last]'); select json_extract('[{"a": [1,2,3,4]}]', '$[0].a[0 to 2]'); +# TestCastJSONStringToInteger +drop table if exists t; +create table t (a json); +insert into t values ('"-1"'); +insert into t values ('"18446744073709551615"'); +insert into t values ('"18446744073709552000"'); +-- sorted_result +select a, cast(a as unsigned) from t; +-- sorted_result +select a, cast(a as signed) from t; From a51ea16fb7e3b9d434f01f44bc33fb15315f64a6 Mon Sep 17 00:00:00 2001 From: Yuanjia Zhang Date: Fri, 27 Oct 2023 00:02:04 +0800 Subject: [PATCH 13/33] planner: introduce a new fix-control flag to control whether to cache plans that access generated columns (#48011) ref pingcap/tidb#45798 --- pkg/planner/core/plan_cache_test.go | 89 ++++++++++++++++++++++ pkg/planner/core/plan_cacheable_checker.go | 20 ++++- pkg/planner/util/fixcontrol/get.go | 2 + 3 files changed, 107 insertions(+), 4 deletions(-) diff --git a/pkg/planner/core/plan_cache_test.go b/pkg/planner/core/plan_cache_test.go index 3a70630733020..2c45bc31f0d48 100644 --- a/pkg/planner/core/plan_cache_test.go +++ b/pkg/planner/core/plan_cache_test.go @@ -339,6 +339,95 @@ func TestIssue38533(t *testing.T) { tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) } +func TestPlanCacheGeneratedCols(t 
*testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`set @@tidb_opt_fix_control = "45798:on"`) + tk.MustExec(`create table t1 (a int, info json, city varchar(64) as (JSON_UNQUOTE(JSON_EXTRACT(info, '$.city'))))`) + tk.MustExec(`create table t2 (a int, info json, city varchar(64) as (JSON_UNQUOTE(JSON_EXTRACT(info, '$.city'))) virtual)`) + tk.MustExec(`create table t3 (a int, info json, city varchar(64) as (JSON_UNQUOTE(JSON_EXTRACT(info, '$.city'))) stored)`) + tk.MustExec(`create table t4 (a int, info json, index zips( (CAST(info->'$.zipcode' AS UNSIGNED ARRAY))))`) + + tk.MustExec(`set @a=1`) + tk.MustExec(`set @b=2`) + + tk.MustExec(`prepare s1 from 'select * from t1 where a=?'`) + tk.MustQuery(`show warnings`).Check(testkit.Rows()) // no warning + tk.MustQuery(`execute s1 using @a`).Check(testkit.Rows()) + tk.MustQuery(`execute s1 using @b`).Check(testkit.Rows()) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows(`1`)) // hit cache + + tk.MustExec(`prepare s1 from 'select * from t2 where a=?'`) + tk.MustQuery(`show warnings`).Check(testkit.Rows()) // no warning + tk.MustQuery(`execute s1 using @a`).Check(testkit.Rows()) + tk.MustQuery(`execute s1 using @b`).Check(testkit.Rows()) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows(`1`)) // hit cache + + tk.MustExec(`prepare s1 from 'select * from t3 where a=?'`) + tk.MustQuery(`show warnings`).Check(testkit.Rows()) // no warning + tk.MustQuery(`execute s1 using @a`).Check(testkit.Rows()) + tk.MustQuery(`execute s1 using @b`).Check(testkit.Rows()) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows(`1`)) // hit cache + + tk.MustExec(`prepare s1 from 'select * from t4 where a=?'`) + tk.MustQuery(`show warnings`).Check(testkit.Rows()) // no warning + tk.MustQuery(`execute s1 using @a`).Check(testkit.Rows()) + tk.MustQuery(`execute s1 using @b`).Check(testkit.Rows()) + tk.MustQuery(`select 
@@last_plan_from_cache`).Check(testkit.Rows(`1`)) // hit cache +} + +func TestPlanCacheGeneratedCols2(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`set @@tidb_opt_fix_control = "45798:on"`) + tk.MustExec(`CREATE TABLE t1 ( + ipk varbinary(255) NOT NULL, + i_id varchar(45) DEFAULT NULL, + i_set_id varchar(45) DEFAULT NULL, + p_id varchar(45) DEFAULT NULL, + p_set_id varchar(45) DEFAULT NULL, + m_id bigint(20) DEFAULT NULL, + m_i_id varchar(127) DEFAULT NULL, + m_i_set_id varchar(127) DEFAULT NULL, + d json DEFAULT NULL, + p_sources json DEFAULT NULL, + nslc json DEFAULT NULL, + cl json DEFAULT NULL, + fii json DEFAULT NULL, + fpi json DEFAULT NULL, + PRIMARY KEY (ipk) /*T![clustered_index] CLUSTERED */, + UNIQUE KEY i_id (i_id), + KEY d ((cast(d as char(253) array))), + KEY m_i_id (m_i_id), + KEY m_i_set_id (m_i_set_id), + KEY fpi ((cast(fpi as unsigned array))), + KEY nslc ((cast(nslc as char(1000) array))), + KEY cl ((cast(cl as char(3000) array))), + KEY fii ((cast(fii as unsigned array))), + KEY m_id (m_id), + KEY i_set_id (i_set_id), + KEY m_i_and_m_id (m_i_id,m_id))`) + + tk.MustExec(`CREATE TABLE t2 ( + ipk varbinary(255) NOT NULL, + created_time bigint(20) DEFAULT NULL, + arrival_time bigint(20) DEFAULT NULL, + updated_time bigint(20) DEFAULT NULL, + timestamp_data json DEFAULT NULL, + PRIMARY KEY (ipk) /*T![clustered_index] CLUSTERED */)`) + + tk.MustExec(`prepare stmt from 'select * + from ( t1 left outer join t2 on ( t1 . ipk = t2 . ipk ) ) + where ( t1 . i_id = ? 
)'`) + tk.MustQuery(`show warnings`).Check(testkit.Rows()) // no warning + tk.MustExec(`set @a='a', @b='b'`) + tk.MustQuery(`execute stmt using @a`).Check(testkit.Rows()) + tk.MustQuery(`execute stmt using @b`).Check(testkit.Rows()) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows(`1`)) // hit cache +} + func TestInvalidRange(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) diff --git a/pkg/planner/core/plan_cacheable_checker.go b/pkg/planner/core/plan_cacheable_checker.go index 717f16d97d50e..3ecfa73f93b25 100644 --- a/pkg/planner/core/plan_cacheable_checker.go +++ b/pkg/planner/core/plan_cacheable_checker.go @@ -580,7 +580,7 @@ func isPhysicalPlanCacheable(sctx sessionctx.Context, p PhysicalPlan, paramNum, case *PhysicalMemTable: return false, "PhysicalMemTable plan is un-cacheable" case *PhysicalIndexMergeReader: - if x.AccessMVIndex { + if x.AccessMVIndex && !enablePlanCacheForGeneratedCols(sctx) { return false, "the plan with IndexMerge accessing Multi-Valued Index is un-cacheable" } underIndexMerge = true @@ -622,6 +622,15 @@ func getMaxParamLimit(sctx sessionctx.Context) int { return v } +func enablePlanCacheForGeneratedCols(sctx sessionctx.Context) bool { + // disable this by default since it's not well tested. + // TODO: complete its test and enable it by default. + if sctx == nil || sctx.GetSessionVars() == nil || sctx.GetSessionVars().GetOptimizerFixControlMap() == nil { + return false + } + return fixcontrol.GetBoolWithDefault(sctx.GetSessionVars().GetOptimizerFixControlMap(), fixcontrol.Fix45798, false) +} + // checkTableCacheable checks whether a query accessing this table is cacheable. 
func checkTableCacheable(ctx context.Context, sctx sessionctx.Context, schema infoschema.InfoSchema, node *ast.TableName, isNonPrep bool) (cacheable bool, reason string) { tableSchema := node.Schema @@ -653,9 +662,12 @@ func checkTableCacheable(ctx context.Context, sctx sessionctx.Context, schema in */ return false, "query accesses partitioned tables is un-cacheable" } - for _, col := range tb.Cols() { - if col.IsGenerated() { - return false, "query accesses generated columns is un-cacheable" + + if !enablePlanCacheForGeneratedCols(sctx) { + for _, col := range tb.Cols() { + if col.IsGenerated() { + return false, "query accesses generated columns is un-cacheable" + } } } if tb.Meta().TempTableType != model.TempTableNone { diff --git a/pkg/planner/util/fixcontrol/get.go b/pkg/planner/util/fixcontrol/get.go index fad759f363cf8..7c8c976e73dca 100644 --- a/pkg/planner/util/fixcontrol/get.go +++ b/pkg/planner/util/fixcontrol/get.go @@ -34,6 +34,8 @@ const ( Fix44855 uint64 = 44855 // Fix45132 controls whether to use access range row count to determine access path on the Skyline pruning. Fix45132 uint64 = 45132 + // Fix45798 controls whether to cache plans that access generated columns. + Fix45798 uint64 = 45798 ) // GetStr fetches the given key from the fix control map as a string type. 
From 9224f62ef802ea9112f3c16bd3942156d84852e6 Mon Sep 17 00:00:00 2001 From: EasonBall <592838129@qq.com> Date: Fri, 27 Oct 2023 11:11:05 +0800 Subject: [PATCH 14/33] disttask: fix removing meta when met network partition for so long then recover from it (#48005) close pingcap/tidb#47954 --- pkg/disttask/framework/framework_test.go | 3 + pkg/disttask/framework/scheduler/BUILD.bazel | 1 + pkg/disttask/framework/scheduler/manager.go | 94 +++++++++++++------- pkg/disttask/framework/storage/table_test.go | 1 + pkg/disttask/framework/storage/task_table.go | 3 +- pkg/domain/domain.go | 2 +- pkg/executor/set.go | 4 +- 7 files changed, 72 insertions(+), 36 deletions(-) diff --git a/pkg/disttask/framework/framework_test.go b/pkg/disttask/framework/framework_test.go index 286b2c86ee1a7..c59a9a4fcd570 100644 --- a/pkg/disttask/framework/framework_test.go +++ b/pkg/disttask/framework/framework_test.go @@ -525,13 +525,16 @@ func TestFrameworkSetLabel(t *testing.T) { RegisterTaskMeta(t, ctrl, &m, &testDispatcherExt{}) distContext := testkit.NewDistExecutionContext(t, 3) tk := testkit.NewTestKit(t, distContext.Store) + // 1. all "" role. DispatchTaskAndCheckSuccess("😁", t, &m) + // 2. one "background" role. tk.MustExec("set global tidb_service_scope=background") tk.MustQuery("select @@global.tidb_service_scope").Check(testkit.Rows("background")) tk.MustQuery("select @@tidb_service_scope").Check(testkit.Rows("background")) DispatchTaskAndCheckSuccess("😊", t, &m) + // 3. 2 "background" role. 
tk.MustExec("update mysql.dist_framework_meta set role = \"background\" where host = \":4001\"") DispatchTaskAndCheckSuccess("😆", t, &m) diff --git a/pkg/disttask/framework/scheduler/BUILD.bazel b/pkg/disttask/framework/scheduler/BUILD.bazel index b72f51527a99e..7d4d274cf0b80 100644 --- a/pkg/disttask/framework/scheduler/BUILD.bazel +++ b/pkg/disttask/framework/scheduler/BUILD.bazel @@ -22,6 +22,7 @@ go_library( "//pkg/metrics", "//pkg/resourcemanager/pool/spool", "//pkg/resourcemanager/util", + "//pkg/util", "//pkg/util/backoff", "//pkg/util/logutil", "@com_github_pingcap_errors//:errors", diff --git a/pkg/disttask/framework/scheduler/manager.go b/pkg/disttask/framework/scheduler/manager.go index d917d2cdac826..95c9aa99c9bba 100644 --- a/pkg/disttask/framework/scheduler/manager.go +++ b/pkg/disttask/framework/scheduler/manager.go @@ -25,8 +25,10 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/disttask/framework/proto" "github.com/pingcap/tidb/pkg/domain/infosync" + "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/resourcemanager/pool/spool" "github.com/pingcap/tidb/pkg/resourcemanager/util" + tidbutil "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/logutil" "go.uber.org/zap" ) @@ -34,9 +36,10 @@ import ( var ( schedulerPoolSize int32 = 4 // same as dispatcher - checkTime = 300 * time.Millisecond - retrySQLTimes = 3 - retrySQLInterval = 500 * time.Millisecond + checkTime = 300 * time.Millisecond + recoverMetaInterval = 90 * time.Second + retrySQLTimes = 30 + retrySQLInterval = 500 * time.Millisecond ) // ManagerBuilder is used to build a Manager. @@ -70,7 +73,7 @@ type Manager struct { } // id, it's the same as server id now, i.e. host:port. 
id string - wg sync.WaitGroup + wg tidbutil.WaitGroupWrapper ctx context.Context cancel context.CancelFunc logCtx context.Context @@ -97,36 +100,33 @@ func (b *ManagerBuilder) BuildManager(ctx context.Context, id string, taskTable return m, nil } -// Start starts the Manager. -func (m *Manager) Start() error { - logutil.Logger(m.logCtx).Debug("manager start") - var err error +func (m *Manager) initMeta() (err error) { for i := 0; i < retrySQLTimes; i++ { err = m.taskTable.StartManager(m.id, config.GetGlobalConfig().Instance.TiDBServiceScope) if err == nil { break } if i%10 == 0 { - logutil.Logger(m.logCtx).Warn("start manager failed", zap.String("scope", config.GetGlobalConfig().Instance.TiDBServiceScope), - zap.Int("retry times", retrySQLTimes), zap.Error(err)) + logutil.Logger(m.logCtx).Warn("start manager failed", + zap.String("scope", config.GetGlobalConfig().Instance.TiDBServiceScope), + zap.Int("retry times", i), + zap.Error(err)) } time.Sleep(retrySQLInterval) } - if err != nil { + return err +} + +// Start starts the Manager. +func (m *Manager) Start() error { + logutil.Logger(m.logCtx).Debug("manager start") + if err := m.initMeta(); err != nil { return err } - m.wg.Add(1) - go func() { - defer m.wg.Done() - m.fetchAndHandleRunnableTasks(m.ctx) - }() - - m.wg.Add(1) - go func() { - defer m.wg.Done() - m.fetchAndFastCancelTasks(m.ctx) - }() + m.wg.Run(m.fetchAndHandleRunnableTasksLoop) + m.wg.Run(m.fetchAndFastCancelTasksLoop) + m.wg.Run(m.recoverMetaLoop) return nil } @@ -138,12 +138,13 @@ func (m *Manager) Stop() { } // fetchAndHandleRunnableTasks fetches the runnable tasks from the global task table and handles them. 
-func (m *Manager) fetchAndHandleRunnableTasks(ctx context.Context) { +func (m *Manager) fetchAndHandleRunnableTasksLoop() { + defer tidbutil.Recover(metrics.LabelDomain, "fetchAndHandleRunnableTasksLoop", m.fetchAndHandleRunnableTasksLoop, false) ticker := time.NewTicker(checkTime) for { select { - case <-ctx.Done(): - logutil.Logger(m.logCtx).Info("fetchAndHandleRunnableTasks done") + case <-m.ctx.Done(): + logutil.Logger(m.logCtx).Info("fetchAndHandleRunnableTasksLoop done") return case <-ticker.C: tasks, err := m.taskTable.GetGlobalTasksInStates(proto.TaskStateRunning, proto.TaskStateReverting) @@ -151,19 +152,21 @@ func (m *Manager) fetchAndHandleRunnableTasks(ctx context.Context) { m.logErr(err) continue } - m.onRunnableTasks(ctx, tasks) + m.onRunnableTasks(m.ctx, tasks) } } } // fetchAndFastCancelTasks fetches the reverting/pausing tasks from the global task table and fast cancels them. -func (m *Manager) fetchAndFastCancelTasks(ctx context.Context) { +func (m *Manager) fetchAndFastCancelTasksLoop() { + defer tidbutil.Recover(metrics.LabelDomain, "fetchAndFastCancelTasksLoop", m.fetchAndFastCancelTasksLoop, false) + ticker := time.NewTicker(checkTime) for { select { - case <-ctx.Done(): + case <-m.ctx.Done(): m.cancelAllRunningTasks() - logutil.Logger(m.logCtx).Info("fetchAndFastCancelTasks done") + logutil.Logger(m.logCtx).Info("fetchAndFastCancelTasksLoop done") return case <-ticker.C: tasks, err := m.taskTable.GetGlobalTasksInStates(proto.TaskStateReverting) @@ -171,7 +174,7 @@ func (m *Manager) fetchAndFastCancelTasks(ctx context.Context) { m.logErr(err) continue } - m.onCanceledTasks(ctx, tasks) + m.onCanceledTasks(m.ctx, tasks) // cancel pending/running subtasks, and mark them as paused. pausingTasks, err := m.taskTable.GetGlobalTasksInStates(proto.TaskStatePausing) @@ -189,6 +192,9 @@ func (m *Manager) fetchAndFastCancelTasks(ctx context.Context) { // onRunnableTasks handles runnable tasks. 
func (m *Manager) onRunnableTasks(ctx context.Context, tasks []*proto.Task) { + if len(tasks) == 0 { + return + } tasks = m.filterAlreadyHandlingTasks(tasks) for _, task := range tasks { exist, err := m.taskTable.HasSubtasksInStates(m.id, task.ID, task.Step, @@ -221,6 +227,9 @@ func (m *Manager) onRunnableTasks(ctx context.Context, tasks []*proto.Task) { // onCanceledTasks cancels the running subtasks. func (m *Manager) onCanceledTasks(_ context.Context, tasks []*proto.Task) { + if len(tasks) == 0 { + return + } m.mu.RLock() defer m.mu.RUnlock() for _, task := range tasks { @@ -234,6 +243,9 @@ func (m *Manager) onCanceledTasks(_ context.Context, tasks []*proto.Task) { // onPausingTasks pauses/cancels the pending/running subtasks. func (m *Manager) onPausingTasks(tasks []*proto.Task) error { + if len(tasks) == 0 { + return nil + } m.mu.RLock() defer m.mu.RUnlock() for _, task := range tasks { @@ -250,6 +262,28 @@ func (m *Manager) onPausingTasks(tasks []*proto.Task) error { return nil } +// recoverMetaLoop inits and recovers dist_framework_meta for the tidb node running the scheduler manager. +// This is necessary when the TiDB node experiences a prolonged network partition +// and the dispatcher deletes `dist_framework_meta`. +// When the TiDB node recovers from the network partition, +// we need to re-insert the metadata. +func (m *Manager) recoverMetaLoop() { + defer tidbutil.Recover(metrics.LabelDomain, "recoverMetaLoop", m.recoverMetaLoop, false) + ticker := time.NewTicker(recoverMetaInterval) + for { + select { + case <-m.ctx.Done(): + logutil.Logger(m.logCtx).Info("recoverMetaLoop done") + return + case <-ticker.C: + if err := m.initMeta(); err != nil { + m.logErr(err) + continue + } + } + } +} + // cancelAllRunningTasks cancels all running tasks. 
func (m *Manager) cancelAllRunningTasks() { m.mu.RLock() diff --git a/pkg/disttask/framework/storage/table_test.go b/pkg/disttask/framework/storage/table_test.go index b711066211471..ae88f887660fd 100644 --- a/pkg/disttask/framework/storage/table_test.go +++ b/pkg/disttask/framework/storage/table_test.go @@ -417,6 +417,7 @@ func TestDistFrameworkMeta(t *testing.T) { require.NoError(t, sm.StartManager(":4000", "background")) require.NoError(t, sm.StartManager(":4001", "")) + require.NoError(t, sm.StartManager(":4002", "")) require.NoError(t, sm.StartManager(":4002", "background")) allNodes, err := sm.GetAllNodes() diff --git a/pkg/disttask/framework/storage/task_table.go b/pkg/disttask/framework/storage/task_table.go index 66736d7e6f3bf..73189b563eb1d 100644 --- a/pkg/disttask/framework/storage/task_table.go +++ b/pkg/disttask/framework/storage/task_table.go @@ -549,8 +549,7 @@ func (stm *TaskManager) StartSubtask(subtaskID int64) error { // StartManager insert the manager information into dist_framework_meta. 
func (stm *TaskManager) StartManager(tidbID string, role string) error { - _, err := stm.executeSQLWithNewSession(stm.ctx, `insert into mysql.dist_framework_meta values(%?, %?, DEFAULT) - on duplicate key update role = %?`, tidbID, role, role) + _, err := stm.executeSQLWithNewSession(stm.ctx, `replace into mysql.dist_framework_meta values(%?, %?, DEFAULT)`, tidbID, role) return err } diff --git a/pkg/domain/domain.go b/pkg/domain/domain.go index 2876702fc66de..1e2607a1da238 100644 --- a/pkg/domain/domain.go +++ b/pkg/domain/domain.go @@ -1481,7 +1481,7 @@ func (do *Domain) InitDistTaskLoop(ctx context.Context) error { func (do *Domain) distTaskFrameworkLoop(ctx context.Context, taskManager *storage.TaskManager, schedulerManager *scheduler.Manager, serverID string) { err := schedulerManager.Start() if err != nil { - logutil.BgLogger().Error("dist task scheduler manager failed", zap.Error(err)) + logutil.BgLogger().Error("dist task scheduler manager start failed", zap.Error(err)) return } logutil.BgLogger().Info("dist task scheduler manager started") diff --git a/pkg/executor/set.go b/pkg/executor/set.go index aa6cfaf796d3d..27288b32f6c63 100644 --- a/pkg/executor/set.go +++ b/pkg/executor/set.go @@ -166,9 +166,7 @@ func (e *SetExecutor) setSysVariable(ctx context.Context, name string, v *expres dom := domain.GetDomain(e.Ctx()) serverID := disttaskutil.GenerateSubtaskExecID(ctx, dom.DDL().GetID()) _, err = e.Ctx().(sqlexec.SQLExecutor).ExecuteInternal(ctx, - `update mysql.dist_framework_meta - set role = %? 
- where host = %?`, valStr, serverID) + `replace into mysql.dist_framework_meta values(%?, %?, DEFAULT)`, serverID, valStr) } return err } From dfd9080bcf49083e52d6cb714668da9b881dc54d Mon Sep 17 00:00:00 2001 From: Lynn Date: Fri, 27 Oct 2023 11:53:03 +0800 Subject: [PATCH 15/33] pkg/ddl: remove the job version check for deleteRange (#47927) close pingcap/tidb#47916 --- pkg/ddl/ddl_worker.go | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/pkg/ddl/ddl_worker.go b/pkg/ddl/ddl_worker.go index 96d59251ff32b..12bfeef693ed2 100644 --- a/pkg/ddl/ddl_worker.go +++ b/pkg/ddl/ddl_worker.go @@ -537,16 +537,6 @@ func needUpdateRawArgs(job *model.Job, meetErr bool) bool { return true } -func (w *worker) deleteRange(ctx context.Context, job *model.Job) error { - var err error - if job.Version <= currentVersion { - err = w.delRangeManager.addDelRangeJob(ctx, job) - } else { - err = dbterror.ErrInvalidDDLJobVersion.GenWithStackByArgs(job.Version, currentVersion) - } - return errors.Trace(err) -} - func jobNeedGC(job *model.Job) bool { if !job.IsCancelled() { if job.Warning != nil && dbterror.ErrCantDropFieldOrKey.Equal(job.Warning) { @@ -587,7 +577,7 @@ func (w *worker) finishDDLJob(t *meta.Meta, job *model.Job) (err error) { }() if jobNeedGC(job) { - err = w.deleteRange(w.ctx, job) + err = w.delRangeManager.addDelRangeJob(w.ctx, job) if err != nil { return errors.Trace(err) } From 3d353f113fb9ecd9ff6b140dcb743d59170a4f34 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Oct 2023 07:39:34 +0000 Subject: [PATCH 16/33] build(deps): bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0 (#48026) --- DEPS.bzl | 12 ++++++------ go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/DEPS.bzl b/DEPS.bzl index 9ef127dcda934..fcb826aa24c2d 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -6027,13 +6027,13 @@ def go_deps(): name = 
"com_github_prometheus_client_golang", build_file_proto_mode = "disable_global", importpath = "github.com/prometheus/client_golang", - sha256 = "0167cee686b836da39815e4a7ea64ecc245f6a3fb9b3c3f729941ed55da7dd4f", - strip_prefix = "github.com/prometheus/client_golang@v1.16.0", + sha256 = "db3c3279e5f3377cc21bf7f353ba67a7472321fad5562990cd55adc2127538f9", + strip_prefix = "github.com/prometheus/client_golang@v1.17.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.16.0.zip", - "http://ats.apps.svc/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.16.0.zip", - "https://cache.hawkingrei.com/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.16.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.16.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.17.0.zip", + "http://ats.apps.svc/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.17.0.zip", + "https://cache.hawkingrei.com/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.17.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.17.0.zip", ], ) go_repository( diff --git a/go.mod b/go.mod index 9948af1ea28dd..b7c94d83f5e00 100644 --- a/go.mod +++ b/go.mod @@ -87,7 +87,7 @@ require ( github.com/pingcap/tidb/pkg/parser v0.0.0-20211011031125-9b13dc409c5e github.com/pingcap/tipb v0.0.0-20230919054518-dfd7d194838f github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/client_golang v1.17.0 github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.44.0 github.com/prometheus/prometheus v0.0.0-20190525122359-d20e84d0fb64 diff 
--git a/go.sum b/go.sum index 8e88eee755d31..c602e5e330848 100644 --- a/go.sum +++ b/go.sum @@ -871,8 +871,8 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= From d1fc638f5b58775e766c4107fd74092b5fcb8278 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Fri, 27 Oct 2023 16:20:05 +0800 Subject: [PATCH 17/33] *: record end key rather than max key (#47648) close pingcap/tidb#47535 --- br/pkg/lightning/backend/backend.go | 4 +- br/pkg/lightning/backend/external/engine.go | 16 ++-- br/pkg/lightning/backend/external/util.go | 82 ++++++++++--------- .../lightning/backend/external/util_test.go | 38 +++------ br/pkg/lightning/backend/local/engine.go | 8 +- br/pkg/lightning/backend/local/local.go | 16 ++-- br/pkg/lightning/backend/local/local_test.go | 4 +- br/pkg/lightning/common/engine.go | 4 +- pkg/ddl/backfilling_dispatcher.go | 48 +++++++---- pkg/ddl/backfilling_dispatcher_test.go | 12 +-- 
pkg/ddl/backfilling_dist_scheduler.go | 6 +- pkg/ddl/backfilling_import_cloud.go | 4 +- pkg/ddl/backfilling_read_index.go | 20 ++--- .../importinto/dispatcher_testkit_test.go | 26 ++---- pkg/disttask/importinto/planner.go | 31 ++++--- pkg/disttask/importinto/planner_test.go | 38 ++++----- pkg/disttask/importinto/proto.go | 2 + pkg/disttask/importinto/scheduler.go | 4 +- 18 files changed, 175 insertions(+), 188 deletions(-) diff --git a/br/pkg/lightning/backend/backend.go b/br/pkg/lightning/backend/backend.go index f1ebb9484843c..fbcb9e1f00dfc 100644 --- a/br/pkg/lightning/backend/backend.go +++ b/br/pkg/lightning/backend/backend.go @@ -106,8 +106,8 @@ type ExternalEngineConfig struct { StorageURI string DataFiles []string StatFiles []string - MinKey []byte - MaxKey []byte + StartKey []byte + EndKey []byte SplitKeys [][]byte RegionSplitSize int64 // TotalFileSize can be an estimated value. diff --git a/br/pkg/lightning/backend/external/engine.go b/br/pkg/lightning/backend/external/engine.go index 55a8c41371ee3..3688ed851a630 100644 --- a/br/pkg/lightning/backend/external/engine.go +++ b/br/pkg/lightning/backend/external/engine.go @@ -42,8 +42,8 @@ type Engine struct { storage storage.ExternalStorage dataFiles []string statsFiles []string - minKey []byte - maxKey []byte + startKey []byte + endKey []byte splitKeys [][]byte regionSplitSize int64 bufPool *membuf.Pool @@ -66,8 +66,8 @@ func NewExternalEngine( storage storage.ExternalStorage, dataFiles []string, statsFiles []string, - minKey []byte, - maxKey []byte, + startKey []byte, + endKey []byte, splitKeys [][]byte, regionSplitSize int64, keyAdapter common.KeyAdapter, @@ -82,8 +82,8 @@ func NewExternalEngine( storage: storage, dataFiles: dataFiles, statsFiles: statsFiles, - minKey: minKey, - maxKey: maxKey, + startKey: startKey, + endKey: endKey, splitKeys: splitKeys, regionSplitSize: regionSplitSize, bufPool: membuf.NewPool(), @@ -305,8 +305,8 @@ func (e *Engine) ID() string { } // GetKeyRange implements 
common.Engine. -func (e *Engine) GetKeyRange() (firstKey []byte, lastKey []byte, err error) { - return e.minKey, e.maxKey, nil +func (e *Engine) GetKeyRange() (startKey []byte, endKey []byte, err error) { + return e.startKey, e.endKey, nil } // SplitRanges split the ranges by split keys provided by external engine. diff --git a/br/pkg/lightning/backend/external/util.go b/br/pkg/lightning/backend/external/util.go index 83f3a8fd176d7..6af609bf6eea4 100644 --- a/br/pkg/lightning/backend/external/util.go +++ b/br/pkg/lightning/backend/external/util.go @@ -206,41 +206,39 @@ func GetMaxOverlapping(points []Endpoint) int64 { // SortedKVMeta is the meta of sorted kv. type SortedKVMeta struct { - MinKey []byte `json:"min-key"` - MaxKey []byte `json:"max-key"` - TotalKVSize uint64 `json:"total-kv-size"` - // seems those 2 fields always generated from MultipleFilesStats, - // maybe remove them later. - DataFiles []string `json:"data-files"` - StatFiles []string `json:"stat-files"` + StartKey []byte `json:"start-key"` + EndKey []byte `json:"end-key"` // exclusive + TotalKVSize uint64 `json:"total-kv-size"` MultipleFilesStats []MultipleFilesStat `json:"multiple-files-stats"` } -// NewSortedKVMeta creates a SortedKVMeta from a WriterSummary. +// NewSortedKVMeta creates a SortedKVMeta from a WriterSummary. If the summary +// is empty, it will return a pointer to zero SortedKVMeta. 
func NewSortedKVMeta(summary *WriterSummary) *SortedKVMeta { - meta := &SortedKVMeta{ - MinKey: summary.Min.Clone(), - MaxKey: summary.Max.Clone(), + if summary == nil || (len(summary.Min) == 0 && len(summary.Max) == 0) { + return &SortedKVMeta{} + } + return &SortedKVMeta{ + StartKey: summary.Min.Clone(), + EndKey: summary.Max.Clone().Next(), TotalKVSize: summary.TotalSize, MultipleFilesStats: summary.MultipleFilesStats, } - for _, f := range summary.MultipleFilesStats { - for _, filename := range f.Filenames { - meta.DataFiles = append(meta.DataFiles, filename[0]) - meta.StatFiles = append(meta.StatFiles, filename[1]) - } - } - return meta } // Merge merges the other SortedKVMeta into this one. func (m *SortedKVMeta) Merge(other *SortedKVMeta) { - m.MinKey = NotNilMin(m.MinKey, other.MinKey) - m.MaxKey = NotNilMax(m.MaxKey, other.MaxKey) - m.TotalKVSize += other.TotalKVSize + if len(other.StartKey) == 0 && len(other.EndKey) == 0 { + return + } + if len(m.StartKey) == 0 && len(m.EndKey) == 0 { + *m = *other + return + } - m.DataFiles = append(m.DataFiles, other.DataFiles...) - m.StatFiles = append(m.StatFiles, other.StatFiles...) + m.StartKey = BytesMin(m.StartKey, other.StartKey) + m.EndKey = BytesMax(m.EndKey, other.EndKey) + m.TotalKVSize += other.TotalKVSize m.MultipleFilesStats = append(m.MultipleFilesStats, other.MultipleFilesStats...) } @@ -250,28 +248,38 @@ func (m *SortedKVMeta) MergeSummary(summary *WriterSummary) { m.Merge(NewSortedKVMeta(summary)) } -// NotNilMin returns the smallest of a and b, ignoring nil values. -func NotNilMin(a, b []byte) []byte { - if len(a) == 0 { - return b +// GetDataFiles returns all data files in the meta. +func (m *SortedKVMeta) GetDataFiles() []string { + var ret []string + for _, stat := range m.MultipleFilesStats { + for _, files := range stat.Filenames { + ret = append(ret, files[0]) + } } - if len(b) == 0 { - return a + return ret +} + +// GetStatFiles returns all stat files in the meta. 
+func (m *SortedKVMeta) GetStatFiles() []string { + var ret []string + for _, stat := range m.MultipleFilesStats { + for _, files := range stat.Filenames { + ret = append(ret, files[1]) + } } + return ret +} + +// BytesMin returns the smallest of byte slice a and b. +func BytesMin(a, b []byte) []byte { if bytes.Compare(a, b) < 0 { return a } return b } -// NotNilMax returns the largest of a and b, ignoring nil values. -func NotNilMax(a, b []byte) []byte { - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } +// BytesMax returns the largest of byte slice a and b. +func BytesMax(a, b []byte) []byte { if bytes.Compare(a, b) > 0 { return a } diff --git a/br/pkg/lightning/backend/external/util_test.go b/br/pkg/lightning/backend/external/util_test.go index 360dfd23c7977..fd8bf2cf7434e 100644 --- a/br/pkg/lightning/backend/external/util_test.go +++ b/br/pkg/lightning/backend/external/util_test.go @@ -268,26 +268,20 @@ func TestSortedKVMeta(t *testing.T) { }, } meta0 := NewSortedKVMeta(summary[0]) - require.Equal(t, []byte("a"), meta0.MinKey) - require.Equal(t, []byte("b"), meta0.MaxKey) + require.Equal(t, []byte("a"), meta0.StartKey) + require.Equal(t, []byte{'b', 0}, meta0.EndKey) require.Equal(t, uint64(123), meta0.TotalKVSize) - require.Equal(t, []string{"f1", "f2"}, meta0.DataFiles) - require.Equal(t, []string{"stat1", "stat2"}, meta0.StatFiles) require.Equal(t, summary[0].MultipleFilesStats, meta0.MultipleFilesStats) meta1 := NewSortedKVMeta(summary[1]) - require.Equal(t, []byte("x"), meta1.MinKey) - require.Equal(t, []byte("y"), meta1.MaxKey) + require.Equal(t, []byte("x"), meta1.StartKey) + require.Equal(t, []byte{'y', 0}, meta1.EndKey) require.Equal(t, uint64(177), meta1.TotalKVSize) - require.Equal(t, []string{"f3", "f4"}, meta1.DataFiles) - require.Equal(t, []string{"stat3", "stat4"}, meta1.StatFiles) require.Equal(t, summary[1].MultipleFilesStats, meta1.MultipleFilesStats) meta0.MergeSummary(summary[1]) - require.Equal(t, []byte("a"), 
meta0.MinKey) - require.Equal(t, []byte("y"), meta0.MaxKey) + require.Equal(t, []byte("a"), meta0.StartKey) + require.Equal(t, []byte{'y', 0}, meta0.EndKey) require.Equal(t, uint64(300), meta0.TotalKVSize) - require.Equal(t, []string{"f1", "f2", "f3", "f4"}, meta0.DataFiles) - require.Equal(t, []string{"stat1", "stat2", "stat3", "stat4"}, meta0.StatFiles) mergedStats := append([]MultipleFilesStat{}, summary[0].MultipleFilesStats...) mergedStats = append(mergedStats, summary[1].MultipleFilesStats...) require.Equal(t, mergedStats, meta0.MultipleFilesStats) @@ -298,21 +292,9 @@ func TestSortedKVMeta(t *testing.T) { } func TestKeyMinMax(t *testing.T) { - require.Equal(t, []byte(nil), NotNilMin(nil, nil)) - require.Equal(t, []byte{}, NotNilMin(nil, []byte{})) - require.Equal(t, []byte(nil), NotNilMin([]byte{}, nil)) - require.Equal(t, []byte("a"), NotNilMin([]byte("a"), nil)) - require.Equal(t, []byte("a"), NotNilMin([]byte("a"), []byte{})) - require.Equal(t, []byte("a"), NotNilMin(nil, []byte("a"))) - require.Equal(t, []byte("a"), NotNilMin([]byte("a"), []byte("b"))) - require.Equal(t, []byte("a"), NotNilMin([]byte("b"), []byte("a"))) + require.Equal(t, []byte("a"), BytesMin([]byte("a"), []byte("b"))) + require.Equal(t, []byte("a"), BytesMin([]byte("b"), []byte("a"))) - require.Equal(t, []byte(nil), NotNilMax(nil, nil)) - require.Equal(t, []byte{}, NotNilMax(nil, []byte{})) - require.Equal(t, []byte(nil), NotNilMax([]byte{}, nil)) - require.Equal(t, []byte("a"), NotNilMax([]byte("a"), nil)) - require.Equal(t, []byte("a"), NotNilMax([]byte("a"), []byte{})) - require.Equal(t, []byte("a"), NotNilMax(nil, []byte("a"))) - require.Equal(t, []byte("b"), NotNilMax([]byte("a"), []byte("b"))) - require.Equal(t, []byte("b"), NotNilMax([]byte("b"), []byte("a"))) + require.Equal(t, []byte("b"), BytesMax([]byte("a"), []byte("b"))) + require.Equal(t, []byte("b"), BytesMax([]byte("b"), []byte("a"))) } diff --git a/br/pkg/lightning/backend/local/engine.go 
b/br/pkg/lightning/backend/local/engine.go index 986e6cff57d3b..d70484c84727a 100644 --- a/br/pkg/lightning/backend/local/engine.go +++ b/br/pkg/lightning/backend/local/engine.go @@ -291,8 +291,12 @@ func (e *Engine) ID() string { } // GetKeyRange implements common.Engine. -func (e *Engine) GetKeyRange() (firstKey []byte, lastKey []byte, err error) { - return e.GetFirstAndLastKey(nil, nil) +func (e *Engine) GetKeyRange() (startKey []byte, endKey []byte, err error) { + firstLey, lastKey, err := e.GetFirstAndLastKey(nil, nil) + if err != nil { + return nil, nil, errors.Trace(err) + } + return firstLey, nextKey(lastKey), nil } // SplitRanges gets size properties from pebble and split ranges according to size/keys limit. diff --git a/br/pkg/lightning/backend/local/local.go b/br/pkg/lightning/backend/local/local.go index 0f8610c3aebd5..50c0a20a5cd6b 100644 --- a/br/pkg/lightning/backend/local/local.go +++ b/br/pkg/lightning/backend/local/local.go @@ -961,8 +961,8 @@ func (local *Backend) CloseEngine(ctx context.Context, cfg *backend.EngineConfig store, externalCfg.DataFiles, externalCfg.StatFiles, - externalCfg.MinKey, - externalCfg.MaxKey, + externalCfg.StartKey, + externalCfg.EndKey, externalCfg.SplitKeys, externalCfg.RegionSplitSize, local.keyAdapter, @@ -1073,28 +1073,26 @@ func readAndSplitIntoRange( sizeLimit int64, keysLimit int64, ) ([]common.Range, error) { - firstKey, lastKey, err := engine.GetKeyRange() + startKey, endKey, err := engine.GetKeyRange() if err != nil { return nil, err } - if firstKey == nil { + if startKey == nil { return nil, errors.New("could not find first pair") } - endKey := nextKey(lastKey) - engineFileTotalSize, engineFileLength := engine.KVStatistics() if engineFileTotalSize <= sizeLimit && engineFileLength <= keysLimit { - ranges := []common.Range{{Start: firstKey, End: endKey}} + ranges := []common.Range{{Start: startKey, End: endKey}} return ranges, nil } logger := log.FromContext(ctx).With(zap.String("engine", engine.ID())) - ranges, 
err := engine.SplitRanges(firstKey, endKey, sizeLimit, keysLimit, logger) + ranges, err := engine.SplitRanges(startKey, endKey, sizeLimit, keysLimit, logger) logger.Info("split engine key ranges", zap.Int64("totalSize", engineFileTotalSize), zap.Int64("totalCount", engineFileLength), - logutil.Key("firstKey", firstKey), logutil.Key("lastKey", lastKey), + logutil.Key("startKey", startKey), logutil.Key("endKey", endKey), zap.Int("ranges", len(ranges)), zap.Error(err)) return ranges, err } diff --git a/br/pkg/lightning/backend/local/local_test.go b/br/pkg/lightning/backend/local/local_test.go index e86fa421bc531..fac02d5fc013a 100644 --- a/br/pkg/lightning/backend/local/local_test.go +++ b/br/pkg/lightning/backend/local/local_test.go @@ -2257,8 +2257,8 @@ func TestExternalEngine(t *testing.T) { StorageURI: storageURI, DataFiles: dataFiles, StatFiles: statFiles, - MinKey: keys[0], - MaxKey: keys[99], + StartKey: keys[0], + EndKey: endKey, SplitKeys: [][]byte{keys[30], keys[60], keys[90]}, TotalFileSize: int64(config.SplitRegionSize) + 1, TotalKVCount: int64(config.SplitRegionKeys) + 1, diff --git a/br/pkg/lightning/common/engine.go b/br/pkg/lightning/common/engine.go index 136e4edd0aa86..559f0058e37ab 100644 --- a/br/pkg/lightning/common/engine.go +++ b/br/pkg/lightning/common/engine.go @@ -38,8 +38,8 @@ type Engine interface { KVStatistics() (totalKVSize int64, totalKVCount int64) // ImportedStatistics returns the imported kv size and imported kv count. ImportedStatistics() (importedKVSize int64, importedKVCount int64) - // GetKeyRange returns the key range of the engine. Both are inclusive. - GetKeyRange() (firstKey []byte, lastKey []byte, err error) + // GetKeyRange returns the key range [startKey, endKey) of the engine. + GetKeyRange() (startKey []byte, endKey []byte, err error) // SplitRanges splits the range [startKey, endKey) into multiple ranges. 
SplitRanges(startKey, endKey []byte, sizeLimit, keysLimit int64, logger log.Logger) ([]Range, error) Close() error diff --git a/pkg/ddl/backfilling_dispatcher.go b/pkg/ddl/backfilling_dispatcher.go index 7d8b3acccff6f..cb235c2ceb045 100644 --- a/pkg/ddl/backfilling_dispatcher.go +++ b/pkg/ddl/backfilling_dispatcher.go @@ -320,7 +320,12 @@ func generateNonPartitionPlan(d *ddl, tblInfo *model.TableInfo, job *model.Job) end = len(recordRegionMetas) } batch := recordRegionMetas[i:end] - subTaskMeta := &BackfillSubTaskMeta{StartKey: batch[0].StartKey(), EndKey: batch[len(batch)-1].EndKey()} + subTaskMeta := &BackfillSubTaskMeta{ + SortedKVMeta: external.SortedKVMeta{ + StartKey: batch[0].StartKey(), + EndKey: batch[len(batch)-1].EndKey(), + }, + } if i == 0 { subTaskMeta.StartKey = startKey } @@ -381,7 +386,7 @@ func generateGlobalSortIngestPlan( step proto.Step, logger *zap.Logger, ) ([][]byte, error) { - firstKey, lastKey, totalSize, dataFiles, statFiles, err := getSummaryFromLastStep(taskHandle, task.ID, step) + startKeyFromSumm, endKeyFromSumm, totalSize, dataFiles, statFiles, err := getSummaryFromLastStep(taskHandle, task.ID, step) if err != nil { return nil, err } @@ -402,7 +407,7 @@ func generateGlobalSortIngestPlan( }() metaArr := make([][]byte, 0, 16) - startKey := firstKey + startKey := startKeyFromSumm var endKey kv.Key for { endKeyOfGroup, dataFiles, statFiles, rangeSplitKeys, err := splitter.SplitOneRangesGroup() @@ -410,7 +415,7 @@ func generateGlobalSortIngestPlan( return nil, err } if len(endKeyOfGroup) == 0 { - endKey = lastKey.Next() + endKey = endKeyFromSumm } else { endKey = kv.Key(endKeyOfGroup).Clone() } @@ -423,12 +428,12 @@ func generateGlobalSortIngestPlan( } m := &BackfillSubTaskMeta{ SortedKVMeta: external.SortedKVMeta{ - MinKey: startKey, - MaxKey: endKey, - DataFiles: dataFiles, - StatFiles: statFiles, + StartKey: startKey, + EndKey: endKey, TotalKVSize: totalSize / uint64(len(instanceIDs)), }, + DataFiles: dataFiles, + StatFiles: statFiles, 
RangeSplitKeys: rangeSplitKeys, } metaBytes, err := json.Marshal(m) @@ -483,9 +488,7 @@ func generateMergePlan( end = len(dataFiles) } m := &BackfillSubTaskMeta{ - SortedKVMeta: external.SortedKVMeta{ - DataFiles: dataFiles[start:end], - }, + DataFiles: dataFiles[start:end], } metaBytes, err := json.Marshal(m) if err != nil { @@ -542,12 +545,11 @@ func getSummaryFromLastStep( taskHandle dispatcher.TaskHandle, gTaskID int64, step proto.Step, -) (min, max kv.Key, totalKVSize uint64, dataFiles, statFiles []string, err error) { +) (startKey, endKey kv.Key, totalKVSize uint64, dataFiles, statFiles []string, err error) { subTaskMetas, err := taskHandle.GetPreviousSubtaskMetas(gTaskID, step) if err != nil { return nil, nil, 0, nil, nil, errors.Trace(err) } - var minKey, maxKey kv.Key allDataFiles := make([]string, 0, 16) allStatFiles := make([]string, 0, 16) for _, subTaskMeta := range subTaskMetas { @@ -556,10 +558,22 @@ func getSummaryFromLastStep( if err != nil { return nil, nil, 0, nil, nil, errors.Trace(err) } - // Skip empty subtask.MinKey/MaxKey because it means + // Skip empty subtask.StartKey/EndKey because it means // no records need to be written in this subtask. - minKey = external.NotNilMin(minKey, subtask.MinKey) - maxKey = external.NotNilMax(maxKey, subtask.MaxKey) + if subtask.StartKey == nil || subtask.EndKey == nil { + continue + } + + if len(startKey) == 0 { + startKey = subtask.StartKey + } else { + startKey = external.BytesMin(startKey, subtask.StartKey) + } + if len(endKey) == 0 { + endKey = subtask.EndKey + } else { + endKey = external.BytesMax(endKey, subtask.EndKey) + } totalKVSize += subtask.TotalKVSize for _, stat := range subtask.MultipleFilesStats { @@ -569,7 +583,7 @@ func getSummaryFromLastStep( } } } - return minKey, maxKey, totalKVSize, allDataFiles, allStatFiles, nil + return startKey, endKey, totalKVSize, allDataFiles, allStatFiles, nil } // StepStr convert proto.Step to string. 
diff --git a/pkg/ddl/backfilling_dispatcher_test.go b/pkg/ddl/backfilling_dispatcher_test.go index 681ea5511cb9b..8093e6a52f610 100644 --- a/pkg/ddl/backfilling_dispatcher_test.go +++ b/pkg/ddl/backfilling_dispatcher_test.go @@ -186,11 +186,9 @@ func TestBackfillingDispatcherGlobalSortMode(t *testing.T) { // update meta, same as import into. sortStepMeta := &ddl.BackfillSubTaskMeta{ SortedKVMeta: external.SortedKVMeta{ - MinKey: []byte("ta"), - MaxKey: []byte("tc"), + StartKey: []byte("ta"), + EndKey: []byte("tc"), TotalKVSize: 12, - DataFiles: []string{"gs://sort-bucket/data/1"}, - StatFiles: []string{"gs://sort-bucket/data/1.stat"}, MultipleFilesStats: []external.MultipleFilesStat{ { Filenames: [][2]string{ @@ -227,11 +225,9 @@ func TestBackfillingDispatcherGlobalSortMode(t *testing.T) { require.NoError(t, err) mergeSortStepMeta := &ddl.BackfillSubTaskMeta{ SortedKVMeta: external.SortedKVMeta{ - MinKey: []byte("ta"), - MaxKey: []byte("tc"), + StartKey: []byte("ta"), + EndKey: []byte("tc"), TotalKVSize: 12, - DataFiles: []string{"gs://sort-bucket/data/1"}, - StatFiles: []string{"gs://sort-bucket/data/1.stat"}, MultipleFilesStats: []external.MultipleFilesStat{ { Filenames: [][2]string{ diff --git a/pkg/ddl/backfilling_dist_scheduler.go b/pkg/ddl/backfilling_dist_scheduler.go index bf825d3d3ed14..afe756be494d3 100644 --- a/pkg/ddl/backfilling_dist_scheduler.go +++ b/pkg/ddl/backfilling_dist_scheduler.go @@ -47,11 +47,11 @@ type BackfillGlobalMeta struct { // BackfillSubTaskMeta is the sub-task meta for backfilling index. 
type BackfillSubTaskMeta struct { - PhysicalTableID int64 `json:"physical_table_id"` - StartKey []byte `json:"start_key"` - EndKey []byte `json:"end_key"` + PhysicalTableID int64 `json:"physical_table_id"` RangeSplitKeys [][]byte `json:"range_split_keys"` + DataFiles []string `json:"data-files"` + StatFiles []string `json:"stat-files"` external.SortedKVMeta `json:",inline"` } diff --git a/pkg/ddl/backfilling_import_cloud.go b/pkg/ddl/backfilling_import_cloud.go index 546ffdd972def..82dfef011d2d8 100644 --- a/pkg/ddl/backfilling_import_cloud.go +++ b/pkg/ddl/backfilling_import_cloud.go @@ -83,8 +83,8 @@ func (m *cloudImportExecutor) RunSubtask(ctx context.Context, subtask *proto.Sub StorageURI: m.cloudStoreURI, DataFiles: sm.DataFiles, StatFiles: sm.StatFiles, - MinKey: sm.MinKey, - MaxKey: sm.MaxKey, + StartKey: sm.StartKey, + EndKey: sm.EndKey, SplitKeys: sm.RangeSplitKeys, TotalFileSize: int64(sm.TotalKVSize), TotalKVCount: 0, diff --git a/pkg/ddl/backfilling_read_index.go b/pkg/ddl/backfilling_read_index.go index 6bc7ee52979ac..1f6427ee2f9c2 100644 --- a/pkg/ddl/backfilling_read_index.go +++ b/pkg/ddl/backfilling_read_index.go @@ -56,8 +56,6 @@ type readIndexSummary struct { minKey []byte maxKey []byte totalSize uint64 - dataFiles []string - statFiles []string stats []external.MultipleFilesStat mu sync.Mutex } @@ -175,16 +173,18 @@ func (r *readIndexExecutor) OnFinished(ctx context.Context, subtask *proto.Subta } sum, _ := r.subtaskSummary.LoadAndDelete(subtask.ID) s := sum.(*readIndexSummary) - subtaskMeta.MinKey = s.minKey - subtaskMeta.MaxKey = s.maxKey + subtaskMeta.StartKey = s.minKey + subtaskMeta.EndKey = kv.Key(s.maxKey).Next() subtaskMeta.TotalKVSize = s.totalSize - subtaskMeta.DataFiles = s.dataFiles - subtaskMeta.StatFiles = s.statFiles subtaskMeta.MultipleFilesStats = s.stats + fileCnt := 0 + for _, stat := range s.stats { + fileCnt += len(stat.Filenames) + } logutil.Logger(ctx).Info("get key boundary on subtask finished", zap.String("min", 
hex.EncodeToString(s.minKey)), zap.String("max", hex.EncodeToString(s.maxKey)), - zap.Int("fileCount", len(s.dataFiles)), + zap.Int("fileCount", fileCnt), zap.Uint64("totalSize", s.totalSize)) meta, err := json.Marshal(subtaskMeta) if err != nil { @@ -268,12 +268,6 @@ func (r *readIndexExecutor) buildExternalStorePipeline( } s.totalSize += summary.TotalSize s.stats = append(s.stats, summary.MultipleFilesStats...) - for _, f := range summary.MultipleFilesStats { - for _, filename := range f.Filenames { - s.dataFiles = append(s.dataFiles, filename[0]) - s.statFiles = append(s.statFiles, filename[1]) - } - } s.mu.Unlock() } counter := metrics.BackfillTotalCounter.WithLabelValues( diff --git a/pkg/disttask/importinto/dispatcher_testkit_test.go b/pkg/disttask/importinto/dispatcher_testkit_test.go index 269cbee988979..e9106c7c4fb00 100644 --- a/pkg/disttask/importinto/dispatcher_testkit_test.go +++ b/pkg/disttask/importinto/dispatcher_testkit_test.go @@ -239,11 +239,9 @@ func TestDispatcherExtGlobalSort(t *testing.T) { require.NoError(t, err) sortStepMeta := &importinto.ImportStepMeta{ SortedDataMeta: &external.SortedKVMeta{ - MinKey: []byte("ta"), - MaxKey: []byte("tc"), + StartKey: []byte("ta"), + EndKey: []byte("tc"), TotalKVSize: 12, - DataFiles: []string{"gs://sort-bucket/data/1"}, - StatFiles: []string{"gs://sort-bucket/data/1.stat"}, MultipleFilesStats: []external.MultipleFilesStat{ { Filenames: [][2]string{ @@ -254,11 +252,9 @@ func TestDispatcherExtGlobalSort(t *testing.T) { }, SortedIndexMetas: map[int64]*external.SortedKVMeta{ 1: { - MinKey: []byte("ia"), - MaxKey: []byte("ic"), + StartKey: []byte("ia"), + EndKey: []byte("ic"), TotalKVSize: 12, - DataFiles: []string{"gs://sort-bucket/index/1"}, - StatFiles: []string{"gs://sort-bucket/index/1.stat"}, MultipleFilesStats: []external.MultipleFilesStat{ { Filenames: [][2]string{ @@ -301,19 +297,11 @@ func TestDispatcherExtGlobalSort(t *testing.T) { mergeSortStepMeta := &importinto.MergeSortStepMeta{ KVGroup: 
"data", SortedKVMeta: external.SortedKVMeta{ - MinKey: []byte("ta"), - MaxKey: []byte("tc"), + StartKey: []byte("ta"), + EndKey: []byte("tc"), TotalKVSize: 12, - DataFiles: []string{"gs://sort-bucket/data/1"}, - StatFiles: []string{"gs://sort-bucket/data/1.stat"}, - MultipleFilesStats: []external.MultipleFilesStat{ - { - Filenames: [][2]string{ - {"gs://sort-bucket/data/1", "gs://sort-bucket/data/1.stat"}, - }, - }, - }, }, + DataFiles: []string{"gs://sort-bucket/data/1"}, } mergeSortStepMetaBytes, err := json.Marshal(mergeSortStepMeta) require.NoError(t, err) diff --git a/pkg/disttask/importinto/planner.go b/pkg/disttask/importinto/planner.go index 292ef218b9a07..8facd0113b4eb 100644 --- a/pkg/disttask/importinto/planner.go +++ b/pkg/disttask/importinto/planner.go @@ -301,13 +301,14 @@ func generateMergeSortSpecs(planCtx planner.PlanCtx) ([]planner.PipelineSpec, er return nil, err } for kvGroup, kvMeta := range kvMetas { - length := len(kvMeta.DataFiles) if skipMergeSort(kvGroup, kvMeta.MultipleFilesStats) { logutil.Logger(planCtx.Ctx).Info("skip merge sort for kv group", zap.Int64("task-id", planCtx.TaskID), zap.String("kv-group", kvGroup)) continue } + dataFiles := kvMeta.GetDataFiles() + length := len(dataFiles) for start := 0; start < length; start += step { end := start + step if end > length { @@ -316,7 +317,7 @@ func generateMergeSortSpecs(planCtx planner.PlanCtx) ([]planner.PipelineSpec, er result = append(result, &MergeSortSpec{ MergeSortStepMeta: &MergeSortStepMeta{ KVGroup: kvGroup, - DataFiles: kvMeta.DataFiles[start:end], + DataFiles: dataFiles[start:end], }, }) } @@ -368,7 +369,7 @@ func generateWriteIngestSpecs(planCtx planner.PlanCtx, p *LogicalPlan) ([]planne logutil.Logger(ctx).Warn("close range splitter failed", zap.Error(err2)) } }() - startKey := tidbkv.Key(kvMeta.MinKey) + startKey := tidbkv.Key(kvMeta.StartKey) var endKey tidbkv.Key for { endKeyOfGroup, dataFiles, statFiles, rangeSplitKeys, err2 := splitter.SplitOneRangesGroup() @@ -376,13 
+377,14 @@ func generateWriteIngestSpecs(planCtx planner.PlanCtx, p *LogicalPlan) ([]planne return err2 } if len(endKeyOfGroup) == 0 { - endKey = tidbkv.Key(kvMeta.MaxKey).Next() + endKey = kvMeta.EndKey } else { endKey = tidbkv.Key(endKeyOfGroup).Clone() } logutil.Logger(ctx).Info("kv range as subtask", zap.String("startKey", hex.EncodeToString(startKey)), - zap.String("endKey", hex.EncodeToString(endKey))) + zap.String("endKey", hex.EncodeToString(endKey)), + zap.Int("dataFiles", len(dataFiles))) if startKey.Cmp(endKey) >= 0 { return errors.Errorf("invalid kv range, startKey: %s, endKey: %s", hex.EncodeToString(startKey), hex.EncodeToString(endKey)) @@ -391,13 +393,13 @@ func generateWriteIngestSpecs(planCtx planner.PlanCtx, p *LogicalPlan) ([]planne m := &WriteIngestStepMeta{ KVGroup: kvGroup, SortedKVMeta: external.SortedKVMeta{ - MinKey: startKey, - MaxKey: endKey, - DataFiles: dataFiles, - StatFiles: statFiles, + StartKey: startKey, + EndKey: endKey, // this is actually an estimate, we don't know the exact size of the data TotalKVSize: uint64(config.DefaultBatchSize), }, + DataFiles: dataFiles, + StatFiles: statFiles, RangeSplitKeys: rangeSplitKeys, RangeSplitSize: splitter.GetRangeSplitSize(), } @@ -499,8 +501,13 @@ func getRangeSplitter(ctx context.Context, store storage.ExternalStorage, kvMeta zap.Int64("region-split-keys", regionSplitKeys)) return external.NewRangeSplitter( - ctx, kvMeta.DataFiles, kvMeta.StatFiles, store, - int64(config.DefaultBatchSize), int64(math.MaxInt64), - regionSplitSize, regionSplitKeys, + ctx, + kvMeta.GetDataFiles(), + kvMeta.GetStatFiles(), + store, + int64(config.DefaultBatchSize), + int64(math.MaxInt64), + regionSplitSize, + regionSplitKeys, ) } diff --git a/pkg/disttask/importinto/planner_test.go b/pkg/disttask/importinto/planner_test.go index 95ccf9607a3fd..a3bb95e0a8b05 100644 --- a/pkg/disttask/importinto/planner_test.go +++ b/pkg/disttask/importinto/planner_test.go @@ -127,11 +127,9 @@ func genEncodeStepMetas(t 
*testing.T, cnt int) [][]byte { idxPrefix := fmt.Sprintf("i1_%d_", i) meta := &ImportStepMeta{ SortedDataMeta: &external.SortedKVMeta{ - MinKey: []byte(prefix + "a"), - MaxKey: []byte(prefix + "c"), + StartKey: []byte(prefix + "a"), + EndKey: []byte(prefix + "c"), TotalKVSize: 12, - DataFiles: []string{prefix + "/1"}, - StatFiles: []string{prefix + "/1.stat"}, MultipleFilesStats: []external.MultipleFilesStat{ { Filenames: [][2]string{ @@ -142,11 +140,9 @@ func genEncodeStepMetas(t *testing.T, cnt int) [][]byte { }, SortedIndexMetas: map[int64]*external.SortedKVMeta{ 1: { - MinKey: []byte(idxPrefix + "a"), - MaxKey: []byte(idxPrefix + "c"), + StartKey: []byte(idxPrefix + "a"), + EndKey: []byte(idxPrefix + "c"), TotalKVSize: 12, - DataFiles: []string{idxPrefix + "/1"}, - StatFiles: []string{idxPrefix + "/1.stat"}, MultipleFilesStats: []external.MultipleFilesStat{ { Filenames: [][2]string{ @@ -202,11 +198,9 @@ func genMergeStepMetas(t *testing.T, cnt int) [][]byte { meta := &MergeSortStepMeta{ KVGroup: "data", SortedKVMeta: external.SortedKVMeta{ - MinKey: []byte(prefix + "a"), - MaxKey: []byte(prefix + "c"), + StartKey: []byte(prefix + "a"), + EndKey: []byte(prefix + "c"), TotalKVSize: 12, - DataFiles: []string{prefix + "/1"}, - StatFiles: []string{prefix + "/1.stat"}, MultipleFilesStats: []external.MultipleFilesStat{ { Filenames: [][2]string{ @@ -231,17 +225,17 @@ func TestGetSortedKVMetas(t *testing.T) { require.Contains(t, kvMetas, "data") require.Contains(t, kvMetas, "1") // just check meta is merged, won't check all fields - require.Equal(t, []byte("d_0_a"), kvMetas["data"].MinKey) - require.Equal(t, []byte("d_2_c"), kvMetas["data"].MaxKey) - require.Equal(t, []byte("i1_0_a"), kvMetas["1"].MinKey) - require.Equal(t, []byte("i1_2_c"), kvMetas["1"].MaxKey) + require.Equal(t, []byte("d_0_a"), kvMetas["data"].StartKey) + require.Equal(t, []byte("d_2_c"), kvMetas["data"].EndKey) + require.Equal(t, []byte("i1_0_a"), kvMetas["1"].StartKey) + require.Equal(t, 
[]byte("i1_2_c"), kvMetas["1"].EndKey) mergeStepMetas := genMergeStepMetas(t, 3) kvMetas2, err := getSortedKVMetasOfMergeStep(mergeStepMetas) require.NoError(t, err) require.Len(t, kvMetas2, 1) - require.Equal(t, []byte("x_0_a"), kvMetas2["data"].MinKey) - require.Equal(t, []byte("x_2_c"), kvMetas2["data"].MaxKey) + require.Equal(t, []byte("x_0_a"), kvMetas2["data"].StartKey) + require.Equal(t, []byte("x_2_c"), kvMetas2["data"].EndKey) // force merge sort for data kv require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/disttask/importinto/forceMergeSort", `return("data")`)) @@ -256,8 +250,8 @@ func TestGetSortedKVMetas(t *testing.T) { }) require.NoError(t, err) require.Len(t, allKVMetas, 2) - require.Equal(t, []byte("x_0_a"), allKVMetas["data"].MinKey) - require.Equal(t, []byte("x_2_c"), allKVMetas["data"].MaxKey) - require.Equal(t, []byte("i1_0_a"), allKVMetas["1"].MinKey) - require.Equal(t, []byte("i1_2_c"), allKVMetas["1"].MaxKey) + require.Equal(t, []byte("x_0_a"), allKVMetas["data"].StartKey) + require.Equal(t, []byte("x_2_c"), allKVMetas["data"].EndKey) + require.Equal(t, []byte("i1_0_a"), allKVMetas["1"].StartKey) + require.Equal(t, []byte("i1_2_c"), allKVMetas["1"].EndKey) } diff --git a/pkg/disttask/importinto/proto.go b/pkg/disttask/importinto/proto.go index 1db7f6cfae703..ba5c13439cef0 100644 --- a/pkg/disttask/importinto/proto.go +++ b/pkg/disttask/importinto/proto.go @@ -109,6 +109,8 @@ type MergeSortStepMeta struct { type WriteIngestStepMeta struct { KVGroup string `json:"kv-group"` external.SortedKVMeta `json:"sorted-kv-meta"` + DataFiles []string `json:"data-files"` + StatFiles []string `json:"stat-files"` RangeSplitKeys [][]byte `json:"range-split-keys"` RangeSplitSize int64 `json:"range-split-size"` diff --git a/pkg/disttask/importinto/scheduler.go b/pkg/disttask/importinto/scheduler.go index 04d516b9e6d37..3462a5266b2ea 100644 --- a/pkg/disttask/importinto/scheduler.go +++ b/pkg/disttask/importinto/scheduler.go @@ -376,8 +376,8 @@ 
func (e *writeAndIngestStepExecutor) RunSubtask(ctx context.Context, subtask *pr StorageURI: e.taskMeta.Plan.CloudStorageURI, DataFiles: sm.DataFiles, StatFiles: sm.StatFiles, - MinKey: sm.MinKey, - MaxKey: sm.MaxKey, + StartKey: sm.StartKey, + EndKey: sm.EndKey, SplitKeys: sm.RangeSplitKeys, RegionSplitSize: sm.RangeSplitSize, TotalFileSize: int64(sm.TotalKVSize), From 6883e6de500c6ee5104580157d75c5b239963145 Mon Sep 17 00:00:00 2001 From: Lloyd-Pottiger <60744015+Lloyd-Pottiger@users.noreply.github.com> Date: Fri, 27 Oct 2023 17:06:34 +0800 Subject: [PATCH 18/33] dumpling: Fix RowReceiverArr unexpected behaviour call BindAddress multiple times (#48037) close pingcap/tidb#48036 --- dumpling/export/sql_type.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dumpling/export/sql_type.go b/dumpling/export/sql_type.go index 90dbc34cc23d3..a079a5a610f42 100644 --- a/dumpling/export/sql_type.go +++ b/dumpling/export/sql_type.go @@ -175,7 +175,7 @@ func SQLTypeNumberMaker() RowReceiverStringer { } // MakeRowReceiver constructs RowReceiverArr from column types -func MakeRowReceiver(colTypes []string) RowReceiverArr { +func MakeRowReceiver(colTypes []string) *RowReceiverArr { rowReceiverArr := make([]RowReceiverStringer, len(colTypes)) for i, colTp := range colTypes { recMaker, ok := colTypeRowReceiverMap[colTp] @@ -184,7 +184,7 @@ func MakeRowReceiver(colTypes []string) RowReceiverArr { } rowReceiverArr[i] = recMaker() } - return RowReceiverArr{ + return &RowReceiverArr{ bound: false, receivers: rowReceiverArr, } @@ -197,7 +197,7 @@ type RowReceiverArr struct { } // BindAddress implements RowReceiver.BindAddress -func (r RowReceiverArr) BindAddress(args []interface{}) { +func (r *RowReceiverArr) BindAddress(args []interface{}) { if r.bound { return } @@ -208,7 +208,7 @@ func (r RowReceiverArr) BindAddress(args []interface{}) { } // WriteToBuffer implements Stringer.WriteToBuffer -func (r RowReceiverArr) WriteToBuffer(bf *bytes.Buffer, 
escapeBackslash bool) { +func (r *RowReceiverArr) WriteToBuffer(bf *bytes.Buffer, escapeBackslash bool) { bf.WriteByte('(') for i, receiver := range r.receivers { receiver.WriteToBuffer(bf, escapeBackslash) @@ -220,7 +220,7 @@ func (r RowReceiverArr) WriteToBuffer(bf *bytes.Buffer, escapeBackslash bool) { } // WriteToBufferInCsv implements Stringer.WriteToBufferInCsv -func (r RowReceiverArr) WriteToBufferInCsv(bf *bytes.Buffer, escapeBackslash bool, opt *csvOption) { +func (r *RowReceiverArr) WriteToBufferInCsv(bf *bytes.Buffer, escapeBackslash bool, opt *csvOption) { for i, receiver := range r.receivers { receiver.WriteToBufferInCsv(bf, escapeBackslash, opt) if i != len(r.receivers)-1 { From ca977c51111a42d3b24a4d9f5d16413f21bb2730 Mon Sep 17 00:00:00 2001 From: EasonBall <592838129@qq.com> Date: Fri, 27 Oct 2023 17:44:04 +0800 Subject: [PATCH 19/33] disttask: add more retryable error (#48033) ref pingcap/tidb#46258, close pingcap/tidb#48034 --- pkg/disttask/framework/scheduler/BUILD.bazel | 2 ++ pkg/disttask/framework/scheduler/scheduler.go | 16 +++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/pkg/disttask/framework/scheduler/BUILD.bazel b/pkg/disttask/framework/scheduler/BUILD.bazel index 7d4d274cf0b80..09dde40ab890b 100644 --- a/pkg/disttask/framework/scheduler/BUILD.bazel +++ b/pkg/disttask/framework/scheduler/BUILD.bazel @@ -20,10 +20,12 @@ go_library( "//pkg/disttask/framework/storage", "//pkg/domain/infosync", "//pkg/metrics", + "//pkg/parser/terror", "//pkg/resourcemanager/pool/spool", "//pkg/resourcemanager/util", "//pkg/util", "//pkg/util/backoff", + "//pkg/util/dbterror", "//pkg/util/logutil", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", diff --git a/pkg/disttask/framework/scheduler/scheduler.go b/pkg/disttask/framework/scheduler/scheduler.go index bcefa5c79b65a..6253a2c6cab8b 100644 --- a/pkg/disttask/framework/scheduler/scheduler.go +++ b/pkg/disttask/framework/scheduler/scheduler.go 
@@ -29,7 +29,9 @@ import ( "github.com/pingcap/tidb/pkg/disttask/framework/storage" "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/metrics" + "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/util/backoff" + "github.com/pingcap/tidb/pkg/util/dbterror" "github.com/pingcap/tidb/pkg/util/logutil" "go.uber.org/zap" ) @@ -575,6 +577,18 @@ func (s *BaseScheduler) finishSubtaskAndUpdateState(ctx context.Context, subtask metrics.IncDistTaskSubTaskCnt(subtask) } +// TODO: abstract interface for each business to implement it. +func isRetryableError(err error) bool { + originErr := errors.Cause(err) + if tErr, ok := originErr.(*terror.Error); ok { + sqlErr := terror.ToSQLError(tErr) + _, ok := dbterror.ReorgRetryableErrCodes[sqlErr.Code] + return ok + } + // can't retry Unknown err + return false +} + // markSubTaskCanceledOrFailed check the error type and decide the subtasks' state. // 1. Only cancel subtasks when meet ErrCancelSubtask. // 2. Only fail subtasks when meet non retryable error. 
@@ -584,7 +598,7 @@ func (s *BaseScheduler) markSubTaskCanceledOrFailed(ctx context.Context, subtask if ctx.Err() != nil && context.Cause(ctx) == ErrCancelSubtask { logutil.Logger(s.logCtx).Warn("subtask canceled", zap.Error(err)) s.updateSubtaskStateAndError(subtask, proto.TaskStateCanceled, nil) - } else if common.IsRetryableError(err) { + } else if common.IsRetryableError(err) || isRetryableError(err) { logutil.Logger(s.logCtx).Warn("met retryable error", zap.Error(err)) } else if errors.Cause(err) != context.Canceled { logutil.Logger(s.logCtx).Warn("subtask failed", zap.Error(err)) From 6d64b7e89a3b772aa399261f0b57348f06fafb19 Mon Sep 17 00:00:00 2001 From: YangKeao Date: Fri, 27 Oct 2023 18:44:05 +0800 Subject: [PATCH 20/33] types, stmtctx, *: use ZeroDate, ZeroInDate and InvalidDate related flags to replace variable in statement context (#47794) close pingcap/tidb#47507, close pingcap/tidb#47508 --- br/pkg/lightning/backend/kv/session.go | 7 +- br/pkg/task/backup.go | 2 +- pkg/ddl/backfilling_scheduler.go | 15 +- pkg/ddl/ddl_api.go | 2 +- pkg/domain/BUILD.bazel | 1 - pkg/domain/domain_test.go | 3 +- pkg/executor/aggfuncs/BUILD.bazel | 1 - pkg/executor/aggfuncs/func_count_test.go | 3 +- pkg/executor/brie.go | 4 +- pkg/executor/executor.go | 78 +++--- pkg/executor/inspection_result_test.go | 10 +- pkg/executor/inspection_summary_test.go | 2 +- .../calibrate_resource_test.go | 2 +- pkg/executor/test/executor/executor_test.go | 12 +- pkg/expression/builtin_cast.go | 32 +-- pkg/expression/builtin_cast_vec.go | 32 +-- pkg/expression/builtin_compare.go | 12 +- pkg/expression/builtin_compare_vec.go | 4 +- pkg/expression/builtin_other_vec_test.go | 2 +- pkg/expression/builtin_time.go | 104 ++++---- pkg/expression/builtin_time_test.go | 35 ++- pkg/expression/builtin_time_vec.go | 34 +-- pkg/expression/builtin_time_vec_generated.go | 24 +- pkg/expression/builtin_vectorized_test.go | 6 +- pkg/expression/distsql_builtin_test.go | 2 +- pkg/expression/generator/time_vec.go 
| 12 +- pkg/expression/helper.go | 10 +- .../core/memtable_predicate_extractor_test.go | 2 +- .../handler/optimizor/statistics_handler.go | 2 +- pkg/server/internal/column/BUILD.bazel | 1 - pkg/server/internal/column/column_test.go | 9 +- pkg/server/internal/dump/BUILD.bazel | 1 - pkg/server/internal/dump/dump_test.go | 19 +- pkg/session/test/meta/session_test.go | 6 +- pkg/sessionctx/stmtctx/stmtctx.go | 10 +- pkg/sessionctx/stmtctx/stmtctx_test.go | 4 +- pkg/sessionctx/variable/varsutil.go | 4 +- pkg/statistics/handle/bootstrap.go | 3 +- pkg/statistics/handle/storage/read.go | 3 +- pkg/statistics/scalar.go | 12 +- pkg/statistics/scalar_test.go | 2 +- pkg/table/tables/mutation_checker_test.go | 2 +- pkg/tablecodec/tablecodec_test.go | 4 +- pkg/types/BUILD.bazel | 1 - pkg/types/context/context.go | 48 +++- pkg/types/convert.go | 15 +- pkg/types/convert_test.go | 21 +- pkg/types/datum.go | 88 +++---- pkg/types/datum_test.go | 10 +- pkg/types/format_test.go | 18 +- pkg/types/time.go | 179 ++++++------- pkg/types/time_test.go | 240 ++++++++---------- pkg/util/chunk/mutrow_test.go | 2 +- pkg/util/codec/codec_test.go | 4 +- pkg/util/dbutil/common.go | 2 +- pkg/util/rowDecoder/decoder_test.go | 4 +- pkg/util/rowcodec/rowcodec_test.go | 4 +- pkg/util/timeutil/time_zone.go | 2 +- 58 files changed, 596 insertions(+), 577 deletions(-) diff --git a/br/pkg/lightning/backend/kv/session.go b/br/pkg/lightning/backend/kv/session.go index f1ca7108c2906..d6b9add03273c 100644 --- a/br/pkg/lightning/backend/kv/session.go +++ b/br/pkg/lightning/backend/kv/session.go @@ -288,11 +288,12 @@ func NewSession(options *encode.SessionOptions, logger log.Logger) *Session { vars.StmtCtx.BatchCheck = true vars.StmtCtx.BadNullAsWarning = !sqlMode.HasStrictMode() vars.StmtCtx.OverflowAsWarning = !sqlMode.HasStrictMode() - vars.StmtCtx.AllowInvalidDate = sqlMode.HasAllowInvalidDatesMode() - vars.StmtCtx.IgnoreZeroInDate = !sqlMode.HasStrictMode() || sqlMode.HasAllowInvalidDatesMode() vars.SQLMode = 
sqlMode - typeFlags := vars.StmtCtx.TypeFlags().WithTruncateAsWarning(!sqlMode.HasStrictMode()) + typeFlags := vars.StmtCtx.TypeFlags(). + WithTruncateAsWarning(!sqlMode.HasStrictMode()). + WithIgnoreInvalidDateErr(sqlMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!sqlMode.HasStrictMode() || sqlMode.HasAllowInvalidDatesMode()) vars.StmtCtx.SetTypeFlags(typeFlags) if options.SysVars != nil { for k, v := range options.SysVars { diff --git a/br/pkg/task/backup.go b/br/pkg/task/backup.go index 497791b315d42..4b49ed0e86700 100644 --- a/br/pkg/task/backup.go +++ b/br/pkg/task/backup.go @@ -773,7 +773,7 @@ func ParseTSString(ts string, tzCheck bool) (uint64, error) { return 0, errors.Errorf("must set timezone when using datetime format ts, e.g. '2018-05-11 01:42:23+0800'") } } - t, err := types.ParseTime(sc, ts, mysql.TypeTimestamp, types.MaxFsp, nil) + t, err := types.ParseTime(sc.TypeCtx(), ts, mysql.TypeTimestamp, types.MaxFsp, nil) if err != nil { return 0, errors.Trace(err) } diff --git a/pkg/ddl/backfilling_scheduler.go b/pkg/ddl/backfilling_scheduler.go index eca0b9a7c71e8..4b0d6b12d6310 100644 --- a/pkg/ddl/backfilling_scheduler.go +++ b/pkg/ddl/backfilling_scheduler.go @@ -158,13 +158,13 @@ func initSessCtx( sessCtx.GetSessionVars().StmtCtx.SetTimeZone(sessCtx.GetSessionVars().Location()) sessCtx.GetSessionVars().StmtCtx.BadNullAsWarning = !sqlMode.HasStrictMode() sessCtx.GetSessionVars().StmtCtx.OverflowAsWarning = !sqlMode.HasStrictMode() - sessCtx.GetSessionVars().StmtCtx.AllowInvalidDate = sqlMode.HasAllowInvalidDatesMode() sessCtx.GetSessionVars().StmtCtx.DividedByZeroAsWarning = !sqlMode.HasStrictMode() - sessCtx.GetSessionVars().StmtCtx.IgnoreZeroInDate = !sqlMode.HasStrictMode() || sqlMode.HasAllowInvalidDatesMode() - sessCtx.GetSessionVars().StmtCtx.NoZeroDate = sqlMode.HasStrictMode() - sessCtx.GetSessionVars().StmtCtx.SetTypeFlags(types.StrictFlags. - WithTruncateAsWarning(!sqlMode.HasStrictMode()), - ) + + typeFlags := types.StrictFlags. 
+ WithTruncateAsWarning(!sqlMode.HasStrictMode()). + WithIgnoreInvalidDateErr(sqlMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!sqlMode.HasStrictMode() || sqlMode.HasAllowInvalidDatesMode()) + sessCtx.GetSessionVars().StmtCtx.SetTypeFlags(typeFlags) // Prevent initializing the mock context in the workers concurrently. // For details, see https://github.com/pingcap/tidb/issues/40879. @@ -217,6 +217,9 @@ func (b *txnBackfillScheduler) adjustWorkerSize() error { case typeUpdateColumnWorker: // Setting InCreateOrAlterStmt tells the difference between SELECT casting and ALTER COLUMN casting. sessCtx.GetSessionVars().StmtCtx.InCreateOrAlterStmt = true + sessCtx.GetSessionVars().StmtCtx.SetTypeFlags( + sessCtx.GetSessionVars().StmtCtx.TypeFlags(). + WithIgnoreZeroDateErr(!reorgInfo.ReorgMeta.SQLMode.HasStrictMode())) updateWorker := newUpdateColumnWorker(sessCtx, i, b.tbl, b.decodeColMap, reorgInfo, jc) runner = newBackfillWorker(jc.ddlJobCtx, updateWorker) worker = updateWorker diff --git a/pkg/ddl/ddl_api.go b/pkg/ddl/ddl_api.go index 98dee5c75221d..4cd6ce1503ac6 100644 --- a/pkg/ddl/ddl_api.go +++ b/pkg/ddl/ddl_api.go @@ -1043,7 +1043,7 @@ func convertTimestampDefaultValToUTC(ctx sessionctx.Context, defaultVal interfac } if vv, ok := defaultVal.(string); ok { if vv != types.ZeroDatetimeStr && !strings.EqualFold(vv, ast.CurrentTimestamp) { - t, err := types.ParseTime(ctx.GetSessionVars().StmtCtx, vv, col.GetType(), col.GetDecimal(), nil) + t, err := types.ParseTime(ctx.GetSessionVars().StmtCtx.TypeCtx(), vv, col.GetType(), col.GetDecimal(), nil) if err != nil { return defaultVal, errors.Trace(err) } diff --git a/pkg/domain/BUILD.bazel b/pkg/domain/BUILD.bazel index 9e6209b5415ea..fe9b2a28e7486 100644 --- a/pkg/domain/BUILD.bazel +++ b/pkg/domain/BUILD.bazel @@ -142,7 +142,6 @@ go_test( "//pkg/parser/terror", "//pkg/server", "//pkg/session", - "//pkg/sessionctx/stmtctx", "//pkg/sessionctx/variable", "//pkg/store/mockstore", "//pkg/testkit", diff --git 
a/pkg/domain/domain_test.go b/pkg/domain/domain_test.go index 00e365015998a..861b6b71b465e 100644 --- a/pkg/domain/domain_test.go +++ b/pkg/domain/domain_test.go @@ -35,7 +35,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/types" @@ -196,7 +195,7 @@ func TestStatWorkRecoverFromPanic(t *testing.T) { require.Equal(t, expiredTimeStamp, ts) // set expiredTimeStamp4PC to "2023-08-02 12:15:00" - ts, _ = types.ParseTimestamp(stmtctx.NewStmtCtxWithTimeZone(time.UTC), "2023-08-02 12:15:00") + ts, _ = types.ParseTimestamp(types.DefaultStmtNoWarningContext, "2023-08-02 12:15:00") dom.SetExpiredTimeStamp4PC(ts) expiredTimeStamp = dom.ExpiredTimeStamp4PC() require.Equal(t, expiredTimeStamp, ts) diff --git a/pkg/executor/aggfuncs/BUILD.bazel b/pkg/executor/aggfuncs/BUILD.bazel index 527a02b2fcf5f..9336517d7ecec 100644 --- a/pkg/executor/aggfuncs/BUILD.bazel +++ b/pkg/executor/aggfuncs/BUILD.bazel @@ -97,7 +97,6 @@ go_test( "//pkg/parser/mysql", "//pkg/planner/util", "//pkg/sessionctx", - "//pkg/sessionctx/stmtctx", "//pkg/sessionctx/variable", "//pkg/testkit", "//pkg/testkit/testsetup", diff --git a/pkg/executor/aggfuncs/func_count_test.go b/pkg/executor/aggfuncs/func_count_test.go index ce9a45dbcf540..58b8f404bd7d8 100644 --- a/pkg/executor/aggfuncs/func_count_test.go +++ b/pkg/executor/aggfuncs/func_count_test.go @@ -23,7 +23,6 @@ import ( "github.com/pingcap/tidb/pkg/executor/aggfuncs" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/hack" "github.com/pingcap/tidb/pkg/util/mock" @@ -158,7 +157,7 @@ func TestMemCount(t *testing.T) { } func TestWriteTime(t 
*testing.T) { - tt, err := types.ParseDate(stmtctx.NewStmtCtx(), "2020-11-11") + tt, err := types.ParseDate(types.DefaultStmtNoWarningContext, "2020-11-11") require.NoError(t, err) buf := make([]byte, 16) diff --git a/pkg/executor/brie.go b/pkg/executor/brie.go index 3f037741b96e4..e40493c3f44e2 100644 --- a/pkg/executor/brie.go +++ b/pkg/executor/brie.go @@ -227,7 +227,7 @@ func (bq *brieQueue) clearTask(sc *stmtctx.StatementContext) { bq.tasks.Range(func(key, value interface{}) bool { item := value.(*brieQueueItem) - if d := currTime.Sub(sc, &item.info.finishTime); d.Compare(outdatedDuration) > 0 { + if d := currTime.Sub(sc.TypeCtx(), &item.info.finishTime); d.Compare(outdatedDuration) > 0 { bq.tasks.Delete(key) } return true @@ -236,7 +236,7 @@ func (bq *brieQueue) clearTask(sc *stmtctx.StatementContext) { func (b *executorBuilder) parseTSString(ts string) (uint64, error) { sc := stmtctx.NewStmtCtxWithTimeZone(b.ctx.GetSessionVars().Location()) - t, err := types.ParseTime(sc, ts, mysql.TypeTimestamp, types.MaxFsp, nil) + t, err := types.ParseTime(sc.TypeCtx(), ts, mysql.TypeTimestamp, types.MaxFsp, nil) if err != nil { return 0, err } diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go index b660343f59c38..686950e01207a 100644 --- a/pkg/executor/executor.go +++ b/pkg/executor/executor.go @@ -2102,16 +2102,22 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { sc.IgnoreNoPartition = stmt.IgnoreErr sc.ErrAutoincReadFailedAsWarning = stmt.IgnoreErr sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() - sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate sc.Priority = stmt.Priority - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr)) + sc.SetTypeFlags(sc.TypeFlags(). 
+ WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || + !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || + vars.SQLMode.HasAllowInvalidDatesMode())) case *ast.CreateTableStmt, *ast.AlterTableStmt: sc.InCreateOrAlterStmt = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() - sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.StrictSQLMode || sc.AllowInvalidDate - sc.NoZeroDate = vars.SQLMode.HasNoZeroDateMode() - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(!vars.StrictSQLMode)) + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!vars.StrictSQLMode). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.StrictSQLMode || + vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroDateErr(!vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode)) + case *ast.LoadDataStmt: sc.InLoadDataStmt = true // return warning instead of error when load data meet no partition for value @@ -2126,9 +2132,10 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { sc.OverflowAsWarning = true // Return warning for truncate error in selection. - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(true)) - sc.IgnoreZeroInDate = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(true). + WithIgnoreZeroInDate(true). 
+ WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) if opts := stmt.SelectStmtOpts; opts != nil { sc.Priority = opts.Priority sc.NotFillCache = !opts.SQLCache @@ -2137,30 +2144,35 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { case *ast.SetOprStmt: sc.InSelectStmt = true sc.OverflowAsWarning = true - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(true)) - sc.IgnoreZeroInDate = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) case *ast.ShowStmt: - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) - sc.IgnoreZeroInDate = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) if stmt.Tp == ast.ShowWarnings || stmt.Tp == ast.ShowErrors || stmt.Tp == ast.ShowSessionStates { sc.InShowWarning = true sc.SetWarnings(vars.StmtCtx.GetWarnings()) } case *ast.SplitRegionStmt: - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(false)) - sc.IgnoreZeroInDate = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(false). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) case *ast.SetSessionStatesStmt: sc.InSetSessionStatesStmt = true - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) - sc.IgnoreZeroInDate = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). 
+ WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) default: - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) - sc.IgnoreZeroInDate = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) } sc.SetTypeFlags(sc.TypeFlags(). @@ -2222,11 +2234,13 @@ func ResetUpdateStmtCtx(sc *stmtctx.StatementContext, stmt *ast.UpdateStmt, vars sc.DupKeyAsWarning = stmt.IgnoreErr sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() - sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate sc.Priority = stmt.Priority sc.IgnoreNoPartition = stmt.IgnoreErr - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr)) + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || + !vars.StrictSQLMode || stmt.IgnoreErr || vars.SQLMode.HasAllowInvalidDatesMode())) } // ResetDeleteStmtCtx resets statement context for DeleteStmt. 
@@ -2235,10 +2249,12 @@ func ResetDeleteStmtCtx(sc *stmtctx.StatementContext, stmt *ast.DeleteStmt, vars sc.DupKeyAsWarning = stmt.IgnoreErr sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() - sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate sc.Priority = stmt.Priority - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr)) + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || + !vars.StrictSQLMode || stmt.IgnoreErr || vars.SQLMode.HasAllowInvalidDatesMode())) } func setOptionForTopSQL(sc *stmtctx.StatementContext, snapshot kv.Snapshot) { diff --git a/pkg/executor/inspection_result_test.go b/pkg/executor/inspection_result_test.go index 29dd578ced407..48a9abfce93fe 100644 --- a/pkg/executor/inspection_result_test.go +++ b/pkg/executor/inspection_result_test.go @@ -179,7 +179,7 @@ func TestInspectionResult(t *testing.T) { } func parseTime(t *testing.T, se session.Session, str string) types.Time { - time, err := types.ParseTime(se.GetSessionVars().StmtCtx, str, mysql.TypeDatetime, types.MaxFsp, nil) + time, err := types.ParseTime(se.GetSessionVars().StmtCtx.TypeCtx(), str, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) return time } @@ -338,7 +338,7 @@ func TestThresholdCheckInspection2(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") datetime := func(s string) types.Time { - time, err := types.ParseTime(tk.Session().GetSessionVars().StmtCtx, s, mysql.TypeDatetime, types.MaxFsp, nil) + time, err := 
types.ParseTime(tk.Session().GetSessionVars().StmtCtx.TypeCtx(), s, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) return time } @@ -421,7 +421,7 @@ func TestThresholdCheckInspection3(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") datetime := func(s string) types.Time { - time, err := types.ParseTime(tk.Session().GetSessionVars().StmtCtx, s, mysql.TypeDatetime, types.MaxFsp, nil) + time, err := types.ParseTime(tk.Session().GetSessionVars().StmtCtx.TypeCtx(), s, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) return time } @@ -628,7 +628,7 @@ func TestNodeLoadInspection(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") datetime := func(s string) types.Time { - time, err := types.ParseTime(tk.Session().GetSessionVars().StmtCtx, s, mysql.TypeDatetime, types.MaxFsp, nil) + time, err := types.ParseTime(tk.Session().GetSessionVars().StmtCtx.TypeCtx(), s, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) return time } @@ -704,7 +704,7 @@ func TestConfigCheckOfStorageBlockCacheSize(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") datetime := func(s string) types.Time { - time, err := types.ParseTime(tk.Session().GetSessionVars().StmtCtx, s, mysql.TypeDatetime, types.MaxFsp, nil) + time, err := types.ParseTime(tk.Session().GetSessionVars().StmtCtx.TypeCtx(), s, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) return time } diff --git a/pkg/executor/inspection_summary_test.go b/pkg/executor/inspection_summary_test.go index f199c06afac59..e36cc44db715c 100644 --- a/pkg/executor/inspection_summary_test.go +++ b/pkg/executor/inspection_summary_test.go @@ -51,7 +51,7 @@ func TestInspectionSummary(t *testing.T) { defer func() { require.NoError(t, failpoint.Disable(fpName)) }() datetime := func(s string) types.Time { - time, err := types.ParseTime(tk.Session().GetSessionVars().StmtCtx, s, mysql.TypeDatetime, types.MaxFsp, nil) + time, err := 
types.ParseTime(tk.Session().GetSessionVars().StmtCtx.TypeCtx(), s, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) return time } diff --git a/pkg/executor/internal/calibrateresource/calibrate_resource_test.go b/pkg/executor/internal/calibrateresource/calibrate_resource_test.go index a54d462c5245e..644a13a39767f 100644 --- a/pkg/executor/internal/calibrateresource/calibrate_resource_test.go +++ b/pkg/executor/internal/calibrateresource/calibrate_resource_test.go @@ -87,7 +87,7 @@ func TestCalibrateResource(t *testing.T) { }() datetime := func(s string) types.Time { - time, err := types.ParseTime(tk.Session().GetSessionVars().StmtCtx, s, mysql.TypeDatetime, types.MaxFsp, nil) + time, err := types.ParseTime(tk.Session().GetSessionVars().StmtCtx.TypeCtx(), s, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) return time } diff --git a/pkg/executor/test/executor/executor_test.go b/pkg/executor/test/executor/executor_test.go index 0165dc5beca70..42b59f2da13ac 100644 --- a/pkg/executor/test/executor/executor_test.go +++ b/pkg/executor/test/executor/executor_test.go @@ -2671,11 +2671,11 @@ func TestAdminShowDDLJobs(t *testing.T) { tk.MustExec(`set @@time_zone = 'Asia/Shanghai'`) re = tk.MustQuery("admin show ddl jobs where end_time is not NULL") row = re.Rows()[0] - createTime, err := types.ParseDatetime(nil, row[8].(string)) + createTime, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, row[8].(string)) require.NoError(t, err) - startTime, err := types.ParseDatetime(nil, row[9].(string)) + startTime, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, row[9].(string)) require.NoError(t, err) - endTime, err := types.ParseDatetime(nil, row[10].(string)) + endTime, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, row[10].(string)) require.NoError(t, err) tk.MustExec(`set @@time_zone = 'Europe/Amsterdam'`) re = tk.MustQuery("admin show ddl jobs where end_time is not NULL") @@ -2683,11 +2683,11 @@ func 
TestAdminShowDDLJobs(t *testing.T) { require.NotEqual(t, row[8], row2[8]) require.NotEqual(t, row[9], row2[9]) require.NotEqual(t, row[10], row2[10]) - createTime2, err := types.ParseDatetime(nil, row2[8].(string)) + createTime2, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, row2[8].(string)) require.NoError(t, err) - startTime2, err := types.ParseDatetime(nil, row2[9].(string)) + startTime2, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, row2[9].(string)) require.NoError(t, err) - endTime2, err := types.ParseDatetime(nil, row2[10].(string)) + endTime2, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, row2[10].(string)) require.NoError(t, err) loc, err := time.LoadLocation("Asia/Shanghai") require.NoError(t, err) diff --git a/pkg/expression/builtin_cast.go b/pkg/expression/builtin_cast.go index 656cc0c4f5780..78e0829b87410 100644 --- a/pkg/expression/builtin_cast.go +++ b/pkg/expression/builtin_cast.go @@ -756,7 +756,7 @@ func (b *builtinCastIntAsTimeSig) evalTime(row chunk.Row) (res types.Time, isNul if b.args[0].GetType().GetType() == mysql.TypeYear { res, err = types.ParseTimeFromYear(b.ctx.GetSessionVars().StmtCtx, val) } else { - res, err = types.ParseTimeFromNum(b.ctx.GetSessionVars().StmtCtx, val, b.tp.GetType(), b.tp.GetDecimal()) + res, err = types.ParseTimeFromNum(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), val, b.tp.GetType(), b.tp.GetDecimal()) } if err != nil { @@ -1073,7 +1073,7 @@ func (b *builtinCastRealAsTimeSig) evalTime(row chunk.Row) (types.Time, bool, er return types.ZeroTime, false, nil } sc := b.ctx.GetSessionVars().StmtCtx - res, err := types.ParseTimeFromFloatString(sc, fv, b.tp.GetType(), b.tp.GetDecimal()) + res, err := types.ParseTimeFromFloatString(sc.TypeCtx(), fv, b.tp.GetType(), b.tp.GetDecimal()) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } @@ -1099,7 +1099,7 @@ func (b *builtinCastRealAsDurationSig) evalDuration(row chunk.Row) (res types.Du if isNull || 
err != nil { return res, isNull, err } - res, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, strconv.FormatFloat(val, 'f', -1, 64), b.tp.GetDecimal()) + res, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), strconv.FormatFloat(val, 'f', -1, 64), b.tp.GetDecimal()) if err != nil { if types.ErrTruncatedWrongVal.Equal(err) { err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) @@ -1251,7 +1251,7 @@ func (b *builtinCastDecimalAsTimeSig) evalTime(row chunk.Row) (res types.Time, i return res, isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ParseTimeFromFloatString(sc, string(val.ToString()), b.tp.GetType(), b.tp.GetDecimal()) + res, err = types.ParseTimeFromFloatString(sc.TypeCtx(), string(val.ToString()), b.tp.GetType(), b.tp.GetDecimal()) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } @@ -1277,7 +1277,7 @@ func (b *builtinCastDecimalAsDurationSig) evalDuration(row chunk.Row) (res types if isNull || err != nil { return res, true, err } - res, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, string(val.ToString()), b.tp.GetDecimal()) + res, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), string(val.ToString()), b.tp.GetDecimal()) if types.ErrTruncatedWrongVal.Equal(err) { err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) // ErrTruncatedWrongVal needs to be considered NULL. @@ -1474,7 +1474,7 @@ func (b *builtinCastStringAsTimeSig) evalTime(row chunk.Row) (res types.Time, is return res, isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ParseTime(sc, val, b.tp.GetType(), b.tp.GetDecimal(), nil) + res, err = types.ParseTime(sc.TypeCtx(), val, b.tp.GetType(), b.tp.GetDecimal(), nil) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } @@ -1503,7 +1503,7 @@ func (b *builtinCastStringAsDurationSig) evalDuration(row chunk.Row) (res types. 
if isNull || err != nil { return res, isNull, err } - res, isNull, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, val, b.tp.GetDecimal()) + res, isNull, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), val, b.tp.GetDecimal()) if types.ErrTruncatedWrongVal.Equal(err) { sc := b.ctx.GetSessionVars().StmtCtx err = sc.HandleTruncate(err) @@ -1528,10 +1528,10 @@ func (b *builtinCastTimeAsTimeSig) evalTime(row chunk.Row) (res types.Time, isNu } sc := b.ctx.GetSessionVars().StmtCtx - if res, err = res.Convert(sc, b.tp.GetType()); err != nil { + if res, err = res.Convert(sc.TypeCtx(), b.tp.GetType()); err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } - res, err = res.RoundFrac(sc, b.tp.GetDecimal()) + res, err = res.RoundFrac(sc.TypeCtx(), b.tp.GetDecimal()) if b.tp.GetType() == mysql.TypeDate { // Truncate hh:mm:ss part if the type is Date. res.SetCoreTime(types.FromDate(res.Year(), res.Month(), res.Day(), 0, 0, 0, 0)) @@ -1556,7 +1556,7 @@ func (b *builtinCastTimeAsIntSig) evalInt(row chunk.Row) (res int64, isNull bool return res, isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - t, err := val.RoundFrac(sc, types.DefaultFsp) + t, err := val.RoundFrac(sc.TypeCtx(), types.DefaultFsp) if err != nil { return res, false, err } @@ -1796,11 +1796,11 @@ func (b *builtinCastDurationAsTimeSig) evalTime(row chunk.Row) (res types.Time, if err != nil { ts = gotime.Now() } - res, err = val.ConvertToTimeWithTimestamp(sc, b.tp.GetType(), ts) + res, err = val.ConvertToTimeWithTimestamp(sc.TypeCtx(), b.tp.GetType(), ts) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } - res, err = res.RoundFrac(sc, b.tp.GetDecimal()) + res, err = res.RoundFrac(sc.TypeCtx(), b.tp.GetDecimal()) return res, false, err } @@ -1937,11 +1937,11 @@ func (b *builtinCastJSONAsTimeSig) evalTime(row chunk.Row) (res types.Time, isNu if err != nil { ts = gotime.Now() } - res, err = duration.ConvertToTimeWithTimestamp(sc, 
b.tp.GetType(), ts) + res, err = duration.ConvertToTimeWithTimestamp(sc.TypeCtx(), b.tp.GetType(), ts) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } - res, err = res.RoundFrac(sc, b.tp.GetDecimal()) + res, err = res.RoundFrac(sc.TypeCtx(), b.tp.GetDecimal()) return res, isNull, err case types.JSONTypeCodeString: s, err := val.Unquote() @@ -1949,7 +1949,7 @@ func (b *builtinCastJSONAsTimeSig) evalTime(row chunk.Row) (res types.Time, isNu return res, false, err } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ParseTime(sc, s, b.tp.GetType(), b.tp.GetDecimal(), nil) + res, err = types.ParseTime(sc.TypeCtx(), s, b.tp.GetType(), b.tp.GetDecimal(), nil) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } @@ -1999,7 +1999,7 @@ func (b *builtinCastJSONAsDurationSig) evalDuration(row chunk.Row) (res types.Du if err != nil { return res, false, err } - res, _, err = types.ParseDuration(stmtCtx, s, b.tp.GetDecimal()) + res, _, err = types.ParseDuration(stmtCtx.TypeCtx(), s, b.tp.GetDecimal()) if types.ErrTruncatedWrongVal.Equal(err) { sc := b.ctx.GetSessionVars().StmtCtx err = sc.HandleTruncate(err) diff --git a/pkg/expression/builtin_cast_vec.go b/pkg/expression/builtin_cast_vec.go index f6e839a5776f6..3a1f04347ea92 100644 --- a/pkg/expression/builtin_cast_vec.go +++ b/pkg/expression/builtin_cast_vec.go @@ -387,7 +387,7 @@ func (b *builtinCastIntAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk. 
if b.args[0].GetType().GetType() == mysql.TypeYear { tm, err = types.ParseTimeFromYear(stmt, i64s[i]) } else { - tm, err = types.ParseTimeFromNum(stmt, i64s[i], b.tp.GetType(), fsp) + tm, err = types.ParseTimeFromNum(stmt.TypeCtx(), i64s[i], b.tp.GetType(), fsp) } if err != nil { @@ -512,7 +512,7 @@ func (b *builtinCastJSONAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk duration := val.GetDuration() sc := b.ctx.GetSessionVars().StmtCtx - tm, err := duration.ConvertToTimeWithTimestamp(sc, b.tp.GetType(), ts) + tm, err := duration.ConvertToTimeWithTimestamp(sc.TypeCtx(), b.tp.GetType(), ts) if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -520,7 +520,7 @@ func (b *builtinCastJSONAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk result.SetNull(i, true) continue } - tm, err = tm.RoundFrac(stmtCtx, fsp) + tm, err = tm.RoundFrac(stmtCtx.TypeCtx(), fsp) if err != nil { return err } @@ -530,7 +530,7 @@ func (b *builtinCastJSONAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk if err != nil { return err } - tm, err := types.ParseTime(stmtCtx, s, b.tp.GetType(), fsp, nil) + tm, err := types.ParseTime(stmtCtx.TypeCtx(), s, b.tp.GetType(), fsp, nil) if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -584,7 +584,7 @@ func (b *builtinCastRealAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk times[i] = types.ZeroTime continue } - tm, err := types.ParseTimeFromFloatString(stmt, fv, b.tp.GetType(), fsp) + tm, err := types.ParseTimeFromFloatString(stmt.TypeCtx(), fv, b.tp.GetType(), fsp) if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -665,7 +665,7 @@ func (b *builtinCastDurationAsTimeSig) vecEvalTime(input *chunk.Chunk, result *c duration.Duration = ds[i] duration.Fsp = fsp - tm, err := duration.ConvertToTimeWithTimestamp(stmtCtx, b.tp.GetType(), ts) + tm, err := duration.ConvertToTimeWithTimestamp(stmtCtx.TypeCtx(), b.tp.GetType(), ts) if 
err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -673,7 +673,7 @@ func (b *builtinCastDurationAsTimeSig) vecEvalTime(input *chunk.Chunk, result *c result.SetNull(i, true) continue } - tm, err = tm.RoundFrac(stmtCtx, fsp) + tm, err = tm.RoundFrac(stmtCtx.TypeCtx(), fsp) if err != nil { return err } @@ -1010,7 +1010,7 @@ func (b *builtinCastStringAsDurationSig) vecEvalDuration(input *chunk.Chunk, res if result.IsNull(i) { continue } - dur, isNull, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, buf.GetString(i), b.tp.GetDecimal()) + dur, isNull, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), buf.GetString(i), b.tp.GetDecimal()) if err != nil { if types.ErrTruncatedWrongVal.Equal(err) { err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) @@ -1288,7 +1288,7 @@ func (b *builtinCastRealAsDurationSig) vecEvalDuration(input *chunk.Chunk, resul if result.IsNull(i) { continue } - dur, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, strconv.FormatFloat(f64s[i], 'f', -1, 64), b.tp.GetDecimal()) + dur, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), strconv.FormatFloat(f64s[i], 'f', -1, 64), b.tp.GetDecimal()) if err != nil { if types.ErrTruncatedWrongVal.Equal(err) { err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) @@ -1483,7 +1483,7 @@ func (b *builtinCastDecimalAsTimeSig) vecEvalTime(input *chunk.Chunk, result *ch if buf.IsNull(i) { continue } - tm, err := types.ParseTimeFromFloatString(stmt, string(decimals[i].ToString()), b.tp.GetType(), fsp) + tm, err := types.ParseTimeFromFloatString(stmt.TypeCtx(), string(decimals[i].ToString()), b.tp.GetType(), fsp) if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -1524,7 +1524,7 @@ func (b *builtinCastTimeAsIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.C if result.IsNull(i) { continue } - t, err := times[i].RoundFrac(sc, types.DefaultFsp) + t, err := 
times[i].RoundFrac(sc.TypeCtx(), types.DefaultFsp) if err != nil { return err } @@ -1553,7 +1553,7 @@ func (b *builtinCastTimeAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk if result.IsNull(i) { continue } - res, err := times[i].Convert(stmt, b.tp.GetType()) + res, err := times[i].Convert(stmt.TypeCtx(), b.tp.GetType()) if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -1561,7 +1561,7 @@ func (b *builtinCastTimeAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk result.SetNull(i, true) continue } - tm, err := res.RoundFrac(stmt, fsp) + tm, err := res.RoundFrac(stmt.TypeCtx(), fsp) if err != nil { return err } @@ -1767,7 +1767,7 @@ func (b *builtinCastStringAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chu if result.IsNull(i) { continue } - tm, err := types.ParseTime(stmtCtx, buf.GetString(i), b.tp.GetType(), fsp, nil) + tm, err := types.ParseTime(stmtCtx.TypeCtx(), buf.GetString(i), b.tp.GetType(), fsp, nil) if err != nil { if errors.Is(err, strconv.ErrSyntax) || errors.Is(err, strconv.ErrRange) { err = types.ErrIncorrectDatetimeValue.GenWithStackByArgs(buf.GetString(i)) @@ -1871,7 +1871,7 @@ func (b *builtinCastDecimalAsDurationSig) vecEvalDuration(input *chunk.Chunk, re if result.IsNull(i) { continue } - dur, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, string(args[i].ToString()), b.tp.GetDecimal()) + dur, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), string(args[i].ToString()), b.tp.GetDecimal()) if err != nil { if types.ErrTruncatedWrongVal.Equal(err) { err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) @@ -1977,7 +1977,7 @@ func (b *builtinCastJSONAsDurationSig) vecEvalDuration(input *chunk.Chunk, resul if err != nil { return err } - dur, _, err = types.ParseDuration(stmtCtx, s, b.tp.GetDecimal()) + dur, _, err = types.ParseDuration(stmtCtx.TypeCtx(), s, b.tp.GetDecimal()) if types.ErrTruncatedWrongVal.Equal(err) { err = stmtCtx.HandleTruncate(err) } diff 
--git a/pkg/expression/builtin_compare.go b/pkg/expression/builtin_compare.go index e1eb1532b8462..ebcf0a42b1d87 100644 --- a/pkg/expression/builtin_compare.go +++ b/pkg/expression/builtin_compare.go @@ -729,14 +729,14 @@ func doTimeConversionForGL(cmpAsDate bool, ctx sessionctx.Context, sc *stmtctx.S var t types.Time var err error if cmpAsDate { - t, err = types.ParseDate(sc, strVal) + t, err = types.ParseDate(sc.TypeCtx(), strVal) if err == nil { - t, err = t.Convert(sc, mysql.TypeDate) + t, err = t.Convert(sc.TypeCtx(), mysql.TypeDate) } } else { - t, err = types.ParseDatetime(sc, strVal) + t, err = types.ParseDatetime(sc.TypeCtx(), strVal) if err == nil { - t, err = t.Convert(sc, mysql.TypeDatetime) + t, err = t.Convert(sc.TypeCtx(), mysql.TypeDatetime) } } if err != nil { @@ -774,7 +774,7 @@ func (b *builtinGreatestTimeSig) evalTime(row chunk.Row) (res types.Time, isNull // Convert ETType Time value to MySQL actual type, distinguish date and datetime sc := b.ctx.GetSessionVars().StmtCtx resTimeTp := getAccurateTimeTypeForGLRet(b.cmpAsDate) - if res, err = res.Convert(sc, resTimeTp); err != nil { + if res, err = res.Convert(sc.TypeCtx(), resTimeTp); err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } return res, false, nil @@ -1048,7 +1048,7 @@ func (b *builtinLeastTimeSig) evalTime(row chunk.Row) (res types.Time, isNull bo // Convert ETType Time value to MySQL actual type, distinguish date and datetime sc := b.ctx.GetSessionVars().StmtCtx resTimeTp := getAccurateTimeTypeForGLRet(b.cmpAsDate) - if res, err = res.Convert(sc, resTimeTp); err != nil { + if res, err = res.Convert(sc.TypeCtx(), resTimeTp); err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } return res, false, nil diff --git a/pkg/expression/builtin_compare_vec.go b/pkg/expression/builtin_compare_vec.go index 2d09673bfe251..06f830129738b 100644 --- a/pkg/expression/builtin_compare_vec.go +++ b/pkg/expression/builtin_compare_vec.go @@ -841,7 +841,7 
@@ func (b *builtinGreatestTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk.C resTimeTp := getAccurateTimeTypeForGLRet(b.cmpAsDate) for rowIdx := 0; rowIdx < n; rowIdx++ { resTimes := result.Times() - resTimes[rowIdx], err = resTimes[rowIdx].Convert(sc, resTimeTp) + resTimes[rowIdx], err = resTimes[rowIdx].Convert(sc.TypeCtx(), resTimeTp) if err != nil { return err } @@ -882,7 +882,7 @@ func (b *builtinLeastTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk.Colu resTimeTp := getAccurateTimeTypeForGLRet(b.cmpAsDate) for rowIdx := 0; rowIdx < n; rowIdx++ { resTimes := result.Times() - resTimes[rowIdx], err = resTimes[rowIdx].Convert(sc, resTimeTp) + resTimes[rowIdx], err = resTimes[rowIdx].Convert(sc.TypeCtx(), resTimeTp) if err != nil { return err } diff --git a/pkg/expression/builtin_other_vec_test.go b/pkg/expression/builtin_other_vec_test.go index 1a88cfaaef290..78c2220b0736b 100644 --- a/pkg/expression/builtin_other_vec_test.go +++ b/pkg/expression/builtin_other_vec_test.go @@ -27,7 +27,7 @@ import ( ) func dateTimeFromString(s string) types.Time { - t, err := types.ParseDate(nil, s) + t, err := types.ParseDate(types.DefaultStmtNoWarningContext, s) if err != nil { panic(err) } diff --git a/pkg/expression/builtin_time.go b/pkg/expression/builtin_time.go index 5af1a08e1be25..4f650eac69d27 100644 --- a/pkg/expression/builtin_time.go +++ b/pkg/expression/builtin_time.go @@ -309,7 +309,7 @@ func (c *dateLiteralFunctionClass) getFunction(ctx sessionctx.Context, args []Ex if !datePattern.MatchString(str) { return nil, types.ErrWrongValue.GenWithStackByArgs(types.DateStr, str) } - tm, err := types.ParseDate(ctx.GetSessionVars().StmtCtx, str) + tm, err := types.ParseDate(ctx.GetSessionVars().StmtCtx.TypeCtx(), str) if err != nil { return nil, err } @@ -597,7 +597,7 @@ func (b *builtinStringDurationTimeDiffSig) evalDuration(row chunk.Row) (d types. // calculateTimeDiff calculates interval difference of two types.Time. 
func calculateTimeDiff(sc *stmtctx.StatementContext, lhs, rhs types.Time) (d types.Duration, isNull bool, err error) { - d = lhs.Sub(sc, &rhs) + d = lhs.Sub(sc.TypeCtx(), &rhs) d.Duration, err = types.TruncateOverflowMySQLTime(d.Duration) if types.ErrTruncatedWrongVal.Equal(err) { err = sc.HandleTruncate(err) @@ -760,7 +760,7 @@ func convertStringToDuration(sc *stmtctx.StatementContext, str string, fsp int) fsp = mathutil.Max(lenStrFsp, fsp) } } - return types.StrToDuration(sc, str, fsp) + return types.StrToDuration(sc.TypeCtx(), str, fsp) } type dateFormatFunctionClass struct { @@ -1931,7 +1931,7 @@ func (b *builtinStrToDateDateSig) evalTime(row chunk.Row) (types.Time, bool, err } var t types.Time sc := b.ctx.GetSessionVars().StmtCtx - succ := t.StrToDate(sc, date, format) + succ := t.StrToDate(sc.TypeCtx(), date, format) if !succ { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, t.String())) } @@ -1964,7 +1964,7 @@ func (b *builtinStrToDateDatetimeSig) evalTime(row chunk.Row) (types.Time, bool, } var t types.Time sc := b.ctx.GetSessionVars().StmtCtx - succ := t.StrToDate(sc, date, format) + succ := t.StrToDate(sc.TypeCtx(), date, format) if !succ { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, t.String())) } @@ -2000,7 +2000,7 @@ func (b *builtinStrToDateDurationSig) evalDuration(row chunk.Row) (types.Duratio } var t types.Time sc := b.ctx.GetSessionVars().StmtCtx - succ := t.StrToDate(sc, date, format) + succ := t.StrToDate(sc.TypeCtx(), date, format) if !succ { return types.Duration{}, true, handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, t.String())) } @@ -2185,7 +2185,7 @@ func (b *builtinCurrentTime0ArgSig) evalDuration(row chunk.Row) (types.Duration, return types.Duration{}, true, err } dur := nowTs.In(tz).Format(types.TimeFormat) - res, _, err := 
types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, dur, types.MinFsp) + res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), dur, types.MinFsp) if err != nil { return types.Duration{}, true, err } @@ -2213,7 +2213,7 @@ func (b *builtinCurrentTime1ArgSig) evalDuration(row chunk.Row) (types.Duration, return types.Duration{}, true, err } dur := nowTs.In(tz).Format(types.TimeFSPFormat) - res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, dur, int(fsp)) + res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), dur, int(fsp)) if err != nil { return types.Duration{}, true, err } @@ -2273,7 +2273,7 @@ func (b *builtinTimeSig) evalDuration(row chunk.Row) (res types.Duration, isNull fsp = tmpFsp sc := b.ctx.GetSessionVars().StmtCtx - res, _, err = types.ParseDuration(sc, expr, fsp) + res, _, err = types.ParseDuration(sc.TypeCtx(), expr, fsp) if types.ErrTruncatedWrongVal.Equal(err) { err = sc.HandleTruncate(err) } @@ -2300,7 +2300,7 @@ func (c *timeLiteralFunctionClass) getFunction(ctx sessionctx.Context, args []Ex if !isDuration(str) { return nil, types.ErrWrongValue.GenWithStackByArgs(types.TimeStr, str) } - duration, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx, str, types.GetFsp(str)) + duration, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx.TypeCtx(), str, types.GetFsp(str)) if err != nil { return nil, err } @@ -2678,7 +2678,7 @@ func (b *builtinExtractDatetimeFromStringSig) evalInt(row chunk.Row) (int64, boo } sc := b.ctx.GetSessionVars().StmtCtx if types.IsClockUnit(unit) && types.IsDateUnit(unit) { - dur, _, err := types.ParseDuration(sc, dtStr, types.GetFsp(dtStr)) + dur, _, err := types.ParseDuration(sc.TypeCtx(), dtStr, types.GetFsp(dtStr)) if err != nil { return 0, true, err } @@ -2686,7 +2686,7 @@ func (b *builtinExtractDatetimeFromStringSig) evalInt(row chunk.Row) (int64, boo if err != nil { return 0, true, err } - dt, err := types.ParseDatetime(sc, dtStr) + dt, err := 
types.ParseDatetime(sc.TypeCtx(), dtStr) if err != nil { return res, false, nil } @@ -2774,7 +2774,7 @@ func (du *baseDateArithmetical) getDateFromString(ctx sessionctx.Context, args [ } sc := ctx.GetSessionVars().StmtCtx - date, err := types.ParseTime(sc, dateStr, dateTp, types.MaxFsp, nil) + date, err := types.ParseTime(sc.TypeCtx(), dateStr, dateTp, types.MaxFsp, nil) if err != nil { err = handleInvalidTimeError(ctx, err) if err != nil { @@ -2794,7 +2794,7 @@ func (du *baseDateArithmetical) getDateFromInt(ctx sessionctx.Context, args []Ex } sc := ctx.GetSessionVars().StmtCtx - date, err := types.ParseTimeFromInt64(sc, dateInt) + date, err := types.ParseTimeFromInt64(sc.TypeCtx(), dateInt) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(ctx, err) } @@ -2814,7 +2814,7 @@ func (du *baseDateArithmetical) getDateFromReal(ctx sessionctx.Context, args []E } sc := ctx.GetSessionVars().StmtCtx - date, err := types.ParseTimeFromFloat64(sc, dateReal) + date, err := types.ParseTimeFromFloat64(sc.TypeCtx(), dateReal) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(ctx, err) } @@ -2834,7 +2834,7 @@ func (du *baseDateArithmetical) getDateFromDecimal(ctx sessionctx.Context, args } sc := ctx.GetSessionVars().StmtCtx - date, err := types.ParseTimeFromDecimal(sc, dateDec) + date, err := types.ParseTimeFromDecimal(sc.TypeCtx(), dateDec) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(ctx, err) } @@ -2983,7 +2983,7 @@ func (du *baseDateArithmetical) addDate(ctx sessionctx.Context, date types.Time, } date.SetCoreTime(types.FromGoTime(goTime)) - overflow, err := types.DateTimeIsOverflow(ctx.GetSessionVars().StmtCtx, date) + overflow, err := types.DateTimeIsOverflow(ctx.GetSessionVars().StmtCtx.TypeCtx(), date) if err := handleInvalidTimeError(ctx, err); err != nil { return types.ZeroTime, true, err } @@ -3053,7 +3053,7 @@ func (du *baseDateArithmetical) vecGetDateFromInt(b *baseBuiltinFunc, input *chu continue } - date, err 
:= types.ParseTimeFromInt64(sc, i64s[i]) + date, err := types.ParseTimeFromInt64(sc.TypeCtx(), i64s[i]) if err != nil { err = handleInvalidTimeError(b.ctx, err) if err != nil { @@ -3095,7 +3095,7 @@ func (du *baseDateArithmetical) vecGetDateFromReal(b *baseBuiltinFunc, input *ch continue } - date, err := types.ParseTimeFromFloat64(sc, f64s[i]) + date, err := types.ParseTimeFromFloat64(sc.TypeCtx(), f64s[i]) if err != nil { err = handleInvalidTimeError(b.ctx, err) if err != nil { @@ -3137,7 +3137,7 @@ func (du *baseDateArithmetical) vecGetDateFromDecimal(b *baseBuiltinFunc, input } dec := buf.GetDecimal(i) - date, err := types.ParseTimeFromDecimal(sc, dec) + date, err := types.ParseTimeFromDecimal(sc.TypeCtx(), dec) if err != nil { err = handleInvalidTimeError(b.ctx, err) if err != nil { @@ -3184,7 +3184,7 @@ func (du *baseDateArithmetical) vecGetDateFromString(b *baseBuiltinFunc, input * dateTp = mysql.TypeDatetime } - date, err := types.ParseTime(sc, dateStr, dateTp, types.MaxFsp, nil) + date, err := types.ParseTime(sc.TypeCtx(), dateStr, dateTp, types.MaxFsp, nil) if err != nil { err = handleInvalidTimeError(b.ctx, err) if err != nil { @@ -4002,7 +4002,7 @@ func (b *builtinAddSubDateDurationAnySig) evalTime(row chunk.Row) (types.Time, b } sc := b.ctx.GetSessionVars().StmtCtx - t, err := d.ConvertToTime(sc, mysql.TypeDatetime) + t, err := d.ConvertToTime(sc.TypeCtx(), mysql.TypeDatetime) if err != nil { return types.ZeroTime, true, err } @@ -4355,9 +4355,9 @@ func (b *builtinTimestamp1ArgSig) evalTime(row chunk.Row) (types.Time, bool, err var tm types.Time sc := b.ctx.GetSessionVars().StmtCtx if b.isFloat { - tm, err = types.ParseTimeFromFloatString(sc, s, mysql.TypeDatetime, types.GetFsp(s)) + tm, err = types.ParseTimeFromFloatString(sc.TypeCtx(), s, mysql.TypeDatetime, types.GetFsp(s)) } else { - tm, err = types.ParseTime(sc, s, mysql.TypeDatetime, types.GetFsp(s), nil) + tm, err = types.ParseTime(sc.TypeCtx(), s, mysql.TypeDatetime, types.GetFsp(s), nil) } if 
err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) @@ -4387,9 +4387,9 @@ func (b *builtinTimestamp2ArgsSig) evalTime(row chunk.Row) (types.Time, bool, er var tm types.Time sc := b.ctx.GetSessionVars().StmtCtx if b.isFloat { - tm, err = types.ParseTimeFromFloatString(sc, arg0, mysql.TypeDatetime, types.GetFsp(arg0)) + tm, err = types.ParseTimeFromFloatString(sc.TypeCtx(), arg0, mysql.TypeDatetime, types.GetFsp(arg0)) } else { - tm, err = types.ParseTime(sc, arg0, mysql.TypeDatetime, types.GetFsp(arg0), nil) + tm, err = types.ParseTime(sc.TypeCtx(), arg0, mysql.TypeDatetime, types.GetFsp(arg0), nil) } if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) @@ -4406,11 +4406,11 @@ func (b *builtinTimestamp2ArgsSig) evalTime(row chunk.Row) (types.Time, bool, er if !isDuration(arg1) { return types.ZeroTime, true, nil } - duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } - tmp, err := tm.Add(sc, duration) + tmp, err := tm.Add(sc.TypeCtx(), duration) if err != nil { return types.ZeroTime, true, err } @@ -4440,7 +4440,7 @@ func (c *timestampLiteralFunctionClass) getFunction(ctx sessionctx.Context, args if !timestampPattern.MatchString(str) { return nil, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, str) } - tm, err := types.ParseTime(ctx.GetSessionVars().StmtCtx, str, mysql.TypeDatetime, types.GetFsp(str), nil) + tm, err := types.ParseTime(ctx.GetSessionVars().StmtCtx.TypeCtx(), str, mysql.TypeDatetime, types.GetFsp(str), nil) if err != nil { return nil, err } @@ -4548,13 +4548,13 @@ func isDuration(str string) bool { // strDatetimeAddDuration adds duration to datetime string, returns a string value. 
func strDatetimeAddDuration(sc *stmtctx.StatementContext, d string, arg1 types.Duration) (result string, isNull bool, err error) { - arg0, err := types.ParseTime(sc, d, mysql.TypeDatetime, types.MaxFsp, nil) + arg0, err := types.ParseTime(sc.TypeCtx(), d, mysql.TypeDatetime, types.MaxFsp, nil) if err != nil { // Return a warning regardless of the sql_mode, this is compatible with MySQL. sc.AppendWarning(err) return "", true, nil } - ret, err := arg0.Add(sc, arg1) + ret, err := arg0.Add(sc.TypeCtx(), arg1) if err != nil { return "", false, err } @@ -4568,7 +4568,7 @@ func strDatetimeAddDuration(sc *stmtctx.StatementContext, d string, arg1 types.D // strDurationAddDuration adds duration to duration string, returns a string value. func strDurationAddDuration(sc *stmtctx.StatementContext, d string, arg1 types.Duration) (string, error) { - arg0, _, err := types.ParseDuration(sc, d, types.MaxFsp) + arg0, _, err := types.ParseDuration(sc.TypeCtx(), d, types.MaxFsp) if err != nil { return "", err } @@ -4585,13 +4585,13 @@ func strDurationAddDuration(sc *stmtctx.StatementContext, d string, arg1 types.D // strDatetimeSubDuration subtracts duration from datetime string, returns a string value. func strDatetimeSubDuration(sc *stmtctx.StatementContext, d string, arg1 types.Duration) (result string, isNull bool, err error) { - arg0, err := types.ParseTime(sc, d, mysql.TypeDatetime, types.MaxFsp, nil) + arg0, err := types.ParseTime(sc.TypeCtx(), d, mysql.TypeDatetime, types.MaxFsp, nil) if err != nil { // Return a warning regardless of the sql_mode, this is compatible with MySQL. sc.AppendWarning(err) return "", true, nil } - resultTime, err := arg0.Add(sc, arg1.Neg()) + resultTime, err := arg0.Add(sc.TypeCtx(), arg1.Neg()) if err != nil { return "", false, err } @@ -4605,7 +4605,7 @@ func strDatetimeSubDuration(sc *stmtctx.StatementContext, d string, arg1 types.D // strDurationSubDuration subtracts duration from duration string, returns a string value. 
func strDurationSubDuration(sc *stmtctx.StatementContext, d string, arg1 types.Duration) (string, error) { - arg0, _, err := types.ParseDuration(sc, d, types.MaxFsp) + arg0, _, err := types.ParseDuration(sc.TypeCtx(), d, types.MaxFsp) if err != nil { return "", err } @@ -4725,7 +4725,7 @@ func (b *builtinAddDatetimeAndDurationSig) evalTime(row chunk.Row) (types.Time, if isNull || err != nil { return types.ZeroDatetime, isNull, err } - result, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx, arg1) + result, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), arg1) return result, err != nil, err } @@ -4754,7 +4754,7 @@ func (b *builtinAddDatetimeAndStringSig) evalTime(row chunk.Row) (types.Time, bo return types.ZeroDatetime, true, nil } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err := types.ParseDuration(sc, s, types.GetFsp(s)) + arg1, _, err := types.ParseDuration(sc.TypeCtx(), s, types.GetFsp(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -4762,7 +4762,7 @@ func (b *builtinAddDatetimeAndStringSig) evalTime(row chunk.Row) (types.Time, bo } return types.ZeroDatetime, true, err } - result, err := arg0.Add(sc, arg1) + result, err := arg0.Add(sc.TypeCtx(), arg1) return result, err != nil, err } @@ -4835,7 +4835,7 @@ func (b *builtinAddDurationAndStringSig) evalDuration(row chunk.Row) (types.Dura return types.ZeroDuration, true, nil } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err := types.ParseDuration(sc, s, types.GetFsp(s)) + arg1, _, err := types.ParseDuration(sc.TypeCtx(), s, types.GetFsp(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -4937,7 +4937,7 @@ func (b *builtinAddStringAndStringSig) evalString(row chunk.Row) (result string, return "", isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err = types.ParseDuration(sc, arg1Str, getFsp4TimeAddSub(arg1Str)) + arg1, _, err = types.ParseDuration(sc.TypeCtx(), arg1Str, 
getFsp4TimeAddSub(arg1Str)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -5020,7 +5020,7 @@ func (b *builtinAddDateAndStringSig) evalString(row chunk.Row) (string, bool, er return "", true, nil } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err := types.ParseDuration(sc, s, getFsp4TimeAddSub(s)) + arg1, _, err := types.ParseDuration(sc.TypeCtx(), s, getFsp4TimeAddSub(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -5286,7 +5286,7 @@ func (b *builtinMakeTimeSig) makeTime(hour int64, minute int64, second float64, second = 59 } fsp := b.tp.GetDecimal() - d, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, fmt.Sprintf("%02d:%02d:%v", hour, minute, second), fsp) + d, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), fmt.Sprintf("%02d:%02d:%v", hour, minute, second), fsp) return d, err } @@ -5581,7 +5581,7 @@ func (b *builtinSecToTimeSig) evalDuration(row chunk.Row) (types.Duration, bool, secondDemical = float64(second) + demical var dur types.Duration - dur, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, fmt.Sprintf("%s%02d:%02d:%s", negative, hour, minute, strconv.FormatFloat(secondDemical, 'f', -1, 64)), b.tp.GetDecimal()) + dur, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), fmt.Sprintf("%s%02d:%02d:%s", negative, hour, minute, strconv.FormatFloat(secondDemical, 'f', -1, 64)), b.tp.GetDecimal()) if err != nil { return types.Duration{}, err != nil, err } @@ -5678,7 +5678,7 @@ func (b *builtinSubDatetimeAndDurationSig) evalTime(row chunk.Row) (types.Time, return types.ZeroDatetime, isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - result, err := arg0.Add(sc, arg1.Neg()) + result, err := arg0.Add(sc.TypeCtx(), arg1.Neg()) return result, err != nil, err } @@ -5707,7 +5707,7 @@ func (b *builtinSubDatetimeAndStringSig) evalTime(row chunk.Row) (types.Time, bo return types.ZeroDatetime, true, 
nil } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err := types.ParseDuration(sc, s, types.GetFsp(s)) + arg1, _, err := types.ParseDuration(sc.TypeCtx(), s, types.GetFsp(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -5715,7 +5715,7 @@ func (b *builtinSubDatetimeAndStringSig) evalTime(row chunk.Row) (types.Time, bo } return types.ZeroDatetime, true, err } - result, err := arg0.Add(sc, arg1.Neg()) + result, err := arg0.Add(sc.TypeCtx(), arg1.Neg()) return result, err != nil, err } @@ -5806,7 +5806,7 @@ func (b *builtinSubStringAndStringSig) evalString(row chunk.Row) (result string, return "", isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err = types.ParseDuration(sc, s, getFsp4TimeAddSub(s)) + arg1, _, err = types.ParseDuration(sc.TypeCtx(), s, getFsp4TimeAddSub(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -5898,7 +5898,7 @@ func (b *builtinSubDurationAndStringSig) evalDuration(row chunk.Row) (types.Dura return types.ZeroDuration, true, nil } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err := types.ParseDuration(sc, s, types.GetFsp(s)) + arg1, _, err := types.ParseDuration(sc.TypeCtx(), s, types.GetFsp(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -5976,7 +5976,7 @@ func (b *builtinSubDateAndStringSig) evalString(row chunk.Row) (string, bool, er return "", true, nil } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err := types.ParseDuration(sc, s, getFsp4TimeAddSub(s)) + arg1, _, err := types.ParseDuration(sc.TypeCtx(), s, getFsp4TimeAddSub(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -6238,7 +6238,7 @@ func (b *builtinTimestampAddSig) evalString(row chunk.Row) (string, bool, error) fsp = types.MaxFsp } r := types.NewTime(types.FromGoTime(tb), b.resolveType(arg.Type(), unit), fsp) - if err = 
r.Check(b.ctx.GetSessionVars().StmtCtx); err != nil { + if err = r.Check(b.ctx.GetSessionVars().StmtCtx.TypeCtx()); err != nil { return "", true, handleInvalidTimeError(b.ctx, err) } return r.String(), false, nil @@ -6403,7 +6403,7 @@ func (b *builtinUTCTimeWithoutArgSig) evalDuration(row chunk.Row) (types.Duratio if err != nil { return types.Duration{}, true, err } - v, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, nowTs.UTC().Format(types.TimeFormat), 0) + v, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), nowTs.UTC().Format(types.TimeFormat), 0) return v, false, err } @@ -6434,7 +6434,7 @@ func (b *builtinUTCTimeWithArgSig) evalDuration(row chunk.Row) (types.Duration, if err != nil { return types.Duration{}, true, err } - v, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, nowTs.UTC().Format(types.TimeFSPFormat), int(fsp)) + v, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), nowTs.UTC().Format(types.TimeFSPFormat), int(fsp)) return v, false, err } diff --git a/pkg/expression/builtin_time_test.go b/pkg/expression/builtin_time_test.go index 85c175617414a..cb85b8ab63146 100644 --- a/pkg/expression/builtin_time_test.go +++ b/pkg/expression/builtin_time_test.go @@ -421,7 +421,7 @@ func TestDate(t *testing.T) { func TestMonthName(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) cases := []struct { args interface{} expected string @@ -457,7 +457,7 @@ func TestMonthName(t *testing.T) { func TestDayName(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) cases := []struct { args interface{} expected string @@ -495,7 +495,7 @@ func TestDayName(t *testing.T) { func TestDayOfWeek(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - 
sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) cases := []struct { args interface{} expected int64 @@ -531,7 +531,7 @@ func TestDayOfWeek(t *testing.T) { func TestDayOfMonth(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) cases := []struct { args interface{} expected int64 @@ -567,7 +567,7 @@ func TestDayOfMonth(t *testing.T) { func TestDayOfYear(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) cases := []struct { args interface{} expected int64 @@ -964,7 +964,7 @@ func TestAddTimeSig(t *testing.T) { {"-110:00:00", "1 02:00:00", "-84:00:00"}, } for _, c := range tbl { - dur, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx, c.Input, types.GetFsp(c.Input)) + dur, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx.TypeCtx(), c.Input, types.GetFsp(c.Input)) require.NoError(t, err) tmpInput := types.NewDurationDatum(dur) tmpInputDuration := types.NewStringDatum(c.InputDuration) @@ -1065,7 +1065,7 @@ func TestSubTimeSig(t *testing.T) { {"235959", "00:00:01", "23:59:58"}, } for _, c := range tbl { - dur, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx, c.Input, types.GetFsp(c.Input)) + dur, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx.TypeCtx(), c.Input, types.GetFsp(c.Input)) require.NoError(t, err) tmpInput := types.NewDurationDatum(dur) tmpInputDuration := types.NewStringDatum(c.InputDuration) @@ -1613,7 +1613,7 @@ func TestDateDiff(t *testing.T) { func TestTimeDiff(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) // Test cases from https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_timediff tests := []struct { args 
[]interface{} @@ -1717,7 +1717,7 @@ func TestWeekWithoutModeSig(t *testing.T) { func TestYearWeek(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) // Test cases from https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_yearweek tests := []struct { t string @@ -1781,8 +1781,7 @@ func TestTimestampDiff(t *testing.T) { } sc := ctx.GetSessionVars().StmtCtx - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true).WithIgnoreZeroInDate(true)) resetStmtContext(ctx) f, err := fc.getFunction(ctx, datumsToConstants([]types.Datum{types.NewStringDatum("DAY"), types.NewStringDatum("2017-01-00"), @@ -1843,7 +1842,7 @@ func TestUnixTimestamp(t *testing.T) { // Set the time_zone variable, because UnixTimestamp() result depends on it. ctx.GetSessionVars().TimeZone = time.UTC - ctx.GetSessionVars().StmtCtx.IgnoreZeroInDate = true + ctx.GetSessionVars().StmtCtx.SetTypeFlags(ctx.GetSessionVars().StmtCtx.TypeFlags().WithIgnoreZeroInDate(true)) tests := []struct { inputDecimal int input types.Datum @@ -2161,7 +2160,7 @@ func TestDateArithFuncs(t *testing.T) { }, } for _, tt := range testDurations { - dur, _, ok, err := types.StrToDuration(nil, tt.dur, tt.fsp) + dur, _, ok, err := types.StrToDuration(types.DefaultStmtNoWarningContext, tt.dur, tt.fsp) require.NoError(t, err) require.True(t, ok) args = types.MakeDatums(dur, tt.format, tt.unit) @@ -2400,7 +2399,7 @@ func TestMakeTime(t *testing.T) { func TestQuarter(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) tests := []struct { t string expect int64 @@ -2479,7 +2478,7 @@ func TestGetFormat(t *testing.T) { func TestToSeconds(t *testing.T) { ctx := createContext(t) sc := 
ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) tests := []struct { param interface{} expect int64 @@ -2522,7 +2521,7 @@ func TestToSeconds(t *testing.T) { func TestToDays(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) tests := []struct { param interface{} expect int64 @@ -2951,7 +2950,7 @@ func TestLastDay(t *testing.T) { } var timeData types.Time - timeData.StrToDate(ctx.GetSessionVars().StmtCtx, "202010", "%Y%m") + timeData.StrToDate(ctx.GetSessionVars().StmtCtx.TypeCtx(), "202010", "%Y%m") testsNull := []struct { param interface{} isNilNoZeroDate bool @@ -2996,7 +2995,7 @@ func TestWithTimeZone(t *testing.T) { return result } durationToGoTime := func(d types.Datum, loc *time.Location) time.Time { - t, _ := d.GetMysqlDuration().ConvertToTime(sv.StmtCtx, mysql.TypeDatetime) + t, _ := d.GetMysqlDuration().ConvertToTime(sv.StmtCtx.TypeCtx(), mysql.TypeDatetime) result, _ := t.GoTime(sv.TimeZone) return result } diff --git a/pkg/expression/builtin_time_vec.go b/pkg/expression/builtin_time_vec.go index 0ff70bf96e7c6..df66101068f62 100644 --- a/pkg/expression/builtin_time_vec.go +++ b/pkg/expression/builtin_time_vec.go @@ -421,7 +421,7 @@ func (b *builtinUTCTimeWithArgSig) vecEvalDuration(input *chunk.Chunk, result *c if fsp < int64(types.MinFsp) { return errors.Errorf("Invalid negative %d specified, must in [0, 6]", fsp) } - res, _, err := types.ParseDuration(stmtCtx, utc, int(fsp)) + res, _, err := types.ParseDuration(stmtCtx.TypeCtx(), utc, int(fsp)) if err != nil { return err } @@ -726,7 +726,7 @@ func (b *builtinStrToDateDateSig) vecEvalTime(input *chunk.Chunk, result *chunk. 
continue } var t types.Time - succ := t.StrToDate(sc, bufStrings.GetString(i), bufFormats.GetString(i)) + succ := t.StrToDate(sc.TypeCtx(), bufStrings.GetString(i), bufFormats.GetString(i)) if !succ { if err := handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, t.String())); err != nil { return err @@ -1138,7 +1138,7 @@ func (b *builtinStrToDateDurationSig) vecEvalDuration(input *chunk.Chunk, result continue } var t types.Time - succ := t.StrToDate(sc, bufStrings.GetString(i), bufFormats.GetString(i)) + succ := t.StrToDate(sc.TypeCtx(), bufStrings.GetString(i), bufFormats.GetString(i)) if !succ { if err := handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, t.String())); err != nil { return err @@ -1500,7 +1500,7 @@ func (b *builtinStrToDateDatetimeSig) vecEvalTime(input *chunk.Chunk, result *ch continue } var t types.Time - succ := t.StrToDate(sc, dateBuf.GetString(i), formatBuf.GetString(i)) + succ := t.StrToDate(sc.TypeCtx(), dateBuf.GetString(i), formatBuf.GetString(i)) if !succ { if err = handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, t.String())); err != nil { return err @@ -1744,7 +1744,7 @@ func (b *builtinTimestampAddSig) vecEvalString(input *chunk.Chunk, result *chunk fsp = types.MaxFsp } r := types.NewTime(types.FromGoTime(tb), b.resolveType(arg.Type(), unit), fsp) - if err = r.Check(b.ctx.GetSessionVars().StmtCtx); err != nil { + if err = r.Check(b.ctx.GetSessionVars().StmtCtx.TypeCtx()); err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err } @@ -1937,7 +1937,7 @@ func (b *builtinSecToTimeSig) vecEvalDuration(input *chunk.Chunk, result *chunk. 
second = seconds % 60 } secondDemical := float64(second) + demical - duration, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, fmt.Sprintf("%s%02d:%02d:%s", negative, hour, minute, strconv.FormatFloat(secondDemical, 'f', -1, 64)), b.tp.GetDecimal()) + duration, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), fmt.Sprintf("%s%02d:%02d:%s", negative, hour, minute, strconv.FormatFloat(secondDemical, 'f', -1, 64)), b.tp.GetDecimal()) if err != nil { return err } @@ -1958,7 +1958,7 @@ func (b *builtinUTCTimeWithoutArgSig) vecEvalDuration(input *chunk.Chunk, result if err != nil { return err } - res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, nowTs.UTC().Format(types.TimeFormat), types.DefaultFsp) + res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), nowTs.UTC().Format(types.TimeFormat), types.DefaultFsp) if err != nil { return err } @@ -2361,7 +2361,7 @@ func (b *builtinCurrentTime0ArgSig) vecEvalDuration(input *chunk.Chunk, result * } tz := b.ctx.GetSessionVars().Location() dur := nowTs.In(tz).Format(types.TimeFormat) - res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, dur, types.MinFsp) + res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), dur, types.MinFsp) if err != nil { return err } @@ -2409,7 +2409,7 @@ func (b *builtinTimeSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Colum } fsp = tmpFsp - res, _, err := types.ParseDuration(sc, expr, fsp) + res, _, err := types.ParseDuration(sc.TypeCtx(), expr, fsp) if types.ErrTruncatedWrongVal.Equal(err) { err = sc.HandleTruncate(err) } @@ -2555,7 +2555,7 @@ func (b *builtinCurrentTime1ArgSig) vecEvalDuration(input *chunk.Chunk, result * result.ResizeGoDuration(n, false) durations := result.GoDurations() for i := 0; i < n; i++ { - res, _, err := types.ParseDuration(stmtCtx, dur, int(i64s[i])) + res, _, err := types.ParseDuration(stmtCtx.TypeCtx(), dur, int(i64s[i])) if err != nil { return err } @@ 
-2658,9 +2658,9 @@ func (b *builtinTimestamp1ArgSig) vecEvalTime(input *chunk.Chunk, result *chunk. s := buf.GetString(i) if b.isFloat { - tm, err = types.ParseTimeFromFloatString(sc, s, mysql.TypeDatetime, types.GetFsp(s)) + tm, err = types.ParseTimeFromFloatString(sc.TypeCtx(), s, mysql.TypeDatetime, types.GetFsp(s)) } else { - tm, err = types.ParseTime(sc, s, mysql.TypeDatetime, types.GetFsp(s), nil) + tm, err = types.ParseTime(sc.TypeCtx(), s, mysql.TypeDatetime, types.GetFsp(s), nil) } if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { @@ -2711,9 +2711,9 @@ func (b *builtinTimestamp2ArgsSig) vecEvalTime(input *chunk.Chunk, result *chunk arg1 := buf1.GetString(i) if b.isFloat { - tm, err = types.ParseTimeFromFloatString(sc, arg0, mysql.TypeDatetime, types.GetFsp(arg0)) + tm, err = types.ParseTimeFromFloatString(sc.TypeCtx(), arg0, mysql.TypeDatetime, types.GetFsp(arg0)) } else { - tm, err = types.ParseTime(sc, arg0, mysql.TypeDatetime, types.GetFsp(arg0), nil) + tm, err = types.ParseTime(sc.TypeCtx(), arg0, mysql.TypeDatetime, types.GetFsp(arg0), nil) } if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { @@ -2734,7 +2734,7 @@ func (b *builtinTimestamp2ArgsSig) vecEvalTime(input *chunk.Chunk, result *chunk continue } - duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -2742,7 +2742,7 @@ func (b *builtinTimestamp2ArgsSig) vecEvalTime(input *chunk.Chunk, result *chunk result.SetNull(i, true) continue } - tmp, err := tm.Add(sc, duration) + tmp, err := tm.Add(sc.TypeCtx(), duration) if err != nil { return err } @@ -2929,7 +2929,7 @@ func (b *builtinAddSubDateDurationAnySig) vecEvalTime(input *chunk.Chunk, result continue } iterDuration.Duration = goDurations[i] - t, err := iterDuration.ConvertToTime(sc, mysql.TypeDatetime) + t, err := 
iterDuration.ConvertToTime(sc.TypeCtx(), mysql.TypeDatetime) if err != nil { result.SetNull(i, true) } diff --git a/pkg/expression/builtin_time_vec_generated.go b/pkg/expression/builtin_time_vec_generated.go index a40cdb3909554..9e29d8f0abe97 100644 --- a/pkg/expression/builtin_time_vec_generated.go +++ b/pkg/expression/builtin_time_vec_generated.go @@ -62,7 +62,7 @@ func (b *builtinAddDatetimeAndDurationSig) vecEvalTime(input *chunk.Chunk, resul // calculate - output, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx, types.Duration{Duration: arg1, Fsp: -1}) + output, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), types.Duration{Duration: arg1, Fsp: -1}) if err != nil { return err @@ -122,7 +122,7 @@ func (b *builtinAddDatetimeAndStringSig) vecEvalTime(input *chunk.Chunk, result continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -132,7 +132,7 @@ func (b *builtinAddDatetimeAndStringSig) vecEvalTime(input *chunk.Chunk, result return err } - output, err := arg0.Add(sc, arg1Duration) + output, err := arg0.Add(sc.TypeCtx(), arg1Duration) if err != nil { return err @@ -248,7 +248,7 @@ func (b *builtinAddDurationAndStringSig) vecEvalDuration(input *chunk.Chunk, res continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -406,7 +406,7 @@ func (b *builtinAddStringAndStringSig) vecEvalString(input *chunk.Chunk, result // calculate sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1)) + arg1Duration, _, err 
:= types.ParseDuration(sc.TypeCtx(), arg1, getFsp4TimeAddSub(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -566,7 +566,7 @@ func (b *builtinAddDateAndStringSig) vecEvalString(input *chunk.Chunk, result *c continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, getFsp4TimeAddSub(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -677,7 +677,7 @@ func (b *builtinSubDatetimeAndDurationSig) vecEvalTime(input *chunk.Chunk, resul sc := b.ctx.GetSessionVars().StmtCtx arg1Duration := types.Duration{Duration: arg1, Fsp: -1} - output, err := arg0.Add(sc, arg1Duration.Neg()) + output, err := arg0.Add(sc.TypeCtx(), arg1Duration.Neg()) if err != nil { return err @@ -737,7 +737,7 @@ func (b *builtinSubDatetimeAndStringSig) vecEvalTime(input *chunk.Chunk, result continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -746,7 +746,7 @@ func (b *builtinSubDatetimeAndStringSig) vecEvalTime(input *chunk.Chunk, result } return err } - output, err := arg0.Add(sc, arg1Duration.Neg()) + output, err := arg0.Add(sc.TypeCtx(), arg1Duration.Neg()) if err != nil { return err @@ -862,7 +862,7 @@ func (b *builtinSubDurationAndStringSig) vecEvalDuration(input *chunk.Chunk, res continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -1020,7 +1020,7 @@ func (b 
*builtinSubStringAndStringSig) vecEvalString(input *chunk.Chunk, result // calculate sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, getFsp4TimeAddSub(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -1180,7 +1180,7 @@ func (b *builtinSubDateAndStringSig) vecEvalString(input *chunk.Chunk, result *c continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, getFsp4TimeAddSub(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) diff --git a/pkg/expression/builtin_vectorized_test.go b/pkg/expression/builtin_vectorized_test.go index d5921a2ffc43b..f50338f8a58eb 100644 --- a/pkg/expression/builtin_vectorized_test.go +++ b/pkg/expression/builtin_vectorized_test.go @@ -279,7 +279,7 @@ func (p *mockBuiltinDouble) vecEvalTime(input *chunk.Chunk, result *chunk.Column if err != nil { return err } - if ts[i], err = ts[i].Add(p.ctx.GetSessionVars().StmtCtx, d); err != nil { + if ts[i], err = ts[i].Add(p.ctx.GetSessionVars().StmtCtx.TypeCtx(), d); err != nil { return err } } @@ -371,7 +371,7 @@ func (p *mockBuiltinDouble) evalTime(row chunk.Row) (types.Time, bool, error) { if err != nil { return types.ZeroTime, false, err } - v, err = v.Add(p.ctx.GetSessionVars().StmtCtx, d) + v, err = v.Add(p.ctx.GetSessionVars().StmtCtx.TypeCtx(), d) return v, isNull, err } @@ -512,7 +512,7 @@ func checkVecEval(t *testing.T, eType types.EvalType, sel []int, result *chunk.C tt := types.NewTime(gt, convertETType(eType), 0) d, err := tt.ConvertToDuration() require.NoError(t, err) - v, err := tt.Add(mock.NewContext().GetSessionVars().StmtCtx, d) + v, err := tt.Add(mock.NewContext().GetSessionVars().StmtCtx.TypeCtx(), d) 
require.NoError(t, err) require.Equal(t, 0, v.Compare(ds[i])) } diff --git a/pkg/expression/distsql_builtin_test.go b/pkg/expression/distsql_builtin_test.go index ef4a2ff34567b..34d9ef2ca49bd 100644 --- a/pkg/expression/distsql_builtin_test.go +++ b/pkg/expression/distsql_builtin_test.go @@ -963,7 +963,7 @@ func newDuration(dur time.Duration) types.Duration { } func newDateTime(t *testing.T, s string) types.Time { - tt, err := types.ParseDate(nil, s) + tt, err := types.ParseDate(types.DefaultStmtNoWarningContext, s) require.NoError(t, err) return tt } diff --git a/pkg/expression/generator/time_vec.go b/pkg/expression/generator/time_vec.go index 39f11261056fd..a7e93da334b02 100644 --- a/pkg/expression/generator/time_vec.go +++ b/pkg/expression/generator/time_vec.go @@ -63,7 +63,7 @@ import ( continue }{{ end }} sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, {{if eq .Output.TypeName "String"}}getFsp4TimeAddSub{{else}}types.GetFsp{{end}}(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, {{if eq .Output.TypeName "String"}}getFsp4TimeAddSub{{else}}types.GetFsp{{end}}(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -171,11 +171,11 @@ func (b *{{.SigName}}) vecEval{{ .Output.TypeName }}(input *chunk.Chunk, result // calculate {{ if or (eq .SigName "builtinAddDatetimeAndDurationSig") (eq .SigName "builtinSubDatetimeAndDurationSig") }} {{ if eq $.FuncName "AddTime" }} - output, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx, types.Duration{Duration: arg1, Fsp: -1}) + output, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), types.Duration{Duration: arg1, Fsp: -1}) {{ else }} sc := b.ctx.GetSessionVars().StmtCtx arg1Duration := types.Duration{Duration: arg1, Fsp: -1} - output, err := arg0.Add(sc, arg1Duration.Neg()) + output, err := arg0.Add(sc.TypeCtx(), arg1Duration.Neg()) {{ end }} if err != nil { return err @@ -184,14 +184,14 @@ func (b 
*{{.SigName}}) vecEval{{ .Output.TypeName }}(input *chunk.Chunk, result {{ else if or (eq .SigName "builtinAddDatetimeAndStringSig") (eq .SigName "builtinSubDatetimeAndStringSig") }} {{ if eq $.FuncName "AddTime" }} {{ template "ConvertStringToDuration" . }} - output, err := arg0.Add(sc, arg1Duration) + output, err := arg0.Add(sc.TypeCtx(), arg1Duration) {{ else }} if !isDuration(arg1) { result.SetNull(i, true) // fixed: true continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -200,7 +200,7 @@ func (b *{{.SigName}}) vecEval{{ .Output.TypeName }}(input *chunk.Chunk, result } return err } - output, err := arg0.Add(sc, arg1Duration.Neg()) + output, err := arg0.Add(sc.TypeCtx(), arg1Duration.Neg()) {{ end }} if err != nil { return err diff --git a/pkg/expression/helper.go b/pkg/expression/helper.go index d0fce4ffb2597..8e07ae72e4ca1 100644 --- a/pkg/expression/helper.go +++ b/pkg/expression/helper.go @@ -99,10 +99,10 @@ func GetTimeValue(ctx sessionctx.Context, v interface{}, tp byte, fsp int, expli return d, err } } else if lowerX == types.ZeroDatetimeStr { - value, err = types.ParseTimeFromNum(sc, 0, tp, fsp) + value, err = types.ParseTimeFromNum(sc.TypeCtx(), 0, tp, fsp) terror.Log(err) } else { - value, err = types.ParseTime(sc, x, tp, fsp, explicitTz) + value, err = types.ParseTime(sc.TypeCtx(), x, tp, fsp, explicitTz) if err != nil { return d, err } @@ -110,12 +110,12 @@ func GetTimeValue(ctx sessionctx.Context, v interface{}, tp byte, fsp int, expli case *driver.ValueExpr: switch x.Kind() { case types.KindString: - value, err = types.ParseTime(sc, x.GetString(), tp, fsp, nil) + value, err = types.ParseTime(sc.TypeCtx(), x.GetString(), tp, fsp, nil) if err != nil { return d, err } case types.KindInt64: - value, 
err = types.ParseTimeFromNum(sc, x.GetInt64(), tp, fsp) + value, err = types.ParseTimeFromNum(sc.TypeCtx(), x.GetInt64(), tp, fsp) if err != nil { return d, err } @@ -142,7 +142,7 @@ func GetTimeValue(ctx sessionctx.Context, v interface{}, tp byte, fsp int, expli return d, err } - value, err = types.ParseTimeFromNum(sc, xval.GetInt64(), tp, fsp) + value, err = types.ParseTimeFromNum(sc.TypeCtx(), xval.GetInt64(), tp, fsp) if err != nil { return d, err } diff --git a/pkg/planner/core/memtable_predicate_extractor_test.go b/pkg/planner/core/memtable_predicate_extractor_test.go index ea21f18572270..2c53fa8f2120b 100644 --- a/pkg/planner/core/memtable_predicate_extractor_test.go +++ b/pkg/planner/core/memtable_predicate_extractor_test.go @@ -1815,7 +1815,7 @@ func TestExtractorInPreparedStmt(t *testing.T) { prepared: "select * from information_schema.tidb_hot_regions_history where update_time>=?", userVars: []interface{}{"cast('2019-10-10 10:10:10' as datetime)"}, params: []interface{}{func() types.Time { - tt, err := types.ParseTimestamp(tk.Session().GetSessionVars().StmtCtx, "2019-10-10 10:10:10") + tt, err := types.ParseTimestamp(tk.Session().GetSessionVars().StmtCtx.TypeCtx(), "2019-10-10 10:10:10") require.NoError(t, err) return tt }()}, diff --git a/pkg/server/handler/optimizor/statistics_handler.go b/pkg/server/handler/optimizor/statistics_handler.go index 1c7b56f14aaa9..4af77791cd901 100644 --- a/pkg/server/handler/optimizor/statistics_handler.go +++ b/pkg/server/handler/optimizor/statistics_handler.go @@ -111,7 +111,7 @@ func (sh StatsHistoryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request } se.GetSessionVars().StmtCtx.SetTimeZone(time.Local) - t, err := types.ParseTime(se.GetSessionVars().StmtCtx, params[handler.Snapshot], mysql.TypeTimestamp, 6, nil) + t, err := types.ParseTime(se.GetSessionVars().StmtCtx.TypeCtx(), params[handler.Snapshot], mysql.TypeTimestamp, 6, nil) if err != nil { handler.WriteError(w, err) return diff --git 
a/pkg/server/internal/column/BUILD.bazel b/pkg/server/internal/column/BUILD.bazel index 794b8a607f50b..fc758df83711f 100644 --- a/pkg/server/internal/column/BUILD.bazel +++ b/pkg/server/internal/column/BUILD.bazel @@ -40,7 +40,6 @@ go_test( "//pkg/server/internal/util", "//pkg/types", "//pkg/util/chunk", - "//pkg/util/mock", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/server/internal/column/column_test.go b/pkg/server/internal/column/column_test.go index b32d07b028f02..ba63d52a5b9d8 100644 --- a/pkg/server/internal/column/column_test.go +++ b/pkg/server/internal/column/column_test.go @@ -23,7 +23,6 @@ import ( "github.com/pingcap/tidb/pkg/server/internal/util" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" - "github.com/pingcap/tidb/pkg/util/mock" "github.com/stretchr/testify/require" ) @@ -181,13 +180,11 @@ func TestDumpTextValue(t *testing.T) { var d types.Datum - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true losAngelesTz, err := time.LoadLocation("America/Los_Angeles") require.NoError(t, err) - sc.SetTimeZone(losAngelesTz) + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), losAngelesTz, func(err error) {}) - time, err := types.ParseTime(sc, "2017-01-05 23:59:59.575601", mysql.TypeDatetime, 0, nil) + time, err := types.ParseTime(typeCtx, "2017-01-05 23:59:59.575601", mysql.TypeDatetime, 0, nil) require.NoError(t, err) d.SetMysqlTime(time) columns[0].Type = mysql.TypeDatetime @@ -195,7 +192,7 @@ func TestDumpTextValue(t *testing.T) { require.NoError(t, err) require.Equal(t, "2017-01-06 00:00:00", mustDecodeStr(t, bs)) - duration, _, err := types.ParseDuration(sc, "11:30:45", 0) + duration, _, err := types.ParseDuration(typeCtx, "11:30:45", 0) require.NoError(t, err) d.SetMysqlDuration(duration) columns[0].Type = mysql.TypeDuration diff --git a/pkg/server/internal/dump/BUILD.bazel b/pkg/server/internal/dump/BUILD.bazel index df6051b402bbf..8529ba1648901 100644 
--- a/pkg/server/internal/dump/BUILD.bazel +++ b/pkg/server/internal/dump/BUILD.bazel @@ -19,7 +19,6 @@ go_test( flaky = True, shard_count = 3, deps = [ - "//pkg/sessionctx/stmtctx", "//pkg/types", "@com_github_stretchr_testify//require", ], diff --git a/pkg/server/internal/dump/dump_test.go b/pkg/server/internal/dump/dump_test.go index 1c2b67d8da555..0968c34b10114 100644 --- a/pkg/server/internal/dump/dump_test.go +++ b/pkg/server/internal/dump/dump_test.go @@ -18,52 +18,51 @@ import ( "testing" "time" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" "github.com/stretchr/testify/require" ) func TestDumpBinaryTime(t *testing.T) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - parsedTime, err := types.ParseTimestamp(sc, "0000-00-00 00:00:00.000000") + typeCtx := types.DefaultStmtNoWarningContext + parsedTime, err := types.ParseTimestamp(typeCtx, "0000-00-00 00:00:00.000000") require.NoError(t, err) d := BinaryDateTime(nil, parsedTime) require.Equal(t, []byte{0}, d) - parsedTime, err = types.ParseTimestamp(stmtctx.NewStmtCtxWithTimeZone(time.Local), "1991-05-01 01:01:01.100001") + parsedTime, err = types.ParseTimestamp(typeCtx.WithLocation(time.Local), "1991-05-01 01:01:01.100001") require.NoError(t, err) d = BinaryDateTime(nil, parsedTime) // 199 & 7 composed to uint16 1991 (litter-endian) // 160 & 134 & 1 & 0 composed to uint32 1000001 (litter-endian) require.Equal(t, []byte{11, 199, 7, 5, 1, 1, 1, 1, 161, 134, 1, 0}, d) - parsedTime, err = types.ParseDatetime(sc, "0000-00-00 00:00:00.000000") + parsedTime, err = types.ParseDatetime(typeCtx, "0000-00-00 00:00:00.000000") require.NoError(t, err) d = BinaryDateTime(nil, parsedTime) require.Equal(t, []byte{0}, d) - parsedTime, err = types.ParseDatetime(sc, "1993-07-13 01:01:01.000000") + parsedTime, err = types.ParseDatetime(typeCtx, "1993-07-13 01:01:01.000000") require.NoError(t, err) d = BinaryDateTime(nil, parsedTime) // 201 & 7 composed to uint16 1993 (litter-endian) 
require.Equal(t, []byte{7, 201, 7, 7, 13, 1, 1, 1}, d) - parsedTime, err = types.ParseDate(sc, "0000-00-00") + parsedTime, err = types.ParseDate(typeCtx, "0000-00-00") require.NoError(t, err) d = BinaryDateTime(nil, parsedTime) require.Equal(t, []byte{0}, d) - parsedTime, err = types.ParseDate(sc, "1992-06-01") + parsedTime, err = types.ParseDate(typeCtx, "1992-06-01") require.NoError(t, err) d = BinaryDateTime(nil, parsedTime) // 200 & 7 composed to uint16 1992 (litter-endian) require.Equal(t, []byte{4, 200, 7, 6, 1}, d) - parsedTime, err = types.ParseDate(sc, "0000-00-00") + parsedTime, err = types.ParseDate(typeCtx, "0000-00-00") require.NoError(t, err) d = BinaryDateTime(nil, parsedTime) require.Equal(t, []byte{0}, d) - myDuration, _, err := types.ParseDuration(sc, "0000-00-00 00:00:00.000000", 6) + myDuration, _, err := types.ParseDuration(typeCtx, "0000-00-00 00:00:00.000000", 6) require.NoError(t, err) d = BinaryTime(myDuration.Duration) require.Equal(t, []byte{0}, d) diff --git a/pkg/session/test/meta/session_test.go b/pkg/session/test/meta/session_test.go index 055b036b61434..e3be6077e1c82 100644 --- a/pkg/session/test/meta/session_test.go +++ b/pkg/session/test/meta/session_test.go @@ -155,9 +155,9 @@ func TestInformationSchemaCreateTime(t *testing.T) { ret1 := tk.MustQuery("select create_time from information_schema.tables where table_name='t';") ret2 := tk.MustQuery("show table status like 't'") require.Equal(t, ret2.Rows()[0][11].(string), ret1.Rows()[0][0].(string)) - typ1, err := types.ParseDatetime(nil, ret.Rows()[0][0].(string)) + typ1, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, ret.Rows()[0][0].(string)) require.NoError(t, err) - typ2, err := types.ParseDatetime(nil, ret1.Rows()[0][0].(string)) + typ2, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, ret1.Rows()[0][0].(string)) require.NoError(t, err) r := typ2.Compare(typ1) require.Equal(t, 1, r) @@ -166,7 +166,7 @@ func TestInformationSchemaCreateTime(t 
*testing.T) { ret = tk.MustQuery(`select create_time from information_schema.tables where table_name='t'`) ret2 = tk.MustQuery(`show table status like 't'`) require.Equal(t, ret2.Rows()[0][11].(string), ret.Rows()[0][0].(string)) - typ3, err := types.ParseDatetime(nil, ret.Rows()[0][0].(string)) + typ3, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, ret.Rows()[0][0].(string)) require.NoError(t, err) // Asia/Shanghai 2022-02-17 17:40:05 > Europe/Amsterdam 2022-02-17 10:40:05 r = typ2.Compare(typ3) diff --git a/pkg/sessionctx/stmtctx/stmtctx.go b/pkg/sessionctx/stmtctx/stmtctx.go index f7833d64c0eba..8168ef94198bc 100644 --- a/pkg/sessionctx/stmtctx/stmtctx.go +++ b/pkg/sessionctx/stmtctx/stmtctx.go @@ -176,8 +176,6 @@ type StatementContext struct { InCreateOrAlterStmt bool InSetSessionStatesStmt bool InPreparedPlanBuilding bool - IgnoreZeroInDate bool - NoZeroDate bool DupKeyAsWarning bool BadNullAsWarning bool DividedByZeroAsWarning bool @@ -188,7 +186,6 @@ type StatementContext struct { CacheType PlanCacheType BatchCheck bool InNullRejectCheck bool - AllowInvalidDate bool IgnoreNoPartition bool IgnoreExplainIDSuffix bool MultiSchemaInfo *model.MultiSchemaInfo @@ -1145,7 +1142,7 @@ func (sc *StatementContext) PushDownFlags() uint64 { if sc.OverflowAsWarning { flags |= model.FlagOverflowAsWarning } - if sc.IgnoreZeroInDate { + if sc.TypeFlags().IgnoreZeroInDate() { flags |= model.FlagIgnoreZeroInDate } if sc.DividedByZeroAsWarning { @@ -1210,14 +1207,13 @@ func (sc *StatementContext) InitFromPBFlagAndTz(flags uint64, tz *time.Location) sc.InSelectStmt = (flags & model.FlagInSelectStmt) > 0 sc.InDeleteStmt = (flags & model.FlagInUpdateOrDeleteStmt) > 0 sc.OverflowAsWarning = (flags & model.FlagOverflowAsWarning) > 0 - sc.IgnoreZeroInDate = (flags & model.FlagIgnoreZeroInDate) > 0 sc.DividedByZeroAsWarning = (flags & model.FlagDividedByZeroAsWarning) > 0 sc.SetTimeZone(tz) sc.SetTypeFlags(typectx.DefaultStmtFlags. 
WithIgnoreTruncateErr((flags & model.FlagIgnoreTruncate) > 0). WithTruncateAsWarning((flags & model.FlagTruncateAsWarning) > 0). - WithAllowNegativeToUnsigned(!sc.InInsertStmt), - ) + WithIgnoreZeroInDate((flags & model.FlagIgnoreZeroInDate) > 0). + WithAllowNegativeToUnsigned(!sc.InInsertStmt)) } // GetLockWaitStartTime returns the statement pessimistic lock wait start time diff --git a/pkg/sessionctx/stmtctx/stmtctx_test.go b/pkg/sessionctx/stmtctx/stmtctx_test.go index 24c8c042184b5..06c0ead1e2d54 100644 --- a/pkg/sessionctx/stmtctx/stmtctx_test.go +++ b/pkg/sessionctx/stmtctx/stmtctx_test.go @@ -97,7 +97,7 @@ func TestStatementContextPushDownFLags(t *testing.T) { {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) }), 1}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(true)) }), 2}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.OverflowAsWarning = true }), 64}, - {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.IgnoreZeroInDate = true }), 128}, + {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) }), 128}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.DividedByZeroAsWarning = true }), 256}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InLoadDataStmt = true }), 1024}, {newStmtCtx(func(sc *stmtctx.StatementContext) { @@ -110,7 +110,7 @@ func TestStatementContextPushDownFLags(t *testing.T) { }), 257}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InUpdateStmt = true - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) sc.InLoadDataStmt = true }), 1168}, } diff --git a/pkg/sessionctx/variable/varsutil.go b/pkg/sessionctx/variable/varsutil.go index 37abdc355f08d..765367a999526 100644 --- a/pkg/sessionctx/variable/varsutil.go +++ b/pkg/sessionctx/variable/varsutil.go @@ -441,7 +441,7 @@ func parseTSFromNumberOrTime(s *SessionVars, sVal 
string) (uint64, error) { return tso, nil } - t, err := types.ParseTime(s.StmtCtx, sVal, mysql.TypeTimestamp, types.MaxFsp, nil) + t, err := types.ParseTime(s.StmtCtx.TypeCtx(), sVal, mysql.TypeTimestamp, types.MaxFsp, nil) if err != nil { return 0, err } @@ -456,7 +456,7 @@ func setTxnReadTS(s *SessionVars, sVal string) error { return nil } - t, err := types.ParseTime(s.StmtCtx, sVal, mysql.TypeTimestamp, types.MaxFsp, nil) + t, err := types.ParseTime(s.StmtCtx.TypeCtx(), sVal, mysql.TypeTimestamp, types.MaxFsp, nil) if err != nil { return err } diff --git a/pkg/statistics/handle/bootstrap.go b/pkg/statistics/handle/bootstrap.go index 1e49bec642bef..ba5895c8d5091 100644 --- a/pkg/statistics/handle/bootstrap.go +++ b/pkg/statistics/handle/bootstrap.go @@ -405,8 +405,7 @@ func (*Handle) initStatsBuckets4Chunk(cache util.StatsCache, iter *chunk.Iterato // Setting TimeZone to time.UTC aligns with HistogramFromStorage and can fix #41938. However, #41985 still exist. // TODO: do the correct time zone conversion for timestamp-type columns' upper/lower bounds. sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - sc.AllowInvalidDate = true - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreInvalidDateErr(true).WithIgnoreZeroInDate(true)) var err error lower, err = d.ConvertTo(sc, &column.Info.FieldType) if err != nil { diff --git a/pkg/statistics/handle/storage/read.go b/pkg/statistics/handle/storage/read.go index 0b072004f7903..352171f7a0dcf 100644 --- a/pkg/statistics/handle/storage/read.go +++ b/pkg/statistics/handle/storage/read.go @@ -73,8 +73,7 @@ func HistogramFromStorage(sctx sessionctx.Context, tableID int64, colID int64, t // Invalid date values may be inserted into table under some relaxed sql mode. Those values may exist in statistics. // Hence, when reading statistics, we should skip invalid date check. See #39336. 
sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - sc.AllowInvalidDate = true - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreInvalidDateErr(true).WithIgnoreZeroInDate(true)) d := rows[i].GetDatum(2, &fields[2].Column.FieldType) // For new collation data, when storing the bounds of the histogram, we store the collate key instead of the // original value. diff --git a/pkg/statistics/scalar.go b/pkg/statistics/scalar.go index 92f3e52db8f41..b84d0933e6fa1 100644 --- a/pkg/statistics/scalar.go +++ b/pkg/statistics/scalar.go @@ -73,7 +73,7 @@ func convertDatumToScalar(value *types.Datum, commonPfxLen int) float64 { minTime = types.MinTimestamp } sc := stmtctx.NewStmtCtxWithTimeZone(types.BoundTimezone) - return float64(valueTime.Sub(sc, &minTime).Duration) + return float64(valueTime.Sub(sc.TypeCtx(), &minTime).Duration) case types.KindString, types.KindBytes: bytes := value.GetBytes() if len(bytes) <= commonPfxLen { @@ -275,19 +275,19 @@ func EnumRangeValues(low, high types.Datum, lowExclude, highExclude bool) []type } fsp := max(lowTime.Fsp(), highTime.Fsp()) var stepSize int64 - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) + typeCtx := types.DefaultStmtNoWarningContext if lowTime.Type() == mysql.TypeDate { stepSize = 24 * int64(time.Hour) lowTime.SetCoreTime(types.FromDate(lowTime.Year(), lowTime.Month(), lowTime.Day(), 0, 0, 0, 0)) } else { var err error - lowTime, err = lowTime.RoundFrac(sc, fsp) + lowTime, err = lowTime.RoundFrac(typeCtx, fsp) if err != nil { return nil } stepSize = int64(math.Pow10(types.MaxFsp-fsp)) * int64(time.Microsecond) } - remaining := int64(highTime.Sub(sc, &lowTime).Duration)/stepSize + 1 - int64(exclude) + remaining := int64(highTime.Sub(typeCtx, &lowTime).Duration)/stepSize + 1 - int64(exclude) // When `highTime` is much larger than `lowTime`, `remaining` may be overflowed to a negative value. 
if remaining <= 0 || remaining >= maxNumStep { return nil @@ -295,14 +295,14 @@ func EnumRangeValues(low, high types.Datum, lowExclude, highExclude bool) []type startValue := lowTime var err error if lowExclude { - startValue, err = lowTime.Add(sc, types.Duration{Duration: time.Duration(stepSize), Fsp: fsp}) + startValue, err = lowTime.Add(typeCtx, types.Duration{Duration: time.Duration(stepSize), Fsp: fsp}) if err != nil { return nil } } values := make([]types.Datum, 0, remaining) for i := int64(0); i < remaining; i++ { - value, err := startValue.Add(sc, types.Duration{Duration: time.Duration(i * stepSize), Fsp: fsp}) + value, err := startValue.Add(typeCtx, types.Duration{Duration: time.Duration(i * stepSize), Fsp: fsp}) if err != nil { return nil } diff --git a/pkg/statistics/scalar_test.go b/pkg/statistics/scalar_test.go index 97ed17e438a72..d83f1fbb8f47c 100644 --- a/pkg/statistics/scalar_test.go +++ b/pkg/statistics/scalar_test.go @@ -35,7 +35,7 @@ func getDecimal(value float64) *types.MyDecimal { } func getDuration(value string) types.Duration { - dur, _, _ := types.ParseDuration(nil, value, 0) + dur, _, _ := types.ParseDuration(types.DefaultStmtNoWarningContext, value, 0) return dur } diff --git a/pkg/table/tables/mutation_checker_test.go b/pkg/table/tables/mutation_checker_test.go index bca9cbb53640d..dce5dd12cb743 100644 --- a/pkg/table/tables/mutation_checker_test.go +++ b/pkg/table/tables/mutation_checker_test.go @@ -238,7 +238,7 @@ func TestCheckIndexKeysAndCheckHandleConsistency(t *testing.T) { types.NewStringDatum("some string"), types.NewTimeDatum(now), } - anotherTime, err := now.Add(sessVars.StmtCtx, types.NewDuration(24, 0, 0, 0, 0)) + anotherTime, err := now.Add(sessVars.StmtCtx.TypeCtx(), types.NewDuration(24, 0, 0, 0, 0)) require.Nil(t, err) rowToRemove := []types.Datum{ types.NewStringDatum("old string"), diff --git a/pkg/tablecodec/tablecodec_test.go b/pkg/tablecodec/tablecodec_test.go index 1d2997367f666..79617385232a6 100644 --- 
a/pkg/tablecodec/tablecodec_test.go +++ b/pkg/tablecodec/tablecodec_test.go @@ -260,11 +260,11 @@ func TestTimeCodec(t *testing.T) { row := make([]types.Datum, colLen) row[0] = types.NewIntDatum(100) row[1] = types.NewBytesDatum([]byte("abc")) - ts, err := types.ParseTimestamp(stmtctx.NewStmtCtxWithTimeZone(time.UTC), + ts, err := types.ParseTimestamp(types.DefaultStmtNoWarningContext, "2016-06-23 11:30:45") require.NoError(t, err) row[2] = types.NewDatum(ts) - du, _, err := types.ParseDuration(nil, "12:59:59.999999", 6) + du, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, "12:59:59.999999", 6) require.NoError(t, err) row[3] = types.NewDatum(du) diff --git a/pkg/types/BUILD.bazel b/pkg/types/BUILD.bazel index 1eae8bd68cc3f..f2be108b18850 100644 --- a/pkg/types/BUILD.bazel +++ b/pkg/types/BUILD.bazel @@ -107,7 +107,6 @@ go_test( "//pkg/testkit/testsetup", "//pkg/util/collate", "//pkg/util/hack", - "//pkg/util/mock", "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", diff --git a/pkg/types/context/context.go b/pkg/types/context/context.go index 3bd6e259d3c9d..5a218497c23ac 100644 --- a/pkg/types/context/context.go +++ b/pkg/types/context/context.go @@ -47,6 +47,13 @@ const ( // FlagIgnoreZeroDateErr indicates to ignore the zero-date error. // See: https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_no_zero_date for details about the "zero-date" error. // If this flag is set, `FlagZeroDateAsWarning` will be ignored. + // + // TODO: `FlagIgnoreZeroDateErr` and `FlagZeroDateAsWarning` don't represent the comments right now, because the + // errors related with `time` and `duration` are handled directly according to SQL mode in many places (expression, + // ddl ...). These error handling will be refined in the future. Currently, the `FlagZeroDateAsWarning` is not used, + // and the `FlagIgnoreZeroDateErr` is used to allow or disallow casting zero to date in `alter` statement. 
See #25728 + // This flag is the reverse of `NoZeroDate` in #30507. It's set to `true` for most context, and is only set to + // `false` for `alter` (and `create`) statements. FlagIgnoreZeroDateErr // FlagZeroDateAsWarning indicates to append the zero-date error to warnings instead of returning it to user. FlagZeroDateAsWarning @@ -146,6 +153,45 @@ func (f Flags) WithTruncateAsWarning(warn bool) Flags { return f &^ FlagTruncateAsWarning } +// IgnoreZeroInDate indicates whether the flag `FlagIgnoreZeroInData` is set +func (f Flags) IgnoreZeroInDate() bool { + return f&FlagIgnoreZeroInDateErr != 0 +} + +// WithIgnoreZeroInDate returns a new flags with `FlagIgnoreZeroInDateErr` set/unset according to the ignore parameter +func (f Flags) WithIgnoreZeroInDate(ignore bool) Flags { + if ignore { + return f | FlagIgnoreZeroInDateErr + } + return f &^ FlagIgnoreZeroInDateErr +} + +// IgnoreInvalidDateErr indicates whether the flag `FlagIgnoreInvalidDateErr` is set +func (f Flags) IgnoreInvalidDateErr() bool { + return f&FlagIgnoreInvalidDateErr != 0 +} + +// WithIgnoreInvalidDateErr returns a new flags with `FlagIgnoreInvalidDateErr` set/unset according to the ignore parameter +func (f Flags) WithIgnoreInvalidDateErr(ignore bool) Flags { + if ignore { + return f | FlagIgnoreInvalidDateErr + } + return f &^ FlagIgnoreInvalidDateErr +} + +// IgnoreZeroDateErr indicates whether the flag `FlagIgnoreZeroDateErr` is set +func (f Flags) IgnoreZeroDateErr() bool { + return f&FlagIgnoreZeroDateErr != 0 +} + +// WithIgnoreZeroDateErr returns a new flags with `FlagIgnoreZeroDateErr` set/unset according to the ignore parameter +func (f Flags) WithIgnoreZeroDateErr(ignore bool) Flags { + if ignore { + return f | FlagIgnoreZeroDateErr + } + return f &^ FlagIgnoreZeroDateErr +} + // Context provides the information when converting between different types. 
type Context struct { flags Flags @@ -210,7 +256,7 @@ func (c *Context) AppendWarningFunc() func(err error) { // DefaultStmtFlags is the default flags for statement context with the flag `FlagAllowNegativeToUnsigned` set. // TODO: make DefaultStmtFlags to be equal with StrictFlags, and setting flag `FlagAllowNegativeToUnsigned` // is only for make the code to be equivalent with the old implement during refactoring. -const DefaultStmtFlags = StrictFlags | FlagAllowNegativeToUnsigned +const DefaultStmtFlags = StrictFlags | FlagAllowNegativeToUnsigned | FlagIgnoreZeroDateErr // DefaultStmtNoWarningContext is the context with default statement flags without any other special configuration var DefaultStmtNoWarningContext = NewContext(DefaultStmtFlags, time.UTC, func(_ error) { diff --git a/pkg/types/convert.go b/pkg/types/convert.go index d1de90bdb1000..a48bc48852aac 100644 --- a/pkg/types/convert.go +++ b/pkg/types/convert.go @@ -316,15 +316,15 @@ func StrToUint(ctx Context, str string, isFuncCast bool) (uint64, error) { } // StrToDateTime converts str to MySQL DateTime. -func StrToDateTime(sc *stmtctx.StatementContext, str string, fsp int) (Time, error) { - return ParseTime(sc, str, mysql.TypeDatetime, fsp, nil) +func StrToDateTime(ctx Context, str string, fsp int) (Time, error) { + return ParseTime(ctx, str, mysql.TypeDatetime, fsp, nil) } // StrToDuration converts str to Duration. It returns Duration in normal case, // and returns Time when str is in datetime format. // when isDuration is true, the d is returned, when it is false, the t is returned. // See https://dev.mysql.com/doc/refman/5.5/en/date-and-time-literals.html. 
-func StrToDuration(sc *stmtctx.StatementContext, str string, fsp int) (d Duration, t Time, isDuration bool, err error) { +func StrToDuration(ctx Context, str string, fsp int) (d Duration, t Time, isDuration bool, err error) { str = strings.TrimSpace(str) length := len(str) if length > 0 && str[0] == '-' { @@ -336,16 +336,15 @@ func StrToDuration(sc *stmtctx.StatementContext, str string, fsp int) (d Duratio // Timestamp format is 'YYYYMMDDHHMMSS' or 'YYMMDDHHMMSS', which length is 12. // See #3923, it explains what we do here. if length >= 12 { - t, err = StrToDateTime(sc, str, fsp) + t, err = StrToDateTime(ctx, str, fsp) if err == nil { return d, t, false, nil } } - d, _, err = ParseDuration(sc, str, fsp) + d, _, err = ParseDuration(ctx, str, fsp) if ErrTruncatedWrongVal.Equal(err) { - typeCtx := sc.TypeCtx() - err = typeCtx.HandleTruncate(err) + err = ctx.HandleTruncate(err) } return d, t, true, errors.Trace(err) } @@ -355,7 +354,7 @@ func NumberToDuration(number int64, fsp int) (Duration, error) { if number > TimeMaxValue { // Try to parse DATETIME. 
if number >= 10000000000 { // '2001-00-00 00-00-00' - if t, err := ParseDatetimeFromNum(nil, number); err == nil { + if t, err := ParseDatetimeFromNum(DefaultStmtNoWarningContext, number); err == nil { dur, err1 := t.ConvertToDuration() return dur, errors.Trace(err1) } diff --git a/pkg/types/convert_test.go b/pkg/types/convert_test.go index 9bdae6188e191..3a633a44c9ecb 100644 --- a/pkg/types/convert_test.go +++ b/pkg/types/convert_test.go @@ -148,15 +148,15 @@ func TestConvertType(t *testing.T) { vv, err := Convert(v, ft) require.NoError(t, err) require.Equal(t, "10:11:12.1", vv.(Duration).String()) - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - vd, err := ParseTime(sc, "2010-10-10 10:11:11.12345", mysql.TypeDatetime, 2, nil) + typeCtx := DefaultStmtNoWarningContext + vd, err := ParseTime(typeCtx, "2010-10-10 10:11:11.12345", mysql.TypeDatetime, 2, nil) require.Equal(t, "2010-10-10 10:11:11.12", vd.String()) require.NoError(t, err) v, err = Convert(vd, ft) require.NoError(t, err) require.Equal(t, "10:11:11.1", v.(Duration).String()) - vt, err := ParseTime(sc, "2010-10-10 10:11:11.12345", mysql.TypeTimestamp, 2, nil) + vt, err := ParseTime(typeCtx, "2010-10-10 10:11:11.12345", mysql.TypeTimestamp, 2, nil) require.Equal(t, "2010-10-10 10:11:11.12", vt.String()) require.NoError(t, err) v, err = Convert(vt, ft) @@ -248,11 +248,11 @@ func TestConvertType(t *testing.T) { // Test Datum.ToDecimal with bad number. 
d := NewDatum("hello") - _, err = d.ToDecimal(sc.TypeCtxOrDefault()) + _, err = d.ToDecimal(typeCtx) require.Truef(t, terror.ErrorEqual(err, ErrTruncatedWrongVal), "err %v", err) - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) - v, err = d.ToDecimal(sc.TypeCtxOrDefault()) + typeCtx = typeCtx.WithFlags(typeCtx.Flags().WithIgnoreTruncateErr(true)) + v, err = d.ToDecimal(typeCtx) require.NoError(t, err) require.Equal(t, "0", v.(*MyDecimal).String()) @@ -266,7 +266,7 @@ func TestConvertType(t *testing.T) { require.Equal(t, int64(2015), v) _, err = Convert(1800, ft) require.Error(t, err) - dt, err := ParseDate(nil, "2015-11-11") + dt, err := ParseDate(DefaultStmtNoWarningContext, "2015-11-11") require.NoError(t, err) v, err = Convert(dt, ft) require.NoError(t, err) @@ -345,11 +345,11 @@ func TestConvertToString(t *testing.T) { testToString(t, Enum{Name: "a", Value: 1}, "a") testToString(t, Set{Name: "a", Value: 1}, "a") - t1, err := ParseTime(stmtctx.NewStmtCtxWithTimeZone(time.UTC), "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 6, nil) + t1, err := ParseTime(DefaultStmtNoWarningContext, "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 6, nil) require.NoError(t, err) testToString(t, t1, "2011-11-10 11:11:11.999999") - td, _, err := ParseDuration(nil, "11:11:11.999999", 6) + td, _, err := ParseDuration(DefaultStmtNoWarningContext, "11:11:11.999999", 6) require.NoError(t, err) testToString(t, td, "11:11:11.999999") @@ -1210,7 +1210,6 @@ func TestNumberToDuration(t *testing.T) { } func TestStrToDuration(t *testing.T) { - sc := stmtctx.NewStmtCtx() var tests = []struct { str string fsp int @@ -1224,7 +1223,7 @@ func TestStrToDuration(t *testing.T) { {"00:00:00", 0, true}, } for _, tt := range tests { - _, _, isDuration, err := StrToDuration(sc, tt.str, tt.fsp) + _, _, isDuration, err := StrToDuration(DefaultStmtNoWarningContext, tt.str, tt.fsp) require.NoError(t, err) require.Equal(t, tt.isDuration, isDuration) } diff --git a/pkg/types/datum.go 
b/pkg/types/datum.go index 6d4cec1f944a2..1e7ff4968dd88 100644 --- a/pkg/types/datum.go +++ b/pkg/types/datum.go @@ -664,13 +664,13 @@ func (d *Datum) Compare(sc *stmtctx.StatementContext, ad *Datum, comparer collat case KindFloat32, KindFloat64: return d.compareFloat64(typeCtx, ad.GetFloat64()) case KindString: - return d.compareString(sc, ad.GetString(), comparer) + return d.compareString(typeCtx, ad.GetString(), comparer) case KindBytes: - return d.compareString(sc, ad.GetString(), comparer) + return d.compareString(typeCtx, ad.GetString(), comparer) case KindMysqlDecimal: return d.compareMysqlDecimal(sc, ad.GetMysqlDecimal()) case KindMysqlDuration: - return d.compareMysqlDuration(sc, ad.GetMysqlDuration()) + return d.compareMysqlDuration(typeCtx, ad.GetMysqlDuration()) case KindMysqlEnum: return d.compareMysqlEnum(typeCtx, ad.GetMysqlEnum(), comparer) case KindBinaryLiteral, KindMysqlBit: @@ -680,7 +680,7 @@ func (d *Datum) Compare(sc *stmtctx.StatementContext, ad *Datum, comparer collat case KindMysqlJSON: return d.compareMysqlJSON(sc, ad.GetMysqlJSON()) case KindMysqlTime: - return d.compareMysqlTime(sc, ad.GetMysqlTime()) + return d.compareMysqlTime(typeCtx, ad.GetMysqlTime()) default: return 0, nil } @@ -757,9 +757,7 @@ func (d *Datum) compareFloat64(ctx Context, f float64) (int, error) { } } -func (d *Datum) compareString(sc *stmtctx.StatementContext, s string, comparer collate.Collator) (int, error) { - typeCtx := sc.TypeCtxOrDefault() - +func (d *Datum) compareString(ctx Context, s string, comparer collate.Collator) (int, error) { switch d.k { case KindNull, KindMinNotNull: return -1, nil @@ -769,13 +767,13 @@ func (d *Datum) compareString(sc *stmtctx.StatementContext, s string, comparer c return comparer.Compare(d.GetString(), s), nil case KindMysqlDecimal: dec := new(MyDecimal) - err := typeCtx.HandleTruncate(dec.FromString(hack.Slice(s))) + err := ctx.HandleTruncate(dec.FromString(hack.Slice(s))) return d.GetMysqlDecimal().Compare(dec), 
errors.Trace(err) case KindMysqlTime: - dt, err := ParseDatetime(sc, s) + dt, err := ParseDatetime(ctx, s) return d.GetMysqlTime().Compare(dt), errors.Trace(err) case KindMysqlDuration: - dur, _, err := ParseDuration(sc, s, MaxFsp) + dur, _, err := ParseDuration(ctx, s, MaxFsp) return d.GetMysqlDuration().Compare(dur), errors.Trace(err) case KindMysqlSet: return comparer.Compare(d.GetMysqlSet().String(), s), nil @@ -784,11 +782,11 @@ func (d *Datum) compareString(sc *stmtctx.StatementContext, s string, comparer c case KindBinaryLiteral, KindMysqlBit: return comparer.Compare(d.GetBinaryLiteral4Cmp().ToString(), s), nil default: - fVal, err := StrToFloat(sc.TypeCtxOrDefault(), s, false) + fVal, err := StrToFloat(ctx, s, false) if err != nil { return 0, errors.Trace(err) } - return d.compareFloat64(sc.TypeCtxOrDefault(), fVal) + return d.compareFloat64(ctx, fVal) } } @@ -815,7 +813,7 @@ func (d *Datum) compareMysqlDecimal(sc *stmtctx.StatementContext, dec *MyDecimal } } -func (d *Datum) compareMysqlDuration(sc *stmtctx.StatementContext, dur Duration) (int, error) { +func (d *Datum) compareMysqlDuration(ctx Context, dur Duration) (int, error) { switch d.k { case KindNull, KindMinNotNull: return -1, nil @@ -824,10 +822,10 @@ func (d *Datum) compareMysqlDuration(sc *stmtctx.StatementContext, dur Duration) case KindMysqlDuration: return d.GetMysqlDuration().Compare(dur), nil case KindString, KindBytes: - dDur, _, err := ParseDuration(sc, d.GetString(), MaxFsp) + dDur, _, err := ParseDuration(ctx, d.GetString(), MaxFsp) return dDur.Compare(dur), errors.Trace(err) default: - return d.compareFloat64(sc.TypeCtxOrDefault(), dur.Seconds()) + return d.compareFloat64(ctx, dur.Seconds()) } } @@ -890,14 +888,14 @@ func (d *Datum) compareMysqlJSON(_ *stmtctx.StatementContext, target BinaryJSON) return CompareBinaryJSON(origin, target), nil } -func (d *Datum) compareMysqlTime(sc *stmtctx.StatementContext, time Time) (int, error) { +func (d *Datum) compareMysqlTime(ctx Context, time 
Time) (int, error) { switch d.k { case KindNull, KindMinNotNull: return -1, nil case KindMaxValue: return 1, nil case KindString, KindBytes: - dt, err := ParseDatetime(sc, d.GetString()) + dt, err := ParseDatetime(ctx, d.GetString()) return dt.Compare(time), errors.Trace(err) case KindMysqlTime: return d.GetMysqlTime().Compare(time), nil @@ -906,13 +904,15 @@ func (d *Datum) compareMysqlTime(sc *stmtctx.StatementContext, time Time) (int, if err != nil { return 0, errors.Trace(err) } - return d.compareFloat64(sc.TypeCtxOrDefault(), fVal) + return d.compareFloat64(ctx, fVal) } } // ConvertTo converts a datum to the target field type. // change this method need sync modification to type2Kind in rowcodec/types.go func (d *Datum) ConvertTo(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { + typeCtx := sc.TypeCtxOrDefault() + if d.k == KindNull { return Datum{}, nil } @@ -929,9 +929,9 @@ func (d *Datum) ConvertTo(sc *stmtctx.StatementContext, target *FieldType) (Datu mysql.TypeString, mysql.TypeVarchar, mysql.TypeVarString: return d.convertToString(sc, target) case mysql.TypeTimestamp: - return d.convertToMysqlTimestamp(sc, target) + return d.convertToMysqlTimestamp(typeCtx, target) case mysql.TypeDatetime, mysql.TypeDate: - return d.convertToMysqlTime(sc, target) + return d.convertToMysqlTime(typeCtx, target) case mysql.TypeDuration: return d.convertToMysqlDuration(sc, target) case mysql.TypeNewDecimal: @@ -1254,7 +1254,7 @@ func (d *Datum) convertToUint(sc *stmtctx.StatementContext, target *FieldType) ( return ret, nil } -func (d *Datum) convertToMysqlTimestamp(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToMysqlTimestamp(ctx Context, target *FieldType) (Datum, error) { var ( ret Datum t Time @@ -1266,26 +1266,26 @@ func (d *Datum) convertToMysqlTimestamp(sc *stmtctx.StatementContext, target *Fi } switch d.k { case KindMysqlTime: - t, err = d.GetMysqlTime().Convert(sc, target.GetType()) + t, err = 
d.GetMysqlTime().Convert(ctx, target.GetType()) if err != nil { // t might be an invalid Timestamp, but should still be comparable, since same representation (KindMysqlTime) ret.SetMysqlTime(t) return ret, errors.Trace(ErrWrongValue.GenWithStackByArgs(TimestampStr, t.String())) } - t, err = t.RoundFrac(sc, fsp) + t, err = t.RoundFrac(ctx, fsp) case KindMysqlDuration: - t, err = d.GetMysqlDuration().ConvertToTime(sc, mysql.TypeTimestamp) + t, err = d.GetMysqlDuration().ConvertToTime(ctx, mysql.TypeTimestamp) if err != nil { ret.SetMysqlTime(t) return ret, errors.Trace(err) } - t, err = t.RoundFrac(sc, fsp) + t, err = t.RoundFrac(ctx, fsp) case KindString, KindBytes: - t, err = ParseTime(sc, d.GetString(), mysql.TypeTimestamp, fsp, nil) + t, err = ParseTime(ctx, d.GetString(), mysql.TypeTimestamp, fsp, nil) case KindInt64: - t, err = ParseTimeFromNum(sc, d.GetInt64(), mysql.TypeTimestamp, fsp) + t, err = ParseTimeFromNum(ctx, d.GetInt64(), mysql.TypeTimestamp, fsp) case KindMysqlDecimal: - t, err = ParseTimeFromFloatString(sc, d.GetMysqlDecimal().String(), mysql.TypeTimestamp, fsp) + t, err = ParseTimeFromFloatString(ctx, d.GetMysqlDecimal().String(), mysql.TypeTimestamp, fsp) case KindMysqlJSON: j := d.GetMysqlJSON() var s string @@ -1294,7 +1294,7 @@ func (d *Datum) convertToMysqlTimestamp(sc *stmtctx.StatementContext, target *Fi ret.SetMysqlTime(t) return ret, err } - t, err = ParseTime(sc, s, mysql.TypeTimestamp, fsp, nil) + t, err = ParseTime(ctx, s, mysql.TypeTimestamp, fsp, nil) default: return invalidConv(d, mysql.TypeTimestamp) } @@ -1306,7 +1306,7 @@ func (d *Datum) convertToMysqlTimestamp(sc *stmtctx.StatementContext, target *Fi return ret, nil } -func (d *Datum) convertToMysqlTime(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToMysqlTime(ctx Context, target *FieldType) (Datum, error) { tp := target.GetType() fsp := DefaultFsp if target.GetDecimal() != UnspecifiedLength { @@ -1319,32 +1319,32 @@ func (d *Datum) 
convertToMysqlTime(sc *stmtctx.StatementContext, target *FieldTy ) switch d.k { case KindMysqlTime: - t, err = d.GetMysqlTime().Convert(sc, tp) + t, err = d.GetMysqlTime().Convert(ctx, tp) if err != nil { ret.SetMysqlTime(t) return ret, errors.Trace(err) } - t, err = t.RoundFrac(sc, fsp) + t, err = t.RoundFrac(ctx, fsp) case KindMysqlDuration: - t, err = d.GetMysqlDuration().ConvertToTime(sc, tp) + t, err = d.GetMysqlDuration().ConvertToTime(ctx, tp) if err != nil { ret.SetMysqlTime(t) return ret, errors.Trace(err) } - t, err = t.RoundFrac(sc, fsp) + t, err = t.RoundFrac(ctx, fsp) case KindMysqlDecimal: - t, err = ParseTimeFromFloatString(sc, d.GetMysqlDecimal().String(), tp, fsp) + t, err = ParseTimeFromFloatString(ctx, d.GetMysqlDecimal().String(), tp, fsp) case KindString, KindBytes: - t, err = ParseTime(sc, d.GetString(), tp, fsp, nil) + t, err = ParseTime(ctx, d.GetString(), tp, fsp, nil) case KindInt64: - t, err = ParseTimeFromNum(sc, d.GetInt64(), tp, fsp) + t, err = ParseTimeFromNum(ctx, d.GetInt64(), tp, fsp) case KindUint64: intOverflow64 := d.GetInt64() < 0 if intOverflow64 { uNum := strconv.FormatUint(d.GetUint64(), 10) t, err = ZeroDate, ErrWrongValue.GenWithStackByArgs(TimeStr, uNum) } else { - t, err = ParseTimeFromNum(sc, d.GetInt64(), tp, fsp) + t, err = ParseTimeFromNum(ctx, d.GetInt64(), tp, fsp) } case KindMysqlJSON: j := d.GetMysqlJSON() @@ -1354,7 +1354,7 @@ func (d *Datum) convertToMysqlTime(sc *stmtctx.StatementContext, target *FieldTy ret.SetMysqlTime(t) return ret, err } - t, err = ParseTime(sc, s, tp, fsp, nil) + t, err = ParseTime(ctx, s, tp, fsp, nil) default: return invalidConv(d, tp) } @@ -1370,6 +1370,8 @@ func (d *Datum) convertToMysqlTime(sc *stmtctx.StatementContext, target *FieldTy } func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { + typeCtx := sc.TypeCtx() + tp := target.GetType() fsp := DefaultFsp if target.GetDecimal() != UnspecifiedLength { @@ -1413,13 +1415,13 @@ func 
(d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie if timeNum < -MaxDuration { return ret, ErrWrongValue.GenWithStackByArgs(TimeStr, timeStr) } - t, _, err := ParseDuration(sc, timeStr, fsp) + t, _, err := ParseDuration(typeCtx, timeStr, fsp) ret.SetMysqlDuration(t) if err != nil { return ret, errors.Trace(err) } case KindString, KindBytes: - t, _, err := ParseDuration(sc, d.GetString(), fsp) + t, _, err := ParseDuration(typeCtx, d.GetString(), fsp) ret.SetMysqlDuration(t) if err != nil { return ret, errors.Trace(err) @@ -1430,7 +1432,7 @@ func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie if err != nil { return ret, errors.Trace(err) } - t, _, err := ParseDuration(sc, s, fsp) + t, _, err := ParseDuration(typeCtx, s, fsp) ret.SetMysqlDuration(t) if err != nil { return ret, errors.Trace(err) @@ -1877,7 +1879,7 @@ func (d *Datum) toSignedInteger(sc *stmtctx.StatementContext, tp byte) (int64, e case KindMysqlTime: // 2011-11-10 11:11:11.999999 -> 20111110111112 // 2011-11-10 11:59:59.999999 -> 20111110120000 - t, err := d.GetMysqlTime().RoundFrac(sc, DefaultFsp) + t, err := d.GetMysqlTime().RoundFrac(sc.TypeCtxOrDefault(), DefaultFsp) if err != nil { return 0, errors.Trace(err) } diff --git a/pkg/types/datum_test.go b/pkg/types/datum_test.go index 42e1905d5ad74..5abfda17cc064 100644 --- a/pkg/types/datum_test.go +++ b/pkg/types/datum_test.go @@ -93,11 +93,11 @@ func TestToBool(t *testing.T) { testDatumToBool(t, CreateBinaryJSON(true), 1) testDatumToBool(t, CreateBinaryJSON(false), 1) testDatumToBool(t, CreateBinaryJSON(""), 1) - t1, err := ParseTime(stmtctx.NewStmtCtxWithTimeZone(time.UTC), "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 6, nil) + t1, err := ParseTime(DefaultStmtNoWarningContext, "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 6, nil) require.NoError(t, err) testDatumToBool(t, t1, 1) - td, _, err := ParseDuration(nil, "11:11:11.999999", 6) + td, _, err := 
ParseDuration(DefaultStmtNoWarningContext, "11:11:11.999999", 6) require.NoError(t, err) testDatumToBool(t, td, 1) @@ -135,11 +135,11 @@ func TestToInt64(t *testing.T) { testDatumToInt64(t, Set{Name: "a", Value: 1}, int64(1)) testDatumToInt64(t, CreateBinaryJSON(int64(3)), int64(3)) - t1, err := ParseTime(stmtctx.NewStmtCtxWithTimeZone(time.UTC), "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 0, nil) + t1, err := ParseTime(DefaultStmtNoWarningContext, "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 0, nil) require.NoError(t, err) testDatumToInt64(t, t1, int64(20111110111112)) - td, _, err := ParseDuration(nil, "11:11:11.999999", 6) + td, _, err := ParseDuration(DefaultStmtNoWarningContext, "11:11:11.999999", 6) require.NoError(t, err) testDatumToInt64(t, td, int64(111112)) @@ -225,7 +225,7 @@ func TestConvertToFloat(t *testing.T) { } func mustParseTime(s string, tp byte, fsp int) Time { - t, err := ParseTime(stmtctx.NewStmtCtxWithTimeZone(time.UTC), s, tp, fsp, nil) + t, err := ParseTime(DefaultStmtNoWarningContext, s, tp, fsp, nil) if err != nil { panic("ParseTime fail") } diff --git a/pkg/types/format_test.go b/pkg/types/format_test.go index b48cec6212acf..a63d8f161ee7e 100644 --- a/pkg/types/format_test.go +++ b/pkg/types/format_test.go @@ -16,16 +16,15 @@ package types_test import ( "testing" + "time" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mock" "github.com/stretchr/testify/require" ) func TestTimeFormatMethod(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) tblDate := []struct { Input string Format string @@ -69,7 +68,7 @@ func TestTimeFormatMethod(t *testing.T) { }, } for i, tt := range tblDate { - tm, err := types.ParseTime(sc, tt.Input, mysql.TypeDatetime, 6, nil) + tm, err := types.ParseTime(typeCtx, tt.Input, 
mysql.TypeDatetime, 6, nil) require.NoErrorf(t, err, "Parse time fail: %s", tt.Input) str, err := tm.DateFormat(tt.Format) @@ -79,8 +78,7 @@ func TestTimeFormatMethod(t *testing.T) { } func TestStrToDate(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) tests := []struct { input string format string @@ -157,9 +155,9 @@ func TestStrToDate(t *testing.T) { {"30/Feb/2016 12:34:56.1234", "%d/%b/%Y %H:%i:%S.%f", types.FromDate(2016, 2, 30, 12, 34, 56, 123400)}, // Feb 30th } for i, tt := range tests { - sc.AllowInvalidDate = true + typeCtx = typeCtx.WithFlags(typeCtx.Flags().WithIgnoreInvalidDateErr(true)) var time types.Time - require.Truef(t, time.StrToDate(sc, tt.input, tt.format), "no.%d failed input=%s format=%s", i, tt.input, tt.format) + require.Truef(t, time.StrToDate(typeCtx, tt.input, tt.format), "no.%d failed input=%s format=%s", i, tt.input, tt.format) require.Equalf(t, tt.expect, time.CoreTime(), "no.%d failed input=%s format=%s", i, tt.input, tt.format) } @@ -192,8 +190,8 @@ func TestStrToDate(t *testing.T) { {"11:13:56a", "%r"}, // EOF while parsing "AM"/"PM" } for i, tt := range errTests { - sc.AllowInvalidDate = false + typeCtx = typeCtx.WithFlags(typeCtx.Flags().WithIgnoreInvalidDateErr(false)) var time types.Time - require.Falsef(t, time.StrToDate(sc, tt.input, tt.format), "no.%d failed input=%s format=%s", i, tt.input, tt.format) + require.Falsef(t, time.StrToDate(typeCtx, tt.input, tt.format), "no.%d failed input=%s format=%s", i, tt.input, tt.format) } } diff --git a/pkg/types/time.go b/pkg/types/time.go index 35e11aacacc1c..3cc91576a8e61 100644 --- a/pkg/types/time.go +++ b/pkg/types/time.go @@ -453,7 +453,7 @@ func (t Time) FillNumber(dec *MyDecimal) { } // Convert converts t with type tp. 
-func (t Time) Convert(sc *stmtctx.StatementContext, tp uint8) (Time, error) { +func (t Time) Convert(ctx Context, tp uint8) (Time, error) { t1 := t if t.Type() == tp || t.IsZero() { t1.SetType(tp) @@ -461,7 +461,7 @@ func (t Time) Convert(sc *stmtctx.StatementContext, tp uint8) (Time, error) { } t1.SetType(tp) - err := t1.check(sc, nil) + err := t1.check(ctx, nil) return t1, errors.Trace(err) } @@ -490,9 +490,9 @@ func (t Time) Compare(o Time) int { // CompareString is like Compare, // but parses string to Time then compares. -func (t Time) CompareString(sc *stmtctx.StatementContext, str string) (int, error) { +func (t Time) CompareString(ctx Context, str string) (int, error) { // use MaxFsp to parse the string - o, err := ParseTime(sc, str, t.Type(), MaxFsp, nil) + o, err := ParseTime(ctx, str, t.Type(), MaxFsp, nil) if err != nil { return 0, errors.Trace(err) } @@ -507,7 +507,7 @@ func roundTime(t gotime.Time, fsp int) gotime.Time { } // RoundFrac rounds the fraction part of a time-type value according to `fsp`. -func (t Time) RoundFrac(sc *stmtctx.StatementContext, fsp int) (Time, error) { +func (t Time) RoundFrac(ctx Context, fsp int) (Time, error) { if t.Type() == mysql.TypeDate || t.IsZero() { // date type has no fsp return t, nil @@ -524,13 +524,13 @@ func (t Time) RoundFrac(sc *stmtctx.StatementContext, fsp int) (Time, error) { } var nt CoreTime - if t1, err := t.GoTime(sc.TimeZone()); err == nil { + if t1, err := t.GoTime(ctx.Location()); err == nil { t1 = roundTime(t1, fsp) nt = FromGoTime(t1) } else { // Take the hh:mm:ss part out to avoid handle month or day = 0. 
hour, minute, second, microsecond := t.Hour(), t.Minute(), t.Second(), t.Microsecond() - t1 := gotime.Date(1, 1, 1, hour, minute, second, microsecond*1000, sc.TimeZone()) + t1 := gotime.Date(1, 1, 1, hour, minute, second, microsecond*1000, ctx.Location()) t2 := roundTime(t1, fsp) hour, minute, second = t2.Clock() microsecond = t2.Nanosecond() / 1000 @@ -674,18 +674,13 @@ func (t *Time) FromPackedUint(packed uint64) error { // check whether t matches valid Time format. // If allowZeroInDate is false, it returns ErrZeroDate when month or day is zero. // FIXME: See https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sqlmode_no_zero_in_date -func (t Time) check(sc *stmtctx.StatementContext, explicitTz *gotime.Location) error { - allowZeroInDate := false - allowInvalidDate := false - // We should avoid passing sc as nil here as far as possible. - if sc != nil { - allowZeroInDate = sc.IgnoreZeroInDate - allowInvalidDate = sc.AllowInvalidDate - } +func (t Time) check(ctx Context, explicitTz *gotime.Location) error { + allowZeroInDate := ctx.Flags().IgnoreZeroInDate() + allowInvalidDate := ctx.Flags().IgnoreInvalidDateErr() var err error switch t.Type() { case mysql.TypeTimestamp: - err = checkTimestampType(sc, t.coreTime, explicitTz) + err = checkTimestampType(ctx, t.coreTime, explicitTz) case mysql.TypeDatetime, mysql.TypeDate: err = checkDatetimeType(t.coreTime, allowZeroInDate, allowInvalidDate) } @@ -693,18 +688,18 @@ func (t Time) check(sc *stmtctx.StatementContext, explicitTz *gotime.Location) e } // Check if 't' is valid -func (t *Time) Check(sc *stmtctx.StatementContext) error { - return t.check(sc, nil) +func (t *Time) Check(ctx Context) error { + return t.check(ctx, nil) } // Sub subtracts t1 from t, returns a duration value. // Note that sub should not be done on different time types. 
-func (t *Time) Sub(sc *stmtctx.StatementContext, t1 *Time) Duration { +func (t *Time) Sub(ctx Context, t1 *Time) Duration { var duration gotime.Duration if t.Type() == mysql.TypeTimestamp && t1.Type() == mysql.TypeTimestamp { - a, err := t.GoTime(sc.TimeZone()) + a, err := t.GoTime(ctx.Location()) terror.Log(errors.Trace(err)) - b, err := t1.GoTime(sc.TimeZone()) + b, err := t1.GoTime(ctx.Location()) terror.Log(errors.Trace(err)) duration = a.Sub(b) } else { @@ -727,7 +722,7 @@ func (t *Time) Sub(sc *stmtctx.StatementContext, t1 *Time) Duration { } // Add adds d to t, returns the result time value. -func (t *Time) Add(sc *stmtctx.StatementContext, d Duration) (Time, error) { +func (t *Time) Add(ctx Context, d Duration) (Time, error) { seconds, microseconds, _ := calcTimeDurationDiff(t.coreTime, d) days := seconds / secondsIn24Hour year, month, day := getDateFromDaynr(uint(days)) @@ -747,7 +742,7 @@ func (t *Time) Add(sc *stmtctx.StatementContext, d Duration) (Time, error) { fsp = d.Fsp } ret := NewTime(tm.coreTime, t.Type(), fsp) - return ret, ret.Check(sc) + return ret, ret.Check(ctx) } // TimestampDiff returns t2 - t1 where t1 and t2 are date or datetime expressions. @@ -953,7 +948,7 @@ func splitDateTime(format string) (seps []string, fracStr string, hasTZ bool, tz } // See https://dev.mysql.com/doc/refman/5.7/en/date-and-time-literals.html. 
-func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bool, explicitTz *gotime.Location) (Time, error) { +func parseDatetime(ctx Context, str string, fsp int, isFloat bool, explicitTz *gotime.Location) (Time, error) { var ( year, month, day, hour, minute, second, deltaHour, deltaMinute int fracStr string @@ -964,7 +959,7 @@ func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bo seps, fracStr, hasTZ, tzSign, tzHour, tzSep, tzMinute, truncatedOrIncorrect := splitDateTime(str) if truncatedOrIncorrect { - sc.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("datetime", str)) + ctx.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("datetime", str)) } /* if we have timezone parsed, there are the following cases to be considered, however some of them are wrongly parsed, and we should consider absorb them back to seps. @@ -1045,12 +1040,12 @@ func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bo l := len(seps[0]) // Values specified as numbers if isFloat { - numOfTime, err := StrToInt(sc.TypeCtxOrDefault(), seps[0], false) + numOfTime, err := StrToInt(ctx, seps[0], false) if err != nil { return ZeroDatetime, errors.Trace(ErrWrongValue.GenWithStackByArgs(DateTimeStr, str)) } - dateTime, err := ParseDatetimeFromNum(sc, numOfTime) + dateTime, err := ParseDatetimeFromNum(ctx, numOfTime) if err != nil { return ZeroDatetime, errors.Trace(ErrWrongValue.GenWithStackByArgs(DateTimeStr, str)) } @@ -1126,8 +1121,8 @@ func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bo } truncatedOrIncorrect = err != nil } - if truncatedOrIncorrect && sc != nil { - sc.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("datetime", str)) + if truncatedOrIncorrect { + ctx.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("datetime", str)) err = nil } case 2: @@ -1150,9 +1145,7 @@ func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bo // For case like 
`2020-05-28 23:59:59 00:00:00`, the seps should be > 6, the reluctant parts should be truncated. seps = seps[:6] // YYYY-MM-DD HH-MM-SS - if sc != nil { - sc.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("datetime", str)) - } + ctx.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("datetime", str)) err = scanTimeArgs(seps, &year, &month, &day, &hour, &minute, &second) hhmmss = true } @@ -1194,7 +1187,7 @@ func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bo if explicitTz != nil { t1, err = tmp.GoTime(explicitTz) } else { - t1, err = tmp.GoTime(sc.TimeZone()) + t1, err = tmp.GoTime(ctx.Location()) } if err != nil { return ZeroDatetime, errors.Trace(err) @@ -1231,7 +1224,7 @@ func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bo if explicitTz != nil { t1 = t1.In(explicitTz) } else { - t1 = t1.In(sc.TimeZone()) + t1 = t1.In(ctx.Location()) } tmp = FromGoTime(t1) } @@ -1500,24 +1493,24 @@ func (d Duration) ToNumber() *MyDecimal { // ConvertToTime converts duration to Time. // Tp is TypeDatetime, TypeTimestamp and TypeDate. -func (d Duration) ConvertToTime(sc *stmtctx.StatementContext, tp uint8) (Time, error) { - year, month, day := gotime.Now().In(sc.TimeZone()).Date() +func (d Duration) ConvertToTime(ctx Context, tp uint8) (Time, error) { + year, month, day := gotime.Now().In(ctx.Location()).Date() datePart := FromDate(year, int(month), day, 0, 0, 0, 0) mixDateAndDuration(&datePart, d) t := NewTime(datePart, mysql.TypeDatetime, d.Fsp) - return t.Convert(sc, tp) + return t.Convert(ctx, tp) } // ConvertToTimeWithTimestamp converts duration to Time by system timestamp. // Tp is TypeDatetime, TypeTimestamp and TypeDate. 
-func (d Duration) ConvertToTimeWithTimestamp(sc *stmtctx.StatementContext, tp uint8, ts gotime.Time) (Time, error) { - year, month, day := ts.In(sc.TimeZone()).Date() +func (d Duration) ConvertToTimeWithTimestamp(ctx Context, tp uint8, ts gotime.Time) (Time, error) { + year, month, day := ts.In(ctx.Location()).Date() datePart := FromDate(year, int(month), day, 0, 0, 0, 0) mixDateAndDuration(&datePart, d) t := NewTime(datePart, mysql.TypeDatetime, d.Fsp) - return t.Convert(sc, tp) + return t.Convert(ctx, tp) } // RoundFrac rounds fractional seconds precision with new fsp and returns a new one. @@ -1559,9 +1552,9 @@ func (d Duration) Compare(o Duration) int { // CompareString is like Compare, // but parses str to Duration then compares. -func (d Duration) CompareString(sc *stmtctx.StatementContext, str string) (int, error) { +func (d Duration) CompareString(ctx Context, str string) (int, error) { // use MaxFsp to parse the string - o, _, err := ParseDuration(sc, str, MaxFsp) + o, _, err := ParseDuration(ctx, str, MaxFsp) if err != nil { return 0, err } @@ -1813,7 +1806,7 @@ func canFallbackToDateTime(str string) bool { // ParseDuration parses the time form a formatted string with a fractional seconds part, // returns the duration type Time value and bool to indicate whether the result is null. 
// See http://dev.mysql.com/doc/refman/5.7/en/fractional-seconds.html -func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration, bool, error) { +func ParseDuration(ctx Context, str string, fsp int) (Duration, bool, error) { rest := strings.TrimSpace(str) d, isNull, err := matchDuration(rest, fsp) if err == nil { @@ -1823,7 +1816,7 @@ func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration, return d, isNull, ErrTruncatedWrongVal.GenWithStackByArgs("time", str) } - datetime, err := ParseDatetime(sc, rest) + datetime, err := ParseDatetime(ctx, rest) if err != nil { return ZeroDuration, true, ErrTruncatedWrongVal.GenWithStackByArgs("time", str) } @@ -1833,7 +1826,7 @@ func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration, return ZeroDuration, true, ErrTruncatedWrongVal.GenWithStackByArgs("time", str) } - d, err = d.RoundFrac(fsp, sc.TimeZone()) + d, err = d.RoundFrac(fsp, ctx.Location()) return d, false, err } @@ -1868,7 +1861,7 @@ func splitDuration(t gotime.Duration) (sign int, hours int, minutes int, seconds var maxDaysInMonth = []int{31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31} -func getTime(sc *stmtctx.StatementContext, num, originNum int64, tp byte) (Time, error) { +func getTime(ctx Context, num, originNum int64, tp byte) (Time, error) { s1 := num / 1000000 s2 := num - s1*1000000 @@ -1888,14 +1881,14 @@ func getTime(sc *stmtctx.StatementContext, num, originNum int64, tp byte) (Time, return ZeroDatetime, errors.Trace(ErrWrongValue.GenWithStackByArgs(TimeStr, numStr)) } t := NewTime(ct, tp, DefaultFsp) - err := t.check(sc, nil) + err := t.check(ctx, nil) return t, errors.Trace(err) } // parseDateTimeFromNum parses date time from num. // See number_to_datetime function. 
// https://github.com/mysql/mysql-server/blob/5.7/sql-common/my_time.c -func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) { +func parseDateTimeFromNum(ctx Context, num int64) (Time, error) { t := ZeroDate // Check zero. if num == 0 { @@ -1906,7 +1899,7 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) // Check datetime type. if num >= 10000101000000 { t.SetType(mysql.TypeDatetime) - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } // Check MMDD. @@ -1918,7 +1911,7 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) // YYMMDD, year: 2000-2069 if num <= (70-1)*10000+1231 { num = (num + 20000000) * 1000000 - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } // Check YYMMDD. @@ -1930,13 +1923,13 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) // YYMMDD, year: 1970-1999 if num <= 991231 { num = (num + 19000000) * 1000000 - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } // Adjust hour/min/second. if num <= 99991231 { num = num * 1000000 - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } // Check MMDDHHMMSS. @@ -1951,7 +1944,7 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) // YYMMDDHHMMSS, 2000-2069 if num <= 69*10000000000+1231235959 { num = num + 20000000000000 - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } // Check YYYYMMDDHHMMSS. 
@@ -1963,10 +1956,10 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) // YYMMDDHHMMSS, 1970-1999 if num <= 991231235959 { num = num + 19000000000000 - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } // ParseTime parses a formatted string with type tp and specific fsp. @@ -1981,51 +1974,51 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) // The valid timestamp range is from '1970-01-01 00:00:01.000000' to '2038-01-19 03:14:07.999999'. // The valid date range is from '1000-01-01' to '9999-12-31' // explicitTz is used to handle a data race of timeZone, refer to https://github.com/pingcap/tidb/issues/40710. It only works for timestamp now, be careful to use it! -func ParseTime(sc *stmtctx.StatementContext, str string, tp byte, fsp int, explicitTz *gotime.Location) (Time, error) { - return parseTime(sc, str, tp, fsp, false, explicitTz) +func ParseTime(ctx Context, str string, tp byte, fsp int, explicitTz *gotime.Location) (Time, error) { + return parseTime(ctx, str, tp, fsp, false, explicitTz) } // ParseTimeFromFloatString is similar to ParseTime, except that it's used to parse a float converted string. 
-func ParseTimeFromFloatString(sc *stmtctx.StatementContext, str string, tp byte, fsp int) (Time, error) { +func ParseTimeFromFloatString(ctx Context, str string, tp byte, fsp int) (Time, error) { // MySQL compatibility: 0.0 should not be converted to null, see #11203 if len(str) >= 3 && str[:3] == "0.0" { return NewTime(ZeroCoreTime, tp, DefaultFsp), nil } - return parseTime(sc, str, tp, fsp, true, nil) + return parseTime(ctx, str, tp, fsp, true, nil) } -func parseTime(sc *stmtctx.StatementContext, str string, tp byte, fsp int, isFloat bool, explicitTz *gotime.Location) (Time, error) { +func parseTime(ctx Context, str string, tp byte, fsp int, isFloat bool, explicitTz *gotime.Location) (Time, error) { fsp, err := CheckFsp(fsp) if err != nil { return NewTime(ZeroCoreTime, tp, DefaultFsp), errors.Trace(err) } - t, err := parseDatetime(sc, str, fsp, isFloat, explicitTz) + t, err := parseDatetime(ctx, str, fsp, isFloat, explicitTz) if err != nil { return NewTime(ZeroCoreTime, tp, DefaultFsp), errors.Trace(err) } t.SetType(tp) - if err = t.check(sc, explicitTz); err != nil { + if err = t.check(ctx, explicitTz); err != nil { return NewTime(ZeroCoreTime, tp, DefaultFsp), errors.Trace(err) } return t, nil } // ParseDatetime is a helper function wrapping ParseTime with datetime type and default fsp. -func ParseDatetime(sc *stmtctx.StatementContext, str string) (Time, error) { - return ParseTime(sc, str, mysql.TypeDatetime, GetFsp(str), nil) +func ParseDatetime(ctx Context, str string) (Time, error) { + return ParseTime(ctx, str, mysql.TypeDatetime, GetFsp(str), nil) } // ParseTimestamp is a helper function wrapping ParseTime with timestamp type and default fsp. 
-func ParseTimestamp(sc *stmtctx.StatementContext, str string) (Time, error) { - return ParseTime(sc, str, mysql.TypeTimestamp, GetFsp(str), nil) +func ParseTimestamp(ctx Context, str string) (Time, error) { + return ParseTime(ctx, str, mysql.TypeTimestamp, GetFsp(str), nil) } // ParseDate is a helper function wrapping ParseTime with date type. -func ParseDate(sc *stmtctx.StatementContext, str string) (Time, error) { +func ParseDate(ctx Context, str string) (Time, error) { // date has no fractional seconds precision - return ParseTime(sc, str, mysql.TypeDate, MinFsp, nil) + return ParseTime(ctx, str, mysql.TypeDate, MinFsp, nil) } // ParseTimeFromYear parse a `YYYY` formed year to corresponded Datetime type. @@ -2041,11 +2034,11 @@ func ParseTimeFromYear(_ *stmtctx.StatementContext, year int64) (Time, error) { // ParseTimeFromNum parses a formatted int64, // returns the value which type is tp. -func ParseTimeFromNum(sc *stmtctx.StatementContext, num int64, tp byte, fsp int) (Time, error) { +func ParseTimeFromNum(ctx Context, num int64, tp byte, fsp int) (Time, error) { // MySQL compatibility: 0 should not be converted to null, see #11203 if num == 0 { zt := NewTime(ZeroCoreTime, tp, DefaultFsp) - if sc != nil && sc.InCreateOrAlterStmt && !sc.TypeFlags().TruncateAsWarning() && sc.NoZeroDate { + if !ctx.Flags().IgnoreZeroDateErr() { switch tp { case mysql.TypeTimestamp: return zt, ErrTruncatedWrongVal.GenWithStackByArgs(TimestampStr, "0") @@ -2062,33 +2055,33 @@ func ParseTimeFromNum(sc *stmtctx.StatementContext, num int64, tp byte, fsp int) return NewTime(ZeroCoreTime, tp, DefaultFsp), errors.Trace(err) } - t, err := parseDateTimeFromNum(sc, num) + t, err := parseDateTimeFromNum(ctx, num) if err != nil { return NewTime(ZeroCoreTime, tp, DefaultFsp), errors.Trace(err) } t.SetType(tp) t.SetFsp(fsp) - if err := t.check(sc, nil); err != nil { + if err := t.check(ctx, nil); err != nil { return NewTime(ZeroCoreTime, tp, DefaultFsp), errors.Trace(err) } return t, nil } // 
ParseDatetimeFromNum is a helper function wrapping ParseTimeFromNum with datetime type and default fsp. -func ParseDatetimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) { - return ParseTimeFromNum(sc, num, mysql.TypeDatetime, DefaultFsp) +func ParseDatetimeFromNum(ctx Context, num int64) (Time, error) { + return ParseTimeFromNum(ctx, num, mysql.TypeDatetime, DefaultFsp) } // ParseTimestampFromNum is a helper function wrapping ParseTimeFromNum with timestamp type and default fsp. -func ParseTimestampFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) { - return ParseTimeFromNum(sc, num, mysql.TypeTimestamp, DefaultFsp) +func ParseTimestampFromNum(ctx Context, num int64) (Time, error) { + return ParseTimeFromNum(ctx, num, mysql.TypeTimestamp, DefaultFsp) } // ParseDateFromNum is a helper function wrapping ParseTimeFromNum with date type. -func ParseDateFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) { +func ParseDateFromNum(ctx Context, num int64) (Time, error) { // date has no fractional seconds precision - return ParseTimeFromNum(sc, num, mysql.TypeDate, MinFsp) + return ParseTimeFromNum(ctx, num, mysql.TypeDate, MinFsp) } // TimeFromDays Converts a day number to a date. @@ -2158,17 +2151,13 @@ func checkMonthDay(year, month, day int, allowInvalidDate bool) error { return nil } -func checkTimestampType(sc *stmtctx.StatementContext, t CoreTime, explicitTz *gotime.Location) error { +func checkTimestampType(ctx Context, t CoreTime, explicitTz *gotime.Location) error { if compareTime(t, ZeroCoreTime) == 0 { return nil } - if sc == nil { - return errors.New("statementContext is required during checkTimestampType") - } - var checkTime CoreTime - tz := sc.TimeZone() + tz := ctx.Location() if explicitTz != nil { tz = explicitTz } @@ -2650,16 +2639,16 @@ func IsDateFormat(format string) bool { } // ParseTimeFromInt64 parses mysql time value from int64. 
-func ParseTimeFromInt64(sc *stmtctx.StatementContext, num int64) (Time, error) { - return parseDateTimeFromNum(sc, num) +func ParseTimeFromInt64(ctx Context, num int64) (Time, error) { + return parseDateTimeFromNum(ctx, num) } // ParseTimeFromFloat64 parses mysql time value from float64. // It is used in scenarios that distinguish date and datetime, e.g., date_add/sub() with first argument being real. // For example, 20010203 parses to date (no HMS) and 20010203040506 parses to datetime (with HMS). -func ParseTimeFromFloat64(sc *stmtctx.StatementContext, f float64) (Time, error) { +func ParseTimeFromFloat64(ctx Context, f float64) (Time, error) { intPart := int64(f) - t, err := parseDateTimeFromNum(sc, intPart) + t, err := parseDateTimeFromNum(ctx, intPart) if err != nil { return ZeroTime, err } @@ -2676,13 +2665,13 @@ func ParseTimeFromFloat64(sc *stmtctx.StatementContext, f float64) (Time, error) // ParseTimeFromDecimal parses mysql time value from decimal. // It is used in scenarios that distinguish date and datetime, e.g., date_add/sub() with first argument being decimal. // For example, 20010203 parses to date (no HMS) and 20010203040506 parses to datetime (with HMS). -func ParseTimeFromDecimal(sc *stmtctx.StatementContext, dec *MyDecimal) (t Time, err error) { +func ParseTimeFromDecimal(ctx Context, dec *MyDecimal) (t Time, err error) { intPart, err := dec.ToInt() if err != nil && !terror.ErrorEqual(err, ErrTruncated) { return ZeroTime, err } fsp := min(MaxFsp, int(dec.GetDigitsFrac())) - t, err = parseDateTimeFromNum(sc, intPart) + t, err = parseDateTimeFromNum(ctx, intPart) if err != nil { return ZeroTime, err } @@ -2894,7 +2883,7 @@ func abbrDayOfMonth(day int) string { // StrToDate converts date string according to format. 
// See https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_date-format -func (t *Time) StrToDate(sc *stmtctx.StatementContext, date, format string) bool { +func (t *Time) StrToDate(typeCtx Context, date, format string) bool { ctx := make(map[string]int) var tm CoreTime success, warning := strToDate(&tm, date, format, ctx) @@ -2910,13 +2899,13 @@ func (t *Time) StrToDate(sc *stmtctx.StatementContext, date, format string) bool t.SetCoreTime(tm) t.SetType(mysql.TypeDatetime) - if t.check(sc, nil) != nil { + if t.check(typeCtx, nil) != nil { return false } if warning { // Only append this warning when success but still need warning. // Currently this only happens when `date` has extra characters at the end. - sc.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs(DateTimeStr, date)) + typeCtx.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs(DateTimeStr, date)) } return true } @@ -3434,8 +3423,8 @@ func DateFSP(date string) (fsp int) { // DateTimeIsOverflow returns if this date is overflow. 
// See: https://dev.mysql.com/doc/refman/8.0/en/datetime.html -func DateTimeIsOverflow(sc *stmtctx.StatementContext, date Time) (bool, error) { - tz := sc.TimeZone() +func DateTimeIsOverflow(ctx Context, date Time) (bool, error) { + tz := ctx.Location() if tz == nil { logutil.BgLogger().Warn("use gotime.local because sc.timezone is nil") tz = gotime.Local diff --git a/pkg/types/time_test.go b/pkg/types/time_test.go index 67008aee63707..0af88d4aaf276 100644 --- a/pkg/types/time_test.go +++ b/pkg/types/time_test.go @@ -25,9 +25,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mock" "github.com/stretchr/testify/require" ) @@ -61,8 +59,10 @@ func TestTimeEncoding(t *testing.T) { } func TestDateTime(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + var warnings []error + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) { + warnings = append(warnings, err) + }) table := []struct { Input string Expect string @@ -116,7 +116,7 @@ func TestDateTime(t *testing.T) { } for _, test := range table { - v, err := types.ParseDatetime(sc, test.Input) + v, err := types.ParseDatetime(typeCtx, test.Input) require.NoError(t, err) require.Equal(t, test.Expect, v.String()) } @@ -147,12 +147,12 @@ func TestDateTime(t *testing.T) { } for _, test := range fspTbl { - v, err := types.ParseTime(sc, test.Input, mysql.TypeDatetime, test.Fsp, nil) + v, err := types.ParseTime(typeCtx, test.Input, mysql.TypeDatetime, test.Fsp, nil) require.NoError(t, err) require.Equal(t, test.Expect, v.String()) } - v, _ := types.ParseTime(sc, "121231113045.9999999", mysql.TypeDatetime, 6, nil) + v, _ := types.ParseTime(typeCtx, "121231113045.9999999", mysql.TypeDatetime, 6, nil) require.Equal(t, 46, v.Second()) 
require.Equal(t, 0, v.Microsecond()) @@ -177,9 +177,9 @@ func TestDateTime(t *testing.T) { } for _, test := range errTable { - _, err := types.ParseDatetime(sc, test) - require.True(t, err != nil || sc.WarningCount() > 0) - sc.SetWarnings(nil) + _, err := types.ParseDatetime(typeCtx, test) + require.True(t, err != nil || len(warnings) > 0) + warnings = nil } } @@ -192,7 +192,7 @@ func TestTimestamp(t *testing.T) { } for _, test := range table { - v, err := types.ParseTimestamp(stmtctx.NewStmtCtxWithTimeZone(time.UTC), test.Input) + v, err := types.ParseTimestamp(types.DefaultStmtNoWarningContext, test.Input) require.NoError(t, err) require.Equal(t, test.Expect, v.String()) } @@ -203,14 +203,13 @@ func TestTimestamp(t *testing.T) { } for _, test := range errTable { - _, err := types.ParseTimestamp(stmtctx.NewStmtCtxWithTimeZone(time.UTC), test) + _, err := types.ParseTimestamp(types.DefaultStmtNoWarningContext, test) require.Error(t, err) } } func TestDate(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) table := []struct { Input string Expect string @@ -278,7 +277,7 @@ func TestDate(t *testing.T) { } for _, test := range table { - v, err := types.ParseDate(sc, test.Input) + v, err := types.ParseDate(typeCtx, test.Input) require.NoError(t, err) require.Equal(t, test.Expect, v.String()) } @@ -298,14 +297,13 @@ func TestDate(t *testing.T) { } for _, test := range errTable { - _, err := types.ParseDate(sc, test) + _, err := types.ParseDate(typeCtx, test) require.Error(t, err) } } func TestTime(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) table := []struct { Input string Expect string @@ -337,7 +335,7 @@ func TestTime(t *testing.T) { } for _, test := range table { - 
duration, isNull, err := types.ParseDuration(sc, test.Input, types.MinFsp) + duration, isNull, err := types.ParseDuration(typeCtx, test.Input, types.MinFsp) require.NoError(t, err) require.False(t, isNull) require.Equal(t, test.Expect, duration.String()) @@ -353,7 +351,7 @@ func TestTime(t *testing.T) { } for _, test := range table { - duration, _, err := types.ParseDuration(sc, test.Input, types.MaxFsp) + duration, _, err := types.ParseDuration(typeCtx, test.Input, types.MaxFsp) require.NoError(t, err) require.Equal(t, test.Expect, duration.String()) } @@ -368,7 +366,7 @@ func TestTime(t *testing.T) { } for _, test := range table { - duration, isNull, err := types.ParseDuration(sc, test.Input, types.MaxFsp) + duration, isNull, err := types.ParseDuration(typeCtx, test.Input, types.MaxFsp) require.False(t, isNull) require.True(t, types.ErrTruncatedWrongVal.Equal(err)) require.Equal(t, test.Expect, duration.String()) @@ -381,11 +379,11 @@ func TestTime(t *testing.T) { } for _, test := range errTable { - _, _, err := types.ParseDuration(sc, test, types.DefaultFsp) + _, _, err := types.ParseDuration(typeCtx, test, types.DefaultFsp) require.Error(t, err) } - duration, _, err := types.ParseDuration(sc, "4294967295 0:59:59", types.DefaultFsp) + duration, _, err := types.ParseDuration(typeCtx, "4294967295 0:59:59", types.DefaultFsp) require.Error(t, err) require.Equal(t, "838:59:59", duration.String()) @@ -428,15 +426,15 @@ func TestDurationAdd(t *testing.T) { {"00:00:00.099", 3, "00:00:00.001", 3, "00:00:00.100"}, } for _, test := range table { - duration, _, err := types.ParseDuration(nil, test.Input, test.Fsp) + duration, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, test.Input, test.Fsp) require.NoError(t, err) - ta, _, err := types.ParseDuration(nil, test.InputAdd, test.FspAdd) + ta, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, test.InputAdd, test.FspAdd) require.NoError(t, err) result, err := duration.Add(ta) require.NoError(t, 
err) require.Equal(t, test.Expect, result.String()) } - duration, _, err := types.ParseDuration(nil, "00:00:00", 0) + duration, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, "00:00:00", 0) require.NoError(t, err) ta := new(types.Duration) result, err := duration.Add(*ta) @@ -444,15 +442,14 @@ func TestDurationAdd(t *testing.T) { require.Equal(t, "00:00:00", result.String()) duration = types.Duration{Duration: math.MaxInt64, Fsp: 0} - tatmp, _, err := types.ParseDuration(nil, "00:01:00", 0) + tatmp, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, "00:01:00", 0) require.NoError(t, err) _, err = duration.Add(tatmp) require.Error(t, err) } func TestDurationSub(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) table := []struct { Input string Fsp int @@ -464,9 +461,9 @@ func TestDurationSub(t *testing.T) { {"00:00:00", 0, "00:00:00.1", 1, "-00:00:00.1"}, } for _, test := range table { - duration, _, err := types.ParseDuration(sc, test.Input, test.Fsp) + duration, _, err := types.ParseDuration(typeCtx, test.Input, test.Fsp) require.NoError(t, err) - ta, _, err := types.ParseDuration(sc, test.InputAdd, test.FspAdd) + ta, _, err := types.ParseDuration(typeCtx, test.InputAdd, test.FspAdd) require.NoError(t, err) result, err := duration.Sub(ta) require.NoError(t, err) @@ -475,8 +472,7 @@ func TestDurationSub(t *testing.T) { } func TestTimeFsp(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) table := []struct { Input string Fsp int @@ -495,7 +491,7 @@ func TestTimeFsp(t *testing.T) { } for _, test := range table { - duration, _, err := types.ParseDuration(sc, test.Input, test.Fsp) + duration, _, err := types.ParseDuration(typeCtx, 
test.Input, test.Fsp) require.NoError(t, err) require.Equal(t, test.Expect, duration.String()) } @@ -508,7 +504,7 @@ func TestTimeFsp(t *testing.T) { } for _, test := range errTable { - _, _, err := types.ParseDuration(sc, test.Input, test.Fsp) + _, _, err := types.ParseDuration(typeCtx, test.Input, test.Fsp) require.Error(t, err) } } @@ -575,13 +571,13 @@ func TestYear(t *testing.T) { } func TestCodec(t *testing.T) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) + typeCtx := types.DefaultStmtNoWarningContext // MySQL timestamp value doesn't allow month=0 or day=0. - _, err := types.ParseTimestamp(sc, "2016-12-00 00:00:00") + _, err := types.ParseTimestamp(typeCtx, "2016-12-00 00:00:00") require.Error(t, err) - t5, err := types.ParseTimestamp(sc, "2010-10-10 10:11:11") + t5, err := types.ParseTimestamp(typeCtx, "2010-10-10 10:11:11") require.NoError(t, err) _, err = t5.ToPackedUint() require.NoError(t, err) @@ -602,7 +598,7 @@ func TestCodec(t *testing.T) { require.NoError(t, err) require.Equal(t, types.ZeroDatetime.String(), t3.String()) - t5, err = types.ParseDatetime(nil, "0001-01-01 00:00:00") + t5, err = types.ParseDatetime(types.DefaultStmtNoWarningContext, "0001-01-01 00:00:00") require.NoError(t, err) packed, _ = t5.ToPackedUint() @@ -619,7 +615,7 @@ func TestCodec(t *testing.T) { } for _, test := range tbl { - v, err := types.ParseTime(sc, test, mysql.TypeDatetime, types.MaxFsp, nil) + v, err := types.ParseTime(typeCtx, test, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) packed, _ = v.ToPackedUint() @@ -670,7 +666,7 @@ func TestParseTimeFromNum(t *testing.T) { for ith, test := range table { // testtypes.ParseDatetimeFromNum - t1, err := types.ParseDatetimeFromNum(nil, test.Input) + t1, err := types.ParseDatetimeFromNum(types.DefaultStmtNoWarningContext, test.Input) if test.ExpectDateTimeError { require.Errorf(t, err, "%d", ith) } else { @@ -680,7 +676,7 @@ func TestParseTimeFromNum(t *testing.T) { require.Equal(t, 
test.ExpectDateTimeValue, t1.String()) // testtypes.ParseTimestampFromNum - t1, err = types.ParseTimestampFromNum(stmtctx.NewStmtCtxWithTimeZone(time.UTC), test.Input) + t1, err = types.ParseTimestampFromNum(types.DefaultStmtNoWarningContext, test.Input) if test.ExpectTimeStampError { require.Error(t, err) } else { @@ -690,7 +686,7 @@ func TestParseTimeFromNum(t *testing.T) { require.Equal(t, test.ExpectTimeStampValue, t1.String()) // testtypes.ParseDateFromNum - t1, err = types.ParseDateFromNum(nil, test.Input) + t1, err = types.ParseDateFromNum(types.DefaultStmtNoWarningContext, test.Input) if test.ExpectDateTimeError { require.Error(t, err) @@ -703,11 +699,9 @@ func TestParseTimeFromNum(t *testing.T) { } func TestToNumber(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true losAngelesTz, err := time.LoadLocation("America/Los_Angeles") require.NoError(t, err) - sc.SetTimeZone(losAngelesTz) + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), losAngelesTz, func(err error) {}) tblDateTime := []struct { Input string Fsp int @@ -725,7 +719,7 @@ func TestToNumber(t *testing.T) { } for _, test := range tblDateTime { - v, err := types.ParseTime(sc, test.Input, mysql.TypeDatetime, test.Fsp, nil) + v, err := types.ParseTime(typeCtx, test.Input, mysql.TypeDatetime, test.Fsp, nil) require.NoError(t, err) require.Equal(t, test.Expect, v.ToNumber().String()) } @@ -748,7 +742,7 @@ func TestToNumber(t *testing.T) { } for _, test := range tblDate { - v, err := types.ParseTime(sc, test.Input, mysql.TypeDate, 0, nil) + v, err := types.ParseTime(typeCtx, test.Input, mysql.TypeDate, 0, nil) require.NoError(t, err) require.Equal(t, test.Expect, v.ToNumber().String()) } @@ -771,7 +765,7 @@ func TestToNumber(t *testing.T) { } for _, test := range tblDuration { - v, _, err := types.ParseDuration(sc, test.Input, test.Fsp) + v, _, err := types.ParseDuration(typeCtx, test.Input, test.Fsp) require.NoError(t, err) // now we 
can only changetypes.Duration's Fsp to check ToNumber with different Fsp require.Equal(t, test.Expect, v.ToNumber().String()) @@ -779,8 +773,7 @@ func TestToNumber(t *testing.T) { } func TestParseTimeFromFloatString(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) table := []struct { Input string Fsp int @@ -800,7 +793,7 @@ func TestParseTimeFromFloatString(t *testing.T) { } for _, test := range table { - v, err := types.ParseTimeFromFloatString(sc, test.Input, mysql.TypeDatetime, test.Fsp) + v, err := types.ParseTimeFromFloatString(typeCtx, test.Input, mysql.TypeDatetime, test.Fsp) if test.ExpectError { require.Error(t, err) } else { @@ -847,9 +840,7 @@ func TestParseFrac(t *testing.T) { } func TestRoundFrac(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true - sc.SetTimeZone(time.UTC) + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) tbl := []struct { Input string Fsp int @@ -869,16 +860,16 @@ func TestRoundFrac(t *testing.T) { } for _, tt := range tbl { - v, err := types.ParseTime(sc, tt.Input, mysql.TypeDatetime, types.MaxFsp, nil) + v, err := types.ParseTime(typeCtx, tt.Input, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - nv, err := v.RoundFrac(sc, tt.Fsp) + nv, err := v.RoundFrac(typeCtx, tt.Fsp) require.NoError(t, err) require.Equal(t, tt.Except, nv.String()) } // test different time zone losAngelesTz, err := time.LoadLocation("America/Los_Angeles") require.NoError(t, err) - sc.SetTimeZone(losAngelesTz) + typeCtx = typeCtx.WithLocation(losAngelesTz) tbl = []struct { Input string Fsp int @@ -894,9 +885,9 @@ func TestRoundFrac(t *testing.T) { } for _, tt := range tbl { - v, err := types.ParseTime(sc, tt.Input, mysql.TypeDatetime, types.MaxFsp, nil) + v, err := types.ParseTime(typeCtx, 
tt.Input, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - nv, err := v.RoundFrac(sc, tt.Fsp) + nv, err := v.RoundFrac(typeCtx, tt.Fsp) require.NoError(t, err) require.Equal(t, tt.Except, nv.String()) } @@ -915,9 +906,9 @@ func TestRoundFrac(t *testing.T) { } for _, tt := range tbl { - v, _, err := types.ParseDuration(sc, tt.Input, types.MaxFsp) + v, _, err := types.ParseDuration(typeCtx, tt.Input, types.MaxFsp) require.NoError(t, err) - nv, err := v.RoundFrac(tt.Fsp, sc.TimeZone()) + nv, err := v.RoundFrac(tt.Fsp, typeCtx.Location()) require.NoError(t, err) require.Equal(t, tt.Except, nv.String()) } @@ -939,10 +930,8 @@ func TestRoundFrac(t *testing.T) { } func TestConvert(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true losAngelesTz, _ := time.LoadLocation("America/Los_Angeles") - sc.SetTimeZone(losAngelesTz) + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), losAngelesTz, func(err error) {}) tbl := []struct { Input string Fsp int @@ -958,7 +947,7 @@ func TestConvert(t *testing.T) { } for _, tt := range tbl { - v, err := types.ParseTime(sc, tt.Input, mysql.TypeDatetime, tt.Fsp, nil) + v, err := types.ParseTime(typeCtx, tt.Input, mysql.TypeDatetime, tt.Fsp, nil) require.NoError(t, err) nv, err := v.ConvertToDuration() require.NoError(t, err) @@ -975,21 +964,21 @@ func TestConvert(t *testing.T) { {"1 11:30:45.999999", 0}, } // test different time zone. 
- sc.SetTimeZone(time.UTC) + typeCtx = typeCtx.WithLocation(time.UTC) for _, tt := range tblDuration { - v, _, err := types.ParseDuration(sc, tt.Input, tt.Fsp) + v, _, err := types.ParseDuration(typeCtx, tt.Input, tt.Fsp) require.NoError(t, err) - year, month, day := time.Now().In(sc.TimeZone()).Date() - n := time.Date(year, month, day, 0, 0, 0, 0, sc.TimeZone()) - t1, err := v.ConvertToTime(sc, mysql.TypeDatetime) + year, month, day := time.Now().In(typeCtx.Location()).Date() + n := time.Date(year, month, day, 0, 0, 0, 0, typeCtx.Location()) + t1, err := v.ConvertToTime(typeCtx, mysql.TypeDatetime) require.NoError(t, err) - t2, _ := t1.GoTime(sc.TimeZone()) + t2, _ := t1.GoTime(typeCtx.Location()) require.Equal(t, v.Duration, t2.Sub(n)) } } func TestCompare(t *testing.T) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) + typeCtx := types.DefaultStmtNoWarningContext tbl := []struct { Arg1 string Arg2 string @@ -1003,17 +992,17 @@ func TestCompare(t *testing.T) { } for _, tt := range tbl { - v1, err := types.ParseTime(sc, tt.Arg1, mysql.TypeDatetime, types.MaxFsp, nil) + v1, err := types.ParseTime(typeCtx, tt.Arg1, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - ret, err := v1.CompareString(nil, tt.Arg2) + ret, err := v1.CompareString(types.DefaultStmtNoWarningContext, tt.Arg2) require.NoError(t, err) require.Equal(t, tt.Ret, ret) } - v1, err := types.ParseTime(sc, "2011-10-10 11:11:11", mysql.TypeDatetime, types.MaxFsp, nil) + v1, err := types.ParseTime(typeCtx, "2011-10-10 11:11:11", mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - res, err := v1.CompareString(nil, "Test should error") + res, err := v1.CompareString(types.DefaultStmtNoWarningContext, "Test should error") require.Error(t, err) require.Equal(t, 0, res) @@ -1028,10 +1017,10 @@ func TestCompare(t *testing.T) { } for _, tt := range tbl { - v1, _, err := types.ParseDuration(nil, tt.Arg1, types.MaxFsp) + v1, _, err := 
types.ParseDuration(types.DefaultStmtNoWarningContext, tt.Arg1, types.MaxFsp) require.NoError(t, err) - ret, err := v1.CompareString(nil, tt.Arg2) + ret, err := v1.CompareString(types.DefaultStmtNoWarningContext, tt.Arg2) require.NoError(t, err) require.Equal(t, tt.Ret, ret) } @@ -1052,7 +1041,7 @@ func TestDurationClock(t *testing.T) { } for _, tt := range tbl { - d, _, err := types.ParseDuration(stmtctx.NewStmtCtxWithTimeZone(time.UTC), tt.Input, types.MaxFsp) + d, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, tt.Input, types.MaxFsp) require.NoError(t, err) require.Equal(t, tt.Hour, d.Hour()) require.Equal(t, tt.Minute, d.Minute()) @@ -1163,15 +1152,15 @@ func TestTimeAdd(t *testing.T) { {"2017-08-21", "01:01:01.001", "2017-08-21 01:01:01.001"}, } - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) + typeCtx := types.DefaultStmtNoWarningContext for _, tt := range tbl { - v1, err := types.ParseTime(sc, tt.Arg1, mysql.TypeDatetime, types.MaxFsp, nil) + v1, err := types.ParseTime(typeCtx, tt.Arg1, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - dur, _, err := types.ParseDuration(sc, tt.Arg2, types.MaxFsp) + dur, _, err := types.ParseDuration(typeCtx, tt.Arg2, types.MaxFsp) require.NoError(t, err) - result, err := types.ParseTime(sc, tt.Ret, mysql.TypeDatetime, types.MaxFsp, nil) + result, err := types.ParseTime(typeCtx, tt.Ret, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - v2, err := v1.Add(sc, dur) + v2, err := v1.Add(typeCtx, dur) require.NoError(t, err) require.Equalf(t, 0, v2.Compare(result), "%v %v", v2.CoreTime(), result.CoreTime()) } @@ -1252,7 +1241,7 @@ func TestCheckTimestamp(t *testing.T) { } for _, tt := range tests { - validTimestamp := types.CheckTimestampTypeForTest(stmtctx.NewStmtCtxWithTimeZone(tt.tz), tt.input, nil) + validTimestamp := types.CheckTimestampTypeForTest(types.NewContext(types.StrictFlags, tt.tz, func(err error) {}), tt.input, nil) if tt.expectRetError { require.Errorf(t, 
validTimestamp, "For %s %s", tt.input, tt.tz) } else { @@ -1309,7 +1298,7 @@ func TestCheckTimestamp(t *testing.T) { } for _, tt := range tests { - validTimestamp := types.CheckTimestampTypeForTest(stmtctx.NewStmtCtxWithTimeZone(tt.tz), tt.input, nil) + validTimestamp := types.CheckTimestampTypeForTest(types.NewContext(types.StrictFlags, tt.tz, func(err error) {}), tt.input, nil) if tt.expectRetError { require.Errorf(t, validTimestamp, "For %s %s", tt.input, tt.tz) } else { @@ -1785,11 +1774,10 @@ func TestIsDateFormat(t *testing.T) { } func TestParseTimeFromInt64(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) input := int64(20190412140000) - output, err := types.ParseTimeFromInt64(sc, input) + output, err := types.ParseTimeFromInt64(typeCtx, input) require.NoError(t, err) require.Equal(t, types.DefaultFsp, output.Fsp()) require.Equal(t, mysql.TypeDatetime, output.Type()) @@ -1803,8 +1791,7 @@ func TestParseTimeFromInt64(t *testing.T) { } func TestParseTimeFromFloat64(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) cases := []struct { f float64 @@ -1829,7 +1816,7 @@ func TestParseTimeFromFloat64(t *testing.T) { } for _, c := range cases { - res, err := types.ParseTimeFromFloat64(sc, c.f) + res, err := types.ParseTimeFromFloat64(typeCtx, c.f) require.Equalf(t, c.t, res.Type(), "Type mismatch for case %v", c) require.Equalf(t, c.Y, res.Year(), "Year mismatch for case %v", c) require.Equalf(t, c.M, res.Month(), "Month mismatch for case %v", c) @@ -1847,8 +1834,7 @@ func TestParseTimeFromFloat64(t *testing.T) { } func TestParseTimeFromDecimal(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := 
types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) cases := []struct { d *types.MyDecimal @@ -1873,7 +1859,7 @@ func TestParseTimeFromDecimal(t *testing.T) { } for _, c := range cases { - res, err := types.ParseTimeFromDecimal(sc, c.d) + res, err := types.ParseTimeFromDecimal(typeCtx, c.d) require.Equalf(t, c.t, res.Type(), "Type mismatch for case %v", c) require.Equalf(t, c.Y, res.Year(), "Year mismatch for case %v", c) require.Equalf(t, c.M, res.Month(), "Month mismatch for case %v", c) @@ -1923,8 +1909,7 @@ func TestGetFracIndex(t *testing.T) { } func TestTimeOverflow(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) table := []struct { Input string Output bool @@ -1947,9 +1932,9 @@ func TestTimeOverflow(t *testing.T) { } for _, test := range table { - v, err := types.ParseDatetime(sc, test.Input) + v, err := types.ParseDatetime(typeCtx, test.Input) require.NoError(t, err) - isOverflow, err := types.DateTimeIsOverflow(sc, v) + isOverflow, err := types.DateTimeIsOverflow(typeCtx, v) require.NoError(t, err) require.Equal(t, test.Output, isOverflow) } @@ -1983,15 +1968,15 @@ func TestTimeSub(t *testing.T) { {"2019-04-12 18:20:00", "2019-04-12 14:00:00", "04:20:00"}, } - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) + typeCtx := types.DefaultStmtNoWarningContext for _, tt := range tbl { - v1, err := types.ParseTime(sc, tt.Arg1, mysql.TypeDatetime, types.MaxFsp, nil) + v1, err := types.ParseTime(typeCtx, tt.Arg1, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - v2, err := types.ParseTime(sc, tt.Arg2, mysql.TypeDatetime, types.MaxFsp, nil) + v2, err := types.ParseTime(typeCtx, tt.Arg2, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - dur, _, err := types.ParseDuration(sc, tt.Ret, types.MaxFsp) + dur, _, err := types.ParseDuration(typeCtx, 
tt.Ret, types.MaxFsp) require.NoError(t, err) - rec := v1.Sub(sc, &v2) + rec := v1.Sub(typeCtx, &v2) require.Equal(t, dur, rec) } } @@ -2016,12 +2001,11 @@ func TestCheckMonthDay(t *testing.T) { {types.FromDate(3200, 2, 29, 0, 0, 0, 0), true}, } - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - sc.AllowInvalidDate = false + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreInvalidDateErr(false), time.UTC, func(err error) {}) for _, tt := range dates { v := types.NewTime(tt.date, mysql.TypeDate, types.DefaultFsp) - err := v.Check(sc) + err := v.Check(typeCtx) if tt.isValidDate { require.NoError(t, err) } else { @@ -2182,7 +2166,7 @@ func TestParseWithTimezone(t *testing.T) { }, } for ith, ca := range cases { - v, err := types.ParseTime(stmtctx.NewStmtCtxWithTimeZone(ca.sysTZ), ca.lit, mysql.TypeTimestamp, ca.fsp, nil) + v, err := types.ParseTime(types.NewContext(types.StrictFlags, ca.sysTZ, func(err error) {}), ca.lit, mysql.TypeTimestamp, ca.fsp, nil) require.NoErrorf(t, err, "tidb time parse misbehaved on %d", ith) if err != nil { continue @@ -2194,8 +2178,8 @@ func TestParseWithTimezone(t *testing.T) { } func TestMarshalTime(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - v1, err := types.ParseTime(sc, "2017-01-18 01:01:01.123456", mysql.TypeDatetime, types.MaxFsp, nil) + typeCtx := types.DefaultStmtNoWarningContext + v1, err := types.ParseTime(typeCtx, "2017-01-18 01:01:01.123456", mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) j, err := json.Marshal(v1) require.NoError(t, err) @@ -2215,11 +2199,11 @@ func BenchmarkFormat(b *testing.B) { } func BenchmarkTimeAdd(b *testing.B) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - arg1, _ := types.ParseTime(sc, "2017-01-18", mysql.TypeDatetime, types.MaxFsp, nil) - arg2, _, _ := types.ParseDuration(sc, "12:30:59", types.MaxFsp) + typeCtx := types.DefaultStmtNoWarningContext + arg1, _ := types.ParseTime(typeCtx, "2017-01-18", mysql.TypeDatetime, types.MaxFsp, nil) + arg2, 
_, _ := types.ParseDuration(typeCtx, "12:30:59", types.MaxFsp) for i := 0; i < b.N; i++ { - _, err := arg1.Add(sc, arg2) + _, err := arg1.Add(typeCtx, arg2) if err != nil { b.Fatal(err) } @@ -2227,9 +2211,9 @@ func BenchmarkTimeAdd(b *testing.B) { } func BenchmarkTimeCompare(b *testing.B) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) + typeCtx := types.DefaultStmtNoWarningContext mustParse := func(str string) types.Time { - t, err := types.ParseDatetime(sc, str) + t, err := types.ParseDatetime(typeCtx, str) if err != nil { b.Fatal(err) } @@ -2278,10 +2262,10 @@ func BenchmarkParseDateFormat(b *testing.B) { benchmarkDateFormat(b, "datetime repeated delimiters", "2011---12---13 14::15::16..123456") } -func benchmarkDatetimeFormat(b *testing.B, name string, sc *stmtctx.StatementContext, str string) { +func benchmarkDatetimeFormat(b *testing.B, name string, ctx types.Context, str string) { b.Run(name, func(b *testing.B) { for i := 0; i < b.N; i++ { - _, err := types.ParseDatetime(sc, str) + _, err := types.ParseDatetime(ctx, str) if err != nil { b.Fatal(err) } @@ -2290,23 +2274,23 @@ func benchmarkDatetimeFormat(b *testing.B, name string, sc *stmtctx.StatementCon } func BenchmarkParseDatetimeFormat(b *testing.B) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - benchmarkDatetimeFormat(b, "datetime without timezone", sc, "2020-10-10T10:10:10") - benchmarkDatetimeFormat(b, "datetime with timezone", sc, "2020-10-10T10:10:10Z+08:00") + typeCtx := types.DefaultStmtNoWarningContext + benchmarkDatetimeFormat(b, "datetime without timezone", typeCtx, "2020-10-10T10:10:10") + benchmarkDatetimeFormat(b, "datetime with timezone", typeCtx, "2020-10-10T10:10:10Z+08:00") } -func benchmarkStrToDate(b *testing.B, name string, sc *stmtctx.StatementContext, str, format string) { +func benchmarkStrToDate(b *testing.B, name string, ctx types.Context, str, format string) { b.Run(name, func(b *testing.B) { for i := 0; i < b.N; i++ { var t types.Time - t.StrToDate(sc, str, format) + 
t.StrToDate(ctx, str, format) } }) } func BenchmarkStrToDate(b *testing.B) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - benchmarkStrToDate(b, "strToDate yyyyMMdd hhmmss ffff", sc, "31/05/2016 12:34:56.1234", "%d/%m/%Y %H:%i:%S.%f") - benchmarkStrToDate(b, "strToDate %r ddMMyyyy", sc, "04:13:56 AM 13/05/2019", "%r %d/%c/%Y") - benchmarkStrToDate(b, "strToDate %T ddMMyyyy", sc, " 4:13:56 13/05/2019", "%T %d/%c/%Y") + typeCtx := types.DefaultStmtNoWarningContext + benchmarkStrToDate(b, "strToDate yyyyMMdd hhmmss ffff", typeCtx, "31/05/2016 12:34:56.1234", "%d/%m/%Y %H:%i:%S.%f") + benchmarkStrToDate(b, "strToDate %r ddMMyyyy", typeCtx, "04:13:56 AM 13/05/2019", "%r %d/%c/%Y") + benchmarkStrToDate(b, "strToDate %T ddMMyyyy", typeCtx, " 4:13:56 13/05/2019", "%T %d/%c/%Y") } diff --git a/pkg/util/chunk/mutrow_test.go b/pkg/util/chunk/mutrow_test.go index 82d0ce32adb82..b09663f0084d8 100644 --- a/pkg/util/chunk/mutrow_test.go +++ b/pkg/util/chunk/mutrow_test.go @@ -79,7 +79,7 @@ func TestMutRow(t *testing.T) { retTypes := []*types.FieldType{types.NewFieldType(mysql.TypeDuration)} chk := New(retTypes, 1, 1) - dur, _, err := types.ParseDuration(sc, "01:23:45", 0) + dur, _, err := types.ParseDuration(sc.TypeCtx(), "01:23:45", 0) require.NoError(t, err) chk.AppendDuration(0, dur) mutRow = MutRowFromTypes(retTypes) diff --git a/pkg/util/codec/codec_test.go b/pkg/util/codec/codec_test.go index 0bcbdea8ad931..4b636c3087e56 100644 --- a/pkg/util/codec/codec_test.go +++ b/pkg/util/codec/codec_test.go @@ -520,13 +520,13 @@ func TestBytes(t *testing.T) { func parseTime(t *testing.T, s string) types.Time { sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - m, err := types.ParseTime(sc, s, mysql.TypeDatetime, types.DefaultFsp, nil) + m, err := types.ParseTime(sc.TypeCtx(), s, mysql.TypeDatetime, types.DefaultFsp, nil) require.NoError(t, err) return m } func parseDuration(t *testing.T, s string) types.Duration { - m, _, err := types.ParseDuration(nil, s, types.DefaultFsp) + m, _, 
err := types.ParseDuration(types.DefaultStmtNoWarningContext, s, types.DefaultFsp) require.NoError(t, err) return m } diff --git a/pkg/util/dbutil/common.go b/pkg/util/dbutil/common.go index d6291b30729fd..0ab538e247371 100644 --- a/pkg/util/dbutil/common.go +++ b/pkg/util/dbutil/common.go @@ -551,7 +551,7 @@ func AnalyzeValuesFromBuckets(valueString string, cols []*model.ColumnInfo) ([]s if IsTimeTypeAndNeedDecode(col.GetType()) { // check if values[i] is already a time string sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - _, err := types.ParseTime(sc, values[i], col.GetType(), types.MinFsp, nil) + _, err := types.ParseTime(sc.TypeCtx(), values[i], col.GetType(), types.MinFsp, nil) if err == nil { continue } diff --git a/pkg/util/rowDecoder/decoder_test.go b/pkg/util/rowDecoder/decoder_test.go index fb77d027d4592..039d929f7359c 100644 --- a/pkg/util/rowDecoder/decoder_test.go +++ b/pkg/util/rowDecoder/decoder_test.go @@ -77,11 +77,11 @@ func TestRowDecoder(t *testing.T) { Duration: time.Hour + time.Second, }) - time2, err := time1.Add(sc, d1.GetMysqlDuration()) + time2, err := time1.Add(sc.TypeCtx(), d1.GetMysqlDuration()) require.Nil(t, err) t2 := types.NewTimeDatum(time2) - time3, err := time1.Add(sc, types.Duration{Duration: time.Hour*2 + time.Second*2}) + time3, err := time1.Add(sc.TypeCtx(), types.Duration{Duration: time.Hour*2 + time.Second*2}) require.Nil(t, err) t3 := types.NewTimeDatum(time3) diff --git a/pkg/util/rowcodec/rowcodec_test.go b/pkg/util/rowcodec/rowcodec_test.go index 64844fed25e78..e3c6086cdc12d 100644 --- a/pkg/util/rowcodec/rowcodec_test.go +++ b/pkg/util/rowcodec/rowcodec_test.go @@ -292,7 +292,7 @@ func TestTypesNewRowCodec(t *testing.T) { return d } getTime := func(value string) types.Time { - d, err := types.ParseTime(stmtctx.NewStmtCtxWithTimeZone(time.UTC), value, mysql.TypeTimestamp, 6, nil) + d, err := types.ParseTime(types.DefaultStmtNoWarningContext, value, mysql.TypeTimestamp, 6, nil) require.NoError(t, err) return d } @@ 
-1306,7 +1306,7 @@ var ( } } getDuration = func(value string) types.Duration { - dur, _, _ := types.ParseDuration(nil, value, 0) + dur, _, _ := types.ParseDuration(types.DefaultStmtNoWarningContext, value, 0) return dur } getOldDatumByte = func(d types.Datum) []byte { diff --git a/pkg/util/timeutil/time_zone.go b/pkg/util/timeutil/time_zone.go index 173810695414c..2c0f4f95a3d0e 100644 --- a/pkg/util/timeutil/time_zone.go +++ b/pkg/util/timeutil/time_zone.go @@ -237,7 +237,7 @@ func ParseTimeZone(s string) (*time.Location, error) { // The value can be given as a string indicating an offset from UTC, such as '+10:00' or '-6:00'. // The time zone's value should in [-12:59,+14:00]. if strings.HasPrefix(s, "+") || strings.HasPrefix(s, "-") { - d, _, err := types.ParseDuration(nil, s[1:], 0) + d, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, s[1:], 0) if err == nil { if s[0] == '-' { if d.Duration > 12*time.Hour+59*time.Minute { From 2387127ea6859d4fa89504ff52b20cc02fa86d2e Mon Sep 17 00:00:00 2001 From: Rustin Liu Date: Fri, 27 Oct 2023 19:18:34 +0800 Subject: [PATCH 21/33] statistics: better benchmark tests for merge topN (#48006) --- pkg/statistics/cmsketch.go | 6 +- pkg/statistics/handle/globalstats/topn.go | 51 +++++-- .../handle/globalstats/topn_bench_test.go | 143 ++++++++---------- 3 files changed, 103 insertions(+), 97 deletions(-) diff --git a/pkg/statistics/cmsketch.go b/pkg/statistics/cmsketch.go index b5fe74411888a..9643e6b7188b2 100644 --- a/pkg/statistics/cmsketch.go +++ b/pkg/statistics/cmsketch.go @@ -828,14 +828,12 @@ func MergeTopN(topNs []*TopN, n uint32) (*TopN, []TopNMeta) { // CheckEmptyTopNs checks whether all TopNs are empty. 
func CheckEmptyTopNs(topNs []*TopN) bool { - count := uint64(0) for _, topN := range topNs { - count += topN.TotalCount() - if count != 0 { + if topN.TotalCount() != 0 { return false } } - return count == 0 + return true } // SortTopnMeta sort topnMeta diff --git a/pkg/statistics/handle/globalstats/topn.go b/pkg/statistics/handle/globalstats/topn.go index d07ea09bae988..9e9f14a068a54 100644 --- a/pkg/statistics/handle/globalstats/topn.go +++ b/pkg/statistics/handle/globalstats/topn.go @@ -45,12 +45,21 @@ func mergeGlobalStatsTopN(gp *gp.Pool, sc sessionctx.Context, wrapper *StatsWrap return MergeGlobalStatsTopNByConcurrency(gp, mergeConcurrency, batchSize, wrapper, timeZone, version, n, isIndex, killer) } -// MergeGlobalStatsTopNByConcurrency merge partition topN by concurrency -// To merge global stats topn by concurrency, we will separate the partition topn in concurrency part and deal it with different worker. -// mergeConcurrency is used to control the total concurrency of the running worker, and mergeBatchSize is sued to control -// the partition size for each worker to solve it -func MergeGlobalStatsTopNByConcurrency(gp *gp.Pool, mergeConcurrency, mergeBatchSize int, wrapper *StatsWrapper, - timeZone *time.Location, version int, n uint32, isIndex bool, killer *sqlkiller.SQLKiller) (*statistics.TopN, +// MergeGlobalStatsTopNByConcurrency merge partition topN by concurrency. +// To merge global stats topN by concurrency, +// we will separate the partition topN in concurrency part and deal it with different worker. 
+// mergeConcurrency is used to control the total concurrency of the running worker, +// and mergeBatchSize is sued to control the partition size for each worker to solve it +func MergeGlobalStatsTopNByConcurrency( + gp *gp.Pool, + mergeConcurrency, mergeBatchSize int, + wrapper *StatsWrapper, + timeZone *time.Location, + version int, + n uint32, + isIndex bool, + killer *sqlkiller.SQLKiller, +) (*statistics.TopN, []statistics.TopNMeta, []*statistics.Histogram, error) { if len(wrapper.AllTopN) < mergeConcurrency { mergeConcurrency = len(wrapper.AllTopN) @@ -119,18 +128,31 @@ func MergeGlobalStatsTopNByConcurrency(gp *gp.Pool, mergeConcurrency, mergeBatch // MergePartTopN2GlobalTopN is used to merge the partition-level topN to global-level topN. // The input parameters: // 1. `topNs` are the partition-level topNs to be merged. -// 2. `n` is the size of the global-level topN. Notice: This value can be 0 and has no default value, we must explicitly specify this value. -// 3. `hists` are the partition-level histograms. Some values not in topN may be placed in the histogram. We need it here to make the value in the global-level TopN more accurate. +// 2. `n` is the size of the global-level topN. +// Notice: This value can be 0 and has no default value, we must explicitly specify this value. +// 3. `hists` are the partition-level histograms. +// Some values not in topN may be placed in the histogram. +// We need it here to make the value in the global-level TopN more accurate. // // The output parameters: // 1. `*TopN` is the final global-level topN. -// 2. `[]TopNMeta` is the left topN value from the partition-level TopNs, but is not placed to global-level TopN. We should put them back to histogram latter. -// 3. `[]*Histogram` are the partition-level histograms which just delete some values when we merge the global-level topN. 
-func MergePartTopN2GlobalTopN(loc *time.Location, version int, topNs []*statistics.TopN, n uint32, hists []*statistics.Histogram, - isIndex bool, killer *sqlkiller.SQLKiller) (*statistics.TopN, []statistics.TopNMeta, []*statistics.Histogram, error) { +// 2. `[]TopNMeta` is the left topN value from the partition-level TopNs, +// but is not placed to global-level TopN. We should put them back to histogram latter. +// 3. `[]*Histogram` are the partition-level histograms which +// just delete some values when we merge the global-level topN. +func MergePartTopN2GlobalTopN( + loc *time.Location, + version int, + topNs []*statistics.TopN, + n uint32, + hists []*statistics.Histogram, + isIndex bool, + killer *sqlkiller.SQLKiller, +) (*statistics.TopN, []statistics.TopNMeta, []*statistics.Histogram, error) { if statistics.CheckEmptyTopNs(topNs) { return nil, nil, hists, nil } + partNum := len(topNs) // Different TopN structures may hold the same value, we have to merge them. counter := make(map[hack.MutableString]float64) @@ -141,9 +163,11 @@ func MergePartTopN2GlobalTopN(loc *time.Location, version int, topNs []*statisti if err := killer.HandleSignal(); err != nil { return nil, nil, nil, err } + // Ignore the empty topN. if topN.TotalCount() == 0 { continue } + for _, val := range topN.TopN { encodedVal := hack.String(val.Encoded) _, exists := counter[encodedVal] @@ -152,6 +176,7 @@ func MergePartTopN2GlobalTopN(loc *time.Location, version int, topNs []*statisti // We have already calculated the encodedVal from the histogram, so just continue to next topN value. continue } + // We need to check whether the value corresponding to encodedVal is contained in other partition-level stats. // 1. Check the topN first. // 2. If the topN doesn't contain the value corresponding to encodedVal. We should check the histogram. 
@@ -159,6 +184,7 @@ func MergePartTopN2GlobalTopN(loc *time.Location, version int, topNs []*statisti if err := killer.HandleSignal(); err != nil { return nil, nil, nil, err } + if (j == i && version >= 2) || topNs[j].FindTopN(val.Encoded) != -1 { continue } @@ -181,6 +207,7 @@ func MergePartTopN2GlobalTopN(loc *time.Location, version int, topNs []*statisti } } } + numTop := len(counter) if numTop == 0 { return nil, nil, hists, nil diff --git a/pkg/statistics/handle/globalstats/topn_bench_test.go b/pkg/statistics/handle/globalstats/topn_bench_test.go index ffad91ec224ab..a272bfbd4bfee 100644 --- a/pkg/statistics/handle/globalstats/topn_bench_test.go +++ b/pkg/statistics/handle/globalstats/topn_bench_test.go @@ -16,6 +16,7 @@ package globalstats import ( "fmt" + "math/rand" "testing" "time" @@ -30,29 +31,22 @@ import ( "github.com/tiancaiamao/gp" ) -// cmd: go test -run=^$ -bench=BenchmarkMergePartTopN2GlobalTopNWithHists -benchmem github.com/pingcap/tidb/pkg/statistics/handle/globalstats -func benchmarkMergePartTopN2GlobalTopNWithHists(partitions int, b *testing.B) { - loc := time.UTC - sc := stmtctx.NewStmtCtxWithTimeZone(loc) - version := 1 - killer := sqlkiller.SQLKiller{} - +func prepareTopNsAndHists(b *testing.B, partitions int, tz *time.Location) ([]*statistics.TopN, []*statistics.Histogram) { + sc := stmtctx.NewStmtCtxWithTimeZone(tz) // Prepare TopNs. topNs := make([]*statistics.TopN, 0, partitions) for i := 0; i < partitions; i++ { - // Construct TopN, should be key1 -> 2, key2 -> 2, key3 -> 3. - topN := statistics.NewTopN(3) + // Construct TopN, should be key1 -> rand(0, 1000), key2 -> rand(0, 1000), key3 -> rand(0, 1000)... 
+ topN := statistics.NewTopN(500) { - key1, err := codec.EncodeKey(sc, nil, types.NewIntDatum(1)) - require.NoError(b, err) - topN.AppendTopN(key1, 2) - key2, err := codec.EncodeKey(sc, nil, types.NewIntDatum(2)) - require.NoError(b, err) - topN.AppendTopN(key2, 2) - if i%2 == 0 { - key3, err := codec.EncodeKey(sc, nil, types.NewIntDatum(3)) + for j := 1; j <= 500; j++ { + // Randomly skip some keys for some partitions. + if i%2 == 0 && j%2 == 0 { + continue + } + key, err := codec.EncodeKey(sc, nil, types.NewIntDatum(int64(j))) require.NoError(b, err) - topN.AppendTopN(key3, 3) + topN.AppendTopN(key, uint64(rand.Intn(1000))) } } topNs = append(topNs, topN) @@ -62,68 +56,55 @@ func benchmarkMergePartTopN2GlobalTopNWithHists(partitions int, b *testing.B) { hists := make([]*statistics.Histogram, 0, partitions) for i := 0; i < partitions; i++ { // Construct Hist - h := statistics.NewHistogram(1, 10, 0, 0, types.NewFieldType(mysql.TypeTiny), chunk.InitialCapacity, 0) - h.Bounds.AppendInt64(0, 1) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 20}) - h.Bounds.AppendInt64(0, 2) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 30}) - h.Bounds.AppendInt64(0, 3) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 30}) - h.Bounds.AppendInt64(0, 4) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 40}) + h := statistics.NewHistogram(1, 500, 0, 0, types.NewFieldType(mysql.TypeTiny), chunk.InitialCapacity, 0) + for j := 1; j <= 500; j++ { + datum := types.NewIntDatum(int64(j)) + h.AppendBucket(&datum, &datum, int64(10+j*10), 10) + } hists = append(hists, h) } + return topNs, hists +} + +func benchmarkMergePartTopN2GlobalTopNWithHists(partitions int, b *testing.B) { + loc := time.UTC + version := 1 + killer := sqlkiller.SQLKiller{} + topNs, hists := prepareTopNsAndHists(b, partitions, loc) + b.ResetTimer() for i := 0; i < b.N; i++ { - // Benchmark merge 10 topN. 
- _, _, _, _ = MergePartTopN2GlobalTopN(loc, version, topNs, 10, hists, false, &killer) + // Benchmark merge 100 topN. + _, _, _, _ = MergePartTopN2GlobalTopN( + loc, + version, + topNs, + 100, + hists, + false, + &killer, + ) + } +} + +var benchmarkSizes = []int{100, 1000, 2000, 5000, 10000} + +// cmd: go test -run=^$ -bench=BenchmarkMergePartTopN2GlobalTopNWithHists -benchmem github.com/pingcap/tidb/pkg/statistics/handle/globalstats +func BenchmarkMergePartTopN2GlobalTopNWithHists(b *testing.B) { + for _, size := range benchmarkSizes { + b.Run(fmt.Sprintf("Size%d", size), func(b *testing.B) { + benchmarkMergePartTopN2GlobalTopNWithHists(size, b) + }) } } -// cmd: go test -run=^$ -bench=BenchmarkMergeGlobalStatsTopNByConcurrencyWithHists -benchmem github.com/pingcap/tidb/pkg/statistics/handle/globalstats func benchmarkMergeGlobalStatsTopNByConcurrencyWithHists(partitions int, b *testing.B) { loc := time.UTC - sc := stmtctx.NewStmtCtxWithTimeZone(loc) version := 1 killer := sqlkiller.SQLKiller{} - // Prepare TopNs. - topNs := make([]*statistics.TopN, 0, partitions) - for i := 0; i < partitions; i++ { - // Construct TopN, should be key1 -> 2, key2 -> 2, key3 -> 3. - topN := statistics.NewTopN(3) - { - key1, err := codec.EncodeKey(sc, nil, types.NewIntDatum(1)) - require.NoError(b, err) - topN.AppendTopN(key1, 2) - key2, err := codec.EncodeKey(sc, nil, types.NewIntDatum(2)) - require.NoError(b, err) - topN.AppendTopN(key2, 2) - if i%2 == 0 { - key3, err := codec.EncodeKey(sc, nil, types.NewIntDatum(3)) - require.NoError(b, err) - topN.AppendTopN(key3, 3) - } - } - topNs = append(topNs, topN) - } - - // Prepare Hists. 
- hists := make([]*statistics.Histogram, 0, partitions) - for i := 0; i < partitions; i++ { - // Construct Hist - h := statistics.NewHistogram(1, 10, 0, 0, types.NewFieldType(mysql.TypeTiny), chunk.InitialCapacity, 0) - h.Bounds.AppendInt64(0, 1) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 20}) - h.Bounds.AppendInt64(0, 2) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 30}) - h.Bounds.AppendInt64(0, 3) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 30}) - h.Bounds.AppendInt64(0, 4) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 40}) - hists = append(hists, h) - } + topNs, hists := prepareTopNsAndHists(b, partitions, loc) wrapper := NewStatsWrapper(hists, topNs) const mergeConcurrency = 4 batchSize := len(wrapper.AllTopN) / mergeConcurrency @@ -136,24 +117,24 @@ func benchmarkMergeGlobalStatsTopNByConcurrencyWithHists(partitions int, b *test defer gpool.Close() b.ResetTimer() for i := 0; i < b.N; i++ { - // Benchmark merge 10 topN. - _, _, _, _ = MergeGlobalStatsTopNByConcurrency(gpool, mergeConcurrency, batchSize, wrapper, loc, version, 10, false, &killer) - } -} - -var benchmarkSizes = []int{100, 1000, 10000, 100000, 1000000, 10000000} -var benchmarkConcurrencySizes = []int{100, 1000, 10000, 100000} - -func BenchmarkMergePartTopN2GlobalTopNWithHists(b *testing.B) { - for _, size := range benchmarkSizes { - b.Run(fmt.Sprintf("Size%d", size), func(b *testing.B) { - benchmarkMergePartTopN2GlobalTopNWithHists(size, b) - }) + // Benchmark merge 100 topN. 
+ _, _, _, _ = MergeGlobalStatsTopNByConcurrency( + gpool, + mergeConcurrency, + batchSize, + wrapper, + loc, + version, + 100, + false, + &killer, + ) } } +// cmd: go test -run=^$ -bench=BenchmarkMergeGlobalStatsTopNByConcurrencyWithHists -benchmem github.com/pingcap/tidb/pkg/statistics/handle/globalstats func BenchmarkMergeGlobalStatsTopNByConcurrencyWithHists(b *testing.B) { - for _, size := range benchmarkConcurrencySizes { + for _, size := range benchmarkSizes { b.Run(fmt.Sprintf("Size%d", size), func(b *testing.B) { benchmarkMergeGlobalStatsTopNByConcurrencyWithHists(size, b) }) From bf1913e7968dea906c2b1e4483506ceaca61fb31 Mon Sep 17 00:00:00 2001 From: Arenatlx <314806019@qq.com> Date: Fri, 27 Oct 2023 19:18:41 +0800 Subject: [PATCH 22/33] planner: fix mpp task spreading blocked by virtual column substitution (#48041) close pingcap/tidb#47766 --- pkg/executor/test/tiflashtest/BUILD.bazel | 2 +- pkg/executor/test/tiflashtest/tiflash_test.go | 36 +++++++++++++++++++ .../core/rule_generate_column_substitute.go | 6 ++++ 3 files changed, 43 insertions(+), 1 deletion(-) diff --git a/pkg/executor/test/tiflashtest/BUILD.bazel b/pkg/executor/test/tiflashtest/BUILD.bazel index 6d99e3ee3b5b9..4afb61918b486 100644 --- a/pkg/executor/test/tiflashtest/BUILD.bazel +++ b/pkg/executor/test/tiflashtest/BUILD.bazel @@ -9,7 +9,7 @@ go_test( ], flaky = True, race = "on", - shard_count = 39, + shard_count = 40, deps = [ "//pkg/config", "//pkg/domain", diff --git a/pkg/executor/test/tiflashtest/tiflash_test.go b/pkg/executor/test/tiflashtest/tiflash_test.go index 73f97caa9cda2..1502e54baf76b 100644 --- a/pkg/executor/test/tiflashtest/tiflash_test.go +++ b/pkg/executor/test/tiflashtest/tiflash_test.go @@ -1735,6 +1735,42 @@ func TestMppStoreCntWithErrors(t *testing.T) { require.Nil(t, failpoint.Disable(mppStoreCountPDError)) } +func TestMPP47766(t *testing.T) { + store := testkit.CreateMockStore(t, withMockTiFlash(1)) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") 
+ tk.MustExec("set @@session.tidb_allow_mpp=1") + tk.MustExec("set @@session.tidb_enforce_mpp=1") + tk.MustExec("set @@session.tidb_allow_tiflash_cop=off") + + tk.MustExec("CREATE TABLE `traces` (" + + " `test_time` timestamp NOT NULL," + + " `test_time_gen` date GENERATED ALWAYS AS (date(`test_time`)) VIRTUAL," + + " KEY `traces_date_idx` (`test_time_gen`)" + + ")") + tk.MustExec("alter table `traces` set tiflash replica 1") + tb := external.GetTableByName(t, tk, "test", "traces") + err := domain.GetDomain(tk.Session()).DDL().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) + require.NoError(t, err) + tk.MustQuery("explain select date(test_time), count(1) as test_date from `traces` group by 1").Check(testkit.Rows( + "Projection_4 8000.00 root test.traces.test_time_gen->Column#5, Column#4", + "└─HashAgg_8 8000.00 root group by:test.traces.test_time_gen, funcs:count(1)->Column#4, funcs:firstrow(test.traces.test_time_gen)->test.traces.test_time_gen", + " └─TableReader_20 10000.00 root MppVersion: 2, data:ExchangeSender_19", + " └─ExchangeSender_19 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─TableFullScan_18 10000.00 mpp[tiflash] table:traces keep order:false, stats:pseudo")) + tk.MustQuery("explain select /*+ read_from_storage(tiflash[traces]) */ date(test_time) as test_date, count(1) from `traces` group by 1").Check(testkit.Rows( + "TableReader_31 8000.00 root MppVersion: 2, data:ExchangeSender_30", + "└─ExchangeSender_30 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection_5 8000.00 mpp[tiflash] date(test.traces.test_time)->Column#5, Column#4", + " └─Projection_26 8000.00 mpp[tiflash] Column#4, test.traces.test_time", + " └─HashAgg_27 8000.00 mpp[tiflash] group by:Column#13, funcs:sum(Column#14)->Column#4, funcs:firstrow(Column#15)->test.traces.test_time", + " └─ExchangeReceiver_29 8000.00 mpp[tiflash] ", + " └─ExchangeSender_28 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#13, 
collate: binary]", + " └─HashAgg_25 8000.00 mpp[tiflash] group by:Column#17, funcs:count(1)->Column#14, funcs:firstrow(Column#16)->Column#15", + " └─Projection_32 10000.00 mpp[tiflash] test.traces.test_time->Column#16, date(test.traces.test_time)->Column#17", + " └─TableFullScan_15 10000.00 mpp[tiflash] table:traces keep order:false, stats:pseudo")) +} + func TestUnionScan(t *testing.T) { store := testkit.CreateMockStore(t, withMockTiFlash(2)) tk := testkit.NewTestKit(t, store) diff --git a/pkg/planner/core/rule_generate_column_substitute.go b/pkg/planner/core/rule_generate_column_substitute.go index 117551f5c5db3..f39c5aa5d0d55 100644 --- a/pkg/planner/core/rule_generate_column_substitute.go +++ b/pkg/planner/core/rule_generate_column_substitute.go @@ -60,6 +60,12 @@ func collectGenerateColumn(lp LogicalPlan, exprToColumn ExprColumnMap) { if !ok { return } + // detect the read_from_storage(tiflash) hints, since virtual column will + // block the mpp task spreading (only supporting MPP table scan), causing + // mpp plan fail the cost comparison with tikv index plan. 
+ if ds.preferStoreType&preferTiFlash != 0 { + return + } for _, p := range ds.possibleAccessPaths { if p.IsTablePath() { continue From 9f88af570fef57cf406fec0a07ff80b8577f6f50 Mon Sep 17 00:00:00 2001 From: Rustin Liu Date: Fri, 27 Oct 2023 20:36:34 +0800 Subject: [PATCH 23/33] executor: fix typo (#48039) --- pkg/executor/compiler.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/executor/compiler.go b/pkg/executor/compiler.go index cb849370e3101..1f1bc6c690c4b 100644 --- a/pkg/executor/compiler.go +++ b/pkg/executor/compiler.go @@ -48,13 +48,13 @@ func (c *Compiler) Compile(ctx context.Context, stmtNode ast.StmtNode) (_ *ExecS if r == nil { return } - if recoverdErr, ok := r.(error); !ok || !(exeerrors.ErrMemoryExceedForQuery.Equal(recoverdErr) || - exeerrors.ErrMemoryExceedForInstance.Equal(recoverdErr) || - exeerrors.ErrQueryInterrupted.Equal(recoverdErr) || - exeerrors.ErrMaxExecTimeExceeded.Equal(recoverdErr)) { + if recoveredErr, ok := r.(error); !ok || !(exeerrors.ErrMemoryExceedForQuery.Equal(recoveredErr) || + exeerrors.ErrMemoryExceedForInstance.Equal(recoveredErr) || + exeerrors.ErrQueryInterrupted.Equal(recoveredErr) || + exeerrors.ErrMaxExecTimeExceeded.Equal(recoveredErr)) { panic(r) } else { - err = recoverdErr + err = recoveredErr } logutil.Logger(ctx).Error("compile SQL panic", zap.String("SQL", stmtNode.Text()), zap.Stack("stack"), zap.Any("recover", r)) }() From 30288c77c7e84a9c596a8145dca27748468f9e51 Mon Sep 17 00:00:00 2001 From: BornChanger <97348524+BornChanger@users.noreply.github.com> Date: Fri, 27 Oct 2023 21:11:04 +0800 Subject: [PATCH 24/33] ebs br: provide fsr warmup to tikv data volumes (#47272) --- br/pkg/aws/ebs.go | 148 +++++++++++++++++++++++++++++++- br/pkg/config/ebs.go | 8 ++ br/pkg/task/common.go | 1 + br/pkg/task/restore.go | 7 ++ br/pkg/task/restore_data.go | 10 +-- br/pkg/task/restore_ebs_meta.go | 22 ++++- 6 files changed, 184 insertions(+), 12 deletions(-) diff --git a/br/pkg/aws/ebs.go 
b/br/pkg/aws/ebs.go index ddea6b358f556..cf5425e03be0d 100644 --- a/br/pkg/aws/ebs.go +++ b/br/pkg/aws/ebs.go @@ -281,6 +281,152 @@ func (e *EC2Session) DeleteSnapshots(snapIDMap map[string]string) { log.Info("delete snapshot end", zap.Int("need-to-del", len(snapIDMap)), zap.Int32("deleted", deletedCnt.Load())) } +// EnableDataFSR enables FSR for data volume snapshots +func (e *EC2Session) EnableDataFSR(meta *config.EBSBasedBRMeta, targetAZ string) (map[string][]*string, error) { + snapshotsIDsMap := fetchTargetSnapshots(meta, targetAZ) + + if len(snapshotsIDsMap) == 0 { + return snapshotsIDsMap, errors.Errorf("empty backup meta") + } + + eg, _ := errgroup.WithContext(context.Background()) + + for availableZone := range snapshotsIDsMap { + targetAZ := availableZone + eg.Go(func() error { + log.Info("enable fsr for snapshots", zap.String("available zone", targetAZ)) + resp, err := e.ec2.EnableFastSnapshotRestores(&ec2.EnableFastSnapshotRestoresInput{ + AvailabilityZones: []*string{&targetAZ}, + SourceSnapshotIds: snapshotsIDsMap[targetAZ], + }) + + if err != nil { + return errors.Trace(err) + } + + if len(resp.Unsuccessful) > 0 { + log.Warn("not all snapshots enabled FSR") + return errors.Errorf("Some snapshot fails to enable FSR for available zone %s, such as %s, error code is %v", targetAZ, *resp.Unsuccessful[0].SnapshotId, resp.Unsuccessful[0].FastSnapshotRestoreStateErrors) + } + + return e.waitDataFSREnabled(snapshotsIDsMap[targetAZ], targetAZ) + }) + } + return snapshotsIDsMap, eg.Wait() +} + +// waitDataFSREnabled waits FSR for data volume snapshots are all enabled +func (e *EC2Session) waitDataFSREnabled(snapShotIDs []*string, targetAZ string) error { + // Create a map to store the strings as keys + pendingSnapshots := make(map[string]struct{}) + + // Populate the map with the strings from the array + for _, str := range snapShotIDs { + pendingSnapshots[*str] = struct{}{} + } + + log.Info("starts check fsr pending snapshots", zap.Any("snapshots", 
pendingSnapshots), zap.String("available zone", targetAZ)) + for { + if len(pendingSnapshots) == 0 { + log.Info("all snapshots fsr enablement is finished", zap.String("available zone", targetAZ)) + return nil + } + + // check pending snapshots every 1 minute + time.Sleep(1 * time.Minute) + log.Info("check snapshots not fsr enabled", zap.Int("count", len(pendingSnapshots))) + input := &ec2.DescribeFastSnapshotRestoresInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String("disabled"), aws.String("disabling"), aws.String("enabling"), aws.String("optimizing")}, + }, + { + Name: aws.String("availability-zone"), + Values: []*string{aws.String(targetAZ)}, + }, + }, + } + + result, err := e.ec2.DescribeFastSnapshotRestores(input) + if err != nil { + return errors.Trace(err) + } + + uncompletedSnapshots := make(map[string]struct{}) + for _, fastRestore := range result.FastSnapshotRestores { + _, found := pendingSnapshots[*fastRestore.SnapshotId] + if found { + // Detect some conflict states + if strings.EqualFold(*fastRestore.State, "disabled") || strings.EqualFold(*fastRestore.State, "disabling") { + log.Error("detect conflict status", zap.String("snapshot", *fastRestore.SnapshotId), zap.String("status", *fastRestore.State)) + return errors.Errorf("status of snapshot %s is %s ", *fastRestore.SnapshotId, *fastRestore.State) + } + uncompletedSnapshots[*fastRestore.SnapshotId] = struct{}{} + } + } + pendingSnapshots = uncompletedSnapshots + } +} + +// DisableDataFSR disables FSR for data volume snapshots +func (e *EC2Session) DisableDataFSR(snapshotsIDsMap map[string][]*string) error { + if len(snapshotsIDsMap) == 0 { + return nil + } + + eg, _ := errgroup.WithContext(context.Background()) + + for availableZone := range snapshotsIDsMap { + targetAZ := availableZone + eg.Go(func() error { + resp, err := e.ec2.DisableFastSnapshotRestores(&ec2.DisableFastSnapshotRestoresInput{ + AvailabilityZones: []*string{&targetAZ}, + 
SourceSnapshotIds: snapshotsIDsMap[targetAZ], + }) + + if err != nil { + return errors.Trace(err) + } + + if len(resp.Unsuccessful) > 0 { + log.Warn("not all snapshots disabled FSR", zap.String("available zone", targetAZ)) + return errors.Errorf("Some snapshot fails to disable FSR for available zone %s, such as %s, error code is %v", targetAZ, *resp.Unsuccessful[0].SnapshotId, resp.Unsuccessful[0].FastSnapshotRestoreStateErrors) + } + + log.Info("Disable FSR issued", zap.String("available zone", targetAZ)) + + return nil + }) + } + return eg.Wait() +} + +func fetchTargetSnapshots(meta *config.EBSBasedBRMeta, specifiedAZ string) map[string][]*string { + var sourceSnapshotIDs = make(map[string][]*string) + + if len(meta.TiKVComponent.Stores) == 0 { + return sourceSnapshotIDs + } + + for i := range meta.TiKVComponent.Stores { + store := meta.TiKVComponent.Stores[i] + for j := range store.Volumes { + oldVol := store.Volumes[j] + // Handle data volume snapshots only + if strings.Compare(oldVol.Type, "storage.data-dir") == 0 { + if specifiedAZ != "" { + sourceSnapshotIDs[specifiedAZ] = append(sourceSnapshotIDs[specifiedAZ], &oldVol.SnapshotID) + } else { + sourceSnapshotIDs[oldVol.VolumeAZ] = append(sourceSnapshotIDs[oldVol.VolumeAZ], &oldVol.SnapshotID) + } + } + } + } + + return sourceSnapshotIDs +} + // CreateVolumes create volumes from snapshots // if err happens in the middle, return half-done result // returned map: store id -> old volume id -> new volume id @@ -377,7 +523,7 @@ func (e *EC2Session) WaitVolumesCreated(volumeIDMap map[string]string, progress for len(pendingVolumes) > 0 { // check every 5 seconds time.Sleep(5 * time.Second) - log.Info("check pending snapshots", zap.Int("count", len(pendingVolumes))) + log.Info("check pending volumes", zap.Int("count", len(pendingVolumes))) resp, err := e.ec2.DescribeVolumes(&ec2.DescribeVolumesInput{ VolumeIds: pendingVolumes, }) diff --git a/br/pkg/config/ebs.go b/br/pkg/config/ebs.go index 
deedb2d384403..5731738c14d2a 100644 --- a/br/pkg/config/ebs.go +++ b/br/pkg/config/ebs.go @@ -100,6 +100,14 @@ func (c *EBSBasedBRMeta) GetStoreCount() uint64 { return uint64(len(c.TiKVComponent.Stores)) } +func (c *EBSBasedBRMeta) GetTiKVVolumeCount() uint64 { + if c.TiKVComponent == nil || len(c.TiKVComponent.Stores) == 0 { + return 0 + } + // Assume TiKV nodes are symmetric + return uint64(len(c.TiKVComponent.Stores[0].Volumes)) +} + func (c *EBSBasedBRMeta) String() string { cfg, err := json.Marshal(c) if err != nil { diff --git a/br/pkg/task/common.go b/br/pkg/task/common.go index e17d81aedb32b..1aec04510bbc6 100644 --- a/br/pkg/task/common.go +++ b/br/pkg/task/common.go @@ -80,6 +80,7 @@ const ( flagDryRun = "dry-run" // TODO used for local test, should be removed later flagSkipAWS = "skip-aws" + flagUseFSR = "use-fsr" flagCloudAPIConcurrency = "cloud-api-concurrency" flagWithSysTable = "with-sys-table" flagOperatorPausedGCAndSchedulers = "operator-paused-gc-and-scheduler" diff --git a/br/pkg/task/restore.go b/br/pkg/task/restore.go index d0f79845cb369..b70788c821ef8 100644 --- a/br/pkg/task/restore.go +++ b/br/pkg/task/restore.go @@ -139,6 +139,7 @@ func DefineRestoreCommonFlags(flags *pflag.FlagSet) { "batch size for ddl to create a batch of tables once.") flags.Bool(flagWithSysTable, false, "whether restore system privilege tables on default setting") flags.StringArrayP(FlagResetSysUsers, "", []string{"cloud_admin", "root"}, "whether reset these users after restoration") + flags.Bool(flagUseFSR, false, "whether enable FSR for AWS snapshots") _ = flags.MarkHidden(FlagResetSysUsers) _ = flags.MarkHidden(FlagMergeRegionSizeBytes) _ = flags.MarkHidden(FlagMergeRegionKeyCount) @@ -218,6 +219,7 @@ type RestoreConfig struct { VolumeThroughput int64 `json:"volume-throughput" toml:"volume-throughput"` ProgressFile string `json:"progress-file" toml:"progress-file"` TargetAZ string `json:"target-az" toml:"target-az"` + UseFSR bool `json:"use-fsr" toml:"use-fsr"` } // 
DefineRestoreFlags defines common flags for the restore tidb command. @@ -391,6 +393,11 @@ func (cfg *RestoreConfig) ParseFromFlags(flags *pflag.FlagSet) error { return errors.Trace(err) } + cfg.UseFSR, err = flags.GetBool(flagUseFSR) + if err != nil { + return errors.Trace(err) + } + // iops: gp3 [3,000-16,000]; io1/io2 [100-32,000] // throughput: gp3 [125, 1000]; io1/io2 cannot set throughput // io1 and io2 volumes support up to 64,000 IOPS only on Instances built on the Nitro System. diff --git a/br/pkg/task/restore_data.go b/br/pkg/task/restore_data.go index 357e56672e894..3276a0f2af101 100644 --- a/br/pkg/task/restore_data.go +++ b/br/pkg/task/restore_data.go @@ -159,23 +159,17 @@ func RunResolveKvData(c context.Context, g glue.Glue, cmdName string, cfg *Resto //TODO: restore volume type into origin type //ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) by backupmeta - // this is used for cloud restoration + err = client.Init(g, mgr.GetStorage()) if err != nil { return errors.Trace(err) } defer client.Close() - log.Info("start to clear system user for cloud") - err = client.ClearSystemUsers(ctx, cfg.ResetSysUsers) - - if err != nil { - return errors.Trace(err) - } - // since we cannot reset tiflash automaticlly. 
so we should start it manually if err = client.ResetTiFlashReplicas(ctx, g, mgr.GetStorage()); err != nil { return errors.Trace(err) } + progress.Close() summary.CollectDuration("restore duration", time.Since(startAll)) summary.SetSuccessStatus(true) diff --git a/br/pkg/task/restore_ebs_meta.go b/br/pkg/task/restore_ebs_meta.go index 53286505b5b9c..7dbad5960cb17 100644 --- a/br/pkg/task/restore_ebs_meta.go +++ b/br/pkg/task/restore_ebs_meta.go @@ -175,10 +175,10 @@ func (h *restoreEBSMetaHelper) restore() error { return errors.Trace(err) } - storeCount := h.metaInfo.GetStoreCount() - progress := h.g.StartProgress(ctx, h.cmdName, int64(storeCount), !h.cfg.LogProgress) + volumeCount := h.metaInfo.GetStoreCount() * h.metaInfo.GetTiKVVolumeCount() + progress := h.g.StartProgress(ctx, h.cmdName, int64(volumeCount), !h.cfg.LogProgress) defer progress.Close() - go progressFileWriterRoutine(ctx, progress, int64(storeCount), h.cfg.ProgressFile) + go progressFileWriterRoutine(ctx, progress, int64(volumeCount), h.cfg.ProgressFile) resolvedTs = h.metaInfo.ClusterInfo.ResolvedTS if totalSize, err = h.doRestore(ctx, progress); err != nil { @@ -226,6 +226,8 @@ func (h *restoreEBSMetaHelper) restoreVolumes(progress glue.Progress) (map[strin volumeIDMap = make(map[string]string) err error totalSize int64 + // a map whose key is available zone, and value is the snapshot id array + snapshotsIDsMap = make(map[string][]*string) ) ec2Session, err = aws.NewEC2Session(h.cfg.CloudAPIConcurrency, h.cfg.S3.Region) if err != nil { @@ -236,7 +238,21 @@ func (h *restoreEBSMetaHelper) restoreVolumes(progress glue.Progress) (map[strin log.Error("failed to create all volumes, cleaning up created volume") ec2Session.DeleteVolumes(volumeIDMap) } + + if h.cfg.UseFSR { + err = ec2Session.DisableDataFSR(snapshotsIDsMap) + log.Error("disable fsr failed", zap.Error(err)) + } }() + + // Turn on FSR for TiKV data snapshots + if h.cfg.UseFSR { + snapshotsIDsMap, err = ec2Session.EnableDataFSR(h.metaInfo, 
h.cfg.TargetAZ) + if err != nil { + return nil, 0, errors.Trace(err) + } + } + volumeIDMap, err = ec2Session.CreateVolumes(h.metaInfo, string(h.cfg.VolumeType), h.cfg.VolumeIOPS, h.cfg.VolumeThroughput, h.cfg.TargetAZ) if err != nil { From 3682bd82cac05a75241ed9fc9f29a1d320d776a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dani=C3=ABl=20van=20Eeden?= Date: Sat, 28 Oct 2023 07:00:34 +0200 Subject: [PATCH 25/33] *: Ignore *.log.json files, like tidb-audit.log.json (#48043) --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index f12c5532ca8b6..f553c7e473a86 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,4 @@ bazel-testlogs bazel-tidb .ijwb/ /oom_record/ +*.log.json From 9a4997fb803c06442de9a142fc4d3c6a1f1ddc39 Mon Sep 17 00:00:00 2001 From: Hangjie Mo Date: Mon, 30 Oct 2023 10:42:35 +0800 Subject: [PATCH 26/33] tests: move IT in `executor` to `tests/integrationtest` (PART 7) (#48035) ref pingcap/tidb#47076 --- pkg/executor/insert_test.go | 982 ------------- pkg/executor/parallel_apply_test.go | 146 -- pkg/executor/prepared_test.go | 155 --- pkg/executor/revoke_test.go | 131 -- .../integrationtest/r/executor/insert.result | 1217 +++++++++++++++++ .../r/executor/parallel_apply.result | 172 +++ .../r/executor/prepared.result | 136 ++ .../integrationtest/r/executor/revoke.result | 85 ++ tests/integrationtest/t/executor/insert.test | 926 +++++++++++++ .../t/executor/parallel_apply.test | 140 ++ .../integrationtest/t/executor/prepared.test | 122 ++ tests/integrationtest/t/executor/revoke.test | 108 ++ 12 files changed, 2906 insertions(+), 1414 deletions(-) create mode 100644 tests/integrationtest/r/executor/insert.result create mode 100644 tests/integrationtest/r/executor/parallel_apply.result create mode 100644 tests/integrationtest/r/executor/revoke.result create mode 100644 tests/integrationtest/t/executor/insert.test create mode 100644 tests/integrationtest/t/executor/parallel_apply.test create mode 100644 
tests/integrationtest/t/executor/revoke.test diff --git a/pkg/executor/insert_test.go b/pkg/executor/insert_test.go index 6cfa7fb258ad8..523f0ebfcdd1a 100644 --- a/pkg/executor/insert_test.go +++ b/pkg/executor/insert_test.go @@ -16,9 +16,7 @@ package executor_test import ( "fmt" - "math" "strconv" - "strings" "testing" "time" @@ -256,407 +254,6 @@ func testInsertOnDuplicateKey(t *testing.T, tk *testkit.TestKit) { tk.MustQuery(`select * from t1 use index(primary)`).Check(testkit.Rows(`1.0000`)) } -func TestClusterIndexInsertOnDuplicateKey(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("drop database if exists cluster_index_duplicate_entry_error;") - tk.MustExec("create database cluster_index_duplicate_entry_error;") - tk.MustExec("use cluster_index_duplicate_entry_error;") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec("create table t(a char(20), b int, primary key(a));") - tk.MustExec("insert into t values('aa', 1), ('bb', 1);") - tk.MustMatchErrMsg("insert into t values('aa', 2);", ".*Duplicate entry 'aa' for.*") - - tk.MustExec("drop table t;") - tk.MustExec("create table t(a char(20), b varchar(30), c varchar(10), primary key(a, b, c));") - tk.MustExec("insert into t values ('a', 'b', 'c'), ('b', 'a', 'c');") - tk.MustMatchErrMsg("insert into t values ('a', 'b', 'c');", ".*Duplicate entry 'a-b-c' for.*") -} - -func TestPaddingCommonHandle(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.MustExec("drop table if exists t1;") - tk.MustExec(`create table t1(c1 decimal(6,4), primary key(c1))`) - tk.MustExec(`insert into t1 set c1 = 0.1`) - tk.MustExec(`insert into t1 set c1 = 0.1 on duplicate key update c1 = 1`) - tk.MustQuery(`select * from t1`).Check(testkit.Rows(`1.0000`)) -} - -func 
TestInsertReorgDelete(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - inputs := []struct { - typ string - dat string - }{ - {"year", "'2004'"}, - {"year", "2004"}, - {"bit", "1"}, - {"smallint unsigned", "1"}, - {"int unsigned", "1"}, - {"smallint", "-1"}, - {"int", "-1"}, - {"decimal(6,4)", "'1.1'"}, - {"decimal", "1.1"}, - {"numeric", "-1"}, - {"float", "1.2"}, - {"double", "1.2"}, - {"double", "1.3"}, - {"real", "1.4"}, - {"date", "'2020-01-01'"}, - {"time", "'20:00:00'"}, - {"datetime", "'2020-01-01 22:22:22'"}, - {"timestamp", "'2020-01-01 22:22:22'"}, - {"year", "'2020'"}, - {"char(15)", "'test'"}, - {"varchar(15)", "'test'"}, - {"binary(3)", "'a'"}, - {"varbinary(3)", "'b'"}, - {"blob", "'test'"}, - {"text", "'test'"}, - {"enum('a', 'b')", "'a'"}, - {"set('a', 'b')", "'a,b'"}, - } - - for _, i := range inputs { - tk.MustExec(`drop table if exists t1`) - tk.MustExec(fmt.Sprintf(`create table t1(c1 %s)`, i.typ)) - tk.MustExec(fmt.Sprintf(`insert into t1 set c1 = %s`, i.dat)) - switch i.typ { - case "blob", "text": - tk.MustExec(`alter table t1 add index idx(c1(3))`) - default: - tk.MustExec(`alter table t1 add index idx(c1)`) - } - tk.MustExec(`delete from t1`) - tk.MustExec(`admin check table t1`) - } -} - -func TestUpdateDuplicateKey(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - tk.MustExec(`drop table if exists t;`) - tk.MustExec(`create table c(i int,j int,k int,primary key(i,j,k));`) - tk.MustExec(`insert into c values(1,2,3);`) - tk.MustExec(`insert into c values(1,2,4);`) - tk.MustGetErrMsg(`update c set i=1,j=2,k=4 where i=1 and j=2 and k=3;`, - "[kv:1062]Duplicate entry '1-2-4' for key 'c.PRIMARY'") -} - -func TestIssue37187(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - tk.MustExec("drop table if exists a, b") - tk.MustExec("create 
table t1 (a int(11) ,b varchar(100) ,primary key (a));") - tk.MustExec("create table t2 (c int(11) ,d varchar(100) ,primary key (c));") - tk.MustExec("prepare in1 from 'insert into t1 (a,b) select c,null from t2 t on duplicate key update b=t.d';") - err := tk.ExecToErr("execute in1;") - require.NoError(t, err) -} - -func TestInsertWrongValueForField(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`create table t1(a bigint);`) - tk.MustGetErrCode(`insert into t1 values("asfasdfsajhlkhlksdaf");`, errno.ErrTruncatedWrongValueForField) - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`create table t1(a varchar(10)) charset ascii;`) - tk.MustGetErrCode(`insert into t1 values('我');`, errno.ErrTruncatedWrongValueForField) - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`create table t1(a char(10) charset utf8);`) - tk.MustExec(`insert into t1 values('我');`) - tk.MustExec(`alter table t1 add column b char(10) charset ascii as ((a));`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows("我 ?")) - - tk.MustExec(`drop table if exists t;`) - tk.MustExec(`create table t (a year);`) - tk.MustGetErrMsg(`insert into t values(2156);`, - "[types:1264]Out of range value for column 'a' at row 1") - - tk.MustExec(`DROP TABLE IF EXISTS ts`) - tk.MustExec(`CREATE TABLE ts (id int DEFAULT NULL, time1 TIMESTAMP NULL DEFAULT NULL)`) - tk.MustExec(`SET @@sql_mode=''`) - tk.MustExec(`INSERT INTO ts (id, time1) VALUES (1, TIMESTAMP '1018-12-23 00:00:00')`) - tk.MustQuery(`SHOW WARNINGS`).Check(testkit.Rows(`Warning 1292 Incorrect timestamp value: '1018-12-23 00:00:00' for column 'time1' at row 1`)) - tk.MustQuery(`SELECT * FROM ts ORDER BY id`).Check(testkit.Rows(`1 0000-00-00 00:00:00`)) - - tk.MustExec(`SET @@sql_mode='STRICT_TRANS_TABLES'`) - tk.MustGetErrMsg(`INSERT INTO ts (id, time1) VALUES (2, TIMESTAMP '1018-12-24 00:00:00')`, 
`[table:1292]Incorrect timestamp value: '1018-12-24 00:00:00' for column 'time1' at row 1`) - tk.MustExec(`DROP TABLE ts`) - - tk.MustExec(`CREATE TABLE t0(c0 SMALLINT AUTO_INCREMENT PRIMARY KEY);`) - tk.MustExec(`INSERT IGNORE INTO t0(c0) VALUES (194626268);`) - tk.MustExec(`INSERT IGNORE INTO t0(c0) VALUES ('*')`) - tk.MustQuery(`SHOW WARNINGS`).Check(testkit.Rows( - `Warning 1366 Incorrect smallint value: '*' for column 'c0' at row 1`, - `Warning 1690 constant 32768 overflows smallint`, - `Warning 1467 Failed to read auto-increment value from storage engine`)) -} - -func TestInsertValueForCastDecimalField(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`create table t1(a decimal(15,2));`) - tk.MustExec(`insert into t1 values (1111111111111.01);`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`1111111111111.01`)) - tk.MustQuery(`select cast(a as decimal) from t1;`).Check(testkit.Rows(`9999999999`)) -} - -func TestInsertForMultiValuedIndex(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`create table t1(a json, b int, unique index idx((cast(a as signed array))));`) - tk.MustExec(`insert into t1 values ('[1,11]', 1);`) - tk.MustExec(`insert into t1 values ('[2, 22]', 2);`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[1, 11] 1`, `[2, 22] 2`)) - tk.MustGetErrMsg(`insert into t1 values ('[2, 222]', 2);`, "[kv:1062]Duplicate entry '2' for key 't1.idx'") - tk.MustExec(`replace into t1 values ('[1, 10]', 10)`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[2, 22] 2`, `[1, 10] 10`)) - tk.MustExec(`replace into t1 values ('[1, 2]', 1)`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[1, 2] 1`)) - tk.MustExec(`replace into t1 values ('[1, 11]', 1)`) - tk.MustExec(`insert into t1 values 
('[2, 22]', 2);`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[1, 11] 1`, `[2, 22] 2`)) - tk.MustExec(`insert ignore into t1 values ('[1]', 2);`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[1, 11] 1`, `[2, 22] 2`)) - tk.MustExec(`insert ignore into t1 values ('[1, 2]', 2);`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[1, 11] 1`, `[2, 22] 2`)) - tk.MustExec(`insert into t1 values ('[2]', 2) on duplicate key update b = 10;`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[1, 11] 1`, `[2, 22] 10`)) - tk.MustGetErrMsg(`insert into t1 values ('[2, 1]', 2) on duplicate key update a = '[1,2]';`, "[kv:1062]Duplicate entry '[1, 2]' for key 't1.idx'") - tk.MustGetErrMsg(`insert into t1 values ('[1,2]', 2) on duplicate key update a = '[1,2]';`, "[kv:1062]Duplicate entry '[1, 2]' for key 't1.idx'") - tk.MustGetErrMsg(`insert into t1 values ('[11, 22]', 2) on duplicate key update a = '[1,2]';`, "[kv:1062]Duplicate entry '[1, 2]' for key 't1.idx'") -} - -func TestInsertDateTimeWithTimeZone(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec(`use test;`) - tk.MustExec(`set time_zone="+09:00";`) - tk.MustExec(`drop table if exists t;`) - tk.MustExec(`create table t (id int, c1 datetime not null default CURRENT_TIMESTAMP);`) - tk.MustExec(`set TIMESTAMP = 1234;`) - tk.MustExec(`insert t (id) values (1);`) - - tk.MustQuery(`select * from t;`).Check(testkit.Rows( - `1 1970-01-01 09:20:34`, - )) - - // test for ambiguous cases - cases := []struct { - lit string - expect string - }{ - {"2020-10-22", "2020-10-22 00:00:00"}, - {"2020-10-22-16", "2020-10-22 16:00:00"}, - {"2020-10-22 16-31", "2020-10-22 16:31:00"}, - {"2020-10-22 16:31-15", "2020-10-22 16:31:15"}, - {"2020-10-22T16:31:15-10", "2020-10-23 10:31:15"}, - - {"2020.10-22", "2020-10-22 00:00:00"}, - {"2020-10.22-16", "2020-10-22 16:00:00"}, - {"2020-10-22.16-31", "2020-10-22 16:31:00"}, - {"2020-10-22 16.31-15", "2020-10-22 
16:31:15"}, - {"2020-10-22T16.31.15+14", "2020-10-22 10:31:15"}, - - {"2020-10:22", "2020-10-22 00:00:00"}, - {"2020-10-22:16", "2020-10-22 16:00:00"}, - {"2020-10-22-16:31", "2020-10-22 16:31:00"}, - {"2020-10-22 16-31:15", "2020-10-22 16:31:15"}, - {"2020-10-22T16.31.15+09:30", "2020-10-22 15:01:15"}, - - {"2020.10-22:16", "2020-10-22 16:00:00"}, - {"2020-10.22-16:31", "2020-10-22 16:31:00"}, - {"2020-10-22.16-31:15", "2020-10-22 16:31:15"}, - {"2020-10-22T16:31.15+09:30", "2020-10-22 15:01:15"}, - } - tk.MustExec(`drop table if exists t`) - tk.MustExec(`create table t (dt datetime)`) - tk.MustExec(`set @@time_zone='+08:00'`) - for _, ca := range cases { - tk.MustExec(`delete from t`) - tk.MustExec(fmt.Sprintf("insert into t values ('%s')", ca.lit)) - tk.MustQuery(`select * from t`).Check(testkit.Rows(ca.expect)) - } - - // test for time zone change - tzcCases := []struct { - tz1 string - lit string - tz2 string - exp1 string - exp2 string - }{ - {"+08:00", "2020-10-22T16:53:40Z", "+00:00", "2020-10-23 00:53:40", "2020-10-22 16:53:40"}, - {"-08:00", "2020-10-22T16:53:40Z", "+08:00", "2020-10-22 08:53:40", "2020-10-23 00:53:40"}, - {"-03:00", "2020-10-22T16:53:40+03:00", "+08:00", "2020-10-22 10:53:40", "2020-10-22 21:53:40"}, - {"+08:00", "2020-10-22T16:53:40+08:00", "+08:00", "2020-10-22 16:53:40", "2020-10-22 16:53:40"}, - } - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (dt datetime, ts timestamp)") - for _, ca := range tzcCases { - tk.MustExec("delete from t") - tk.MustExec(fmt.Sprintf("set @@time_zone='%s'", ca.tz1)) - tk.MustExec(fmt.Sprintf("insert into t values ('%s', '%s')", ca.lit, ca.lit)) - tk.MustExec(fmt.Sprintf("set @@time_zone='%s'", ca.tz2)) - tk.MustQuery("select * from t").Check(testkit.Rows(ca.exp1 + " " + ca.exp2)) - } - - // test for datetime in compare - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (ts timestamp)") - tk.MustExec("insert into t values ('2020-10-22T12:00:00Z'), 
('2020-10-22T13:00:00Z'), ('2020-10-22T14:00:00Z')") - tk.MustQuery("select count(*) from t where ts > '2020-10-22T12:00:00Z'").Check(testkit.Rows("2")) - - // test for datetime with fsp - fspCases := []struct { - fsp uint - lit string - exp1 string - exp2 string - }{ - {2, "2020-10-27T14:39:10.10+00:00", "2020-10-27 22:39:10.10", "2020-10-27 22:39:10.10"}, - {1, "2020-10-27T14:39:10.3+0200", "2020-10-27 20:39:10.3", "2020-10-27 20:39:10.3"}, - {6, "2020-10-27T14:39:10.3-02", "2020-10-28 00:39:10.300000", "2020-10-28 00:39:10.300000"}, - {2, "2020-10-27T14:39:10.10Z", "2020-10-27 22:39:10.10", "2020-10-27 22:39:10.10"}, - } - - tk.MustExec("set @@time_zone='+08:00'") - for _, ca := range fspCases { - tk.MustExec("drop table if exists t") - tk.MustExec(fmt.Sprintf("create table t (dt datetime(%d), ts timestamp(%d))", ca.fsp, ca.fsp)) - tk.MustExec(fmt.Sprintf("insert into t values ('%s', '%s')", ca.lit, ca.lit)) - tk.MustQuery("select * from t").Check(testkit.Rows(ca.exp1 + " " + ca.exp2)) - } -} - -func TestInsertZeroYear(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`create table t1(a year(4));`) - tk.MustExec(`insert into t1 values(0000),(00),("0000"),("000"), ("00"), ("0"), (79), ("79");`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows( - `0`, - `0`, - `0`, - `2000`, - `2000`, - `2000`, - `1979`, - `1979`, - )) - - tk.MustExec(`drop table if exists t;`) - tk.MustExec(`create table t(f_year year NOT NULL DEFAULT '0000')ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;`) - tk.MustExec(`insert into t values();`) - tk.MustQuery(`select * from t;`).Check(testkit.Rows( - `0`, - )) - tk.MustExec(`insert into t values('0000');`) - tk.MustQuery(`select * from t;`).Check(testkit.Rows( - `0`, - `0`, - )) -} - -func TestAllowInvalidDates(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - 
tk.MustExec(`use test`) - tk.MustExec(`drop table if exists t1, t2, t3, t4;`) - tk.MustExec(`create table t1(d date);`) - tk.MustExec(`create table t2(d datetime);`) - tk.MustExec(`create table t3(d date);`) - tk.MustExec(`create table t4(d datetime);`) - - runWithMode := func(mode string) { - inputs := []string{"0000-00-00", "2019-00-00", "2019-01-00", "2019-00-01", "2019-02-31"} - results := testkit.Rows(`0 0 0`, `2019 0 0`, `2019 1 0`, `2019 0 1`, `2019 2 31`) - oldMode := tk.MustQuery(`select @@sql_mode`).Rows()[0][0] - defer func() { - tk.MustExec(fmt.Sprintf(`set sql_mode='%s'`, oldMode)) - }() - - tk.MustExec(`truncate t1;truncate t2;truncate t3;truncate t4;`) - tk.MustExec(fmt.Sprintf(`set sql_mode='%s';`, mode)) - for _, input := range inputs { - tk.MustExec(fmt.Sprintf(`insert into t1 values ('%s')`, input)) - tk.MustExec(fmt.Sprintf(`insert into t2 values ('%s')`, input)) - } - tk.MustQuery(`select year(d), month(d), day(d) from t1;`).Check(results) - tk.MustQuery(`select year(d), month(d), day(d) from t2;`).Check(results) - tk.MustExec(`insert t3 select d from t1;`) - tk.MustQuery(`select year(d), month(d), day(d) from t3;`).Check(results) - tk.MustExec(`insert t4 select d from t2;`) - tk.MustQuery(`select year(d), month(d), day(d) from t4;`).Check(results) - } - - runWithMode("STRICT_TRANS_TABLES,ALLOW_INVALID_DATES") - runWithMode("ALLOW_INVALID_DATES") -} - -func TestPartitionInsertOnDuplicate(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`create table t1 (a int,b int,primary key(a,b)) partition by range(a) (partition p0 values less than (100),partition p1 values less than (1000))`) - tk.MustExec(`insert into t1 set a=1, b=1`) - tk.MustExec(`insert into t1 set a=1,b=1 on duplicate key update a=1,b=1`) - tk.MustQuery(`select * from t1`).Check(testkit.Rows("1 1")) - - tk.MustExec(`create table t2 (a int,b int,primary key(a,b)) partition by hash(a) partitions 4`) - 
tk.MustExec(`insert into t2 set a=1,b=1;`) - tk.MustExec(`insert into t2 set a=1,b=1 on duplicate key update a=1,b=1`) - tk.MustQuery(`select * from t2`).Check(testkit.Rows("1 1")) - - tk.MustExec(`CREATE TABLE t3 (a int, b int, c int, d int, e int, - PRIMARY KEY (a,b), - UNIQUE KEY (b,c,d) -) PARTITION BY RANGE ( b ) ( - PARTITION p0 VALUES LESS THAN (4), - PARTITION p1 VALUES LESS THAN (7), - PARTITION p2 VALUES LESS THAN (11) -)`) - tk.MustExec("insert into t3 values (1,2,3,4,5)") - tk.MustExec("insert into t3 values (1,2,3,4,5),(6,2,3,4,6) on duplicate key update e = e + values(e)") - tk.MustQuery("select * from t3").Check(testkit.Rows("1 2 3 4 16")) -} - -func TestBit(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`create table t1 (a bit(3))`) - tk.MustMatchErrMsg("insert into t1 values(-1)", ".*Data too long for column 'a' at.*") - tk.MustMatchErrMsg("insert into t1 values(9)", ".*Data too long for column 'a' at.*") - tk.MustExec(`create table t64 (a bit(64))`) - tk.MustExec("insert into t64 values(-1)") - tk.MustExec("insert into t64 values(18446744073709551615)") // 2^64 - 1 - tk.MustMatchErrMsg("insert into t64 values(18446744073709551616)", ".*Out of range value for column 'a' at.*") // z^64 -} - func TestAllocateContinuousRowID(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -694,92 +291,6 @@ func TestAllocateContinuousRowID(t *testing.T) { wg.Wait() } -func TestJiraIssue5366(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`create table bug (a varchar(100))`) - tk.MustExec(` insert into bug select ifnull(JSON_UNQUOTE(JSON_EXTRACT('[{"amount":2000,"feeAmount":0,"merchantNo":"20190430140319679394","shareBizCode":"20160311162_SECOND"}]', '$[0].merchantNo')),'') merchant_no union SELECT '20180531557' merchant_no;`) - tk.MustQuery(`select * from 
bug`).Sort().Check(testkit.Rows("20180531557", "20190430140319679394")) -} - -func TestDMLCast(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`create table t (a int, b double)`) - tk.MustExec(`insert into t values (ifnull('',0)+0, 0)`) - tk.MustExec(`insert into t values (0, ifnull('',0)+0)`) - tk.MustQuery(`select * from t`).Check(testkit.Rows("0 0", "0 0")) - tk.MustExecToErr(`insert into t values ('', 0)`) - tk.MustExecToErr(`insert into t values (0, '')`) - tk.MustExecToErr(`update t set a = ''`) - tk.MustExecToErr(`update t set b = ''`) - tk.MustExec("update t set a = ifnull('',0)+0") - tk.MustExec("update t set b = ifnull('',0)+0") - tk.MustExec("delete from t where a = ''") - tk.MustQuery(`select * from t`).Check(testkit.Rows()) -} - -func TestInsertFloatOverflow(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`drop table if exists t,t1;`) - tk.MustExec("create table t(col1 FLOAT, col2 FLOAT(10,2), col3 DOUBLE, col4 DOUBLE(10,2), col5 DECIMAL, col6 DECIMAL(10,2));") - tk.MustGetErrMsg("insert into t values (-3.402823466E+68, -34028234.6611, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99);", - "[types:1264]Out of range value for column 'col1' at row 1") - tk.MustGetErrMsg("insert into t values (-34028234.6611, -3.402823466E+68, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99);", - "[types:1264]Out of range value for column 'col2' at row 1") - tk.MustExec("create table t1(id1 float,id2 float)") - tk.MustExec("insert ignore into t1 values(999999999999999999999999999999999999999,-999999999999999999999999999999999999999)") - tk.MustQuery("select @@warning_count").Check(testkit.RowsWithSep("|", "2")) - tk.MustQuery("select convert(id1,decimal(65)),convert(id2,decimal(65)) from t1").Check(testkit.Rows("340282346638528860000000000000000000000 
-340282346638528860000000000000000000000")) - tk.MustExec("drop table if exists t,t1") -} - -// Fix https://github.com/pingcap/tidb/issues/32601. -func TestTextTooLongError(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - // Set strict sql_mode - tk.MustExec("set sql_mode = 'ONLY_FULL_GROUP_BY,STRICT_ALL_TABLES,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION';") - - // For max_allowed_packet default value is big enough to ensure tinytext, text can test correctly. - tk.MustExec(`drop table if exists t1;`) - tk.MustExec("CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4);") - tk.MustGetErrMsg("INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128));", - "[types:1406]Data too long for column 'c1' at row 1") - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec("CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4);") - tk.MustGetErrMsg("INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768));", - "[types:1406]Data too long for column 'c1' at row 1") - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec("CREATE TABLE t1(c1 mediumtext);") - tk.MustGetErrMsg("INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 8777215));", - "[types:1406]Data too long for column 'c1' at row 1") - - // For long text, max_allowed_packet default value can not allow 4GB package, skip the test case. - - // Set non strict sql_mode, we are not supposed to raise an error but to truncate the value. 
- tk.MustExec("set sql_mode = 'ONLY_FULL_GROUP_BY,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION';") - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec("CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4);") - tk.MustExec("INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128));") - - tk.MustQuery(`select length(c1) from t1;`).Check(testkit.Rows("254")) - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec("CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4);") - tk.MustExec("INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768));") - tk.MustQuery(`select length(c1) from t1;`).Check(testkit.Rows("65534")) - // For mediumtext or bigger size, for tikv limit, we will get:ERROR 8025 (HY000): entry too large, the max entry size is 6291456, the size of data is 16777247, no need to test. -} - func TestAutoRandomID(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -892,201 +403,6 @@ func TestAutoRandomIDAllowZero(t *testing.T) { tk.MustExec(`drop table ar`) } - -func TestAutoRandomIDExplicit(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("set @@allow_auto_random_explicit_insert = true") - - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists ar`) - tk.MustExec(`create table ar (id bigint key clustered auto_random, name char(10))`) - - tk.MustExec(`insert into ar(id) values (1)`) - tk.MustQuery(`select id from ar`).Check(testkit.Rows("1")) - tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows("0")) - tk.MustExec(`delete from ar`) - - tk.MustExec(`insert into ar(id) values (1), (2)`) - tk.MustQuery(`select id from ar`).Check(testkit.Rows("1", "2")) - tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows("0")) - tk.MustExec(`delete from ar`) - - tk.MustExec(`drop table ar`) -} - -func TestInsertErrorMsg(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - 
tk.MustExec(`drop table if exists t`) - tk.MustExec(`create table t (a int primary key, b datetime, d date)`) - tk.MustContainErrMsg(`insert into t values (1, '2019-02-11 30:00:00', '2019-01-31')`, - "Incorrect datetime value: '2019-02-11 30:00:00' for column 'b' at row 1") - - // test for Issue #35289 - tk.MustExec("CREATE TABLE t1 (a BINARY(16) PRIMARY KEY);") - tk.MustExec(`INSERT INTO t1 VALUES (AES_ENCRYPT('a','a'));`) - err := tk.ExecToErr(`INSERT INTO t1 VALUES (AES_ENCRYPT('a','a'));`) - require.Error(t, err, `ERROR 1062 (23000): Duplicate entry '{ W]\xA1\x06u\x9D\xBD\xB1\xA3.\xE2\xD9\xA7t' for key 't1.PRIMARY'`) - - tk.MustExec(`INSERT INTO t1 VALUES (AES_ENCRYPT('b','b'));`) - err = tk.ExecToErr(`INSERT INTO t1 VALUES (AES_ENCRYPT('b','b'));`) - require.Error(t, err, "ERROR 1062 (23000): Duplicate entry '\\x0C\\x1E\\x8DG`\\xEB\\x93 F&BC\\xF0\\xB5\\xF4\\xB7' for key 't1.PRIMARY'") - - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1 (a bit primary key) engine=innodb;") - tk.MustExec("insert into t1 values (b'0');") - err = tk.ExecToErr(`insert into t1 values (b'0');`) - require.Error(t, err, `ERROR 1062 (23000): Duplicate entry '\x00' for key 't1.PRIMARY'`) -} - -func TestIssue16366(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test;`) - tk.MustExec(`drop table if exists t;`) - tk.MustExec(`create table t(c numeric primary key);`) - tk.MustExec("insert ignore into t values(null);") - tk.MustContainErrMsg(`insert into t values(0);`, "Duplicate entry '0' for key 't.PRIMARY'") -} - -func TestClusterPrimaryTablePlainInsert(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec(`drop table if exists t1pk`) - tk.MustExec(`create table t1pk(id varchar(200) primary key, v int)`) - tk.MustExec(`insert into t1pk(id, v) 
values('abc', 1)`) - tk.MustQuery(`select * from t1pk`).Check(testkit.Rows("abc 1")) - tk.MustExec(`set @@tidb_constraint_check_in_place=true`) - tk.MustGetErrCode(`insert into t1pk(id, v) values('abc', 2)`, errno.ErrDupEntry) - tk.MustExec(`set @@tidb_constraint_check_in_place=false`) - tk.MustGetErrCode(`insert into t1pk(id, v) values('abc', 3)`, errno.ErrDupEntry) - tk.MustQuery(`select v, id from t1pk`).Check(testkit.Rows("1 abc")) - tk.MustQuery(`select id from t1pk where id = 'abc'`).Check(testkit.Rows("abc")) - tk.MustQuery(`select v, id from t1pk where id = 'abc'`).Check(testkit.Rows("1 abc")) - - tk.MustExec(`drop table if exists t3pk`) - tk.MustExec(`create table t3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3))`) - tk.MustExec(`insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 1)`) - tk.MustQuery(`select * from t3pk`).Check(testkit.Rows("abc xyz 1 100")) - tk.MustExec(`set @@tidb_constraint_check_in_place=true`) - tk.MustGetErrCode(`insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 2)`, errno.ErrDupEntry) - tk.MustExec(`set @@tidb_constraint_check_in_place=false`) - tk.MustGetErrCode(`insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 3)`, errno.ErrDupEntry) - tk.MustQuery(`select v, id3, id2, id1 from t3pk`).Check(testkit.Rows("1 100 xyz abc")) - tk.MustQuery(`select id3, id2, id1 from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'`).Check(testkit.Rows("100 xyz abc")) - tk.MustQuery(`select id3, id2, id1, v from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'`).Check(testkit.Rows("100 xyz abc 1")) - tk.MustExec(`insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 101, 1)`) - tk.MustExec(`insert into t3pk(id1, id2, id3, v) values('abc', 'zzz', 101, 1)`) - - tk.MustExec(`drop table if exists t1pku`) - tk.MustExec(`create table t1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk))`) - tk.MustExec(`insert into t1pku(id, uk, v) values('abc', 1, 2)`) - 
tk.MustQuery(`select * from t1pku where id = 'abc'`).Check(testkit.Rows("abc 1 2")) - tk.MustGetErrCode(`insert into t1pku(id, uk, v) values('aaa', 1, 3)`, errno.ErrDupEntry) - tk.MustQuery(`select * from t1pku`).Check(testkit.Rows("abc 1 2")) - - tk.MustQuery(`select * from t3pk where (id1, id2, id3) in (('abc', 'xyz', 100), ('abc', 'xyz', 101), ('abc', 'zzz', 101))`). - Check(testkit.Rows("abc xyz 1 100", "abc xyz 1 101", "abc zzz 1 101")) -} - -func TestClusterPrimaryTableInsertIgnore(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec(`drop table if exists it1pk`) - tk.MustExec(`create table it1pk(id varchar(200) primary key, v int)`) - tk.MustExec(`insert into it1pk(id, v) values('abc', 1)`) - tk.MustExec(`insert ignore into it1pk(id, v) values('abc', 2)`) - tk.MustQuery(`select * from it1pk where id = 'abc'`).Check(testkit.Rows("abc 1")) - - tk.MustExec(`drop table if exists it2pk`) - tk.MustExec(`create table it2pk(id1 varchar(200), id2 varchar(200), v int, primary key(id1, id2))`) - tk.MustExec(`insert into it2pk(id1, id2, v) values('abc', 'cba', 1)`) - tk.MustQuery(`select * from it2pk where id1 = 'abc' and id2 = 'cba'`).Check(testkit.Rows("abc cba 1")) - tk.MustExec(`insert ignore into it2pk(id1, id2, v) values('abc', 'cba', 2)`) - tk.MustQuery(`select * from it2pk where id1 = 'abc' and id2 = 'cba'`).Check(testkit.Rows("abc cba 1")) - - tk.MustExec(`drop table if exists it1pku`) - tk.MustExec(`create table it1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk))`) - tk.MustExec(`insert into it1pku(id, uk, v) values('abc', 1, 2)`) - tk.MustQuery(`select * from it1pku where id = 'abc'`).Check(testkit.Rows("abc 1 2")) - tk.MustExec(`insert ignore into it1pku(id, uk, v) values('aaa', 1, 3), ('bbb', 2, 1)`) - tk.MustQuery(`select * from it1pku`).Check(testkit.Rows("abc 1 2", "bbb 
2 1")) -} - -func TestClusterPrimaryTableInsertDuplicate(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec(`drop table if exists dt1pi`) - tk.MustExec(`create table dt1pi(id varchar(200) primary key, v int)`) - tk.MustExec(`insert into dt1pi(id, v) values('abb', 1),('acc', 2)`) - tk.MustExec(`insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1`) - tk.MustQuery(`select * from dt1pi`).Check(testkit.Rows("abb 2", "acc 2")) - tk.MustExec(`insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1, id = 'xxx'`) - tk.MustQuery(`select * from dt1pi`).Check(testkit.Rows("acc 2", "xxx 3")) - - tk.MustExec(`drop table if exists dt1piu`) - tk.MustExec(`create table dt1piu(id varchar(200) primary key, uk int, v int, unique key uuk(uk))`) - tk.MustExec(`insert into dt1piu(id, uk, v) values('abb', 1, 10),('acc', 2, 20)`) - tk.MustExec(`insert into dt1piu(id, uk, v) values('xyz', 1, 100) on duplicate key update v = v + 1`) - tk.MustQuery(`select * from dt1piu`).Check(testkit.Rows("abb 1 11", "acc 2 20")) - tk.MustExec(`insert into dt1piu(id, uk, v) values('abb', 1, 2) on duplicate key update v = v + 1, id = 'xxx'`) - tk.MustQuery(`select * from dt1piu`).Check(testkit.Rows("acc 2 20", "xxx 1 12")) - - tk.MustExec(`drop table if exists ts1pk`) - tk.MustExec(`create table ts1pk(id1 timestamp, id2 timestamp, v int, primary key(id1, id2))`) - ts := "2018-01-01 11:11:11" - tk.MustExec(`insert into ts1pk (id1, id2, v) values(?, ?, ?)`, ts, ts, 1) - tk.MustQuery(`select id1, id2, v from ts1pk`).Check(testkit.Rows("2018-01-01 11:11:11 2018-01-01 11:11:11 1")) - tk.MustExec(`insert into ts1pk (id1, id2, v) values(?, ?, ?) 
on duplicate key update v = values(v)`, ts, ts, 2) - tk.MustQuery(`select id1, id2, v from ts1pk`).Check(testkit.Rows("2018-01-01 11:11:11 2018-01-01 11:11:11 2")) - tk.MustExec(`insert into ts1pk (id1, id2, v) values(?, ?, ?) on duplicate key update v = values(v), id1 = ?`, ts, ts, 2, "2018-01-01 11:11:12") - tk.MustQuery(`select id1, id2, v from ts1pk`).Check(testkit.Rows("2018-01-01 11:11:12 2018-01-01 11:11:11 2")) -} - -func TestClusterPrimaryKeyForIndexScan(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec("drop table if exists pkt1;") - tk.MustExec("CREATE TABLE pkt1 (a varchar(255), b int, index idx(b), primary key(a,b));") - tk.MustExec("insert into pkt1 values ('aaa',1);") - tk.MustQuery(`select b from pkt1 where b = 1;`).Check(testkit.Rows("1")) - - tk.MustExec("drop table if exists pkt2;") - tk.MustExec("CREATE TABLE pkt2 (a varchar(255), b int, unique index idx(b), primary key(a,b));") - tk.MustExec("insert into pkt2 values ('aaa',1);") - tk.MustQuery(`select b from pkt2 where b = 1;`).Check(testkit.Rows("1")) - - tk.MustExec("drop table if exists issue_18232;") - tk.MustExec("create table issue_18232 (a int, b int, c int, d int, primary key (a, b), index idx(c));") - - iter, cnt := combination([]string{"a", "b", "c", "d"}), 0 - for { - comb := iter() - if comb == nil { - break - } - selField := strings.Join(comb, ",") - sql := fmt.Sprintf("select %s from issue_18232 use index (idx);", selField) - tk.MustExec(sql) - cnt++ - } - require.Equal(t, 15, cnt) -} - func TestInsertRuntimeStat(t *testing.T) { stats := &executor.InsertRuntimeStat{ BasicRuntimeStats: &execdetails.BasicRuntimeStats{}, @@ -1169,180 +485,6 @@ func TestDuplicateEntryMessage(t *testing.T) { } } -func TestIssue20768(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - 
tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a year, primary key(a))") - tk.MustExec("insert ignore into t1 values(null)") - tk.MustExec("create table t2(a int, key(a))") - tk.MustExec("insert into t2 values(0)") - tk.MustQuery("select /*+ hash_join(t1) */ * from t1 join t2 on t1.a = t2.a").Check(testkit.Rows("0 0")) - tk.MustQuery("select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a").Check(testkit.Rows("0 0")) - tk.MustQuery("select /*+ inl_join(t2) */ * from t1 join t2 on t1.a = t2.a").Check(testkit.Rows("0 0")) - tk.MustQuery("select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a").Check(testkit.Rows("0 0")) - tk.MustQuery("select /*+ inl_merge_join(t1) */ * from t1 join t2 on t1.a = t2.a").Check(testkit.Rows("0 0")) - tk.MustQuery("select /*+ merge_join(t1) */ * from t1 join t2 on t1.a = t2.a").Check(testkit.Rows("0 0")) -} - -func TestIssue10402(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table vctt (v varchar(4), c char(4))") - tk.MustExec("insert into vctt values ('ab ', 'ab ')") - tk.MustQuery("select * from vctt").Check(testkit.Rows("ab ab")) - tk.MustExec("delete from vctt") - tk.Session().GetSessionVars().StmtCtx.SetWarnings(nil) - tk.MustExec("insert into vctt values ('ab\\n\\n\\n', 'ab\\n\\n\\n'), ('ab\\t\\t\\t', 'ab\\t\\t\\t'), ('ab ', 'ab '), ('ab\\r\\r\\r', 'ab\\r\\r\\r')") - require.Equal(t, uint16(4), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - warns := tk.Session().GetSessionVars().StmtCtx.GetWarnings() - require.Equal(t, "[{Warning [types:1265]Data truncated for column 'v' at row 1} {Warning [types:1265]Data truncated for column 'v' at row 2} {Warning [types:1265]Data truncated for column 'v' at row 3} {Warning [types:1265]Data truncated for column 'v' at row 4}]", - fmt.Sprintf("%v", warns)) - tk.MustQuery("select * from vctt").Check(testkit.Rows("ab\n\n ab\n\n", 
"ab\t\t ab\t\t", "ab ab", "ab\r\r ab\r\r")) - tk.MustQuery("select length(v), length(c) from vctt").Check(testkit.Rows("4 4", "4 4", "4 2", "4 4")) -} - -func combination(items []string) func() []string { - current := 1 - buf := make([]string, len(items)) - return func() []string { - if current >= int(math.Pow(2, float64(len(items)))) { - return nil - } - buf = buf[:0] - for i, e := range items { - if (1<", "")) } -func TestSetTiDBEnableParallelApply(t *testing.T) { - // validate the tidb_enable_parallel_apply's value - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set tidb_enable_parallel_apply=0") - tk.MustQuery("select @@tidb_enable_parallel_apply").Check(testkit.Rows("0")) - tk.MustExec("set tidb_enable_parallel_apply=1") - tk.MustQuery("select @@tidb_enable_parallel_apply").Check(testkit.Rows("1")) - tk.MustExec("set tidb_enable_parallel_apply=on") - tk.MustQuery("select @@tidb_enable_parallel_apply").Check(testkit.Rows("1")) - tk.MustExec("set tidb_enable_parallel_apply=off") - tk.MustQuery("select @@tidb_enable_parallel_apply").Check(testkit.Rows("0")) - require.Error(t, tk.ExecToErr("set tidb_enable_parallel_apply=-1")) - require.Error(t, tk.ExecToErr("set tidb_enable_parallel_apply=2")) - require.Error(t, tk.ExecToErr("set tidb_enable_parallel_apply=1000")) - require.Error(t, tk.ExecToErr("set tidb_enable_parallel_apply='onnn'")) -} - func TestMultipleApply(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -404,118 +383,6 @@ func TestApplyWithOtherOperators(t *testing.T) { tk.MustQuery(sql).Sort().Check(testkit.Rows("1")) } -func TestApplyWithOtherFeatures(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set tidb_enable_parallel_apply=true") - - // collation 1 - tk.MustExec("drop table if exists t, t1") - tk.MustExec("create table t(a varchar(255) CHARACTER SET utf8mb4 
COLLATE utf8mb4_general_ci, b int)") - tk.MustExec("create table t1(a varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, b int)") - tk.MustExec("insert into t values ('a', 1), ('A', 2), ('a', 3), ('A', 4)") - tk.MustExec("insert into t1 values ('a', 1), ('A', 2), ('a', 3), ('A', 4)") - sql := "select (select min(t1.b) from t1 where t1.a >= t.a), (select sum(t1.b) from t1 where t1.a >= t.a) from t" - tk.MustQuery(sql).Sort().Check(testkit.Rows("1 10", "1 10", "1 10", "1 10")) - - // collation 2 - sql = "select (select min(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b), (select sum(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b) from t" - tk.MustQuery(sql).Sort().Check(testkit.Rows("1 10", "2 9", "3 7", "4 4")) - collate.SetNewCollationEnabledForTest(false) - defer collate.SetNewCollationEnabledForTest(true) - - // plan cache - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a int, b int)") - tk.MustExec("create table t2(a int, b int)") - tk.MustExec("insert into t1 values (1, 1), (1, 5), (2, 3), (2, 4), (3, 3)") - tk.MustExec("insert into t2 values (0, 1), (2, -1), (3, 2)") - tk.MustExec(`prepare stmt from "select * from t1 where t1.b >= (select sum(t2.b) from t2 where t2.a > t1.a and t2.a > ?)"`) - tk.MustExec("set @a=1") - tk.MustQuery("execute stmt using @a").Sort().Check(testkit.Rows("1 1", "1 5", "2 3", "2 4")) - tk.MustExec("set @a=2") - tk.MustQuery("execute stmt using @a").Sort().Check(testkit.Rows("1 5", "2 3", "2 4")) - tk.MustQuery(" select @@last_plan_from_cache").Check(testkit.Rows("0")) // sub-queries are not cacheable - - // cluster index - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.MustExec("drop table if exists t, t2") - tk.MustExec("create table t(a int, b int, c int, primary key(a, b))") - tk.MustExec("create table t2(a int, b int, c int, primary key(a, c))") - tk.MustExec("insert into t values (1, 1, 1), 
(2, 2, 2), (3, 3, 3), (4, 4, 4)") - tk.MustExec("insert into t2 values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4)") - sql = "select * from t where (select min(t2.b) from t2 where t2.a > t.a) > 0" - tk.MustQuery(sql).Sort().Check(testkit.Rows("1 1 1", "2 2 2", "3 3 3")) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly - - // partitioning table - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a int, b int) partition by range(a) (partition p0 values less than(10), partition p1 values less than(20), partition p2 values less than(30), partition p3 values less than(40))") - tk.MustExec("create table t2(a int, b int) partition by hash(a) partitions 4") - tk.MustExec("insert into t1 values (5, 5), (15, 15), (25, 25), (35, 35)") - tk.MustExec("insert into t2 values (5, 5), (15, 15), (25, 25), (35, 35)") - sql = "select (select count(*) from t2 where t2.a > t1.b and t2.a=20), (select max(t2.b) from t2 where t2.a between t1.a and 20) from t1 where t1.a > 10" - tk.MustQuery(sql).Sort().Check(testkit.Rows("0 15", "0 ", "0 ")) -} - -func TestApplyInDML(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set tidb_enable_parallel_apply=true") - - // delete - tk.MustExec("drop table if exists t, t2") - tk.MustExec("create table t(a bigint, b int)") - tk.MustExec("create table t2(a int, b int)") - tk.MustExec("insert into t values (1, 1), (2, 2), (3, 3), (4, 4), (1, 1), (2, 2), (3, 3), (4, 4)") - tk.MustExec("insert into t2 values (1, 1), (2, 2), (3, 3), (4, 4), (1, 1), (2, 2), (3, 3), (4, 4)") - tk.MustExec("delete from t where (select min(t2.a) * 2 from t2 where t2.a < t.a) > 1") - tk.MustQuery("select * from t").Sort().Check(testkit.Rows("1 1", "1 1")) - - // insert - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, b int, c int)") - tk.MustExec("insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (1, 1, 
1), (2, 2, 2), (3, 3, 3)") - tk.MustExec("insert into t (select * from t where (select count(*) from t t1 where t1.b > t.a) > 2)") - tk.MustQuery("select * from t").Sort().Check(testkit.Rows("1 1 1", "1 1 1", "1 1 1", "1 1 1", "2 2 2", "2 2 2", "3 3 3", "3 3 3")) - - // update - tk.MustExec("drop table if exists t, t2") - tk.MustExec("create table t(a smallint, b int)") - tk.MustExec("create table t2(a int, b int)") - tk.MustExec("insert into t values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3)") - tk.MustExec("insert into t2 values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3)") - tk.MustExec("update t set a = a + 1 where (select count(*) from t2 where t2.a <= t.a) in (1, 2)") - tk.MustQuery("select * from t").Sort().Check(testkit.Rows("2 1", "2 1", "2 2", "2 2", "3 3", "3 3")) - - // replace - tk.MustExec("drop table if exists t, t2") - tk.MustExec("create table t(a tinyint, b int, unique index idx(a))") - tk.MustExec("create table t2(a tinyint, b int)") - tk.MustExec("insert into t values (1, 1), (2, 2), (3, 3), (4, 4)") - tk.MustExec("insert into t2 values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3)") - tk.MustExec("replace into t (select pow(t2.a, 2), t2.b from t2 where (select min(t.a) from t where t.a > t2.a) between 1 and 5)") - tk.MustQuery("select * from t").Sort().Check(testkit.Rows("1 1", "2 2", "3 3", "4 2", "9 3")) - - // Transaction - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a int, b int)") - tk.MustExec("create table t2(a int, b int)") - tk.MustExec("insert into t1 values (1, 2), (1, 3)") - tk.MustExec("begin") - tk.MustExec("insert into t1 values (1, 4), (2, 3), (2, 5)") - tk.MustExec("insert into t2 values (2, 3), (3, 4)") - sql := "select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b)" - tk.MustQuery(sql).Sort().Check(testkit.Rows("1 4", "2 5")) - tk.MustExec("delete from t1 where a = 1") - tk.MustQuery(sql).Sort().Check(testkit.Rows("2 5")) - tk.MustExec("commit") - 
tk.MustQuery(sql).Sort().Check(testkit.Rows("2 5")) -} - func TestApplyConcurrency(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -622,16 +489,3 @@ func TestApplyGoroutinePanic(t *testing.T) { require.NoError(t, failpoint.Disable(panicPath)) } } - -func TestIssue24930(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set tidb_enable_parallel_apply=true") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a int)") - tk.MustExec("create table t2(a int)") - tk.MustQuery(`select case when t1.a is null - then (select t2.a from t2 where t2.a = t1.a limit 1) else t1.a end a - from t1 where t1.a=1 order by a limit 1`).Check(testkit.Rows()) // can return an empty result instead of hanging forever -} diff --git a/pkg/executor/prepared_test.go b/pkg/executor/prepared_test.go index 0cba4e45bebc8..c65e9151655ec 100644 --- a/pkg/executor/prepared_test.go +++ b/pkg/executor/prepared_test.go @@ -23,10 +23,8 @@ import ( "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/model" - "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" - "github.com/pingcap/tidb/pkg/server" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testdata" @@ -34,75 +32,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestPreparedNameResolver(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (id int, KEY id (id))") - tk.MustExec("prepare stmt from 'select * from t limit ? 
offset ?'") - tk.MustGetErrMsg("prepare stmt from 'select b from t'", - "[planner:1054]Unknown column 'b' in 'field list'") - tk.MustGetErrMsg("prepare stmt from '(select * FROM t) union all (select * FROM t) order by a limit ?'", - "[planner:1054]Unknown column 'a' in 'order clause'") -} - -// a 'create table' DDL statement should be accepted if it has no parameters. -func TestPreparedDDL(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("prepare stmt from 'create table t (id int, KEY id (id))'") -} - -// TestUnsupportedStmtForPrepare is related to https://github.com/pingcap/tidb/issues/17412 -func TestUnsupportedStmtForPrepare(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`prepare stmt0 from "create table t0(a int primary key)"`) - tk.MustGetErrCode(`prepare stmt1 from "execute stmt0"`, mysql.ErrUnsupportedPs) - tk.MustGetErrCode(`prepare stmt2 from "deallocate prepare stmt0"`, mysql.ErrUnsupportedPs) - tk.MustGetErrCode(`prepare stmt4 from "prepare stmt3 from 'create table t1(a int, b int)'"`, mysql.ErrUnsupportedPs) -} - -func TestIgnorePlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - - tk.MustExec("create table t (id int primary key, num int)") - tk.MustExec("insert into t values (1, 1)") - tk.MustExec("insert into t values (2, 2)") - tk.MustExec("insert into t values (3, 3)") - tk.MustExec("prepare stmt from 'select /*+ IGNORE_PLAN_CACHE() */ * from t where id=?'") - tk.MustExec("set @ignore_plan_doma = 1") - tk.MustExec("execute stmt using @ignore_plan_doma") - require.False(t, tk.Session().GetSessionVars().StmtCtx.UseCache) -} - -func TestPreparedStmtWithHint(t *testing.T) { - // see https://github.com/pingcap/tidb/issues/18535 - store, 
dom := testkit.CreateMockStoreAndDomain(t) - sv := server.CreateMockServer(t, store) - sv.SetDomain(dom) - defer sv.Close() - - conn1 := server.CreateMockConn(t, sv) - tk := testkit.NewTestKitWithSession(t, store, conn1.Context().Session) - - go dom.ExpensiveQueryHandle().SetSessionManager(sv).Run() - tk.MustExec("prepare stmt from \"select /*+ max_execution_time(100) */ sleep(10)\"") - tk.MustQuery("execute stmt").Check(testkit.Rows("1")) - - // see https://github.com/pingcap/tidb/issues/46817 - tk.MustExec("use test") - tk.MustExec("create table if not exists t (i int)") - tk.MustExec("prepare stmt from 'with a as (select /*+ qb_name(qb1) */ * from t) select /*+ leading(@qb1)*/ * from a;'") -} - func TestPreparedNullParam(t *testing.T) { store := testkit.CreateMockStore(t) flags := []bool{false, true} @@ -832,23 +761,6 @@ func TestPlanCacheOperators(t *testing.T) { } } -func TestIssue28782(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("prepare stmt from 'SELECT IF(?, 1, 0);';") - tk.MustExec("set @a=1, @b=null, @c=0") - - tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @b;").Check(testkit.Rows("0")) - // TODO(Reminiscent): Support cache more tableDual plan. 
- tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt using @c;").Check(testkit.Rows("0")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) -} - func TestIssue29101(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -919,39 +831,6 @@ func TestIssue29101(t *testing.T) { tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) // can use the plan-cache } -func TestIssue28087And28162(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - // issue 28087 - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists IDT_26207`) - tk.MustExec(`CREATE TABLE IDT_26207 (col1 bit(1))`) - tk.MustExec(`insert into IDT_26207 values(0x0), (0x1)`) - tk.MustExec(`prepare stmt from 'select t1.col1 from IDT_26207 as t1 left join IDT_26207 as t2 on t1.col1 = t2.col1 where t1.col1 in (?, ?, ?)'`) - tk.MustExec(`set @a=0x01, @b=0x01, @c=0x01`) - tk.MustQuery(`execute stmt using @a,@b,@c`).Check(testkit.Rows("\x01")) - tk.MustExec(`set @a=0x00, @b=0x00, @c=0x01`) - tk.MustQuery(`execute stmt using @a,@b,@c`).Check(testkit.Rows("\x00", "\x01")) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0")) - - // issue 28162 - tk.MustExec(`drop table if exists IDT_MC21780`) - tk.MustExec(`CREATE TABLE IDT_MC21780 ( - COL1 timestamp NULL DEFAULT NULL, - COL2 timestamp NULL DEFAULT NULL, - COL3 timestamp NULL DEFAULT NULL, - KEY U_M_COL (COL1,COL2) - )`) - tk.MustExec(`insert into IDT_MC21780 values("1970-12-18 10:53:28", "1970-12-18 10:53:28", "1970-12-18 10:53:28")`) - tk.MustExec(`prepare stmt from 'select/*+ hash_join(t1) */ * from IDT_MC21780 t1 join IDT_MC21780 t2 on t1.col1 = t2.col1 where t1. col1 < ? and t2. 
col1 in (?, ?, ?);'`) - tk.MustExec(`set @a="2038-01-19 03:14:07", @b="2038-01-19 03:14:07", @c="2038-01-19 03:14:07", @d="2038-01-19 03:14:07"`) - tk.MustQuery(`execute stmt using @a,@b,@c,@d`).Check(testkit.Rows()) - tk.MustExec(`set @a="1976-09-09 20:21:11", @b="2021-07-14 09:28:16", @c="1982-01-09 03:36:39", @d="1970-12-18 10:53:28"`) - tk.MustQuery(`execute stmt using @a,@b,@c,@d`).Check(testkit.Rows("1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28")) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) -} - func TestParameterPushDown(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -1147,27 +1026,6 @@ func TestPreparePlanCache4DifferentSystemVars(t *testing.T) { tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) } -func TestTemporaryTable4PlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("drop table if exists tmp2") - tk.MustExec("create temporary table tmp2 (a int, b int, key(a), key(b));") - tk.MustExec("prepare stmt from 'select * from tmp2;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - tk.MustExec("drop table if exists tmp_t;") - tk.MustExec("create global temporary table tmp_t (id int primary key, a int, b int, index(a)) on commit delete rows") - tk.MustExec("prepare stmt from 'select * from tmp_t;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) -} - func TestPrepareStmtAfterIsolationReadChange(t *testing.T) { store, dom := 
testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) @@ -1246,19 +1104,6 @@ func TestPreparePC4Binding(t *testing.T) { tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("1")) } -func TestIssue31141(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - tk.MustExec("set @@tidb_txn_mode = 'pessimistic'") - - // No panic here. - tk.MustExec("prepare stmt1 from 'do 1'") - - tk.MustExec("set @@tidb_txn_mode = 'optimistic'") - tk.MustExec("prepare stmt1 from 'do 1'") -} - func TestMaxPreparedStmtCount(t *testing.T) { oldVal := atomic.LoadInt64(&variable.PreparedStmtCount) atomic.StoreInt64(&variable.PreparedStmtCount, 0) diff --git a/pkg/executor/revoke_test.go b/pkg/executor/revoke_test.go index 0cfb9e006c5d7..e8dbaf2836a31 100644 --- a/pkg/executor/revoke_test.go +++ b/pkg/executor/revoke_test.go @@ -21,9 +21,7 @@ import ( "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/testkit" - "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/stretchr/testify/require" ) @@ -177,132 +175,3 @@ func TestRevokeColumnScope(t *testing.T) { rows := tk.MustQuery(`SELECT Column_priv FROM mysql.Columns_priv WHERE User="testCol1Revoke" and host="localhost" and db="test" and Table_name="test3"`).Rows() require.Len(t, rows, 0) } - -// ref issue #38421 -func TestRevokeTableSingle(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - // Create a new user. 
- tk.MustExec(`CREATE USER test;`) - tk.MustExec(`CREATE TABLE test.test1(c1 int);`) - tk.MustExec(`GRANT SELECT ON test.test1 TO test;`) - - tk.MustExec(`REVOKE SELECT ON test.test1 from test;`) - - rows := tk.MustQuery(`SELECT Column_priv FROM mysql.tables_priv WHERE User="test" `).Rows() - require.Len(t, rows, 0) -} - -// ref issue #38421(column fix) -func TestRevokeTableSingleColumn(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - // Create a new user. - tk.MustExec(`CREATE USER test;`) - tk.MustExec(`GRANT SELECT(Host) ON mysql.db TO test`) - tk.MustExec(`GRANT SELECT(DB) ON mysql.db TO test`) - tk.MustExec(`REVOKE SELECT(Host) ON mysql.db FROM test`) - - rows := tk.MustQuery(`SELECT Column_priv FROM mysql.columns_priv WHERE User="test" and Column_name ='Host' `).Rows() - require.Len(t, rows, 0) - rows = tk.MustQuery(`SELECT Column_priv FROM mysql.columns_priv WHERE User="test" and Column_name ='DB' `).Rows() - require.Len(t, rows, 1) -} - -func TestRevokeDynamicPrivs(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("DROP USER if exists dyn") - tk.MustExec("create user dyn") - - tk.MustExec("GRANT BACKUP_Admin ON *.* TO dyn") // grant one priv - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows("dyn % BACKUP_ADMIN N")) - - // try revoking only on test.* - should fail: - _, err := tk.Exec("REVOKE BACKUP_Admin,system_variables_admin ON test.* FROM dyn") - require.True(t, terror.ErrorEqual(err, exeerrors.ErrIllegalPrivilegeLevel)) - - // privs should still be intact: - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows("dyn % BACKUP_ADMIN N")) - // with correct usage, the privilege is revoked - tk.MustExec("REVOKE BACKUP_Admin ON *.* FROM dyn") - tk.MustQuery("SELECT * 
FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows()) - - // Revoke bogus is a warning in MySQL - tk.MustExec("REVOKE bogus ON *.* FROM dyn") - tk.MustQuery("SHOW WARNINGS").Check(testkit.Rows("Warning 3929 Dynamic privilege 'BOGUS' is not registered with the server.")) - - // grant and revoke two dynamic privileges at once. - tk.MustExec("GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN ON *.* TO dyn") - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows("dyn % BACKUP_ADMIN N", "dyn % SYSTEM_VARIABLES_ADMIN N")) - tk.MustExec("REVOKE BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN ON *.* FROM dyn") - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows()) - - // revoke a combination of dynamic + non-dynamic - tk.MustExec("GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT, INSERT ON *.* TO dyn") - tk.MustExec("REVOKE BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT, INSERT ON *.* FROM dyn") - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows()) - - // revoke grant option from privileges - tk.MustExec("GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT ON *.* TO dyn WITH GRANT OPTION") - tk.MustExec("REVOKE BACKUP_ADMIN, SELECT, GRANT OPTION ON *.* FROM dyn") - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows("dyn % SYSTEM_VARIABLES_ADMIN Y")) -} - -func TestRevokeOnNonExistTable(t *testing.T) { - // issue #28533 - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("CREATE DATABASE d1;") - defer tk.MustExec("DROP DATABASE IF EXISTS d1;") - tk.MustExec("USE d1;") - tk.MustExec("CREATE TABLE 
t1 (a int)") - defer tk.MustExec("DROP TABLE IF EXISTS t1") - tk.MustExec("CREATE USER issue28533") - defer tk.MustExec("DROP USER issue28533") - - // GRANT ON existent table success - tk.MustExec("GRANT ALTER ON d1.t1 TO issue28533;") - // GRANT ON non-existent table success - tk.MustExec("GRANT INSERT, CREATE ON d1.t2 TO issue28533;") - - // REVOKE ON non-existent table success - tk.MustExec("DROP TABLE t1;") - tk.MustExec("REVOKE ALTER ON d1.t1 FROM issue28533;") -} - -// Check https://github.com/pingcap/tidb/issues/41773. -func TestIssue41773(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table if not exists xx (id int)") - tk.MustExec("CREATE USER 't1234'@'%' IDENTIFIED BY 'sNGNQo12fEHe0n3vU';") - tk.MustExec("GRANT USAGE ON * TO 't1234'@'%';") - tk.MustExec("GRANT USAGE ON test.* TO 't1234'@'%';") - tk.MustExec("GRANT USAGE ON test.xx TO 't1234'@'%';") - tk.MustExec("REVOKE USAGE ON * FROM 't1234'@'%';") - tk.MustExec("REVOKE USAGE ON test.* FROM 't1234'@'%';") - tk.MustExec("REVOKE USAGE ON test.xx FROM 't1234'@'%';") -} - -// Check https://github.com/pingcap/tidb/issues/41048 -func TestCaseInsensitiveSchemaNames(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`CREATE TABLE test.TABLE_PRIV(id int, name varchar(20));`) - // Verify the case-insensitive updates for mysql.tables_priv table. - tk.MustExec(`GRANT SELECT ON test.table_priv TO 'root'@'%';`) - tk.MustExec(`revoke SELECT ON test.TABLE_PRIV from 'root'@'%';;`) - - // Verify the case-insensitive updates for mysql.db table. - tk.MustExec(`GRANT SELECT ON test.* TO 'root'@'%';`) - tk.MustExec(`revoke SELECT ON tESt.* from 'root'@'%';;`) - - // Verify the case-insensitive updates for mysql.columns_priv table. 
- tk.MustExec(`GRANT SELECT (id), INSERT (ID, name) ON tEst.TABLE_PRIV TO 'root'@'%';`) - tk.MustExec(`REVOKE SELECT (ID) ON test.taBle_priv from 'root'@'%';;`) -} diff --git a/tests/integrationtest/r/executor/insert.result b/tests/integrationtest/r/executor/insert.result new file mode 100644 index 0000000000000..b45d43d6129da --- /dev/null +++ b/tests/integrationtest/r/executor/insert.result @@ -0,0 +1,1217 @@ +set tidb_enable_clustered_index = on; +drop table if exists t; +create table t(a char(20), b int, primary key(a)); +insert into t values('aa', 1), ('bb', 1); +insert into t values('aa', 2); +Error 1062 (23000): Duplicate entry 'aa' for key 't.PRIMARY' +drop table t; +create table t(a char(20), b varchar(30), c varchar(10), primary key(a, b, c)); +insert into t values ('a', 'b', 'c'), ('b', 'a', 'c'); +insert into t values ('a', 'b', 'c'); +Error 1062 (23000): Duplicate entry 'a-b-c' for key 't.PRIMARY' +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists t1; +create table t1(c1 decimal(6,4), primary key(c1)); +insert into t1 set c1 = 0.1; +insert into t1 set c1 = 0.1 on duplicate key update c1 = 1; +select * from t1; +c1 +1.0000 +set tidb_enable_clustered_index = default; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2004'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = 2004; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 bit); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 smallint unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 int unsigned); +insert into t1 set c1 = 1; +alter 
table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 smallint); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 int); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 decimal(6,4)); +insert into t1 set c1 = '1.1'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 decimal); +insert into t1 set c1 = 1.1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 numeric); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 float); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.3; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 real); +insert into t1 set c1 = 1.4; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 date); +insert into t1 set c1 = '2020-01-01'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 time); +insert into t1 set c1 = '20:00:00'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 datetime); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index 
idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 timestamp); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2020'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 char(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 varchar(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 binary(3)); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 varbinary(3)); +insert into t1 set c1 = 'b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 blob); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 text); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 enum('a', 'b')); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 set('a', 'b')); +insert into t1 set c1 = 'a,b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists c; +create table c(i int,j int,k int,primary key(i,j,k)); +insert into c values(1,2,3); +insert into c values(1,2,4); +update c set i=1,j=2,k=4 where i=1 and j=2 and k=3; +Error 1062 (23000): Duplicate entry '1-2-4' for key 'c.PRIMARY' 
+drop table if exists t1, t2; +create table t1 (a int(11) ,b varchar(100) ,primary key (a)); +create table t2 (c int(11) ,d varchar(100) ,primary key (c)); +prepare in1 from 'insert into t1 (a,b) select c,null from t2 t on duplicate key update b=t.d'; +execute in1; + +drop table if exists t1; +create table t1(a bigint); +insert into t1 values("asfasdfsajhlkhlksdaf"); +Error 1366 (HY000): Incorrect bigint value: 'asfasdfsajhlkhlksdaf' for column 'a' at row 1 +drop table if exists t1; +create table t1(a varchar(10)) charset ascii; +insert into t1 values('我'); +Error 1366 (HY000): Incorrect string value '\xE6\x88\x91' for column 'a' +drop table if exists t1; +create table t1(a char(10) charset utf8); +insert into t1 values('我'); +alter table t1 add column b char(10) charset ascii as ((a)); +select * from t1; +a b +我 ? +drop table if exists t; +create table t (a year); +insert into t values(2156); +Error 1264 (22003): Out of range value for column 'a' at row 1 +DROP TABLE IF EXISTS ts; +CREATE TABLE ts (id int DEFAULT NULL, time1 TIMESTAMP NULL DEFAULT NULL); +SET @@sql_mode=''; +INSERT INTO ts (id, time1) VALUES (1, TIMESTAMP '1018-12-23 00:00:00'); +SHOW WARNINGS; +Level Code Message +Warning 1292 Incorrect timestamp value: '1018-12-23 00:00:00' for column 'time1' at row 1 +SELECT * FROM ts ORDER BY id; +id time1 +1 0000-00-00 00:00:00 +SET @@sql_mode='STRICT_TRANS_TABLES'; +INSERT INTO ts (id, time1) VALUES (2, TIMESTAMP '1018-12-24 00:00:00'); +Error 1292 (22007): Incorrect timestamp value: '1018-12-24 00:00:00' for column 'time1' at row 1 +DROP TABLE ts; +CREATE TABLE t0(c0 SMALLINT AUTO_INCREMENT PRIMARY KEY); +INSERT IGNORE INTO t0(c0) VALUES (194626268); +INSERT IGNORE INTO t0(c0) VALUES ('*'); +SHOW WARNINGS; +Level Code Message +Warning 1366 Incorrect smallint value: '*' for column 'c0' at row 1 +Warning 1690 constant 32768 overflows smallint +Warning 1467 Failed to read auto-increment value from storage engine +SET @@sql_mode=default; +drop table if exists 
t1; +create table t1(a decimal(15,2)); +insert into t1 values (1111111111111.01); +select * from t1; +a +1111111111111.01 +select cast(a as decimal) from t1; +cast(a as decimal) +9999999999 +drop table if exists t1; +create table t1(a json, b int, unique index idx((cast(a as signed array)))); +insert into t1 values ('[1,11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert into t1 values ('[2, 222]', 2); +Error 1062 (23000): Duplicate entry '2' for key 't1.idx' +replace into t1 values ('[1, 10]', 10); +select * from t1; +a b +[2, 22] 2 +[1, 10] 10 +replace into t1 values ('[1, 2]', 1); +select * from t1; +a b +[1, 2] 1 +replace into t1 values ('[1, 11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert ignore into t1 values ('[1]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert ignore into t1 values ('[1, 2]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert into t1 values ('[2]', 2) on duplicate key update b = 10; +select * from t1; +a b +[1, 11] 1 +[2, 22] 10 +insert into t1 values ('[2, 1]', 2) on duplicate key update a = '[1,2]'; +Error 1062 (23000): Duplicate entry '[1, 2]' for key 't1.idx' +insert into t1 values ('[1,2]', 2) on duplicate key update a = '[1,2]'; +Error 1062 (23000): Duplicate entry '[1, 2]' for key 't1.idx' +insert into t1 values ('[11, 22]', 2) on duplicate key update a = '[1,2]'; +Error 1062 (23000): Duplicate entry '[1, 2]' for key 't1.idx' +set time_zone="+09:00"; +drop table if exists t; +create table t (id int, c1 datetime not null default CURRENT_TIMESTAMP); +set TIMESTAMP = 1234; +insert t (id) values (1); +select * from t; +id c1 +1 1970-01-01 09:20:34 +drop table if exists t; +create table t (dt datetime); +set @@time_zone='+08:00'; +delete from t; +insert into t values ('2020-10-22'); +select * from t; +dt +2020-10-22 00:00:00 +delete from t; +insert into t values ('2020-10-22-16'); +select * from t; +dt +2020-10-22 
16:00:00 +delete from t; +insert into t values ('2020-10-22 16-31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22 16:31-15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16:31:15-10'); +select * from t; +dt +2020-10-23 10:31:15 +delete from t; +insert into t values ('2020.10-22'); +select * from t; +dt +2020-10-22 00:00:00 +delete from t; +insert into t values ('2020-10.22-16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10-22.16-31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22 16.31-15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16.31.15+14'); +select * from t; +dt +2020-10-22 10:31:15 +delete from t; +insert into t values ('2020-10:22'); +select * from t; +dt +2020-10-22 00:00:00 +delete from t; +insert into t values ('2020-10-22:16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10-22-16:31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22 16-31:15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16.31.15+09:30'); +select * from t; +dt +2020-10-22 15:01:15 +delete from t; +insert into t values ('2020.10-22:16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10.22-16:31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22.16-31:15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16:31.15+09:30'); +select * from t; +dt +2020-10-22 15:01:15 +drop table if exists t; +create table t (dt datetime, ts timestamp); +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+00:00'; 
+select * from t; +dt ts +2020-10-23 00:53:40 2020-10-22 16:53:40 +delete from t; +set @@time_zone='-08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+08:00'; +select * from t; +dt ts +2020-10-22 08:53:40 2020-10-23 00:53:40 +delete from t; +set @@time_zone='-03:00'; +insert into t values ('2020-10-22T16:53:40+03:00', '2020-10-22T16:53:40+03:00'); +set @@time_zone='+08:00'; +select * from t; +dt ts +2020-10-22 10:53:40 2020-10-22 21:53:40 +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40+08:00', '2020-10-22T16:53:40+08:00'); +set @@time_zone='+08:00'; +select * from t; +dt ts +2020-10-22 16:53:40 2020-10-22 16:53:40 +drop table if exists t; +create table t (ts timestamp); +insert into t values ('2020-10-22T12:00:00Z'), ('2020-10-22T13:00:00Z'), ('2020-10-22T14:00:00Z'); +select count(*) from t where ts > '2020-10-22T12:00:00Z'; +count(*) +2 +set @@time_zone='+08:00'; +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10+00:00', '2020-10-27T14:39:10.10+00:00'); +select * from t; +dt ts +2020-10-27 22:39:10.10 2020-10-27 22:39:10.10 +drop table if exists t; +create table t (dt datetime(1), ts timestamp(1)); +insert into t values ('2020-10-27T14:39:10.3+0200', '2020-10-27T14:39:10.3+0200'); +select * from t; +dt ts +2020-10-27 20:39:10.3 2020-10-27 20:39:10.3 +drop table if exists t; +create table t (dt datetime(6), ts timestamp(6)); +insert into t values ('2020-10-27T14:39:10.3-02', '2020-10-27T14:39:10.3-02'); +select * from t; +dt ts +2020-10-28 00:39:10.300000 2020-10-28 00:39:10.300000 +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10Z', '2020-10-27T14:39:10.10Z'); +select * from t; +dt ts +2020-10-27 22:39:10.10 2020-10-27 22:39:10.10 +set time_zone=default; +set timestamp=default; +drop table if exists t1; +create table t1(a year(4)); 
+insert into t1 values(0000),(00),("0000"),("000"), ("00"), ("0"), (79), ("79"); +select * from t1; +a +0000 +0000 +0000 +2000 +2000 +2000 +1979 +1979 +drop table if exists t; +create table t(f_year year NOT NULL DEFAULT '0000')ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t values(); +select * from t; +f_year +0000 +insert into t values('0000'); +select * from t; +f_year +0000 +0000 +drop table if exists t1, t2, t3, t4; +create table t1(d date); +create table t2(d datetime); +create table t3(d date); +create table t4(d datetime); +set sql_mode='STRICT_TRANS_TABLES,ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +select year(d), month(d), day(d) from t2; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +truncate t1; +truncate t2; +truncate t3; +truncate t4; +set sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values 
('2019-02-31'); +select year(d), month(d), day(d) from t1; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +select year(d), month(d), day(d) from t2; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +set sql_mode=default; +drop table if exists t1, t2, t3; +create table t1 (a int,b int,primary key(a,b)) partition by range(a) (partition p0 values less than (100),partition p1 values less than (1000)); +insert into t1 set a=1, b=1; +insert into t1 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t1; +a b +1 1 +create table t2 (a int,b int,primary key(a,b)) partition by hash(a) partitions 4; +insert into t2 set a=1,b=1; +insert into t2 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t2; +a b +1 1 +CREATE TABLE t3 (a int, b int, c int, d int, e int, +PRIMARY KEY (a,b), +UNIQUE KEY (b,c,d) +) PARTITION BY RANGE ( b ) ( +PARTITION p0 VALUES LESS THAN (4), +PARTITION p1 VALUES LESS THAN (7), +PARTITION p2 VALUES LESS THAN (11) +); +insert into t3 values (1,2,3,4,5); +insert into t3 values (1,2,3,4,5),(6,2,3,4,6) on duplicate key update e = e + values(e); +select * from t3; +a b c d e +1 2 3 4 16 +drop table if exists t1; +create table t1 (a bit(3)); +insert into t1 values(-1); +Error 1406 (22001): Data too long for column 'a' at row 1 +insert into t1 values(9); +Error 1406 (22001): Data too long for column 'a' at row 1 +create table t64 (a bit(64)); +insert into t64 values(-1); +insert into t64 values(18446744073709551615); +insert into t64 values(18446744073709551616); +Error 1264 (22003): Out of range value for column 'a' at row 1 +drop table if exists bug; +create table bug (a varchar(100)); +insert into bug select 
ifnull(JSON_UNQUOTE(JSON_EXTRACT('[{"amount":2000,"feeAmount":0,"merchantNo":"20190430140319679394","shareBizCode":"20160311162_SECOND"}]', '$[0].merchantNo')),'') merchant_no union SELECT '20180531557' merchant_no; +select * from bug; +a +20180531557 +20190430140319679394 +drop table if exists t; +create table t (a int, b double); +insert into t values (ifnull('',0)+0, 0); +insert into t values (0, ifnull('',0)+0); +select * from t; +a b +0 0 +0 0 +insert into t values ('', 0); +Error 1366 (HY000): Incorrect int value: '' for column 'a' at row 1 +insert into t values (0, ''); +Error 1366 (HY000): Incorrect double value: '' for column 'b' at row 1 +update t set a = ''; +Error 1292 (22007): Truncated incorrect DOUBLE value: '' +update t set b = ''; +Error 1292 (22007): Truncated incorrect DOUBLE value: '' +update t set a = ifnull('',0)+0; +update t set b = ifnull('',0)+0; +delete from t where a = ''; +select * from t; +a b +drop table if exists t,t1; +create table t(col1 FLOAT, col2 FLOAT(10,2), col3 DOUBLE, col4 DOUBLE(10,2), col5 DECIMAL, col6 DECIMAL(10,2)); +insert into t values (-3.402823466E+68, -34028234.6611, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99); +Error 1264 (22003): Out of range value for column 'col1' at row 1 +insert into t values (-34028234.6611, -3.402823466E+68, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99); +Error 1264 (22003): Out of range value for column 'col2' at row 1 +create table t1(id1 float,id2 float); +insert ignore into t1 values(999999999999999999999999999999999999999,-999999999999999999999999999999999999999); +select @@warning_count; +@@warning_count +2 +select convert(id1,decimal(65)),convert(id2,decimal(65)) from t1; +convert(id1,decimal(65)) convert(id2,decimal(65)) +340282346638528860000000000000000000000 -340282346638528860000000000000000000000 +set sql_mode = 
'ONLY_FULL_GROUP_BY,STRICT_ALL_TABLES,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; +drop table if exists t1; +CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128)); +Error 1406 (22001): Data too long for column 'c1' at row 1 +drop table if exists t1; +CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768)); +Error 1406 (22001): Data too long for column 'c1' at row 1 +drop table if exists t1; +CREATE TABLE t1(c1 mediumtext); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 8777215)); +Error 1406 (22001): Data too long for column 'c1' at row 1 +set sql_mode = 'ONLY_FULL_GROUP_BY,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; +drop table if exists t1; +CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128)); +select length(c1) from t1; +length(c1) +254 +drop table if exists t1; +CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768)); +select length(c1) from t1; +length(c1) +65534 +set sql_mode = default; +set @@allow_auto_random_explicit_insert = true; +drop table if exists ar; +create table ar (id bigint key clustered auto_random, name char(10)); +insert into ar(id) values (1); +select id from ar; +id +1 +select last_insert_id(); +last_insert_id() +0 +delete from ar; +insert into ar(id) values (1), (2); +select id from ar; +id +1 +2 +select last_insert_id(); +last_insert_id() +0 +delete from ar; +drop table ar; +set @@allow_auto_random_explicit_insert = default; +drop table if exists t, t1; +create table t (a int primary key, b datetime, d date); +insert into t values (1, '2019-02-11 30:00:00', '2019-01-31'); +Error 1292 (22007): Incorrect datetime value: '2019-02-11 30:00:00' for column 'b' at row 1 +CREATE TABLE t1 (a BINARY(16) PRIMARY KEY); +INSERT INTO t1 VALUES (AES_ENCRYPT('a','a')); +INSERT INTO 
t1 VALUES (AES_ENCRYPT('a','a')); +Error 1062 (23000): Duplicate entry '{ W]\xA1\x06u\x9D\xBD\xB1\xA3.\xE2\xD9\xA7t' for key 't1.PRIMARY' +INSERT INTO t1 VALUES (AES_ENCRYPT('b','b')); +INSERT INTO t1 VALUES (AES_ENCRYPT('b','b')); +Error 1062 (23000): Duplicate entry '\x0C\x1E\x8DG`\xEB\x93 F&BC\xF0\xB5\xF4\xB7' for key 't1.PRIMARY' +drop table if exists t1; +create table t1 (a bit primary key) engine=innodb; +insert into t1 values (b'0'); +insert into t1 values (b'0'); +Error 1062 (23000): Duplicate entry '\x00' for key 't1.PRIMARY' +drop table if exists t; +create table t(c numeric primary key); +insert ignore into t values(null); +insert into t values(0); +Error 1062 (23000): Duplicate entry '0' for key 't.PRIMARY' +set tidb_enable_clustered_index = on; +drop table if exists t1pk; +create table t1pk(id varchar(200) primary key, v int); +insert into t1pk(id, v) values('abc', 1); +select * from t1pk; +id v +abc 1 +set @@tidb_constraint_check_in_place=true; +insert into t1pk(id, v) values('abc', 2); +Error 1062 (23000): Duplicate entry 'abc' for key 't1pk.PRIMARY' +set @@tidb_constraint_check_in_place=false; +insert into t1pk(id, v) values('abc', 3); +Error 1062 (23000): Duplicate entry 'abc' for key 't1pk.PRIMARY' +select v, id from t1pk; +v id +1 abc +select id from t1pk where id = 'abc'; +id +abc +select v, id from t1pk where id = 'abc'; +v id +1 abc +drop table if exists t3pk; +create table t3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3)); +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 1); +select * from t3pk; +id1 id2 v id3 +abc xyz 1 100 +set @@tidb_constraint_check_in_place=true; +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 2); +Error 1062 (23000): Duplicate entry 'abc-xyz-100' for key 't3pk.PRIMARY' +set @@tidb_constraint_check_in_place=false; +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 3); +Error 1062 (23000): Duplicate entry 'abc-xyz-100' for key 't3pk.PRIMARY' +select 
v, id3, id2, id1 from t3pk; +v id3 id2 id1 +1 100 xyz abc +select id3, id2, id1 from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'; +id3 id2 id1 +100 xyz abc +select id3, id2, id1, v from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'; +id3 id2 id1 v +100 xyz abc 1 +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 101, 1); +insert into t3pk(id1, id2, id3, v) values('abc', 'zzz', 101, 1); +drop table if exists t1pku; +create table t1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into t1pku(id, uk, v) values('abc', 1, 2); +select * from t1pku where id = 'abc'; +id uk v +abc 1 2 +insert into t1pku(id, uk, v) values('aaa', 1, 3); +Error 1062 (23000): Duplicate entry '1' for key 't1pku.ukk' +select * from t1pku; +id uk v +abc 1 2 +select * from t3pk where (id1, id2, id3) in (('abc', 'xyz', 100), ('abc', 'xyz', 101), ('abc', 'zzz', 101)); +id1 id2 v id3 +abc xyz 1 100 +abc xyz 1 101 +abc zzz 1 101 +set @@tidb_constraint_check_in_place=default; +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists it1pk; +create table it1pk(id varchar(200) primary key, v int); +insert into it1pk(id, v) values('abc', 1); +insert ignore into it1pk(id, v) values('abc', 2); +select * from it1pk where id = 'abc'; +id v +abc 1 +drop table if exists it2pk; +create table it2pk(id1 varchar(200), id2 varchar(200), v int, primary key(id1, id2)); +insert into it2pk(id1, id2, v) values('abc', 'cba', 1); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +id1 id2 v +abc cba 1 +insert ignore into it2pk(id1, id2, v) values('abc', 'cba', 2); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +id1 id2 v +abc cba 1 +drop table if exists it1pku; +create table it1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into it1pku(id, uk, v) values('abc', 1, 2); +select * from it1pku where id = 'abc'; +id uk v +abc 1 2 +insert ignore into it1pku(id, uk, v) values('aaa', 1, 3), 
('bbb', 2, 1); +select * from it1pku; +id uk v +abc 1 2 +bbb 2 1 +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists dt1pi; +create table dt1pi(id varchar(200) primary key, v int); +insert into dt1pi(id, v) values('abb', 1),('acc', 2); +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1; +select * from dt1pi; +id v +abb 2 +acc 2 +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1pi; +id v +acc 2 +xxx 3 +drop table if exists dt1piu; +create table dt1piu(id varchar(200) primary key, uk int, v int, unique key uuk(uk)); +insert into dt1piu(id, uk, v) values('abb', 1, 10),('acc', 2, 20); +insert into dt1piu(id, uk, v) values('xyz', 1, 100) on duplicate key update v = v + 1; +select * from dt1piu; +id uk v +abb 1 11 +acc 2 20 +insert into dt1piu(id, uk, v) values('abb', 1, 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1piu; +id uk v +acc 2 20 +xxx 1 12 +drop table if exists ts1pk; +create table ts1pk(id1 timestamp, id2 timestamp, v int, primary key(id1, id2)); +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 1); +select id1, id2, v from ts1pk; +id1 id2 v +2018-01-01 11:11:11 2018-01-01 11:11:11 1 +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v); +select id1, id2, v from ts1pk; +id1 id2 v +2018-01-01 11:11:11 2018-01-01 11:11:11 2 +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v), id1 = '2018-01-01 11:11:12'; +select id1, id2, v from ts1pk; +id1 id2 v +2018-01-01 11:11:12 2018-01-01 11:11:11 2 +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists pkt1; +CREATE TABLE pkt1 (a varchar(255), b int, index idx(b), primary key(a,b)); +insert into pkt1 values ('aaa',1); +select b from 
pkt1 where b = 1; +b +1 +drop table if exists pkt2; +CREATE TABLE pkt2 (a varchar(255), b int, unique index idx(b), primary key(a,b)); +insert into pkt2 values ('aaa',1); +select b from pkt2 where b = 1; +b +1 +drop table if exists issue_18232; +create table issue_18232 (a int, b int, c int, d int, primary key (a, b), index idx(c)); +select a from issue_18232 use index (idx); +a +select b from issue_18232 use index (idx); +b +select a,b from issue_18232 use index (idx); +a b +select c from issue_18232 use index (idx); +c +select a,c from issue_18232 use index (idx); +a c +select b,c from issue_18232 use index (idx); +b c +select a,b,c from issue_18232 use index (idx); +a b c +select d from issue_18232 use index (idx); +d +select a,d from issue_18232 use index (idx); +a d +select b,d from issue_18232 use index (idx); +b d +select a,b,d from issue_18232 use index (idx); +a b d +select c,d from issue_18232 use index (idx); +c d +select a,c,d from issue_18232 use index (idx); +a c d +select b,c,d from issue_18232 use index (idx); +b c d +select a,b,c,d from issue_18232 use index (idx); +a b c d +set tidb_enable_clustered_index = default; +drop table if exists t1, t2; +create table t1(a year, primary key(a)); +insert ignore into t1 values(null); +create table t2(a int, key(a)); +insert into t2 values(0); +select /*+ hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_join(t2) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +drop table if exists vctt; +create table vctt (v varchar(4), c char(4)); +insert into vctt values ('ab ', 'ab '); +select * from vctt; +v c +ab ab +delete from vctt; +insert into vctt values ('ab\n\n\n', 
'ab\n\n\n'), ('ab\t\t\t', 'ab\t\t\t'), ('ab ', 'ab '), ('ab\r\r\r', 'ab\r\r\r'); +show warnings; +Level Code Message +Warning 1265 Data truncated for column 'v' at row 1 +Warning 1265 Data truncated for column 'v' at row 2 +Warning 1265 Data truncated for column 'v' at row 3 +Warning 1265 Data truncated for column 'v' at row 4 +select * from vctt; +v c +ab + + ab + + +ab ab +ab ab +ab ab +select length(v), length(c) from vctt; +length(v) length(c) +4 4 +4 4 +4 2 +4 4 +drop table if exists t1; +create table t1(a int, b varchar(20), primary key(a,b(3)) clustered); +insert into t1 values(1,'aaaaa'); +insert into t1 values(1,'aaaaa'); +Error 1062 (23000): Duplicate entry '1-aaa' for key 't1.PRIMARY' +insert into t1 select 1, 'aaa'; +Error 1062 (23000): Duplicate entry '1-aaa' for key 't1.PRIMARY' +insert into t1 select 1, 'bb'; +insert into t1 select 1, 'bb'; +Error 1062 (23000): Duplicate entry '1-bb' for key 't1.PRIMARY' +drop table if exists bintest; +create table bintest (h enum(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; +h +a +drop table if exists bintest; +create table bintest (h set(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; +h +a +drop table if exists temp_test; +create global temporary table temp_test(id int primary key auto_increment) on commit delete rows; +insert into temp_test(id) values(0); +select * from temp_test; +id +begin; +insert into temp_test(id) values(0); +select * from temp_test; +id +1 +commit; +begin; +insert into temp_test(id) values(0); +select * from temp_test; +id +1 +insert into temp_test(id) values(0); +select id from temp_test order by id; +id +1 +2 +commit; +begin; +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +id +1 +2 +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +id +1 +2 +3 +4 +commit; +begin; +insert into temp_test(id) values(10); +insert into 
temp_test(id) values(0); +select id from temp_test order by id; +id +10 +11 +insert into temp_test(id) values(20), (30); +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +id +10 +11 +20 +30 +31 +32 +commit; +drop table if exists temp_test; +drop table if exists temp_test; +create global temporary table temp_test(id int) on commit delete rows; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +_tidb_rowid +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +_tidb_rowid +1 +commit; +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +_tidb_rowid +1 +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test order by _tidb_rowid; +_tidb_rowid +1 +2 +commit; +begin; +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +_tidb_rowid +1 +2 +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +_tidb_rowid +1 +2 +3 +4 +commit; +drop table if exists temp_test; +drop table if exists t1; +create table t1(c1 date); +insert into t1 values('2020-02-31'); +Error 1292 (22007): Incorrect date value: '2020-02-31' for column 'c1' at row 1 +set @@sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values('2020-02-31'); +select * from t1; +c1 +2020-02-31 +set @@sql_mode='STRICT_TRANS_TABLES'; +insert into t1 values('2020-02-31'); +Error 1292 (22007): Incorrect date value: '2020-02-31' for column 'c1' at row 1 +set sql_mode=default; +drop table if exists t; +create table t (id decimal(10)); +insert into t values('1sdf'); +Error 1366 (HY000): Incorrect decimal value: '1sdf' for column 'id' at row 1 +insert into t values('1edf'); +Error 1366 (HY000): Incorrect decimal value: '1edf' for column 'id' at row 1 +insert into t values('12Ea'); +Error 1366 (HY000): Incorrect decimal value: '12Ea' for column 'id' at row 1 +insert into t values('1E'); +Error 1366 (HY000): Incorrect 
decimal value: '1E' for column 'id' at row 1 +insert into t values('1e'); +Error 1366 (HY000): Incorrect decimal value: '1e' for column 'id' at row 1 +insert into t values('1.2A'); +Error 1366 (HY000): Incorrect decimal value: '1.2A' for column 'id' at row 1 +insert into t values('1.2.3.4.5'); +Error 1366 (HY000): Incorrect decimal value: '1.2.3.4.5' for column 'id' at row 1 +insert into t values('1.2.'); +Error 1366 (HY000): Incorrect decimal value: '1.2.' for column 'id' at row 1 +insert into t values('1,999.00'); +Error 1366 (HY000): Incorrect decimal value: '1,999.00' for column 'id' at row 1 +insert into t values('12e-3'); +show warnings; +Level Code Message +Warning 1366 Incorrect decimal value: '12e-3' for column 'id' at row 1 +select id from t; +id +0 +drop table if exists t; +SET sql_mode='NO_ENGINE_SUBSTITUTION'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a tinyint not null auto_increment primary key, b char(20)); +INSERT INTO t1 VALUES (127,'maxvalue'); +REPLACE INTO t1 VALUES (0,'newmaxvalue'); +Error 1467 (HY000): Failed to read auto-increment value from storage engine +set sql_mode=default; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(a INT) ENGINE = InnoDB; +INSERT IGNORE into t1(SELECT SLEEP(NULL)); +SHOW WARNINGS; +Level Code Message +Warning 1210 Incorrect arguments to sleep +INSERT IGNORE into t1(SELECT SLEEP(-1)); +SHOW WARNINGS; +Level Code Message +Warning 1210 Incorrect arguments to sleep +INSERT IGNORE into t1(SELECT SLEEP(1)); +SELECT * FROM t1; +a +0 +0 +0 +DROP TABLE t1; +drop table if exists t1; +create table t1(c1 float); +insert into t1 values(999.99); +select cast(t1.c1 as decimal(4, 1)) from t1; +cast(t1.c1 as decimal(4, 1)) +999.9 +select cast(t1.c1 as decimal(5, 1)) from t1; +cast(t1.c1 as decimal(5, 1)) +1000.0 +drop table if exists t1; +create table t1(c1 decimal(6, 4)); +insert into t1 values(99.9999); +select cast(t1.c1 as decimal(5, 3)) from t1; +cast(t1.c1 as decimal(5, 3)) +99.999 +select cast(t1.c1 as decimal(6, 3)) from t1; 
+cast(t1.c1 as decimal(6, 3)) +100.000 +drop table if exists t1; +create table t1(id int, a int); +set @@SQL_MODE='STRICT_TRANS_TABLES'; +insert into t1 values(1, '1e100'); +Error 1264 (22003): Out of range value for column 'a' at row 1 +insert into t1 values(2, '-1e100'); +Error 1264 (22003): Out of range value for column 'a' at row 1 +select id, a from t1; +id a +set @@SQL_MODE=''; +insert into t1 values(1, '1e100'); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +insert into t1 values(2, '-1e100'); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +select id, a from t1 order by id asc; +id a +1 2147483647 +2 -2147483648 +set sql_mode=default; +drop table if exists tf; +create table tf(a float(1, 0) unsigned); +insert into tf values('-100'); +Error 1264 (22003): Out of range value for column 'a' at row 1 +set @@sql_mode=''; +insert into tf values('-100'); +select * from tf; +a +0 +set @@sql_mode=default; diff --git a/tests/integrationtest/r/executor/parallel_apply.result b/tests/integrationtest/r/executor/parallel_apply.result new file mode 100644 index 0000000000000..a7a53dd84d730 --- /dev/null +++ b/tests/integrationtest/r/executor/parallel_apply.result @@ -0,0 +1,172 @@ +set tidb_enable_parallel_apply=0; +select @@tidb_enable_parallel_apply; +@@tidb_enable_parallel_apply +0 +set tidb_enable_parallel_apply=1; +select @@tidb_enable_parallel_apply; +@@tidb_enable_parallel_apply +1 +set tidb_enable_parallel_apply=on; +select @@tidb_enable_parallel_apply; +@@tidb_enable_parallel_apply +1 +set tidb_enable_parallel_apply=off; +select @@tidb_enable_parallel_apply; +@@tidb_enable_parallel_apply +0 +set tidb_enable_parallel_apply=-1; +Error 1231 (42000): Variable 'tidb_enable_parallel_apply' can't be set to the value of '-1' +set tidb_enable_parallel_apply=2; +Error 1231 (42000): Variable 'tidb_enable_parallel_apply' can't be set to the value of '2' +set 
tidb_enable_parallel_apply=1000; +Error 1231 (42000): Variable 'tidb_enable_parallel_apply' can't be set to the value of '1000' +set tidb_enable_parallel_apply='onnn'; +Error 1231 (42000): Variable 'tidb_enable_parallel_apply' can't be set to the value of 'onnn' +set tidb_enable_parallel_apply=default; +set tidb_enable_parallel_apply=true; +drop table if exists t, t1; +create table t(a varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, b int); +create table t1(a varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, b int); +insert into t values ('a', 1), ('A', 2), ('a', 3), ('A', 4); +insert into t1 values ('a', 1), ('A', 2), ('a', 3), ('A', 4); +select (select min(t1.b) from t1 where t1.a >= t.a), (select sum(t1.b) from t1 where t1.a >= t.a) from t; +(select min(t1.b) from t1 where t1.a >= t.a) (select sum(t1.b) from t1 where t1.a >= t.a) +1 10 +1 10 +1 10 +1 10 +select (select min(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b), (select sum(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b) from t; +(select min(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b) (select sum(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b) +1 10 +2 9 +3 7 +4 4 +set tidb_enable_prepared_plan_cache=1; +drop table if exists t1, t2; +create table t1(a int, b int); +create table t2(a int, b int); +insert into t1 values (1, 1), (1, 5), (2, 3), (2, 4), (3, 3); +insert into t2 values (0, 1), (2, -1), (3, 2); +prepare stmt from "select * from t1 where t1.b >= (select sum(t2.b) from t2 where t2.a > t1.a and t2.a > ?)"; +set @a=1; +execute stmt using @a; +a b +1 1 +1 5 +2 3 +2 4 +set @a=2; +execute stmt using @a; +a b +1 5 +2 3 +2 4 +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +set tidb_enable_clustered_index=ON; +drop table if exists t, t2; +create table t(a int, b int, c int, primary key(a, b)); +create table t2(a int, b int, c int, primary key(a, c)); +insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4); +insert into t2 values (1, 1, 1), (2, 2, 
2), (3, 3, 3), (4, 4, 4); +select * from t where (select min(t2.b) from t2 where t2.a > t.a) > 0; +a b c +1 1 1 +2 2 2 +3 3 3 +set tidb_enable_clustered_index=INT_ONLY; +drop table if exists t1, t2; +create table t1(a int, b int) partition by range(a) (partition p0 values less than(10), partition p1 values less than(20), partition p2 values less than(30), partition p3 values less than(40)); +create table t2(a int, b int) partition by hash(a) partitions 4; +insert into t1 values (5, 5), (15, 15), (25, 25), (35, 35); +insert into t2 values (5, 5), (15, 15), (25, 25), (35, 35); +select (select count(*) from t2 where t2.a > t1.b and t2.a=20), (select max(t2.b) from t2 where t2.a between t1.a and 20) from t1 where t1.a > 10; +(select count(*) from t2 where t2.a > t1.b and t2.a=20) (select max(t2.b) from t2 where t2.a between t1.a and 20) +0 NULL +0 NULL +0 15 +set tidb_enable_parallel_apply=default; +set tidb_enable_prepared_plan_cache=default; +set tidb_enable_clustered_index=default; +set tidb_enable_parallel_apply=true; +drop table if exists t, t2; +create table t(a bigint, b int); +create table t2(a int, b int); +insert into t values (1, 1), (2, 2), (3, 3), (4, 4), (1, 1), (2, 2), (3, 3), (4, 4); +insert into t2 values (1, 1), (2, 2), (3, 3), (4, 4), (1, 1), (2, 2), (3, 3), (4, 4); +delete from t where (select min(t2.a) * 2 from t2 where t2.a < t.a) > 1; +select * from t; +a b +1 1 +1 1 +drop table if exists t; +create table t(a int, b int, c int); +insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (1, 1, 1), (2, 2, 2), (3, 3, 3); +insert into t (select * from t where (select count(*) from t t1 where t1.b > t.a) > 2); +select * from t; +a b c +1 1 1 +1 1 1 +1 1 1 +1 1 1 +2 2 2 +2 2 2 +3 3 3 +3 3 3 +drop table if exists t, t2; +create table t(a smallint, b int); +create table t2(a int, b int); +insert into t values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3); +insert into t2 values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3); +update t set a = a + 1 where 
(select count(*) from t2 where t2.a <= t.a) in (1, 2); +select * from t; +a b +2 1 +2 1 +2 2 +2 2 +3 3 +3 3 +drop table if exists t, t2; +create table t(a tinyint, b int, unique index idx(a)); +create table t2(a tinyint, b int); +insert into t values (1, 1), (2, 2), (3, 3), (4, 4); +insert into t2 values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3); +replace into t (select pow(t2.a, 2), t2.b from t2 where (select min(t.a) from t where t.a > t2.a) between 1 and 5); +select * from t; +a b +1 1 +2 2 +3 3 +4 2 +9 3 +drop table if exists t1, t2; +create table t1(a int, b int); +create table t2(a int, b int); +insert into t1 values (1, 2), (1, 3); +begin; +insert into t1 values (1, 4), (2, 3), (2, 5); +insert into t2 values (2, 3), (3, 4); +select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b); +a b +1 4 +2 5 +delete from t1 where a = 1; +select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b); +a b +2 5 +commit; +select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b); +a b +2 5 +set tidb_enable_parallel_apply=default; +set tidb_enable_parallel_apply=true; +drop table if exists t1, t2; +create table t1(a int); +create table t2(a int); +select case when t1.a is null +then (select t2.a from t2 where t2.a = t1.a limit 1) else t1.a end a +from t1 where t1.a=1 order by a limit 1; +a +set tidb_enable_parallel_apply=default; diff --git a/tests/integrationtest/r/executor/prepared.result b/tests/integrationtest/r/executor/prepared.result index ded5242354bf4..fc2ccce32e8a1 100644 --- a/tests/integrationtest/r/executor/prepared.result +++ b/tests/integrationtest/r/executor/prepared.result @@ -179,3 +179,139 @@ data 1.100 11.110 set @@tidb_enable_prepared_plan_cache=default; +drop table if exists t; +create table t (id int, KEY id (id)); +prepare stmt from 'select * from t limit ? 
offset ?'; +prepare stmt from 'select b from t'; +Error 1054 (42S22): Unknown column 'b' in 'field list' +prepare stmt from '(select * FROM t) union all (select * FROM t) order by a limit ?'; +Error 1054 (42S22): Unknown column 'a' in 'order clause' +drop table if exists t; +prepare stmt from 'create table t (id int, KEY id (id))'; +prepare stmt0 from "create table t0(a int primary key)"; +prepare stmt1 from "execute stmt0"; +Error 1295 (HY000): This command is not supported in the prepared statement protocol yet +prepare stmt2 from "deallocate prepare stmt0"; +Error 1295 (HY000): This command is not supported in the prepared statement protocol yet +prepare stmt4 from "prepare stmt3 from 'create table t1(a int, b int)'"; +Error 1295 (HY000): This command is not supported in the prepared statement protocol yet +drop table if exists t; +create table t (id int primary key, num int); +insert into t values (1, 1); +insert into t values (2, 2); +insert into t values (3, 3); +prepare stmt from 'select /*+ IGNORE_PLAN_CACHE() */ * from t where id=?'; +set @ignore_plan_doma = 1; +execute stmt using @ignore_plan_doma; +id num +1 1 +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +prepare stmt from "select /*+ max_execution_time(10) */ sleep(3)"; +set @a=now(); +execute stmt; +sleep(3) +1 +select timediff(now(), @a) < 3; +timediff(now(), @a) < 3 +1 +set @a=now(); +select /*+ max_execution_time(10) */ sleep(3); +sleep(3) +1 +select timediff(now(), @a) < 3; +timediff(now(), @a) < 3 +1 +drop table if exists t; +create table t (i int); +prepare stmt from 'with a as (select /*+ qb_name(qb1) */ * from t) select /*+ leading(@qb1)*/ * from a;'; +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +prepare stmt from 'SELECT IF(?, 1, 0);'; +set @a=1, @b=null, @c=0; +execute stmt using @a; +IF(?, 1, 0) +1 +execute stmt using @b; +IF(?, 1, 0) +0 +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +execute stmt using @c; +IF(?, 1, 0) +0 
+select @@last_plan_from_cache; +@@last_plan_from_cache +0 +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; +set tidb_enable_prepared_plan_cache=1; +drop table if exists IDT_26207; +CREATE TABLE IDT_26207 (col1 bit(1)); +insert into IDT_26207 values(0x0), (0x1); +prepare stmt from 'select hex(t1.col1) from IDT_26207 as t1 left join IDT_26207 as t2 on t1.col1 = t2.col1 where t1.col1 in (?, ?, ?)'; +set @a=0x01, @b=0x01, @c=0x01; +execute stmt using @a,@b,@c; +hex(t1.col1) +1 +set @a=0x00, @b=0x00, @c=0x01; +execute stmt using @a,@b,@c; +hex(t1.col1) +0 +1 +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +drop table if exists IDT_MC21780; +CREATE TABLE IDT_MC21780 ( +COL1 timestamp NULL DEFAULT NULL, +COL2 timestamp NULL DEFAULT NULL, +COL3 timestamp NULL DEFAULT NULL, +KEY U_M_COL (COL1,COL2) +); +insert into IDT_MC21780 values("1970-12-18 10:53:28", "1970-12-18 10:53:28", "1970-12-18 10:53:28"); +prepare stmt from 'select/*+ hash_join(t1) */ * from IDT_MC21780 t1 join IDT_MC21780 t2 on t1.col1 = t2.col1 where t1. col1 < ? and t2. 
col1 in (?, ?, ?);'; +set @a="2038-01-19 03:14:07", @b="2038-01-19 03:14:07", @c="2038-01-19 03:14:07", @d="2038-01-19 03:14:07"; +execute stmt using @a,@b,@c,@d; +COL1 COL2 COL3 COL1 COL2 COL3 +set @a="1976-09-09 20:21:11", @b="2021-07-14 09:28:16", @c="1982-01-09 03:36:39", @d="1970-12-18 10:53:28"; +execute stmt using @a,@b,@c,@d; +COL1 COL2 COL3 COL1 COL2 COL3 +1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 +select @@last_plan_from_cache; +@@last_plan_from_cache +1 +set tidb_enable_prepared_plan_cache=default; +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +drop table if exists tmp2; +create temporary table tmp2 (a int, b int, key(a), key(b)); +prepare stmt from 'select * from tmp2;'; +execute stmt; +a b +execute stmt; +a b +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +drop table if exists tmp_t; +create global temporary table tmp_t (id int primary key, a int, b int, index(a)) on commit delete rows; +prepare stmt from 'select * from tmp_t;'; +execute stmt; +id a b +execute stmt; +id a b +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; +set tidb_enable_prepared_plan_cache=1; +set @@tidb_txn_mode = 'pessimistic'; +prepare stmt1 from 'do 1'; +set @@tidb_txn_mode = 'optimistic'; +prepare stmt1 from 'do 1'; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_txn_mode=default; diff --git a/tests/integrationtest/r/executor/revoke.result b/tests/integrationtest/r/executor/revoke.result new file mode 100644 index 0000000000000..2ba92d9cf193c --- /dev/null +++ b/tests/integrationtest/r/executor/revoke.result @@ -0,0 +1,85 @@ +drop user if exists test; +drop table if exists test1; +CREATE USER test; +CREATE TABLE executor__revoke.test1(c1 int); +GRANT SELECT ON executor__revoke.test1 TO test; +REVOKE SELECT ON executor__revoke.test1 from 
test; +SELECT Column_priv FROM mysql.tables_priv WHERE User="test" ; +Column_priv +drop user if exists test; +CREATE USER test; +GRANT SELECT(Host) ON mysql.db TO test; +GRANT SELECT(DB) ON mysql.db TO test; +REVOKE SELECT(Host) ON mysql.db FROM test; +SELECT count(Column_priv) FROM mysql.columns_priv WHERE User="test" and Column_name ='Host' ; +count(Column_priv) +0 +SELECT count(Column_priv) FROM mysql.columns_priv WHERE User="test" and Column_name ='DB' ; +count(Column_priv) +1 +DROP USER if exists dyn; +create user dyn; +GRANT BACKUP_Admin ON *.* TO dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +USER HOST PRIV WITH_GRANT_OPTION +dyn % BACKUP_ADMIN N +REVOKE BACKUP_Admin,system_variables_admin ON executor__revoke.* FROM dyn; +Error 3619 (HY000): Illegal privilege level specified for BACKUP_ADMIN,SYSTEM_VARIABLES_ADMIN +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +USER HOST PRIV WITH_GRANT_OPTION +dyn % BACKUP_ADMIN N +REVOKE BACKUP_Admin ON *.* FROM dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +USER HOST PRIV WITH_GRANT_OPTION +REVOKE bogus ON *.* FROM dyn; +SHOW WARNINGS; +Level Code Message +Warning 3929 Dynamic privilege 'BOGUS' is not registered with the server. 
+GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN ON *.* TO dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +USER HOST PRIV WITH_GRANT_OPTION +dyn % BACKUP_ADMIN N +dyn % SYSTEM_VARIABLES_ADMIN N +REVOKE BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN ON *.* FROM dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +USER HOST PRIV WITH_GRANT_OPTION +GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT, INSERT ON *.* TO dyn; +REVOKE BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT, INSERT ON *.* FROM dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +USER HOST PRIV WITH_GRANT_OPTION +GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT ON *.* TO dyn WITH GRANT OPTION; +REVOKE BACKUP_ADMIN, SELECT, GRANT OPTION ON *.* FROM dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +USER HOST PRIV WITH_GRANT_OPTION +dyn % SYSTEM_VARIABLES_ADMIN Y +drop DATABASE if exists d1; +drop user if exists issue28533; +CREATE DATABASE d1; +USE d1; +CREATE TABLE t1 (a int); +CREATE USER issue28533; +GRANT ALTER ON d1.t1 TO issue28533; +GRANT INSERT, CREATE ON d1.t2 TO issue28533; +DROP TABLE t1; +REVOKE ALTER ON d1.t1 FROM issue28533; +DROP USER issue28533; +DROP TABLE IF EXISTS t1; +DROP DATABASE IF EXISTS d1; +use executor__revoke; +drop user if exists 't1234'@'%'; +create table if not exists xx (id int); +CREATE USER 't1234'@'%' IDENTIFIED BY 'sNGNQo12fEHe0n3vU'; +GRANT USAGE ON * TO 't1234'@'%'; +GRANT USAGE ON executor__revoke.* TO 't1234'@'%'; +GRANT USAGE ON executor__revoke.xx TO 't1234'@'%'; +REVOKE USAGE ON * FROM 't1234'@'%'; +REVOKE USAGE ON executor__revoke.* FROM 't1234'@'%'; +REVOKE USAGE ON executor__revoke.xx FROM 't1234'@'%'; +drop table if exists TABLE_PRIV; +CREATE TABLE executor__revoke.TABLE_PRIV(id int, name 
varchar(20)); +GRANT SELECT ON executor__revoke.table_priv TO 'root'@'%'; +revoke SELECT ON executor__revoke.TABLE_PRIV from 'root'@'%'; +GRANT SELECT ON executor__revoke.* TO 'root'@'%'; +revoke SELECT ON executor__revoke.* from 'root'@'%'; +GRANT SELECT (id), INSERT (ID, name) ON executor__revoke.TABLE_PRIV TO 'root'@'%'; +REVOKE SELECT (ID) ON executor__revoke.taBle_priv from 'root'@'%'; diff --git a/tests/integrationtest/t/executor/insert.test b/tests/integrationtest/t/executor/insert.test new file mode 100644 index 0000000000000..89c9c16cd9269 --- /dev/null +++ b/tests/integrationtest/t/executor/insert.test @@ -0,0 +1,926 @@ +# TestClusterIndexInsertOnDuplicateKey +set tidb_enable_clustered_index = on; +drop table if exists t; +create table t(a char(20), b int, primary key(a)); +insert into t values('aa', 1), ('bb', 1); +-- error 1062 +insert into t values('aa', 2); +drop table t; +create table t(a char(20), b varchar(30), c varchar(10), primary key(a, b, c)); +insert into t values ('a', 'b', 'c'), ('b', 'a', 'c'); +-- error 1062 +insert into t values ('a', 'b', 'c'); +set tidb_enable_clustered_index = default; + +# TestPaddingCommonHandle +set tidb_enable_clustered_index = on; +drop table if exists t1; +create table t1(c1 decimal(6,4), primary key(c1)); +insert into t1 set c1 = 0.1; +insert into t1 set c1 = 0.1 on duplicate key update c1 = 1; +select * from t1; +set tidb_enable_clustered_index = default; + +# TestInsertReorgDelete +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2004'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = 2004; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 bit); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 smallint 
unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 int unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 smallint); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 int); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 decimal(6,4)); +insert into t1 set c1 = '1.1'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 decimal); +insert into t1 set c1 = 1.1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 numeric); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 float); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.3; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 real); +insert into t1 set c1 = 1.4; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 date); +insert into t1 set c1 = '2020-01-01'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 time); +insert into t1 set c1 = '20:00:00'; +alter table t1 add 
index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 datetime); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 timestamp); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2020'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 char(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 varchar(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 binary(3)); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 varbinary(3)); +insert into t1 set c1 = 'b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 blob); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 text); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 enum('a', 'b')); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 set('a', 'b')); +insert into t1 set c1 = 'a,b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +# TestUpdateDuplicateKey +drop table if exists c; +create table c(i int,j int,k int,primary 
key(i,j,k)); +insert into c values(1,2,3); +insert into c values(1,2,4); +-- error 1062 +update c set i=1,j=2,k=4 where i=1 and j=2 and k=3; + +# TestIssue37187 +drop table if exists t1, t2; +create table t1 (a int(11) ,b varchar(100) ,primary key (a)); +create table t2 (c int(11) ,d varchar(100) ,primary key (c)); +prepare in1 from 'insert into t1 (a,b) select c,null from t2 t on duplicate key update b=t.d'; +execute in1; + +# TestInsertWrongValueForField +drop table if exists t1; +create table t1(a bigint); +-- error 1366 +insert into t1 values("asfasdfsajhlkhlksdaf"); +drop table if exists t1; +create table t1(a varchar(10)) charset ascii; +-- error 1366 +insert into t1 values('我'); +drop table if exists t1; +create table t1(a char(10) charset utf8); +insert into t1 values('我'); +alter table t1 add column b char(10) charset ascii as ((a)); +select * from t1; +drop table if exists t; +create table t (a year); +-- error 1264 +insert into t values(2156); +DROP TABLE IF EXISTS ts; +CREATE TABLE ts (id int DEFAULT NULL, time1 TIMESTAMP NULL DEFAULT NULL); +SET @@sql_mode=''; +INSERT INTO ts (id, time1) VALUES (1, TIMESTAMP '1018-12-23 00:00:00'); +SHOW WARNINGS; +SELECT * FROM ts ORDER BY id; +SET @@sql_mode='STRICT_TRANS_TABLES'; +-- error 1292 +INSERT INTO ts (id, time1) VALUES (2, TIMESTAMP '1018-12-24 00:00:00'); +DROP TABLE ts; +CREATE TABLE t0(c0 SMALLINT AUTO_INCREMENT PRIMARY KEY); +INSERT IGNORE INTO t0(c0) VALUES (194626268); +INSERT IGNORE INTO t0(c0) VALUES ('*'); +SHOW WARNINGS; +SET @@sql_mode=default; + +# TestInsertValueForCastDecimalField +drop table if exists t1; +create table t1(a decimal(15,2)); +insert into t1 values (1111111111111.01); +select * from t1; +select cast(a as decimal) from t1; + +# TestInsertForMultiValuedIndex +drop table if exists t1; +create table t1(a json, b int, unique index idx((cast(a as signed array)))); +insert into t1 values ('[1,11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +-- error 1062 +insert 
into t1 values ('[2, 222]', 2); +replace into t1 values ('[1, 10]', 10); +select * from t1; +replace into t1 values ('[1, 2]', 1); +select * from t1; +replace into t1 values ('[1, 11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +insert ignore into t1 values ('[1]', 2); +select * from t1; +insert ignore into t1 values ('[1, 2]', 2); +select * from t1; +insert into t1 values ('[2]', 2) on duplicate key update b = 10; +select * from t1; +-- error 1062 +insert into t1 values ('[2, 1]', 2) on duplicate key update a = '[1,2]'; +-- error 1062 +insert into t1 values ('[1,2]', 2) on duplicate key update a = '[1,2]'; +-- error 1062 +insert into t1 values ('[11, 22]', 2) on duplicate key update a = '[1,2]'; + +# TestInsertDateTimeWithTimeZone +set time_zone="+09:00"; +drop table if exists t; +create table t (id int, c1 datetime not null default CURRENT_TIMESTAMP); +set TIMESTAMP = 1234; +insert t (id) values (1); +select * from t; +drop table if exists t; +create table t (dt datetime); +set @@time_zone='+08:00'; +delete from t; +insert into t values ('2020-10-22'); +select * from t; +delete from t; +insert into t values ('2020-10-22-16'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16-31'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16:31-15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16:31:15-10'); +select * from t; +delete from t; +insert into t values ('2020.10-22'); +select * from t; +delete from t; +insert into t values ('2020-10.22-16'); +select * from t; +delete from t; +insert into t values ('2020-10-22.16-31'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16.31-15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16.31.15+14'); +select * from t; +delete from t; +insert into t values ('2020-10:22'); +select * from t; +delete from t; +insert into t values ('2020-10-22:16'); +select * from t; +delete from t; +insert into t 
values ('2020-10-22-16:31'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16-31:15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16.31.15+09:30'); +select * from t; +delete from t; +insert into t values ('2020.10-22:16'); +select * from t; +delete from t; +insert into t values ('2020-10.22-16:31'); +select * from t; +delete from t; +insert into t values ('2020-10-22.16-31:15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16:31.15+09:30'); +select * from t; +drop table if exists t; +create table t (dt datetime, ts timestamp); +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+00:00'; +select * from t; +delete from t; +set @@time_zone='-08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+08:00'; +select * from t; +delete from t; +set @@time_zone='-03:00'; +insert into t values ('2020-10-22T16:53:40+03:00', '2020-10-22T16:53:40+03:00'); +set @@time_zone='+08:00'; +select * from t; +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40+08:00', '2020-10-22T16:53:40+08:00'); +set @@time_zone='+08:00'; +select * from t; +drop table if exists t; +create table t (ts timestamp); +insert into t values ('2020-10-22T12:00:00Z'), ('2020-10-22T13:00:00Z'), ('2020-10-22T14:00:00Z'); +select count(*) from t where ts > '2020-10-22T12:00:00Z'; +set @@time_zone='+08:00'; +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10+00:00', '2020-10-27T14:39:10.10+00:00'); +select * from t; +drop table if exists t; +create table t (dt datetime(1), ts timestamp(1)); +insert into t values ('2020-10-27T14:39:10.3+0200', '2020-10-27T14:39:10.3+0200'); +select * from t; +drop table if exists t; +create table t (dt datetime(6), ts timestamp(6)); +insert into t values ('2020-10-27T14:39:10.3-02', 
'2020-10-27T14:39:10.3-02'); +select * from t; +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10Z', '2020-10-27T14:39:10.10Z'); +select * from t; +set time_zone=default; +set timestamp=default; + +# TestInsertZeroYear +drop table if exists t1; +create table t1(a year(4)); +insert into t1 values(0000),(00),("0000"),("000"), ("00"), ("0"), (79), ("79"); +select * from t1; +drop table if exists t; +create table t(f_year year NOT NULL DEFAULT '0000')ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t values(); +select * from t; +insert into t values('0000'); +select * from t; + +# TestAllowInvalidDates +drop table if exists t1, t2, t3, t4; +create table t1(d date); +create table t2(d datetime); +create table t3(d date); +create table t4(d datetime); +set sql_mode='STRICT_TRANS_TABLES,ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +select year(d), month(d), day(d) from t2; +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; + +truncate t1;truncate t2;truncate t3;truncate t4; +set sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values 
('2019-02-31'); +select year(d), month(d), day(d) from t1; +select year(d), month(d), day(d) from t2; +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; +set sql_mode=default; + +# TestPartitionInsertOnDuplicate +drop table if exists t1, t2, t3; +create table t1 (a int,b int,primary key(a,b)) partition by range(a) (partition p0 values less than (100),partition p1 values less than (1000)); +insert into t1 set a=1, b=1; +insert into t1 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t1; +create table t2 (a int,b int,primary key(a,b)) partition by hash(a) partitions 4; +insert into t2 set a=1,b=1; +insert into t2 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t2; +CREATE TABLE t3 (a int, b int, c int, d int, e int, + PRIMARY KEY (a,b), + UNIQUE KEY (b,c,d) +) PARTITION BY RANGE ( b ) ( + PARTITION p0 VALUES LESS THAN (4), + PARTITION p1 VALUES LESS THAN (7), + PARTITION p2 VALUES LESS THAN (11) +); +insert into t3 values (1,2,3,4,5); +insert into t3 values (1,2,3,4,5),(6,2,3,4,6) on duplicate key update e = e + values(e); +select * from t3; + +# TestBit +drop table if exists t1; +create table t1 (a bit(3)); +-- error 1406 +insert into t1 values(-1); +-- error 1406 +insert into t1 values(9); +create table t64 (a bit(64)); +insert into t64 values(-1); +insert into t64 values(18446744073709551615); +-- error 1264 +insert into t64 values(18446744073709551616); + +# TestJiraIssue5366 +drop table if exists bug; +create table bug (a varchar(100)); +insert into bug select ifnull(JSON_UNQUOTE(JSON_EXTRACT('[{"amount":2000,"feeAmount":0,"merchantNo":"20190430140319679394","shareBizCode":"20160311162_SECOND"}]', '$[0].merchantNo')),'') merchant_no union SELECT '20180531557' merchant_no; +--sorted_result +select * from bug; + +# TestDMLCast +drop table if exists t; +create table t (a int, b double); +insert into t values (ifnull('',0)+0, 0); +insert into t values 
(0, ifnull('',0)+0); +select * from t; +-- error 1366 +insert into t values ('', 0); +-- error 1366 +insert into t values (0, ''); +-- error 1292 +update t set a = ''; +-- error 1292 +update t set b = ''; +update t set a = ifnull('',0)+0; +update t set b = ifnull('',0)+0; +delete from t where a = ''; +select * from t; + +# TestInsertFloatOverflow +drop table if exists t,t1; +create table t(col1 FLOAT, col2 FLOAT(10,2), col3 DOUBLE, col4 DOUBLE(10,2), col5 DECIMAL, col6 DECIMAL(10,2)); +-- error 1264 +insert into t values (-3.402823466E+68, -34028234.6611, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99); +-- error 1264 +insert into t values (-34028234.6611, -3.402823466E+68, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99); +create table t1(id1 float,id2 float); +insert ignore into t1 values(999999999999999999999999999999999999999,-999999999999999999999999999999999999999); +select @@warning_count; +select convert(id1,decimal(65)),convert(id2,decimal(65)) from t1; + +# TestTextTooLongError +# Fix https://github.com/pingcap/tidb/issues/32601 +set sql_mode = 'ONLY_FULL_GROUP_BY,STRICT_ALL_TABLES,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; +# For max_allowed_packet default value is big enough to ensure tinytext, text can test correctly +drop table if exists t1; +CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4); +-- error 1406 +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128)); +drop table if exists t1; +CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4); +-- error 1406 +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768)); +drop table if exists t1; +CREATE TABLE t1(c1 mediumtext); +-- error 1406 +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 8777215)); +# For long text, max_allowed_packet default value can not allow 4GB package, skip the test case. +# Set non strict sql_mode, we are not supposed to raise an error but to truncate the value. 
+set sql_mode = 'ONLY_FULL_GROUP_BY,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; +drop table if exists t1; +CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128)); +select length(c1) from t1; +drop table if exists t1; +CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768)); +select length(c1) from t1; +# For mediumtext or bigger size, for tikv limit, we will get:ERROR 8025 (HY000): entry too large, the max entry size is 6291456, the size of data is 16777247, no need to test. +set sql_mode = default; + +# TestAutoRandomIDExplicit +set @@allow_auto_random_explicit_insert = true; +drop table if exists ar; +create table ar (id bigint key clustered auto_random, name char(10)); +insert into ar(id) values (1); +select id from ar; +select last_insert_id(); +delete from ar; +insert into ar(id) values (1), (2); +select id from ar; +select last_insert_id(); +delete from ar; +drop table ar; +set @@allow_auto_random_explicit_insert = default; + +# TestInsertErrorMsg +drop table if exists t, t1; +create table t (a int primary key, b datetime, d date); +-- error 1292 +insert into t values (1, '2019-02-11 30:00:00', '2019-01-31'); +CREATE TABLE t1 (a BINARY(16) PRIMARY KEY); +INSERT INTO t1 VALUES (AES_ENCRYPT('a','a')); +-- error 1062 +INSERT INTO t1 VALUES (AES_ENCRYPT('a','a')); +INSERT INTO t1 VALUES (AES_ENCRYPT('b','b')); +-- error 1062 +INSERT INTO t1 VALUES (AES_ENCRYPT('b','b')); +drop table if exists t1; +create table t1 (a bit primary key) engine=innodb; +insert into t1 values (b'0'); +-- error 1062 +insert into t1 values (b'0'); + +# TestIssue16366 +drop table if exists t; +create table t(c numeric primary key); +insert ignore into t values(null); +-- error 1062 +insert into t values(0); + +# TestClusterPrimaryTablePlainInsert +set tidb_enable_clustered_index = on; +drop table if exists t1pk; +create table t1pk(id varchar(200) primary key, v 
int); +insert into t1pk(id, v) values('abc', 1); +select * from t1pk; +set @@tidb_constraint_check_in_place=true; +-- error 1062 +insert into t1pk(id, v) values('abc', 2); +set @@tidb_constraint_check_in_place=false; +-- error 1062 +insert into t1pk(id, v) values('abc', 3); +select v, id from t1pk; +select id from t1pk where id = 'abc'; +select v, id from t1pk where id = 'abc'; +drop table if exists t3pk; +create table t3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3)); +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 1); +select * from t3pk; +set @@tidb_constraint_check_in_place=true; +-- error 1062 +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 2); +set @@tidb_constraint_check_in_place=false; +-- error 1062 +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 3); +select v, id3, id2, id1 from t3pk; +select id3, id2, id1 from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'; +select id3, id2, id1, v from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'; +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 101, 1); +insert into t3pk(id1, id2, id3, v) values('abc', 'zzz', 101, 1); +drop table if exists t1pku; +create table t1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into t1pku(id, uk, v) values('abc', 1, 2); +select * from t1pku where id = 'abc'; +-- error 1062 +insert into t1pku(id, uk, v) values('aaa', 1, 3); +select * from t1pku; +select * from t3pk where (id1, id2, id3) in (('abc', 'xyz', 100), ('abc', 'xyz', 101), ('abc', 'zzz', 101)); +set @@tidb_constraint_check_in_place=default; +set tidb_enable_clustered_index = default; + +# TestClusterPrimaryTableInsertIgnore +set tidb_enable_clustered_index = on; +drop table if exists it1pk; +create table it1pk(id varchar(200) primary key, v int); +insert into it1pk(id, v) values('abc', 1); +insert ignore into it1pk(id, v) values('abc', 2); +select * from it1pk where id = 'abc'; +drop table if exists 
it2pk; +create table it2pk(id1 varchar(200), id2 varchar(200), v int, primary key(id1, id2)); +insert into it2pk(id1, id2, v) values('abc', 'cba', 1); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +insert ignore into it2pk(id1, id2, v) values('abc', 'cba', 2); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +drop table if exists it1pku; +create table it1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into it1pku(id, uk, v) values('abc', 1, 2); +select * from it1pku where id = 'abc'; +insert ignore into it1pku(id, uk, v) values('aaa', 1, 3), ('bbb', 2, 1); +select * from it1pku; +set tidb_enable_clustered_index = default; + +# TestClusterPrimaryTableInsertDuplicate +set tidb_enable_clustered_index = on; +drop table if exists dt1pi; +create table dt1pi(id varchar(200) primary key, v int); +insert into dt1pi(id, v) values('abb', 1),('acc', 2); +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1; +select * from dt1pi; +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1pi; +drop table if exists dt1piu; +create table dt1piu(id varchar(200) primary key, uk int, v int, unique key uuk(uk)); +insert into dt1piu(id, uk, v) values('abb', 1, 10),('acc', 2, 20); +insert into dt1piu(id, uk, v) values('xyz', 1, 100) on duplicate key update v = v + 1; +select * from dt1piu; +insert into dt1piu(id, uk, v) values('abb', 1, 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1piu; +drop table if exists ts1pk; +create table ts1pk(id1 timestamp, id2 timestamp, v int, primary key(id1, id2)); +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 1); +select id1, id2, v from ts1pk; +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v); +select id1, id2, v from ts1pk; +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 
11:11:11', 2) on duplicate key update v = values(v), id1 = '2018-01-01 11:11:12'; +select id1, id2, v from ts1pk; +set tidb_enable_clustered_index = default; + +# TestClusterPrimaryKeyForIndexScan +set tidb_enable_clustered_index = on; +drop table if exists pkt1; +CREATE TABLE pkt1 (a varchar(255), b int, index idx(b), primary key(a,b)); +insert into pkt1 values ('aaa',1); +select b from pkt1 where b = 1; +drop table if exists pkt2; +CREATE TABLE pkt2 (a varchar(255), b int, unique index idx(b), primary key(a,b)); +insert into pkt2 values ('aaa',1); +select b from pkt2 where b = 1; +drop table if exists issue_18232; +create table issue_18232 (a int, b int, c int, d int, primary key (a, b), index idx(c)); +select a from issue_18232 use index (idx); +select b from issue_18232 use index (idx); +select a,b from issue_18232 use index (idx); +select c from issue_18232 use index (idx); +select a,c from issue_18232 use index (idx); +select b,c from issue_18232 use index (idx); +select a,b,c from issue_18232 use index (idx); +select d from issue_18232 use index (idx); +select a,d from issue_18232 use index (idx); +select b,d from issue_18232 use index (idx); +select a,b,d from issue_18232 use index (idx); +select c,d from issue_18232 use index (idx); +select a,c,d from issue_18232 use index (idx); +select b,c,d from issue_18232 use index (idx); +select a,b,c,d from issue_18232 use index (idx); +set tidb_enable_clustered_index = default; + +# TestIssue20768 +drop table if exists t1, t2; +create table t1(a year, primary key(a)); +insert ignore into t1 values(null); +create table t2(a int, key(a)); +insert into t2 values(0); +select /*+ hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_join(t2) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ merge_join(t1) */ * from 
t1 join t2 on t1.a = t2.a; + +# TestIssue10402 +drop table if exists vctt; +create table vctt (v varchar(4), c char(4)); +insert into vctt values ('ab ', 'ab '); +select * from vctt; +delete from vctt; +insert into vctt values ('ab\n\n\n', 'ab\n\n\n'), ('ab\t\t\t', 'ab\t\t\t'), ('ab ', 'ab '), ('ab\r\r\r', 'ab\r\r\r'); +show warnings; +select * from vctt; +select length(v), length(c) from vctt; + +# TestDuplicatedEntryErr +# See https://github.com/pingcap/tidb/issues/24582 +drop table if exists t1; +create table t1(a int, b varchar(20), primary key(a,b(3)) clustered); +insert into t1 values(1,'aaaaa'); +-- error 1062 +insert into t1 values(1,'aaaaa'); +-- error 1062 +insert into t1 select 1, 'aaa'; +insert into t1 select 1, 'bb'; +-- error 1062 +insert into t1 select 1, 'bb'; + +# TestBinaryLiteralInsertToEnum +drop table if exists bintest; +create table bintest (h enum(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; + +# TestBinaryLiteralInsertToSet +drop table if exists bintest; +create table bintest (h set(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; + +# TestGlobalTempTableAutoInc +drop table if exists temp_test; +create global temporary table temp_test(id int primary key auto_increment) on commit delete rows; + +## Data is cleared after transaction auto commits. +insert into temp_test(id) values(0); +select * from temp_test; + +## Data is not cleared inside a transaction. +begin; +insert into temp_test(id) values(0); +select * from temp_test; +commit; + +## AutoID allocator is cleared. 
+begin; +insert into temp_test(id) values(0); +select * from temp_test; +## Test whether auto-inc is incremental +insert into temp_test(id) values(0); +select id from temp_test order by id; +commit; + +## multi-value insert +begin; +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +commit; + +## rebase +begin; +insert into temp_test(id) values(10); +insert into temp_test(id) values(0); +select id from temp_test order by id; +insert into temp_test(id) values(20), (30); +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +commit; +drop table if exists temp_test; + +# TestGlobalTempTableRowID +drop table if exists temp_test; +create global temporary table temp_test(id int) on commit delete rows; + +## Data is cleared after transaction auto commits. +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; + +## Data is not cleared inside a transaction. +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +commit; + +## AutoID allocator is cleared. 
+begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +## Test whether row id is incremental +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test order by _tidb_rowid; +commit; + +## multi-value insert +begin; +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +commit; +drop table if exists temp_test; + +# TestIssue26762 +drop table if exists t1; +create table t1(c1 date); +-- error 1292 +insert into t1 values('2020-02-31'); +set @@sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values('2020-02-31'); +select * from t1; +set @@sql_mode='STRICT_TRANS_TABLES'; +-- error 1292 +insert into t1 values('2020-02-31'); +set sql_mode=default; + +# TestStringtoDecimal +drop table if exists t; +create table t (id decimal(10)); +-- error 1366 +insert into t values('1sdf'); +-- error 1366 +insert into t values('1edf'); +-- error 1366 +insert into t values('12Ea'); +-- error 1366 +insert into t values('1E'); +-- error 1366 +insert into t values('1e'); +-- error 1366 +insert into t values('1.2A'); +-- error 1366 +insert into t values('1.2.3.4.5'); +-- error 1366 +insert into t values('1.2.'); +-- error 1366 +insert into t values('1,999.00'); +## TODO: MySQL8.0 reports Note 1265 Data truncated for column 'id' at row 1 +insert into t values('12e-3'); +show warnings; +select id from t; +drop table if exists t; + +# TestReplaceAllocatingAutoID +# https://github.com/pingcap/tidb/issues/29483 +SET sql_mode='NO_ENGINE_SUBSTITUTION'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a tinyint not null auto_increment primary key, b char(20)); +INSERT INTO t1 VALUES (127,'maxvalue'); +## Note that this error is different from MySQL's duplicated primary key error +-- error 1467 +REPLACE INTO t1 VALUES (0,'newmaxvalue'); +set sql_mode=default; + +# TestInsertIntoSelectError +DROP TABLE IF EXISTS t1; 
+CREATE TABLE t1(a INT) ENGINE = InnoDB; +INSERT IGNORE into t1(SELECT SLEEP(NULL)); +SHOW WARNINGS; +INSERT IGNORE into t1(SELECT SLEEP(-1)); +SHOW WARNINGS; +INSERT IGNORE into t1(SELECT SLEEP(1)); +SELECT * FROM t1; +DROP TABLE t1; + +# TestIssue32213 +drop table if exists t1; +create table t1(c1 float); +insert into t1 values(999.99); +select cast(t1.c1 as decimal(4, 1)) from t1; +select cast(t1.c1 as decimal(5, 1)) from t1; +drop table if exists t1; +create table t1(c1 decimal(6, 4)); +insert into t1 values(99.9999); +select cast(t1.c1 as decimal(5, 3)) from t1; +select cast(t1.c1 as decimal(6, 3)) from t1; + +# TestInsertBigScientificNotation +# https://github.com/pingcap/tidb/issues/47787 +drop table if exists t1; +create table t1(id int, a int); +set @@SQL_MODE='STRICT_TRANS_TABLES'; +-- error 1264 +insert into t1 values(1, '1e100'); +-- error 1264 +insert into t1 values(2, '-1e100'); +select id, a from t1; +set @@SQL_MODE=''; +insert into t1 values(1, '1e100'); +show warnings; +insert into t1 values(2, '-1e100'); +show warnings; +select id, a from t1 order by id asc; +set sql_mode=default; + +# TestUnsignedDecimalFloatInsertNegative +# https://github.com/pingcap/tidb/issues/47945 +drop table if exists tf; +create table tf(a float(1, 0) unsigned); +-- error 1264 +insert into tf values('-100'); +set @@sql_mode=''; +insert into tf values('-100'); +select * from tf; +set @@sql_mode=default; + diff --git a/tests/integrationtest/t/executor/parallel_apply.test b/tests/integrationtest/t/executor/parallel_apply.test new file mode 100644 index 0000000000000..5cb79755f4397 --- /dev/null +++ b/tests/integrationtest/t/executor/parallel_apply.test @@ -0,0 +1,140 @@ +# TestSetTiDBEnableParallelApply +set tidb_enable_parallel_apply=0; +select @@tidb_enable_parallel_apply; +set tidb_enable_parallel_apply=1; +select @@tidb_enable_parallel_apply; +set tidb_enable_parallel_apply=on; +select @@tidb_enable_parallel_apply; +set tidb_enable_parallel_apply=off; +select 
@@tidb_enable_parallel_apply; +-- error 1231 +set tidb_enable_parallel_apply=-1; +-- error 1231 +set tidb_enable_parallel_apply=2; +-- error 1231 +set tidb_enable_parallel_apply=1000; +-- error 1231 +set tidb_enable_parallel_apply='onnn'; +set tidb_enable_parallel_apply=default; + +# TestApplyWithOtherFeatures +set tidb_enable_parallel_apply=true; +drop table if exists t, t1; +create table t(a varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, b int); +create table t1(a varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, b int); +insert into t values ('a', 1), ('A', 2), ('a', 3), ('A', 4); +insert into t1 values ('a', 1), ('A', 2), ('a', 3), ('A', 4); +--sorted_result +select (select min(t1.b) from t1 where t1.a >= t.a), (select sum(t1.b) from t1 where t1.a >= t.a) from t; +--sorted_result +select (select min(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b), (select sum(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b) from t; + +## plan cache +set tidb_enable_prepared_plan_cache=1; +drop table if exists t1, t2; +create table t1(a int, b int); +create table t2(a int, b int); +insert into t1 values (1, 1), (1, 5), (2, 3), (2, 4), (3, 3); +insert into t2 values (0, 1), (2, -1), (3, 2); +prepare stmt from "select * from t1 where t1.b >= (select sum(t2.b) from t2 where t2.a > t1.a and t2.a > ?)"; +set @a=1; +--sorted_result +execute stmt using @a; +set @a=2; +--sorted_result +execute stmt using @a; +select @@last_plan_from_cache; + +## cluster index +set tidb_enable_clustered_index=ON; +drop table if exists t, t2; +create table t(a int, b int, c int, primary key(a, b)); +create table t2(a int, b int, c int, primary key(a, c)); +insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4); +insert into t2 values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4); +--sorted_result +select * from t where (select min(t2.b) from t2 where t2.a > t.a) > 0; +set tidb_enable_clustered_index=INT_ONLY; + +## partitioning table +drop table if exists t1, t2; 
+create table t1(a int, b int) partition by range(a) (partition p0 values less than(10), partition p1 values less than(20), partition p2 values less than(30), partition p3 values less than(40)); +create table t2(a int, b int) partition by hash(a) partitions 4; +insert into t1 values (5, 5), (15, 15), (25, 25), (35, 35); +insert into t2 values (5, 5), (15, 15), (25, 25), (35, 35); +--sorted_result +select (select count(*) from t2 where t2.a > t1.b and t2.a=20), (select max(t2.b) from t2 where t2.a between t1.a and 20) from t1 where t1.a > 10; + +set tidb_enable_parallel_apply=default; +set tidb_enable_prepared_plan_cache=default; +set tidb_enable_clustered_index=default; + +# TestApplyInDML +set tidb_enable_parallel_apply=true; + +## delete +drop table if exists t, t2; +create table t(a bigint, b int); +create table t2(a int, b int); +insert into t values (1, 1), (2, 2), (3, 3), (4, 4), (1, 1), (2, 2), (3, 3), (4, 4); +insert into t2 values (1, 1), (2, 2), (3, 3), (4, 4), (1, 1), (2, 2), (3, 3), (4, 4); +delete from t where (select min(t2.a) * 2 from t2 where t2.a < t.a) > 1; +--sorted_result +select * from t; + +## insert +drop table if exists t; +create table t(a int, b int, c int); +insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (1, 1, 1), (2, 2, 2), (3, 3, 3); +insert into t (select * from t where (select count(*) from t t1 where t1.b > t.a) > 2); +--sorted_result +select * from t; + +## update +drop table if exists t, t2; +create table t(a smallint, b int); +create table t2(a int, b int); +insert into t values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3); +insert into t2 values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3); +update t set a = a + 1 where (select count(*) from t2 where t2.a <= t.a) in (1, 2); +--sorted_result +select * from t; + +## replace +drop table if exists t, t2; +create table t(a tinyint, b int, unique index idx(a)); +create table t2(a tinyint, b int); +insert into t values (1, 1), (2, 2), (3, 3), (4, 4); +insert into t2 values 
(1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3); +replace into t (select pow(t2.a, 2), t2.b from t2 where (select min(t.a) from t where t.a > t2.a) between 1 and 5); +--sorted_result +select * from t; + +## transaction +drop table if exists t1, t2; +create table t1(a int, b int); +create table t2(a int, b int); +insert into t1 values (1, 2), (1, 3); +begin; +insert into t1 values (1, 4), (2, 3), (2, 5); +insert into t2 values (2, 3), (3, 4); +--sorted_result +select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b); +delete from t1 where a = 1; +--sorted_result +select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b); +commit; +--sorted_result +select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b); +set tidb_enable_parallel_apply=default; + +# TestIssue24930 +set tidb_enable_parallel_apply=true; +drop table if exists t1, t2; +create table t1(a int); +create table t2(a int); +select case when t1.a is null + then (select t2.a from t2 where t2.a = t1.a limit 1) else t1.a end a + from t1 where t1.a=1 order by a limit 1; +set tidb_enable_parallel_apply=default; + diff --git a/tests/integrationtest/t/executor/prepared.test b/tests/integrationtest/t/executor/prepared.test index fe0868aee6662..273f1529ee73d 100644 --- a/tests/integrationtest/t/executor/prepared.test +++ b/tests/integrationtest/t/executor/prepared.test @@ -108,3 +108,125 @@ set @b = 11.11; execute stmt using @b; select * from t; set @@tidb_enable_prepared_plan_cache=default; + +# TestPreparedNameResolver +drop table if exists t; +create table t (id int, KEY id (id)); +prepare stmt from 'select * from t limit ? offset ?'; +-- error 1054 +prepare stmt from 'select b from t'; +-- error 1054 +prepare stmt from '(select * FROM t) union all (select * FROM t) order by a limit ?'; + +# TestPreparedDDL +# a 'create table' DDL statement should be accepted if it has no parameters. 
+drop table if exists t; +prepare stmt from 'create table t (id int, KEY id (id))'; + +# TestUnsupportedStmtForPrepare +# https://github.com/pingcap/tidb/issues/17412 +prepare stmt0 from "create table t0(a int primary key)"; +-- error 1295 +prepare stmt1 from "execute stmt0"; +-- error 1295 +prepare stmt2 from "deallocate prepare stmt0"; +-- error 1295 +prepare stmt4 from "prepare stmt3 from 'create table t1(a int, b int)'"; + +# TestIgnorePlanCache +drop table if exists t; +create table t (id int primary key, num int); +insert into t values (1, 1); +insert into t values (2, 2); +insert into t values (3, 3); +prepare stmt from 'select /*+ IGNORE_PLAN_CACHE() */ * from t where id=?'; +set @ignore_plan_doma = 1; +execute stmt using @ignore_plan_doma; +select @@last_plan_from_cache; + +# TestPreparedStmtWithHint +## https://github.com/pingcap/tidb/issues/18535 +prepare stmt from "select /*+ max_execution_time(10) */ sleep(3)"; +set @a=now(); +execute stmt; +select timediff(now(), @a) < 3; +set @a=now(); +select /*+ max_execution_time(10) */ sleep(3); +select timediff(now(), @a) < 3; + +## see https://github.com/pingcap/tidb/issues/46817 +drop table if exists t; +create table t (i int); +prepare stmt from 'with a as (select /*+ qb_name(qb1) */ * from t) select /*+ leading(@qb1)*/ * from a;'; + +# TestIssue28782 +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +prepare stmt from 'SELECT IF(?, 1, 0);'; +set @a=1, @b=null, @c=0; +execute stmt using @a; +execute stmt using @b; +## TODO(Reminiscent): Support cache more tableDual plan. 
+select @@last_plan_from_cache; +execute stmt using @c; +select @@last_plan_from_cache; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; + +# TestIssue28087And28162 +set tidb_enable_prepared_plan_cache=1; +## issue 28087 +drop table if exists IDT_26207; +CREATE TABLE IDT_26207 (col1 bit(1)); +insert into IDT_26207 values(0x0), (0x1); +prepare stmt from 'select hex(t1.col1) from IDT_26207 as t1 left join IDT_26207 as t2 on t1.col1 = t2.col1 where t1.col1 in (?, ?, ?)'; +set @a=0x01, @b=0x01, @c=0x01; +execute stmt using @a,@b,@c; +set @a=0x00, @b=0x00, @c=0x01; +execute stmt using @a,@b,@c; +select @@last_plan_from_cache; + +## issue 28162 +drop table if exists IDT_MC21780; +CREATE TABLE IDT_MC21780 ( + COL1 timestamp NULL DEFAULT NULL, + COL2 timestamp NULL DEFAULT NULL, + COL3 timestamp NULL DEFAULT NULL, + KEY U_M_COL (COL1,COL2) +); +insert into IDT_MC21780 values("1970-12-18 10:53:28", "1970-12-18 10:53:28", "1970-12-18 10:53:28"); +prepare stmt from 'select/*+ hash_join(t1) */ * from IDT_MC21780 t1 join IDT_MC21780 t2 on t1.col1 = t2.col1 where t1. col1 < ? and t2. 
col1 in (?, ?, ?);'; +set @a="2038-01-19 03:14:07", @b="2038-01-19 03:14:07", @c="2038-01-19 03:14:07", @d="2038-01-19 03:14:07"; +execute stmt using @a,@b,@c,@d; +set @a="1976-09-09 20:21:11", @b="2021-07-14 09:28:16", @c="1982-01-09 03:36:39", @d="1970-12-18 10:53:28"; +execute stmt using @a,@b,@c,@d; +select @@last_plan_from_cache; +set tidb_enable_prepared_plan_cache=default; + +# TestTemporaryTable4PlanCache +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +drop table if exists tmp2; +create temporary table tmp2 (a int, b int, key(a), key(b)); +prepare stmt from 'select * from tmp2;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +drop table if exists tmp_t; +create global temporary table tmp_t (id int primary key, a int, b int, index(a)) on commit delete rows; +prepare stmt from 'select * from tmp_t;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; + +# TestIssue31141 +set tidb_enable_prepared_plan_cache=1; +set @@tidb_txn_mode = 'pessimistic'; +prepare stmt1 from 'do 1'; +set @@tidb_txn_mode = 'optimistic'; +prepare stmt1 from 'do 1'; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_txn_mode=default; + diff --git a/tests/integrationtest/t/executor/revoke.test b/tests/integrationtest/t/executor/revoke.test new file mode 100644 index 0000000000000..734f548a5d4e2 --- /dev/null +++ b/tests/integrationtest/t/executor/revoke.test @@ -0,0 +1,108 @@ +# TestRevokeTableSingle +# ref issue #38421 +drop user if exists test; +drop table if exists test1; +CREATE USER test; +CREATE TABLE executor__revoke.test1(c1 int); +GRANT SELECT ON executor__revoke.test1 TO test; +REVOKE SELECT ON executor__revoke.test1 from test; +SELECT Column_priv FROM mysql.tables_priv WHERE User="test" ; + +# TestRevokeTableSingleColumn +# ref issue #38421(column fix) +drop user if exists test; +CREATE USER test; +GRANT 
SELECT(Host) ON mysql.db TO test; +GRANT SELECT(DB) ON mysql.db TO test; +REVOKE SELECT(Host) ON mysql.db FROM test; +SELECT count(Column_priv) FROM mysql.columns_priv WHERE User="test" and Column_name ='Host' ; +SELECT count(Column_priv) FROM mysql.columns_priv WHERE User="test" and Column_name ='DB' ; + +# TestRevokeDynamicPrivs +DROP USER if exists dyn; +create user dyn; +GRANT BACKUP_Admin ON *.* TO dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; + +## try revoking only on test.* - should fail: +-- error 3619 +REVOKE BACKUP_Admin,system_variables_admin ON executor__revoke.* FROM dyn; + +## privs should still be intact: +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +## with correct usage, the privilege is revoked +REVOKE BACKUP_Admin ON *.* FROM dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; + +## Revoke bogus is a warning in MySQL +REVOKE bogus ON *.* FROM dyn; +SHOW WARNINGS; + +## grant and revoke two dynamic privileges at once. 
+GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN ON *.* TO dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +REVOKE BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN ON *.* FROM dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; + +## revoke a combination of dynamic + non-dynamic +GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT, INSERT ON *.* TO dyn; +REVOKE BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT, INSERT ON *.* FROM dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; + +## revoke grant option from privileges +GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT ON *.* TO dyn WITH GRANT OPTION; +REVOKE BACKUP_ADMIN, SELECT, GRANT OPTION ON *.* FROM dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; + +# TestRevokeOnNonExistTable +# issue #28533 +drop DATABASE if exists d1; +drop user if exists issue28533; +CREATE DATABASE d1; +USE d1; +CREATE TABLE t1 (a int); +CREATE USER issue28533; + +## GRANT ON existent table success +GRANT ALTER ON d1.t1 TO issue28533; + +## GRANT ON non-existent table success +GRANT INSERT, CREATE ON d1.t2 TO issue28533; + +## REVOKE ON non-existent table success +DROP TABLE t1; +REVOKE ALTER ON d1.t1 FROM issue28533; + +DROP USER issue28533; +DROP TABLE IF EXISTS t1; +DROP DATABASE IF EXISTS d1; +use executor__revoke; + +# TestIssue41773 +drop user if exists 't1234'@'%'; +create table if not exists xx (id int); +CREATE USER 't1234'@'%' IDENTIFIED BY 'sNGNQo12fEHe0n3vU'; +GRANT USAGE ON * TO 't1234'@'%'; +GRANT USAGE ON executor__revoke.* TO 't1234'@'%'; +GRANT USAGE ON executor__revoke.xx TO 't1234'@'%'; +REVOKE USAGE ON * FROM 't1234'@'%'; +REVOKE USAGE ON executor__revoke.* FROM 't1234'@'%'; +REVOKE USAGE ON executor__revoke.xx FROM 't1234'@'%'; + +# 
TestCaseInsensitiveSchemaNames +# Check https://github.com/pingcap/tidb/issues/41048 +drop table if exists TABLE_PRIV; +CREATE TABLE executor__revoke.TABLE_PRIV(id int, name varchar(20)); + +## Verify the case-insensitive updates for mysql.tables_priv table. +GRANT SELECT ON executor__revoke.table_priv TO 'root'@'%'; +revoke SELECT ON executor__revoke.TABLE_PRIV from 'root'@'%'; + +## Verify the case-insensitive updates for mysql.db table. +GRANT SELECT ON executor__revoke.* TO 'root'@'%'; +revoke SELECT ON executor__revoke.* from 'root'@'%'; + +## Verify the case-insensitive updates for mysql.columns_priv table. +GRANT SELECT (id), INSERT (ID, name) ON executor__revoke.TABLE_PRIV TO 'root'@'%'; +REVOKE SELECT (ID) ON executor__revoke.taBle_priv from 'root'@'%'; + From 173e4856108f4163922f018c909051af7a7c379f Mon Sep 17 00:00:00 2001 From: s-shiraki <54130718+highpon@users.noreply.github.com> Date: Mon, 30 Oct 2023 12:21:37 +0900 Subject: [PATCH 27/33] errno: fix typo for overflow message (#48059) close pingcap/tidb#47943 --- errors.toml | 2 +- pkg/errno/errname.go | 2 +- pkg/types/errors.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/errors.toml b/errors.toml index 2881b5ad966d5..62c4f3927eea2 100644 --- a/errors.toml +++ b/errors.toml @@ -3228,7 +3228,7 @@ Bad Number ["types:8030"] error = ''' -Cast to signed converted positive out-of-range integer to it's negative complement +Cast to signed converted positive out-of-range integer to its negative complement ''' ["types:8031"] diff --git a/pkg/errno/errname.go b/pkg/errno/errname.go index e3dcc2625f68f..631afe3d773b7 100644 --- a/pkg/errno/errname.go +++ b/pkg/errno/errname.go @@ -979,7 +979,7 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{ ErrInfoSchemaExpired: mysql.Message("Information schema is out of date: schema failed to update in 1 lease, please make sure TiDB can connect to TiKV", nil), ErrInfoSchemaChanged: mysql.Message("Information schema is changed during the execution of 
the statement(for example, table definition may be updated by other DDL ran in parallel). If you see this error often, try increasing `tidb_max_delta_schema_count`", nil), ErrBadNumber: mysql.Message("Bad Number", nil), - ErrCastAsSignedOverflow: mysql.Message("Cast to signed converted positive out-of-range integer to it's negative complement", nil), + ErrCastAsSignedOverflow: mysql.Message("Cast to signed converted positive out-of-range integer to its negative complement", nil), ErrCastNegIntAsUnsigned: mysql.Message("Cast to unsigned converted negative integer to it's positive complement", nil), ErrInvalidYearFormat: mysql.Message("invalid year format", nil), ErrInvalidYear: mysql.Message("invalid year", nil), diff --git a/pkg/types/errors.go b/pkg/types/errors.go index 8c7dffcf73b8b..c245816562ace 100644 --- a/pkg/types/errors.go +++ b/pkg/types/errors.go @@ -64,7 +64,7 @@ var ( ErrDuplicatedValueInType = dbterror.ClassTypes.NewStd(mysql.ErrDuplicatedValueInType) // ErrDatetimeFunctionOverflow is returned when the calculation in datetime function cause overflow. ErrDatetimeFunctionOverflow = dbterror.ClassTypes.NewStd(mysql.ErrDatetimeFunctionOverflow) - // ErrCastAsSignedOverflow is returned when positive out-of-range integer, and convert to it's negative complement. + // ErrCastAsSignedOverflow is returned when positive out-of-range integer, and convert to its negative complement. ErrCastAsSignedOverflow = dbterror.ClassTypes.NewStd(mysql.ErrCastAsSignedOverflow) // ErrCastNegIntAsUnsigned is returned when a negative integer be casted to an unsigned int. 
ErrCastNegIntAsUnsigned = dbterror.ClassTypes.NewStd(mysql.ErrCastNegIntAsUnsigned) From 7a3429ade51a242a9de6daa160100ea4c5e6f4db Mon Sep 17 00:00:00 2001 From: xhe Date: Mon, 30 Oct 2023 12:21:35 +0800 Subject: [PATCH 28/33] executor: use statusPort to retrieve tiproxy grpc services (#47963) close pingcap/tidb#47962 --- pkg/executor/inspection_result_test.go | 2 +- pkg/executor/memtable_reader.go | 4 ++-- pkg/executor/memtable_reader_test.go | 31 ++++++++++++++++++++++++++ pkg/infoschema/tables.go | 2 +- 4 files changed, 35 insertions(+), 4 deletions(-) diff --git a/pkg/executor/inspection_result_test.go b/pkg/executor/inspection_result_test.go index 48a9abfce93fe..462290347e446 100644 --- a/pkg/executor/inspection_result_test.go +++ b/pkg/executor/inspection_result_test.go @@ -477,7 +477,7 @@ func createClusterGRPCServer(t testing.TB) map[string]*testServer { testServers := map[string]*testServer{} // create gRPC servers - for _, typ := range []string{"tidb", "tikv", "pd"} { + for _, typ := range []string{"tidb", "tikv", "tiproxy", "pd"} { tmpDir := t.TempDir() server := grpc.NewServer() diff --git a/pkg/executor/memtable_reader.go b/pkg/executor/memtable_reader.go index e1e21c456797e..0f8fce0579d98 100644 --- a/pkg/executor/memtable_reader.go +++ b/pkg/executor/memtable_reader.go @@ -464,9 +464,9 @@ func (e *clusterLogRetriever) startRetrieving( util.WithRecovery(func() { defer close(ch) - // The TiDB provides diagnostics service via status address + // TiDB and TiProxy provide diagnostics service via status address remote := address - if serverType == "tidb" { + if serverType == "tidb" || serverType == "tiproxy" { remote = statusAddr } conn, err := grpc.Dial(remote, opt) diff --git a/pkg/executor/memtable_reader_test.go b/pkg/executor/memtable_reader_test.go index f968b319bbaee..8e4e858ed9c6c 100644 --- a/pkg/executor/memtable_reader_test.go +++ b/pkg/executor/memtable_reader_test.go @@ -502,6 +502,22 @@ func TestTiDBClusterLog(t *testing.T) { 
logtime(`2019/08/26 06:28:19.011`) + ` [critical] [test log message tikv 14, bar]`, }) + // TiProxy + writeTmpFile(t, testServers["tiproxy"].tmpDir, "tiproxy.log", []string{ + logtime(`2019/08/26 06:19:13.011`) + ` [INFO] [test log message tiproxy 1, foo]`, + logtime(`2019/08/26 06:20:14.011`) + ` [DEBUG] [test log message tiproxy 2, foo]`, + logtime(`2019/08/26 06:21:15.011`) + ` [error] [test log message tiproxy 3, foo]`, + logtime(`2019/08/26 06:22:16.011`) + ` [trace] [test log message tiproxy 4, foo]`, + logtime(`2019/08/26 06:23:17.011`) + ` [CRITICAL] [test log message tiproxy 5, foo]`, + }) + writeTmpFile(t, testServers["tiproxy"].tmpDir, "tiproxy-1.log", []string{ + logtime(`2019/08/26 06:24:15.011`) + ` [info] [test log message tiproxy 10, bar]`, + logtime(`2019/08/26 06:25:16.011`) + ` [debug] [test log message tiproxy 11, bar]`, + logtime(`2019/08/26 06:26:17.011`) + ` [ERROR] [test log message tiproxy 12, bar]`, + logtime(`2019/08/26 06:27:18.011`) + ` [TRACE] [test log message tiproxy 13, bar]`, + logtime(`2019/08/26 06:28:19.011`) + ` [critical] [test log message tiproxy 14, bar]`, + }) + // PD writeTmpFile(t, testServers["pd"].tmpDir, "pd.log", []string{ logtime(`2019/08/26 06:18:13.011`) + ` [INFO] [test log message pd 1, foo]`, @@ -522,33 +538,43 @@ func TestTiDBClusterLog(t *testing.T) { {"2019/08/26 06:18:13.011", "pd", "INFO", "[test log message pd 1, foo]"}, {"2019/08/26 06:19:13.011", "tidb", "INFO", "[test log message tidb 1, foo]"}, {"2019/08/26 06:19:13.011", "tikv", "INFO", "[test log message tikv 1, foo]"}, + {"2019/08/26 06:19:13.011", "tiproxy", "INFO", "[test log message tiproxy 1, foo]"}, {"2019/08/26 06:19:14.011", "pd", "DEBUG", "[test log message pd 2, foo]"}, {"2019/08/26 06:19:14.011", "tidb", "DEBUG", "[test log message tidb 2, foo]"}, {"2019/08/26 06:19:15.011", "tidb", "error", "[test log message tidb 3, foo]"}, {"2019/08/26 06:19:16.011", "tidb", "trace", "[test log message tidb 4, foo]"}, {"2019/08/26 06:19:17.011", "tidb", 
"CRITICAL", "[test log message tidb 5, foo]"}, {"2019/08/26 06:20:14.011", "tikv", "DEBUG", "[test log message tikv 2, foo]"}, + {"2019/08/26 06:20:14.011", "tiproxy", "DEBUG", "[test log message tiproxy 2, foo]"}, {"2019/08/26 06:20:15.011", "pd", "error", "[test log message pd 3, foo]"}, {"2019/08/26 06:21:15.011", "tikv", "error", "[test log message tikv 3, foo]"}, + {"2019/08/26 06:21:15.011", "tiproxy", "error", "[test log message tiproxy 3, foo]"}, {"2019/08/26 06:21:16.011", "pd", "trace", "[test log message pd 4, foo]"}, {"2019/08/26 06:22:16.011", "tikv", "trace", "[test log message tikv 4, foo]"}, + {"2019/08/26 06:22:16.011", "tiproxy", "trace", "[test log message tiproxy 4, foo]"}, {"2019/08/26 06:22:17.011", "pd", "CRITICAL", "[test log message pd 5, foo]"}, {"2019/08/26 06:23:13.011", "pd", "info", "[test log message pd 10, bar]"}, {"2019/08/26 06:23:17.011", "tikv", "CRITICAL", "[test log message tikv 5, foo]"}, + {"2019/08/26 06:23:17.011", "tiproxy", "CRITICAL", "[test log message tiproxy 5, foo]"}, {"2019/08/26 06:24:14.011", "pd", "debug", "[test log message pd 11, bar]"}, {"2019/08/26 06:24:15.011", "tikv", "info", "[test log message tikv 10, bar]"}, + {"2019/08/26 06:24:15.011", "tiproxy", "info", "[test log message tiproxy 10, bar]"}, {"2019/08/26 06:25:13.011", "tidb", "info", "[test log message tidb 10, bar]"}, {"2019/08/26 06:25:14.011", "tidb", "debug", "[test log message tidb 11, bar]"}, {"2019/08/26 06:25:15.011", "pd", "ERROR", "[test log message pd 12, bar]"}, {"2019/08/26 06:25:15.011", "tidb", "ERROR", "[test log message tidb 12, bar]"}, {"2019/08/26 06:25:16.011", "tidb", "TRACE", "[test log message tidb 13, bar]"}, {"2019/08/26 06:25:16.011", "tikv", "debug", "[test log message tikv 11, bar]"}, + {"2019/08/26 06:25:16.011", "tiproxy", "debug", "[test log message tiproxy 11, bar]"}, {"2019/08/26 06:25:17.011", "tidb", "critical", "[test log message tidb 14, bar]"}, {"2019/08/26 06:26:16.011", "pd", "TRACE", "[test log message pd 13, 
bar]"}, {"2019/08/26 06:26:17.011", "tikv", "ERROR", "[test log message tikv 12, bar]"}, + {"2019/08/26 06:26:17.011", "tiproxy", "ERROR", "[test log message tiproxy 12, bar]"}, {"2019/08/26 06:27:17.011", "pd", "critical", "[test log message pd 14, bar]"}, {"2019/08/26 06:27:18.011", "tikv", "TRACE", "[test log message tikv 13, bar]"}, + {"2019/08/26 06:27:18.011", "tiproxy", "TRACE", "[test log message tiproxy 13, bar]"}, {"2019/08/26 06:28:19.011", "tikv", "critical", "[test log message tikv 14, bar]"}, + {"2019/08/26 06:28:19.011", "tiproxy", "critical", "[test log message tiproxy 14, bar]"}, } var cases = []struct { @@ -572,14 +598,17 @@ func TestTiDBClusterLog(t *testing.T) { expected: [][]string{ {"2019/08/26 06:19:13.011", "tidb", "INFO", "[test log message tidb 1, foo]"}, {"2019/08/26 06:19:13.011", "tikv", "INFO", "[test log message tikv 1, foo]"}, + {"2019/08/26 06:19:13.011", "tiproxy", "INFO", "[test log message tiproxy 1, foo]"}, {"2019/08/26 06:19:14.011", "pd", "DEBUG", "[test log message pd 2, foo]"}, {"2019/08/26 06:19:14.011", "tidb", "DEBUG", "[test log message tidb 2, foo]"}, {"2019/08/26 06:19:15.011", "tidb", "error", "[test log message tidb 3, foo]"}, {"2019/08/26 06:19:16.011", "tidb", "trace", "[test log message tidb 4, foo]"}, {"2019/08/26 06:19:17.011", "tidb", "CRITICAL", "[test log message tidb 5, foo]"}, {"2019/08/26 06:20:14.011", "tikv", "DEBUG", "[test log message tikv 2, foo]"}, + {"2019/08/26 06:20:14.011", "tiproxy", "DEBUG", "[test log message tiproxy 2, foo]"}, {"2019/08/26 06:20:15.011", "pd", "error", "[test log message pd 3, foo]"}, {"2019/08/26 06:21:15.011", "tikv", "error", "[test log message tikv 3, foo]"}, + {"2019/08/26 06:21:15.011", "tiproxy", "error", "[test log message tiproxy 3, foo]"}, }, }, { @@ -715,9 +744,11 @@ func TestTiDBClusterLog(t *testing.T) { {"2019/08/26 06:19:17.011", "tidb", "CRITICAL", "[test log message tidb 5, foo]"}, {"2019/08/26 06:22:17.011", "pd", "CRITICAL", "[test log message pd 5, foo]"}, 
{"2019/08/26 06:23:17.011", "tikv", "CRITICAL", "[test log message tikv 5, foo]"}, + {"2019/08/26 06:23:17.011", "tiproxy", "CRITICAL", "[test log message tiproxy 5, foo]"}, {"2019/08/26 06:25:17.011", "tidb", "critical", "[test log message tidb 14, bar]"}, {"2019/08/26 06:27:17.011", "pd", "critical", "[test log message pd 14, bar]"}, {"2019/08/26 06:28:19.011", "tikv", "critical", "[test log message tikv 14, bar]"}, + {"2019/08/26 06:28:19.011", "tiproxy", "critical", "[test log message tiproxy 14, bar]"}, }, }, { diff --git a/pkg/infoschema/tables.go b/pkg/infoschema/tables.go index 53704292118b0..3a7f48a79ecee 100644 --- a/pkg/infoschema/tables.go +++ b/pkg/infoschema/tables.go @@ -2395,7 +2395,7 @@ func FetchClusterServerInfoWithoutPrivilegeCheck(ctx context.Context, sctx sessi for i, srv := range serversInfo { address := srv.Address remote := address - if srv.ServerType == "tidb" { + if srv.ServerType == "tidb" || srv.ServerType == "tiproxy" { remote = srv.StatusAddr } wg.Add(1) From 4b4dbf615b443532a993688aa1fb7a5050e7941d Mon Sep 17 00:00:00 2001 From: Lynn Date: Mon, 30 Oct 2023 14:05:06 +0800 Subject: [PATCH 29/33] *: update owner op message (#47904) close pingcap/tidb#47903 --- pkg/ddl/job_table.go | 2 +- pkg/owner/manager.go | 13 +++++++++---- pkg/owner/manager_test.go | 18 +++++++++++------- pkg/session/sync_upgrade.go | 4 ++-- 4 files changed, 23 insertions(+), 14 deletions(-) diff --git a/pkg/ddl/job_table.go b/pkg/ddl/job_table.go index 2e494a5a182f7..94125eefdbfc6 100644 --- a/pkg/ddl/job_table.go +++ b/pkg/ddl/job_table.go @@ -329,7 +329,7 @@ func (d *ddl) checkAndUpdateClusterState(needUpdate bool) error { ownerOp := owner.OpNone if stateInfo.State == syncer.StateUpgrading { - ownerOp = owner.OpGetUpgradingState + ownerOp = owner.OpSyncUpgradingState } err = d.ownerManager.SetOwnerOpValue(d.ctx, ownerOp) if err != nil { diff --git a/pkg/owner/manager.go b/pkg/owner/manager.go index e88c1c9398df1..b20051c1acf38 100644 --- a/pkg/owner/manager.go 
+++ b/pkg/owner/manager.go @@ -76,20 +76,25 @@ type OpType byte // List operation of types. const ( - OpNone OpType = 0 - OpGetUpgradingState OpType = 1 + OpNone OpType = 0 + OpSyncUpgradingState OpType = 1 ) // String implements fmt.Stringer interface. func (ot OpType) String() string { switch ot { - case OpGetUpgradingState: - return "get upgrading state" + case OpSyncUpgradingState: + return "sync upgrading state" default: return "none" } } +// IsSyncedUpgradingState represents whether the upgrading state is synchronized. +func (ot OpType) IsSyncedUpgradingState() bool { + return ot == OpSyncUpgradingState +} + // DDLOwnerChecker is used to check whether tidb is owner. type DDLOwnerChecker interface { // IsOwner returns whether the ownerManager is the owner. diff --git a/pkg/owner/manager_test.go b/pkg/owner/manager_test.go index 5bf2801578296..eff35dbd82108 100644 --- a/pkg/owner/manager_test.go +++ b/pkg/owner/manager_test.go @@ -139,17 +139,20 @@ func TestSetAndGetOwnerOpValue(t *testing.T) { op, err := owner.GetOwnerOpValue(context.Background(), tInfo.client, DDLOwnerKey, "log prefix") require.NoError(t, err) require.Equal(t, op, owner.OpNone) - err = manager.SetOwnerOpValue(context.Background(), owner.OpGetUpgradingState) + require.False(t, op.IsSyncedUpgradingState()) + err = manager.SetOwnerOpValue(context.Background(), owner.OpSyncUpgradingState) require.NoError(t, err) op, err = owner.GetOwnerOpValue(context.Background(), tInfo.client, DDLOwnerKey, "log prefix") require.NoError(t, err) - require.Equal(t, op, owner.OpGetUpgradingState) + require.Equal(t, op, owner.OpSyncUpgradingState) + require.True(t, op.IsSyncedUpgradingState()) // update the same as the original value - err = manager.SetOwnerOpValue(context.Background(), owner.OpGetUpgradingState) + err = manager.SetOwnerOpValue(context.Background(), owner.OpSyncUpgradingState) require.NoError(t, err) op, err = owner.GetOwnerOpValue(context.Background(), tInfo.client, DDLOwnerKey, "log prefix") 
require.NoError(t, err) - require.Equal(t, op, owner.OpGetUpgradingState) + require.Equal(t, op, owner.OpSyncUpgradingState) + require.True(t, op.IsSyncedUpgradingState()) // test del owner key when SetOwnerOpValue require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/owner/MockDelOwnerKey", `return("delOwnerKeyAndNotOwner")`)) err = manager.SetOwnerOpValue(context.Background(), owner.OpNone) @@ -158,6 +161,7 @@ func TestSetAndGetOwnerOpValue(t *testing.T) { require.NotNil(t, err) require.Equal(t, concurrency.ErrElectionNoLeader.Error(), err.Error()) require.Equal(t, op, owner.OpNone) + require.False(t, op.IsSyncedUpgradingState()) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/owner/MockDelOwnerKey")) // Let ddl run for the owner again. @@ -167,7 +171,7 @@ func TestSetAndGetOwnerOpValue(t *testing.T) { // Mock the manager become not owner because the owner is deleted(like TTL is timeout). // And then the manager campaigns the owner again, and become the owner. 
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/owner/MockDelOwnerKey", `return("onlyDelOwnerKey")`)) - err = manager.SetOwnerOpValue(context.Background(), owner.OpGetUpgradingState) + err = manager.SetOwnerOpValue(context.Background(), owner.OpSyncUpgradingState) require.Error(t, err, "put owner key failed, cmp is false") isOwner = checkOwner(tInfo.ddl, true) require.True(t, isOwner) @@ -199,11 +203,11 @@ func TestGetOwnerOpValueBeforeSet(t *testing.T) { require.NoError(t, err) require.Equal(t, op, owner.OpNone) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/owner/MockNotSetOwnerOp")) - err = manager.SetOwnerOpValue(context.Background(), owner.OpGetUpgradingState) + err = manager.SetOwnerOpValue(context.Background(), owner.OpSyncUpgradingState) require.NoError(t, err) op, err = owner.GetOwnerOpValue(context.Background(), nil, DDLOwnerKey, "log prefix") require.NoError(t, err) - require.Equal(t, op, owner.OpGetUpgradingState) + require.Equal(t, op, owner.OpSyncUpgradingState) } func TestCluster(t *testing.T) { diff --git a/pkg/session/sync_upgrade.go b/pkg/session/sync_upgrade.go index 97f1a01b8589e..384a505fae871 100644 --- a/pkg/session/sync_upgrade.go +++ b/pkg/session/sync_upgrade.go @@ -61,11 +61,11 @@ func SyncUpgradeState(s sessionctx.Context, timeout time.Duration) error { childCtx, cancel := context.WithTimeout(ctx, 3*time.Second) op, err = owner.GetOwnerOpValue(childCtx, dom.EtcdClient(), ddl.DDLOwnerKey, "upgrade bootstrap") cancel() - if err == nil && op.String() == owner.OpGetUpgradingState.String() { + if err == nil && op.IsSyncedUpgradingState() { break } if i%10 == 0 { - logger.Warn("get owner op failed", zap.Stringer("state", op), zap.Error(err)) + logger.Warn("get owner op failed", zap.Stringer("op", op), zap.Error(err)) } time.Sleep(interval) } From 528932ca2bd2a3178a540233a1e799439feecfc7 Mon Sep 17 00:00:00 2001 From: tangenta Date: Mon, 30 Oct 2023 14:05:13 +0800 Subject: [PATCH 30/33] bootstrap: modify 
`variable_value` column to `varchar(16383)` (#48030) close pingcap/tidb#48029 --- pkg/session/bootstrap.go | 16 +++++++- pkg/session/bootstrap_test.go | 39 +++++++++++++++++++ .../bootstraptest/bootstrap_upgrade_test.go | 14 +++++-- 3 files changed, 63 insertions(+), 6 deletions(-) diff --git a/pkg/session/bootstrap.go b/pkg/session/bootstrap.go index 27799f7b1a700..00c9aa34e3286 100644 --- a/pkg/session/bootstrap.go +++ b/pkg/session/bootstrap.go @@ -183,7 +183,7 @@ const ( // Maybe we will put it back to INFORMATION_SCHEMA. CreateGlobalVariablesTable = `CREATE TABLE IF NOT EXISTS mysql.GLOBAL_VARIABLES( VARIABLE_NAME VARCHAR(64) NOT NULL PRIMARY KEY, - VARIABLE_VALUE VARCHAR(1024) DEFAULT NULL);` + VARIABLE_VALUE VARCHAR(16383) DEFAULT NULL);` // CreateTiDBTable is the SQL statement creates a table in system db. // This table is a key-value struct contains some information used by TiDB. // Currently we only put bootstrapped in it which indicates if the system is already bootstrapped. @@ -1020,11 +1020,15 @@ const ( // version 178 // write mDDLTableVersion into `mysql.tidb` table version178 = 178 + + // version 179 + // enlarge `VARIABLE_VALUE` of `mysql.global_variables` from `varchar(1024)` to `varchar(16383)`. + version179 = 179 ) // currentBootstrapVersion is defined as a variable, so we can modify its value for testing. // please make sure this is the largest version -var currentBootstrapVersion int64 = version178 +var currentBootstrapVersion int64 = version179 // DDL owner key's expired time is ManagerSessionTTL seconds, we should wait the time and give more time to have a chance to finish it.
var internalSQLTimeout = owner.ManagerSessionTTL + 15 @@ -1178,6 +1182,7 @@ var ( upgradeToVer176, upgradeToVer177, upgradeToVer178, + upgradeToVer179, } ) @@ -2879,6 +2884,13 @@ func upgradeToVer178(s Session, ver int64) { writeDDLTableVersion(s) } +func upgradeToVer179(s Session, ver int64) { + if ver >= version179 { + return + } + doReentrantDDL(s, "ALTER TABLE mysql.global_variables MODIFY COLUMN `VARIABLE_VALUE` varchar(16383)") +} + func writeOOMAction(s Session) { comment := "oom-action is `log` by default in v3.0.x, `cancel` by default in v4.0.11+" mustExecute(s, `INSERT HIGH_PRIORITY INTO %n.%n VALUES (%?, %?, %?) ON DUPLICATE KEY UPDATE VARIABLE_VALUE= %?`, diff --git a/pkg/session/bootstrap_test.go b/pkg/session/bootstrap_test.go index a7c9965001eac..e23b15fca0326 100644 --- a/pkg/session/bootstrap_test.go +++ b/pkg/session/bootstrap_test.go @@ -2186,3 +2186,42 @@ func TestWriteDDLTableVersionToMySQLTiDBWhenUpgradingTo178(t *testing.T) { require.Equal(t, []byte(fmt.Sprintf("%d", ddlTableVer)), req.GetRow(0).GetBytes(0)) require.NoError(t, r.Close()) } + +func TestTiDBUpgradeToVer179(t *testing.T) { + ctx := context.Background() + store, _ := CreateStoreAndBootstrap(t) + defer func() { + require.NoError(t, store.Close()) + }() + ver178 := version178 + seV178 := CreateSessionAndSetID(t, store) + txn, err := store.Begin() + require.NoError(t, err) + m := meta.NewMeta(txn) + err = m.FinishBootstrap(int64(ver178)) + require.NoError(t, err) + MustExec(t, seV178, fmt.Sprintf("update mysql.tidb set variable_value=%d where variable_name='tidb_server_version'", ver178)) + err = txn.Commit(context.Background()) + require.NoError(t, err) + + unsetStoreBootstrapped(store.UUID()) + ver, err := getBootstrapVersion(seV178) + require.NoError(t, err) + require.Equal(t, int64(ver178), ver) + + dom, err := BootstrapSession(store) + require.NoError(t, err) + ver, err = getBootstrapVersion(seV178) + require.NoError(t, err) + require.Less(t, int64(ver178), ver) + + r := 
MustExecToRecodeSet(t, seV178, "desc mysql.global_variables") + req := r.NewChunk(nil) + err = r.Next(ctx, req) + require.NoError(t, err) + require.Equal(t, 2, req.NumRows()) + require.Equal(t, []byte("varchar(16383)"), req.GetRow(1).GetBytes(1)) + require.NoError(t, r.Close()) + + dom.Close() +} diff --git a/pkg/session/bootstraptest/bootstrap_upgrade_test.go b/pkg/session/bootstraptest/bootstrap_upgrade_test.go index d4f681d4f58a1..da38f275afe76 100644 --- a/pkg/session/bootstraptest/bootstrap_upgrade_test.go +++ b/pkg/session/bootstraptest/bootstrap_upgrade_test.go @@ -605,22 +605,28 @@ func TestUpgradeVersionForResumeJob(t *testing.T) { wg.Wait() // Make sure the second add index operation is successful. - sql := fmt.Sprintf("select job_meta from mysql.tidb_ddl_history where job_id=%d or job_id=%d order by job_id", jobID, jobID+1) + sql := fmt.Sprintf("select job_meta from mysql.tidb_ddl_history where job_id >=%d order by job_id", jobID) rows, err := execute(context.Background(), seLatestV, sql) require.NoError(t, err) - require.Len(t, rows, 2) + require.GreaterOrEqual(t, len(rows), 2) var idxFinishTS uint64 for i, row := range rows { jobBinary := row.GetBytes(0) runJob := model.Job{} err := runJob.Decode(jobBinary) require.NoError(t, err) - require.True(t, strings.Contains(runJob.TableName, "upgrade_tbl")) require.Equal(t, model.JobStateSynced.String(), runJob.State.String()) if i == 0 { + // The first add index op. idxFinishTS = runJob.BinlogInfo.FinishedTS } else { - require.Greater(t, runJob.BinlogInfo.FinishedTS, idxFinishTS) + // The second add index op. + if strings.Contains(runJob.TableName, "upgrade_tbl") { + require.Greater(t, runJob.BinlogInfo.FinishedTS, idxFinishTS) + } else { + // The upgrade DDL ops. These jobs' finishedTS must less than add index ops. 
+ require.Less(t, runJob.BinlogInfo.FinishedTS, idxFinishTS) + } } } } From f135464cd716ec6d2fa57f7ec6983fa150d0c94c Mon Sep 17 00:00:00 2001 From: Weizhen Wang Date: Mon, 30 Oct 2023 14:05:20 +0800 Subject: [PATCH 31/33] *: upgrade grpc (#48057) close pingcap/tidb#48060 --- DEPS.bzl | 2120 ++++++++--------- br/pkg/streamhelper/integration_test.go | 4 +- ...m_github_grpc_ecosystem_grpc_gateway.patch | 4 +- go.mod | 72 +- go.sum | 244 +- pkg/executor/importer/precheck_test.go | 4 +- tests/realtikvtest/brietest/main_test.go | 1 + 7 files changed, 1146 insertions(+), 1303 deletions(-) diff --git a/DEPS.bzl b/DEPS.bzl index fcb826aa24c2d..a311e1a3b6e45 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -177,13 +177,13 @@ def go_deps(): name = "com_github_alecthomas_template", build_file_proto_mode = "disable_global", importpath = "github.com/alecthomas/template", - sha256 = "25e3be7192932d130d0af31ce5bcddae887647ba4afcfb32009c3b9b79dbbdb3", - strip_prefix = "github.com/alecthomas/template@v0.0.0-20190718012654-fb15b899a751", + sha256 = "86de3337a475e323a0fb54ef03386a4e495682201f42795bd7be646c05298692", + strip_prefix = "github.com/alecthomas/template@v0.0.0-20160405071501-a0175ee3bccc", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20190718012654-fb15b899a751.zip", - "http://ats.apps.svc/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20190718012654-fb15b899a751.zip", - "https://cache.hawkingrei.com/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20190718012654-fb15b899a751.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20190718012654-fb15b899a751.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20160405071501-a0175ee3bccc.zip", + 
"http://ats.apps.svc/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20160405071501-a0175ee3bccc.zip", + "https://cache.hawkingrei.com/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20160405071501-a0175ee3bccc.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20160405071501-a0175ee3bccc.zip", ], ) go_repository( @@ -641,19 +641,6 @@ def go_deps(): "https://storage.googleapis.com/pingcapmirror/gomod/github.com/biogo/store/com_github_biogo_store-v0.0.0-20160505134755-913427a1d5e8.zip", ], ) - go_repository( - name = "com_github_bketelsen_crypt", - build_file_proto_mode = "disable_global", - importpath = "github.com/bketelsen/crypt", - sha256 = "3df95e9bd6b8861009176bc5e4f5ebc6b0ff9857df6c1b3a8ece4fb595da02e7", - strip_prefix = "github.com/bketelsen/crypt@v0.0.3-0.20200106085610-5cbc8cc4026c", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/bketelsen/crypt/com_github_bketelsen_crypt-v0.0.3-0.20200106085610-5cbc8cc4026c.zip", - "http://ats.apps.svc/gomod/github.com/bketelsen/crypt/com_github_bketelsen_crypt-v0.0.3-0.20200106085610-5cbc8cc4026c.zip", - "https://cache.hawkingrei.com/gomod/github.com/bketelsen/crypt/com_github_bketelsen_crypt-v0.0.3-0.20200106085610-5cbc8cc4026c.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/bketelsen/crypt/com_github_bketelsen_crypt-v0.0.3-0.20200106085610-5cbc8cc4026c.zip", - ], - ) go_repository( name = "com_github_bkielbasa_cyclop", build_file_proto_mode = "disable_global", @@ -823,6 +810,19 @@ def go_deps(): "https://storage.googleapis.com/pingcapmirror/gomod/github.com/cenk/backoff/com_github_cenk_backoff-v2.0.0+incompatible.zip", ], ) + go_repository( + name = "com_github_cenkalti_backoff_v4", + build_file_proto_mode = "disable_global", + importpath = "github.com/cenkalti/backoff/v4", + sha256 = 
"de69f5db190ee0f2c441e50e4bf607853ab99512a183a5713803888ced502dde", + strip_prefix = "github.com/cenkalti/backoff/v4@v4.1.1", + urls = [ + "http://bazel-cache.pingcap.net:8080/gomod/github.com/cenkalti/backoff/v4/com_github_cenkalti_backoff_v4-v4.1.1.zip", + "http://ats.apps.svc/gomod/github.com/cenkalti/backoff/v4/com_github_cenkalti_backoff_v4-v4.1.1.zip", + "https://cache.hawkingrei.com/gomod/github.com/cenkalti/backoff/v4/com_github_cenkalti_backoff_v4-v4.1.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/cenkalti/backoff/v4/com_github_cenkalti_backoff_v4-v4.1.1.zip", + ], + ) go_repository( name = "com_github_census_instrumentation_opencensus_proto", build_file_proto_mode = "disable_global", @@ -840,13 +840,13 @@ def go_deps(): name = "com_github_certifi_gocertifi", build_file_proto_mode = "disable_global", importpath = "github.com/certifi/gocertifi", - sha256 = "11d525844c3dd711fb0ae31acc9ebd8a4d602215f14ff24ad1764ecb48464849", - strip_prefix = "github.com/certifi/gocertifi@v0.0.0-20200922220541-2c3bb06c6054", + sha256 = "e007c669f49757301c34b7c5bc4a37f0fbe3707ed123995728cb814217fae2f7", + strip_prefix = "github.com/certifi/gocertifi@v0.0.0-20180905225744-ee1a9a0726d2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20200922220541-2c3bb06c6054.zip", - "http://ats.apps.svc/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20200922220541-2c3bb06c6054.zip", - "https://cache.hawkingrei.com/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20200922220541-2c3bb06c6054.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20200922220541-2c3bb06c6054.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20180905225744-ee1a9a0726d2.zip", + 
"http://ats.apps.svc/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20180905225744-ee1a9a0726d2.zip", + "https://cache.hawkingrei.com/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20180905225744-ee1a9a0726d2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20180905225744-ee1a9a0726d2.zip", ], ) go_repository( @@ -1035,13 +1035,13 @@ def go_deps(): name = "com_github_cncf_xds_go", build_file_proto_mode = "disable_global", importpath = "github.com/cncf/xds/go", - sha256 = "7e33dbf929da89661e8f7507706f7ea28762d7c48c899d8e8352145c11627bf4", - strip_prefix = "github.com/cncf/xds/go@v0.0.0-20230105202645-06c439db220b", + sha256 = "a0c6e66eade357aeda4edaa9d09612085860dc4c0b44edf8226574939bdf6091", + strip_prefix = "github.com/cncf/xds/go@v0.0.0-20230607035331-e9ce68804cb4", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230105202645-06c439db220b.zip", - "http://ats.apps.svc/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230105202645-06c439db220b.zip", - "https://cache.hawkingrei.com/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230105202645-06c439db220b.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230105202645-06c439db220b.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230607035331-e9ce68804cb4.zip", + "http://ats.apps.svc/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230607035331-e9ce68804cb4.zip", + "https://cache.hawkingrei.com/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230607035331-e9ce68804cb4.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230607035331-e9ce68804cb4.zip", ], ) go_repository( @@ -1100,13 +1100,13 @@ def go_deps(): 
name = "com_github_cockroachdb_datadriven", build_file_proto_mode = "disable_global", importpath = "github.com/cockroachdb/datadriven", - sha256 = "27661be7dc3cff4288f9a150f7e82fad6bb53382bb8d87bcfe8b22a85732c414", - strip_prefix = "github.com/cockroachdb/datadriven@v1.0.0", + sha256 = "1818b828715b773ea9eaf415fa3cc176c411e18f645ec85440b14abaf1f387c4", + strip_prefix = "github.com/cockroachdb/datadriven@v1.0.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.0.zip", - "http://ats.apps.svc/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.0.zip", - "https://cache.hawkingrei.com/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.2.zip", + "http://ats.apps.svc/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.2.zip", + "https://cache.hawkingrei.com/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.2.zip", ], ) go_repository( @@ -1252,30 +1252,17 @@ def go_deps(): "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coocood/rtutil/com_github_coocood_rtutil-v0.0.0-20190304133409-c84515f646f2.zip", ], ) - go_repository( - name = "com_github_coreos_bbolt", - build_file_proto_mode = "disable_global", - importpath = "github.com/coreos/bbolt", - sha256 = "097e7c6cf2dc9c50a0c8827f451bd3cba44c2cbf086d4fb684f2dfada9bfa841", - strip_prefix = "github.com/coreos/bbolt@v1.3.2", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/bbolt/com_github_coreos_bbolt-v1.3.2.zip", 
- "http://ats.apps.svc/gomod/github.com/coreos/bbolt/com_github_coreos_bbolt-v1.3.2.zip", - "https://cache.hawkingrei.com/gomod/github.com/coreos/bbolt/com_github_coreos_bbolt-v1.3.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/bbolt/com_github_coreos_bbolt-v1.3.2.zip", - ], - ) go_repository( name = "com_github_coreos_etcd", build_file_proto_mode = "disable_global", importpath = "github.com/coreos/etcd", - sha256 = "c32b3fc5dba0eeb8533d628489cf862c4eb360644d79c597bcc6290f3d74b046", - strip_prefix = "github.com/coreos/etcd@v3.3.13+incompatible", + sha256 = "5848e1797f8d426f4aa4b61b15611456fb0183f974cbf9e64a8a11e740883367", + strip_prefix = "github.com/coreos/etcd@v3.3.12+incompatible", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.13+incompatible.zip", - "http://ats.apps.svc/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.13+incompatible.zip", - "https://cache.hawkingrei.com/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.13+incompatible.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.13+incompatible.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.12+incompatible.zip", + "http://ats.apps.svc/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.12+incompatible.zip", + "https://cache.hawkingrei.com/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.12+incompatible.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.12+incompatible.zip", ], ) go_repository( @@ -1295,52 +1282,26 @@ def go_deps(): name = "com_github_coreos_go_semver", build_file_proto_mode = "disable_global", importpath = "github.com/coreos/go-semver", - sha256 = "b2fc075395ffc34cff4b964681d0ae3cd22096cfcadd2970eeaa877596ceb210", - strip_prefix = "github.com/coreos/go-semver@v0.3.0", - urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.0.zip", - "http://ats.apps.svc/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.0.zip", - "https://cache.hawkingrei.com/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.0.zip", - ], - ) - go_repository( - name = "com_github_coreos_go_systemd", - build_file_proto_mode = "disable_global", - importpath = "github.com/coreos/go-systemd", - sha256 = "cd349df002e0900cd0a5f9648720621840164c4b530f3e3457510e7e08589307", - strip_prefix = "github.com/coreos/go-systemd@v0.0.0-20190321100706-95778dfbb74e", + sha256 = "e72820542b5913afe0a52e956e0b3834e9fbb080641fed183117f862fab74e8a", + strip_prefix = "github.com/coreos/go-semver@v0.3.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/go-systemd/com_github_coreos_go_systemd-v0.0.0-20190321100706-95778dfbb74e.zip", - "http://ats.apps.svc/gomod/github.com/coreos/go-systemd/com_github_coreos_go_systemd-v0.0.0-20190321100706-95778dfbb74e.zip", - "https://cache.hawkingrei.com/gomod/github.com/coreos/go-systemd/com_github_coreos_go_systemd-v0.0.0-20190321100706-95778dfbb74e.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/go-systemd/com_github_coreos_go_systemd-v0.0.0-20190321100706-95778dfbb74e.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.1.zip", + "http://ats.apps.svc/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.1.zip", + "https://cache.hawkingrei.com/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.1.zip", ], ) go_repository( name = "com_github_coreos_go_systemd_v22", build_file_proto_mode = 
"disable_global", importpath = "github.com/coreos/go-systemd/v22", - sha256 = "01134ae89bf4a91c17eeb1f8425e1064f9bde64cf3ce0c9cf546a9fa1ee25e64", - strip_prefix = "github.com/coreos/go-systemd/v22@v22.3.2", + sha256 = "4c44e3a6b84de4db393e341537c7124031fa98d5f98860ad31b32b4890f2234c", + strip_prefix = "github.com/coreos/go-systemd/v22@v22.5.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.3.2.zip", - "http://ats.apps.svc/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.3.2.zip", - "https://cache.hawkingrei.com/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.3.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.3.2.zip", - ], - ) - go_repository( - name = "com_github_coreos_pkg", - build_file_proto_mode = "disable_global", - importpath = "github.com/coreos/pkg", - sha256 = "7fe161d49439a9b4136c932233cb4b803b9e3ac7ee46f39ce247defc4f4ea8d7", - strip_prefix = "github.com/coreos/pkg@v0.0.0-20180928190104-399ea9e2e55f", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/pkg/com_github_coreos_pkg-v0.0.0-20180928190104-399ea9e2e55f.zip", - "http://ats.apps.svc/gomod/github.com/coreos/pkg/com_github_coreos_pkg-v0.0.0-20180928190104-399ea9e2e55f.zip", - "https://cache.hawkingrei.com/gomod/github.com/coreos/pkg/com_github_coreos_pkg-v0.0.0-20180928190104-399ea9e2e55f.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/pkg/com_github_coreos_pkg-v0.0.0-20180928190104-399ea9e2e55f.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.5.0.zip", + "http://ats.apps.svc/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.5.0.zip", + 
"https://cache.hawkingrei.com/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.5.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.5.0.zip", ], ) go_repository( @@ -1776,26 +1737,26 @@ def go_deps(): name = "com_github_envoyproxy_go_control_plane", build_file_proto_mode = "disable_global", importpath = "github.com/envoyproxy/go-control-plane", - sha256 = "aa0530fbbbe2d4683035547b14d58a7318f408398e10092637f20642de82c9ff", - strip_prefix = "github.com/envoyproxy/go-control-plane@v0.10.3", + sha256 = "82588fbec310f9103344442e997c4ee72104821cf69caaccc829d9d272aa0d10", + strip_prefix = "github.com/envoyproxy/go-control-plane@v0.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.10.3.zip", - "http://ats.apps.svc/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.10.3.zip", - "https://cache.hawkingrei.com/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.10.3.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.10.3.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.11.1.zip", + "http://ats.apps.svc/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.11.1.zip", + "https://cache.hawkingrei.com/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.11.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.11.1.zip", ], ) go_repository( name = "com_github_envoyproxy_protoc_gen_validate", build_file_proto_mode = "disable_global", importpath = "github.com/envoyproxy/protoc-gen-validate", - 
sha256 = "7ca5aeb463c05869073076ec25ccc4144edd41d48971f1b5fd8cec1bf12a0d48", - strip_prefix = "github.com/envoyproxy/protoc-gen-validate@v0.9.1", + sha256 = "a79d19fb065554b214492c7619d760b94405429e7ca69ede922e968929a66ffb", + strip_prefix = "github.com/envoyproxy/protoc-gen-validate@v1.0.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v0.9.1.zip", - "http://ats.apps.svc/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v0.9.1.zip", - "https://cache.hawkingrei.com/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v0.9.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v0.9.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v1.0.2.zip", + "http://ats.apps.svc/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v1.0.2.zip", + "https://cache.hawkingrei.com/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v1.0.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v1.0.2.zip", ], ) go_repository( @@ -1993,19 +1954,6 @@ def go_deps(): "https://storage.googleapis.com/pingcapmirror/gomod/github.com/fogleman/gg/com_github_fogleman_gg-v1.2.1-0.20190220221249-0403632d5b90.zip", ], ) - go_repository( - name = "com_github_form3tech_oss_jwt_go", - build_file_proto_mode = "disable_global", - importpath = "github.com/form3tech-oss/jwt-go", - sha256 = "ebe8386761761d53fac2de5f8f575ddf66c114ec9835947c761131662f1d38f3", - strip_prefix = "github.com/form3tech-oss/jwt-go@v3.2.6-0.20210809144907-32ab6a8243d7+incompatible", - urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/github.com/form3tech-oss/jwt-go/com_github_form3tech_oss_jwt_go-v3.2.5+incompatible.zip", - "http://ats.apps.svc/gomod/github.com/form3tech-oss/jwt-go/com_github_form3tech_oss_jwt_go-v3.2.5+incompatible.zip", - "https://cache.hawkingrei.com/gomod/github.com/form3tech-oss/jwt-go/com_github_form3tech_oss_jwt_go-v3.2.5+incompatible.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/form3tech-oss/jwt-go/com_github_form3tech_oss_jwt_go-v3.2.5+incompatible.zip", - ], - ) go_repository( name = "com_github_fortytw2_leaktest", build_file_proto_mode = "disable_global", @@ -2101,13 +2049,13 @@ def go_deps(): name = "com_github_getsentry_raven_go", build_file_proto_mode = "disable_global", importpath = "github.com/getsentry/raven-go", - sha256 = "eaffe69939612cd05f95e1846b8ddb4043655571be34cdb6412a66b41b6826eb", - strip_prefix = "github.com/getsentry/raven-go@v0.2.0", + sha256 = "99cba0dce93b1c0ca86b44787bb7a61e31da95a11773dfa197a67f4a92f75b71", + strip_prefix = "github.com/getsentry/raven-go@v0.1.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.2.0.zip", - "http://ats.apps.svc/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.2.0.zip", - "https://cache.hawkingrei.com/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.2.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.2.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.1.2.zip", + "http://ats.apps.svc/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.1.2.zip", + "https://cache.hawkingrei.com/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.1.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.1.2.zip", ], ) 
go_repository( @@ -2686,13 +2634,13 @@ def go_deps(): name = "com_github_golang_glog", build_file_proto_mode = "disable_global", importpath = "github.com/golang/glog", - sha256 = "668beb5dd923378b00fda4ba0d965000f3f259be5ba05ebd341a2949e8f20db6", - strip_prefix = "github.com/golang/glog@v1.1.0", + sha256 = "f17e7d8a4485e91373c72d7ed688b23cafe647cd4e2bb8de669e39a35432fbec", + strip_prefix = "github.com/golang/glog@v1.1.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.0.zip", - "http://ats.apps.svc/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.0.zip", - "https://cache.hawkingrei.com/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.2.zip", + "http://ats.apps.svc/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.2.zip", + "https://cache.hawkingrei.com/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.2.zip", ], ) go_repository( @@ -2725,13 +2673,13 @@ def go_deps(): name = "com_github_golang_jwt_jwt_v4", build_file_proto_mode = "disable_global", importpath = "github.com/golang-jwt/jwt/v4", - sha256 = "bea2e7c045b07f50b60211bee94b62c442322ded7fa893e3fda49dcdce0e2908", - strip_prefix = "github.com/golang-jwt/jwt/v4@v4.2.0", + sha256 = "331efc33198957256c57258caf96199fec534d0c0849da303a11fb013b47d101", + strip_prefix = "github.com/golang-jwt/jwt/v4@v4.4.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.2.0.zip", - "http://ats.apps.svc/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.2.0.zip", - 
"https://cache.hawkingrei.com/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.2.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.2.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.4.2.zip", + "http://ats.apps.svc/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.4.2.zip", + "https://cache.hawkingrei.com/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.4.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.4.2.zip", ], ) go_repository( @@ -3102,6 +3050,19 @@ def go_deps(): "https://storage.googleapis.com/pingcapmirror/gomod/github.com/google/renameio/v2/com_github_google_renameio_v2-v2.0.0.zip", ], ) + go_repository( + name = "com_github_google_s2a_go", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/s2a-go", + sha256 = "b01ff39fc8c27f944da1c3f78106d57e165f234d98115c344f448b603ae24ff3", + strip_prefix = "github.com/google/s2a-go@v0.1.4", + urls = [ + "http://bazel-cache.pingcap.net:8080/gomod/github.com/google/s2a-go/com_github_google_s2a_go-v0.1.4.zip", + "http://ats.apps.svc/gomod/github.com/google/s2a-go/com_github_google_s2a_go-v0.1.4.zip", + "https://cache.hawkingrei.com/gomod/github.com/google/s2a-go/com_github_google_s2a_go-v0.1.4.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/google/s2a-go/com_github_google_s2a_go-v0.1.4.zip", + ], + ) go_repository( name = "com_github_google_skylark", build_file_proto_mode = "disable_global", @@ -3119,39 +3080,39 @@ def go_deps(): name = "com_github_google_uuid", build_file_proto_mode = "disable_global", importpath = "github.com/google/uuid", - sha256 = "0a5fcc05ea492afeaca984a012485f6a15e2259b32f1206d6f36a88c88afc607", - strip_prefix = "github.com/google/uuid@v1.3.0", + sha256 = 
"9d9d6cfb28ce6dbe4b518c42c6bccd67bb531a106859808f36e82a5c3fb8c64d", + strip_prefix = "github.com/google/uuid@v1.3.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.0.zip", - "http://ats.apps.svc/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.0.zip", - "https://cache.hawkingrei.com/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.1.zip", + "http://ats.apps.svc/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.1.zip", + "https://cache.hawkingrei.com/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.1.zip", ], ) go_repository( name = "com_github_googleapis_enterprise_certificate_proxy", build_file_proto_mode = "disable_global", importpath = "github.com/googleapis/enterprise-certificate-proxy", - sha256 = "e3a5b32ca7fc4f8bc36274d87c3547975a2b0603b2a1e4b1129530504d9ddeb7", - strip_prefix = "github.com/googleapis/enterprise-certificate-proxy@v0.2.3", + sha256 = "f0642434f18b33f21c5b2a908907f4c3ae24223791c1c4b92d13d351bfa7ed7e", + strip_prefix = "github.com/googleapis/enterprise-certificate-proxy@v0.2.4", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.3.zip", - "http://ats.apps.svc/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.3.zip", - "https://cache.hawkingrei.com/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.3.zip", - 
"https://storage.googleapis.com/pingcapmirror/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.3.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.4.zip", + "http://ats.apps.svc/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.4.zip", + "https://cache.hawkingrei.com/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.4.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.4.zip", ], ) go_repository( name = "com_github_googleapis_gax_go_v2", build_file_proto_mode = "disable_global", importpath = "github.com/googleapis/gax-go/v2", - sha256 = "b9bdfe36843cdc62b1eb2ba66ac1410164c2478c88c6bfe16c9ce2859922ee80", - strip_prefix = "github.com/googleapis/gax-go/v2@v2.7.1", + sha256 = "10ad5944b8bcce3f2cb9a215a0dda163de5b1f092e61b74a4e162d1eb8f7f7a2", + strip_prefix = "github.com/googleapis/gax-go/v2@v2.12.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.7.1.zip", - "http://ats.apps.svc/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.7.1.zip", - "https://cache.hawkingrei.com/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.7.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.7.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.12.0.zip", + "http://ats.apps.svc/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.12.0.zip", + 
"https://cache.hawkingrei.com/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.12.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.12.0.zip", ], ) go_repository( @@ -4480,13 +4441,13 @@ def go_deps(): name = "com_github_konsorten_go_windows_terminal_sequences", build_file_proto_mode = "disable_global", importpath = "github.com/konsorten/go-windows-terminal-sequences", - sha256 = "429b01413b972b108ea86bbde3d5e660913f3e8099190d07ccfb2f186bc6d837", - strip_prefix = "github.com/konsorten/go-windows-terminal-sequences@v1.0.3", + sha256 = "7fd0273fc0855ed08172c150f756e708d6e43c4a6d52ca4939a8b43d03356091", + strip_prefix = "github.com/konsorten/go-windows-terminal-sequences@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.3.zip", - "http://ats.apps.svc/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.3.zip", - "https://cache.hawkingrei.com/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.3.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.3.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.1.zip", + "http://ats.apps.svc/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.1.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.1.zip", ], ) go_repository( @@ -7067,13 +7028,13 @@ def go_deps(): name = "com_github_tikv_client_go_v2", build_file_proto_mode = "disable_global", importpath = "github.com/tikv/client-go/v2", - sha256 = "1b4e8e9df95d2ed7ff41d756cd00c8159f0aa9483791b50af8afebefa94e5b6c", - strip_prefix = "github.com/tikv/client-go/v2@v2.0.8-0.20231025022411-cad314220659", + sha256 = "8397fd84873e82e8a793e9891fed369ddcbe4a880536bdc7259e724d0a362a9e", + strip_prefix = "github.com/tikv/client-go/v2@v2.0.8-0.20231030021533-3520f13fc074", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231025022411-cad314220659.zip", - "http://ats.apps.svc/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231025022411-cad314220659.zip", - "https://cache.hawkingrei.com/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231025022411-cad314220659.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231025022411-cad314220659.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231030021533-3520f13fc074.zip", + "http://ats.apps.svc/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231030021533-3520f13fc074.zip", + "https://cache.hawkingrei.com/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231030021533-3520f13fc074.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231030021533-3520f13fc074.zip", ], ) go_repository( @@ -7678,351 +7639,338 @@ def go_deps(): name = "com_google_cloud_go", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go", 
- sha256 = "8bdce0d7bfc07e71cebbbd7df2d93d1418a35eed09211bb21e3c1ee8d2fabf7c", - strip_prefix = "cloud.google.com/go@v0.110.0", + sha256 = "3d0ed6092ddd6ffdc4ec4f39e627a706c8d71e09330768c8174428db289d21a4", + strip_prefix = "cloud.google.com/go@v0.110.8", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.8.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.8.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.8.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.8.zip", ], ) go_repository( name = "com_google_cloud_go_accessapproval", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/accessapproval", - sha256 = "4fd31c02273e95e4032c7652822e740dbf074d77d66002df0fb96c1222fd0d1e", - strip_prefix = "cloud.google.com/go/accessapproval@v1.6.0", + sha256 = "e81216a40f4ed1779d4fd2a031ba6df523c9dc4cbe459ed8e746b6044c865248", + strip_prefix = "cloud.google.com/go/accessapproval@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.6.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_accesscontextmanager", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/accesscontextmanager", - sha256 = "90230ccc20b02821de0ef578914c7c32ac3189ebcce539da521228df768fa4f1", - strip_prefix = "cloud.google.com/go/accesscontextmanager@v1.7.0", + sha256 = "4010c95bbdceab4050e4b931bdbe978f50e83de78034c9671a23c414bdb97dc3", + strip_prefix = "cloud.google.com/go/accesscontextmanager@v1.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.8.2.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_aiplatform", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/aiplatform", - sha256 = "e61385ceceb7eb9ef93c80daf51787f083470f104d113c8460794744a853c927", - strip_prefix = "cloud.google.com/go/aiplatform@v1.37.0", + sha256 = "9f49dbeaf81d8bf076b2a23f3d2c822f4d60ab41997ca8e9db081ad2e0945e42", + strip_prefix = "cloud.google.com/go/aiplatform@v1.51.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.37.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.37.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.37.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.37.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.51.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.51.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.51.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.51.1.zip", ], ) go_repository( name = "com_google_cloud_go_analytics", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/analytics", - sha256 = "b2c08e99d317393ea9102cbb4f309d16170790a793b95eeafd026f8263281b3f", - strip_prefix = "cloud.google.com/go/analytics@v0.19.0", + sha256 = "1f0b79e19aa45178c06545ac0eb1ec067583f7742520c933d25722ebdb8d1c2c", + strip_prefix = "cloud.google.com/go/analytics@v0.21.4", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.19.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.19.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.19.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.19.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.21.4.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.21.4.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.21.4.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.21.4.zip", ], ) go_repository( name = "com_google_cloud_go_apigateway", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/apigateway", - sha256 = "81f9cf7d46093a4cf3bb6dfb7ea942784295f093261c45698656dd844bdfa163", - strip_prefix = "cloud.google.com/go/apigateway@v1.5.0", + sha256 = "d3a522706734344ff09513c72b05a8e39bbfe093f9cbab07c3c081698306b014", + strip_prefix = "cloud.google.com/go/apigateway@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.6.2.zip", + 
"http://ats.apps.svc/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.6.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_apigeeconnect", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/apigeeconnect", - sha256 = "a0ae141afd9c762b722778b3508dcc459e18c6890a22586235dafc0f436532a2", - strip_prefix = "cloud.google.com/go/apigeeconnect@v1.5.0", + sha256 = "5d6c4ab3a4a0e921c26b073f7c29ad7dcaff23eef07bd510f2d42fc2a4bb9fc9", + strip_prefix = "cloud.google.com/go/apigeeconnect@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.6.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_apigeeregistry", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/apigeeregistry", - sha256 = 
"1cf7728c1b8d31247d5c2ec10b4b252d6224e9549c2ee7d2222b482dec8aeba4", - strip_prefix = "cloud.google.com/go/apigeeregistry@v0.6.0", + sha256 = "949009434d483756469a40a091e20b979fde2811df9a7f9d5955e1cceecb9b65", + strip_prefix = "cloud.google.com/go/apigeeregistry@v0.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.6.0.zip", - ], - ) - go_repository( - name = "com_google_cloud_go_apikeys", - build_file_proto_mode = "disable_global", - importpath = "cloud.google.com/go/apikeys", - sha256 = "511ba83f3837459a9e553026ecf556ebec9007403054635d90f065f7d735ddbe", - strip_prefix = "cloud.google.com/go/apikeys@v0.6.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apikeys/com_google_cloud_go_apikeys-v0.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/apikeys/com_google_cloud_go_apikeys-v0.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apikeys/com_google_cloud_go_apikeys-v0.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apikeys/com_google_cloud_go_apikeys-v0.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.7.2.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_appengine", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/appengine", - sha256 = "09f35ee5b9d8782bced76b733c7c3a2a5f3b9e41630236a47854b4a92567e646", - strip_prefix = "cloud.google.com/go/appengine@v1.7.1", + sha256 = "ef8ebfc267080d470ebe4bcebef59e8bae90a1418b3f03b27f334e2058d4517e", + strip_prefix = "cloud.google.com/go/appengine@v1.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.7.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.7.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.7.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.7.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_area120", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/area120", - sha256 = "7dcfdf365eb9f29fcedf29b8e32f0023b829732869dc7ad9a2cd8450cbdea8df", - strip_prefix = "cloud.google.com/go/area120@v0.7.1", + sha256 = "04c79c0f28dce15cc4c3ff476995e9691431417b85293b8b202923ea85c2bab5", + strip_prefix = "cloud.google.com/go/area120@v0.8.2", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.7.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.7.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.7.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.7.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_artifactregistry", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/artifactregistry", - sha256 = "abf73586bdced0f590918b37f19643646c3aa04a651480cbdbfad86171f03d98", - strip_prefix = "cloud.google.com/go/artifactregistry@v1.13.0", + sha256 = "121b1aba80e678166214cdcf45093fdface59a86ff7a930f3a44381e9c3c2f43", + strip_prefix = "cloud.google.com/go/artifactregistry@v1.14.3", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.14.3.zip", + 
"http://ats.apps.svc/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.14.3.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.14.3.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.14.3.zip", ], ) go_repository( name = "com_google_cloud_go_asset", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/asset", - sha256 = "dcaee2c49835e7f9c53d77b21738d4d803e25b2b52dc4c71c5e145332fead841", - strip_prefix = "cloud.google.com/go/asset@v1.13.0", + sha256 = "8c99032799a39f65d87c1ed91bbaebe5ed2b84675231933106b0a8b48997214a", + strip_prefix = "cloud.google.com/go/asset@v1.15.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.15.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.15.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.15.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.15.1.zip", ], ) go_repository( name = "com_google_cloud_go_assuredworkloads", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/assuredworkloads", - sha256 = "f82b2f4ba2d692deff3ccf7dacfc23e744d70804f55fbb34affee7552da4f730", - strip_prefix = "cloud.google.com/go/assuredworkloads@v1.10.0", + sha256 = 
"31788ef9db908faea41ecbd7f8d49246d47e77897029f153df641759f9456e78", + strip_prefix = "cloud.google.com/go/assuredworkloads@v1.11.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.11.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.11.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.11.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.11.2.zip", ], ) go_repository( name = "com_google_cloud_go_automl", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/automl", - sha256 = "e8a1b910ab247a441ad74592d93d4c37721d7ecfde2dcd7afceeaffab0505574", - strip_prefix = "cloud.google.com/go/automl@v1.12.0", + sha256 = "2d4aea70974f6409654bad3125ae1d80b810a1cb1777aee622c8502dd52c6693", + strip_prefix = "cloud.google.com/go/automl@v1.13.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.12.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.12.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.12.0.zip", - 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.12.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.13.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.13.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.13.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.13.2.zip", ], ) go_repository( name = "com_google_cloud_go_baremetalsolution", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/baremetalsolution", - sha256 = "f3bdfc95c4743654198599087e86063428d823b10c8f4b59260376255403d3a6", - strip_prefix = "cloud.google.com/go/baremetalsolution@v0.5.0", + sha256 = "79a2af9446dad6522ffaf60e3da8f564813b6a3ec7d71f43080f86e49bf90460", + strip_prefix = "cloud.google.com/go/baremetalsolution@v1.2.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v0.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v0.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v0.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v0.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v1.2.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v1.2.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v1.2.1.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v1.2.1.zip", ], ) go_repository( name = "com_google_cloud_go_batch", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/batch", - sha256 = "9b7fda9ddd263f3cb57afe020014bb4153736e13656dd39896088bda972b3f8c", - strip_prefix = "cloud.google.com/go/batch@v0.7.0", + sha256 = "d06b057177356f5d91140f59c2015a295f98f040f81d54b25f0d0bff230e3b2a", + strip_prefix = "cloud.google.com/go/batch@v1.5.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v1.5.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v1.5.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v1.5.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v1.5.1.zip", ], ) go_repository( name = "com_google_cloud_go_beyondcorp", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/beyondcorp", - sha256 = "6ff3ee86f910355281d4fccbf476922447ea6ba33579e5d40c7dcec407dfdf1a", - strip_prefix = "cloud.google.com/go/beyondcorp@v0.5.0", + sha256 = "e7497be44bc10e4c468a3b100f65ae8e3d351034544c2feb1447f54300659bfd", + strip_prefix = "cloud.google.com/go/beyondcorp@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v0.5.0.zip", - 
"http://ats.apps.svc/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v0.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v0.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v0.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v1.0.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v1.0.1.zip", ], ) go_repository( name = "com_google_cloud_go_bigquery", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/bigquery", - sha256 = "3866e7d059fb9fb91f5323bc2061aded6834162d76e476da27ab64e48c2a6755", - strip_prefix = "cloud.google.com/go/bigquery@v1.50.0", + sha256 = "e876ce8407d288df3075f142c19c429540a7a917b1fdd6dd68b3438ad8349412", + strip_prefix = "cloud.google.com/go/bigquery@v1.56.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.50.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.50.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.50.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.50.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.56.0.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.56.0.zip", + 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.56.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.56.0.zip", ], ) go_repository( name = "com_google_cloud_go_billing", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/billing", - sha256 = "6a1422bb60b43683d1b5d1be3eacd1992b1bb656e187cec3e398c9d27299eadb", - strip_prefix = "cloud.google.com/go/billing@v1.13.0", + sha256 = "fecfc00cf9ea70a1109ec8e1a190e328fb1cc2f0159b1d4b10111cd5651b2bae", + strip_prefix = "cloud.google.com/go/billing@v1.17.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.17.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.17.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.17.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.17.2.zip", ], ) go_repository( name = "com_google_cloud_go_binaryauthorization", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/binaryauthorization", - sha256 = "4a5d9c61a748d7b2dc14542c66f033701694e537b954619fb70f53aa1f31263f", - strip_prefix = "cloud.google.com/go/binaryauthorization@v1.5.0", + sha256 = "afb102bcbd2836c1371d9e6a179da9109cdaa5c41a286d73ee6c93d3ae775736", + strip_prefix = 
"cloud.google.com/go/binaryauthorization@v1.7.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.7.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.7.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.7.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.7.1.zip", ], ) go_repository( name = "com_google_cloud_go_certificatemanager", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/certificatemanager", - sha256 = "28c924f5edcc34f79ae7e7542a0179b0f49457f9ce6e89c86336fe5be2fdb8ac", - strip_prefix = "cloud.google.com/go/certificatemanager@v1.6.0", + sha256 = "877ddd1a5c8e2efa94f0055b5371306eb07cf4cd52d5a70c15e0c38d6f6d8e32", + strip_prefix = "cloud.google.com/go/certificatemanager@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.6.0.zip", - 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_channel", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/channel", - sha256 = "097f8225139cc2f3d4676e6b78d1d4cdbfd0f5558e1ab3a66ded9a085700d4b2", - strip_prefix = "cloud.google.com/go/channel@v1.12.0", + sha256 = "fd6990eda15ff2f698c8f09db37f5ba11d3a39b89fae50b6231c9ae2eae4a768", + strip_prefix = "cloud.google.com/go/channel@v1.17.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.12.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.12.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.12.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.12.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.17.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.17.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.17.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.17.1.zip", ], ) 
go_repository( name = "com_google_cloud_go_cloudbuild", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/cloudbuild", - sha256 = "80d00c57b4b55e71e45e4c7427ee0da0aae082fc0b7be0fcdc2d756a71b9d8b3", - strip_prefix = "cloud.google.com/go/cloudbuild@v1.9.0", + sha256 = "f3e6b2f036308af4749695e059c274459a89a3329785a7a68492e85ecb5a5a22", + strip_prefix = "cloud.google.com/go/cloudbuild@v1.14.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.14.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.14.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.14.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.14.1.zip", ], ) go_repository( name = "com_google_cloud_go_clouddms", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/clouddms", - sha256 = "9a9488b44e7a18811c0fcb13beb1fe9c3c5f7613b3109734af6f88af19843d90", - strip_prefix = "cloud.google.com/go/clouddms@v1.5.0", + sha256 = "bbdc27660a1a25cbe7f5c8d2ef3b87fc97910178f43a2ba037f7735b5939b186", + strip_prefix = "cloud.google.com/go/clouddms@v1.7.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.5.0.zip", - 
"http://ats.apps.svc/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.7.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.7.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.7.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.7.1.zip", ], ) go_repository( name = "com_google_cloud_go_cloudtasks", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/cloudtasks", - sha256 = "9219724339007e7278d19a293285dcb45f4a38addc31d9722c98ce0b8095efe5", - strip_prefix = "cloud.google.com/go/cloudtasks@v1.10.0", + sha256 = "4a4c78416add083ce1b7a8e82138f08f5940c132a1a8705c97e4523f5fbd91e9", + strip_prefix = "cloud.google.com/go/cloudtasks@v1.12.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.12.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.12.2.zip", + 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.12.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.12.2.zip", ], ) go_repository( name = "com_google_cloud_go_compute", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/compute", - sha256 = "789696687da53dd22d22c5c49e0cc0636a44703459992236d18495e79d9b9c03", - strip_prefix = "cloud.google.com/go/compute@v1.19.0", + sha256 = "cde4afb8f4c63ff4b1b17feb0a9ae75abbd0fbfdd9e94ffa0eaaf300e803dce7", + strip_prefix = "cloud.google.com/go/compute@v1.23.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.19.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.19.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.19.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.19.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.23.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.23.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.23.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.23.1.zip", ], ) go_repository( @@ -8042,247 +7990,247 @@ def go_deps(): name = "com_google_cloud_go_contactcenterinsights", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/contactcenterinsights", - sha256 = "e06630e09b6ee01e3693ff079ee6279de32566ae29fefeacdd410c61e1a1a5fe", - strip_prefix = "cloud.google.com/go/contactcenterinsights@v1.6.0", + sha256 = "9e08c7acf5ffb2fcff937872cad37d7e1a2dc1b7d0d70aa450beb7cb21c61b1c", + 
strip_prefix = "cloud.google.com/go/contactcenterinsights@v1.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.11.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.11.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.11.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.11.1.zip", ], ) go_repository( name = "com_google_cloud_go_container", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/container", - sha256 = "2dfba11e311b5dc9ea7e8b60cfd2dff3b060564a845bdac98945173dc3ef12ac", - strip_prefix = "cloud.google.com/go/container@v1.15.0", + sha256 = "f93f3636acb226560294d803e2f0b563e5ea5a0383707343ddd40c3287e53f43", + strip_prefix = "cloud.google.com/go/container@v1.26.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.15.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.15.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.15.0.zip", - 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.15.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.26.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.26.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.26.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.26.1.zip", ], ) go_repository( name = "com_google_cloud_go_containeranalysis", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/containeranalysis", - sha256 = "6319d5102b56fa4c4576fb3aa9b4aeb30f1c3f5e45bccd747d0da27ccfceb147", - strip_prefix = "cloud.google.com/go/containeranalysis@v0.9.0", + sha256 = "afe6c1616e50df702c99867da30e415b6fcaa6212175b4552c8fba41b171e803", + strip_prefix = "cloud.google.com/go/containeranalysis@v0.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.11.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.11.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.11.1.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.11.1.zip", ], ) go_repository( name = "com_google_cloud_go_datacatalog", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/datacatalog", - sha256 = "2e79aaa321c13a3cd5d536aa5d8d295afacb03752862c4e78bcfc8ce99501ca6", - strip_prefix = "cloud.google.com/go/datacatalog@v1.13.0", + sha256 = "92d8c36abbd4a2224889e077ca5cfdf25ec9eecbbfd08e3c77817bbdfa414947", + strip_prefix = "cloud.google.com/go/datacatalog@v1.18.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.18.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.18.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.18.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.18.1.zip", ], ) go_repository( name = "com_google_cloud_go_dataflow", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/dataflow", - sha256 = "f20f98ca4fb97f9c027f2e56edf7effe2c95f59d7d5a230dfa3be525fa130595", - strip_prefix = "cloud.google.com/go/dataflow@v0.8.0", + sha256 = "1b26af290f0f57e70e5ddf886e44c4e6e1d4c209819c3dcc698c199ff51ef00d", + strip_prefix = "cloud.google.com/go/dataflow@v0.9.2", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_dataform", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/dataform", - sha256 = "2867f6d78bb34adf8e295fb2158ad2df352cd28d79aa0c6e509dd5a389e04692", - strip_prefix = "cloud.google.com/go/dataform@v0.7.0", + sha256 = "da816fdd03b9c9240c1a62adeb2aed112e3bf68f775e45944791c8a912c4a69e", + strip_prefix = "cloud.google.com/go/dataform@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.8.2.zip", + 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_datafusion", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/datafusion", - sha256 = "9d12d5f177f6db6980afa69a9547e7653276bbb85821404d8856d432c56706bb", - strip_prefix = "cloud.google.com/go/datafusion@v1.6.0", + sha256 = "ea3e48d218c57cbfb2e2bbdab083d4218f918da3864c723fa36cd36af8dacf7e", + strip_prefix = "cloud.google.com/go/datafusion@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_datalabeling", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/datalabeling", - sha256 = "9a7084aa65112251f45ed12f3118a33667fb5e90bbd14ddc64c9c64655aee9f0", - strip_prefix = "cloud.google.com/go/datalabeling@v0.7.0", + sha256 = "51e5e5eb727485adc627c9ef3031b27747600804a7a2ae42275f2c4475dfab64", + strip_prefix = 
"cloud.google.com/go/datalabeling@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_dataplex", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/dataplex", - sha256 = "047519cc76aedf7b0ddb4e3145d9e96d88bc10776ef9252daa43acd25c367911", - strip_prefix = "cloud.google.com/go/dataplex@v1.6.0", + sha256 = "a2bcf17307e944d70a8745586df8e57f8a46feb464607715b2d916d69ea3431d", + strip_prefix = "cloud.google.com/go/dataplex@v1.10.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.6.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.10.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.10.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.10.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.10.1.zip", ], ) go_repository( - name = "com_google_cloud_go_dataproc", + name = "com_google_cloud_go_dataproc_v2", build_file_proto_mode = "disable_global", - importpath = "cloud.google.com/go/dataproc", - sha256 = "f4adc94c30406a2bd04b62f2a0c8c33ddb605ffda53024b034e5c136407f0c73", - strip_prefix = "cloud.google.com/go/dataproc@v1.12.0", + importpath = "cloud.google.com/go/dataproc/v2", + sha256 = "9f170a095d8a5b7976ce660a31414882a8f38d4443c3cf05d70fe28ceab1e985", + strip_prefix = "cloud.google.com/go/dataproc/v2@v2.2.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataproc/com_google_cloud_go_dataproc-v1.12.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dataproc/com_google_cloud_go_dataproc-v1.12.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataproc/com_google_cloud_go_dataproc-v1.12.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataproc/com_google_cloud_go_dataproc-v1.12.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataproc/v2/com_google_cloud_go_dataproc_v2-v2.2.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dataproc/v2/com_google_cloud_go_dataproc_v2-v2.2.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataproc/v2/com_google_cloud_go_dataproc_v2-v2.2.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataproc/v2/com_google_cloud_go_dataproc_v2-v2.2.1.zip", ], ) go_repository( name = "com_google_cloud_go_dataqna", build_file_proto_mode = 
"disable_global", importpath = "cloud.google.com/go/dataqna", - sha256 = "20e60cfe78e1b2f72122cf44184d8e9a9af7bdfc9e44a2c33e4b782dee477d25", - strip_prefix = "cloud.google.com/go/dataqna@v0.7.0", + sha256 = "69fcb32f4dc8b37e5a1a9e6fd3c33e64953ea06cb91fcfbcf59cafa31dfa8d86", + strip_prefix = "cloud.google.com/go/dataqna@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_datastore", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/datastore", - sha256 = "6b81cf09ce8daee02c880343ff82acfefbd3c7b67ff2b93bf9f1479c5e25f627", - strip_prefix = "cloud.google.com/go/datastore@v1.11.0", + sha256 = "8b89b61b9655adcfb197079184d0438dc15fc12aa7c3ef72f61fa8ddbad22880", + strip_prefix = "cloud.google.com/go/datastore@v1.15.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.11.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.11.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.11.0.zip", - 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.11.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.15.0.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.15.0.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.15.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.15.0.zip", ], ) go_repository( name = "com_google_cloud_go_datastream", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/datastream", - sha256 = "02571fbbe7aa5052c91c2b99f3c799dc278bbe001871036101959338e789800c", - strip_prefix = "cloud.google.com/go/datastream@v1.7.0", + sha256 = "29df6dde384fe4c964970ef77462fd939b8c45d49ff7cb82fbc39596f8e34893", + strip_prefix = "cloud.google.com/go/datastream@v1.10.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.10.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.10.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.10.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.10.1.zip", ], ) go_repository( name = 
"com_google_cloud_go_deploy", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/deploy", - sha256 = "9bf6d2ad426d9d80636ca5b7c1486b91a8e31c61a50a79856195fdad65bda004", - strip_prefix = "cloud.google.com/go/deploy@v1.8.0", + sha256 = "f7555f1cd13f36ae70982f4729531176cf322a75c6b9dde6c72f8a843d525481", + strip_prefix = "cloud.google.com/go/deploy@v1.13.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.13.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.13.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.13.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.13.1.zip", ], ) go_repository( name = "com_google_cloud_go_dialogflow", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/dialogflow", - sha256 = "de2009a08b3db53b7292852a7c28dd52218c8fcb7937fc0049b0219e429bafdb", - strip_prefix = "cloud.google.com/go/dialogflow@v1.32.0", + sha256 = "9a17b92cec11b7877f38b7d31bb42928710b87f55b54c703b0858a86bf26421f", + strip_prefix = "cloud.google.com/go/dialogflow@v1.44.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.32.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.32.0.zip", - 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.32.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.32.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.44.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.44.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.44.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.44.1.zip", ], ) go_repository( name = "com_google_cloud_go_dlp", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/dlp", - sha256 = "a32c4dbda0445a401ec25e9faf3f10b25b6fd264917825a0d053e6e297cdfc61", - strip_prefix = "cloud.google.com/go/dlp@v1.9.0", + sha256 = "787fb0c860a5a6df47080feb59dc34bb26fc23fed5a98d8fc7b42636bc81b5ac", + strip_prefix = "cloud.google.com/go/dlp@v1.10.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.10.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.10.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.10.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.10.2.zip", ], ) go_repository( name = 
"com_google_cloud_go_documentai", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/documentai", - sha256 = "9806274a2a5af71b115ddc7357be24757b0331b1661cac642f7d0eb6b6894a7b", - strip_prefix = "cloud.google.com/go/documentai@v1.18.0", + sha256 = "d306b29ea9ed00003eb4a72de10527a9898b32a67a5d9b1c028845db6ee977e6", + strip_prefix = "cloud.google.com/go/documentai@v1.23.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.18.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.18.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.18.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.18.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.23.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.23.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.23.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.23.2.zip", ], ) go_repository( name = "com_google_cloud_go_domains", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/domains", - sha256 = "26ed447b319c064d0ce19d85c6de127af1aa87c727af6202b1f7a3b95d35bd0a", - strip_prefix = "cloud.google.com/go/domains@v0.8.0", + sha256 = "541b361f96b0f849324e794b2b24f53aa73e30dbb1ba02f12e94b5dd38759db7", + strip_prefix = "cloud.google.com/go/domains@v0.9.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.8.0.zip", - 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_edgecontainer", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/edgecontainer", - sha256 = "c22e2f212fcfcf9f0af32c43c47b4311fc07c382e78810a34afe273ba363429c", - strip_prefix = "cloud.google.com/go/edgecontainer@v1.0.0", + sha256 = "6671ed73144587cd1f5d20982d395a8628ad130ffea7d064d790e2c92274b3b3", + strip_prefix = "cloud.google.com/go/edgecontainer@v1.1.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.0.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.0.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.0.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.0.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.1.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.1.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.1.2.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.1.2.zip", ], ) go_repository( @@ -8302,767 +8250,715 @@ def go_deps(): name = "com_google_cloud_go_essentialcontacts", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/essentialcontacts", - sha256 = "b595846269076fbabcee96eda6718c41c1b94c2758edc42537f490accaa40b19", - strip_prefix = "cloud.google.com/go/essentialcontacts@v1.5.0", + sha256 = "100839140d920ea39df237c99782dae60aa6827be723a8f17dcd77f29ff71eca", + strip_prefix = "cloud.google.com/go/essentialcontacts@v1.6.3", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.6.3.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.6.3.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.6.3.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.6.3.zip", ], ) go_repository( name = "com_google_cloud_go_eventarc", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/eventarc", - sha256 = "6bdda029e620653f4dcdc10fa1099ec6b28c0e5ecbb5c1b34b58374efcc1beec", - strip_prefix = "cloud.google.com/go/eventarc@v1.11.0", + sha256 = 
"c1abc76d8cdebcf2fbff6a5f5c289479749713033ba188853f6156b1f3a2c575", + strip_prefix = "cloud.google.com/go/eventarc@v1.13.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.11.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.11.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.11.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.11.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.13.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.13.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.13.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.13.1.zip", ], ) go_repository( name = "com_google_cloud_go_filestore", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/filestore", - sha256 = "77c99a79955f99b33988d4ce7d4656ab3bbeaef794d788ae295eccdecf799839", - strip_prefix = "cloud.google.com/go/filestore@v1.6.0", + sha256 = "ddfc413e66b4e18263d250a7bc7d2d723b4007729107f4a33efc90fafb7149ea", + strip_prefix = "cloud.google.com/go/filestore@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.6.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_firestore", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/firestore", - sha256 = "f4bd0f35095358181574ae03a8bed7618fe8f50a63d54b2e49a85d71c47104c7", - strip_prefix = "cloud.google.com/go/firestore@v1.9.0", + sha256 = "4e14ba924858cda4925eccd288c8fb4ad377f227252138fe1681c0e9391ad3e0", + strip_prefix = "cloud.google.com/go/firestore@v1.13.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.13.0.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.13.0.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.13.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.13.0.zip", ], ) go_repository( name = "com_google_cloud_go_functions", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/functions", - sha256 = 
"9635cbe16b0bf748108ce30c4686a909227d342e2ed47c1c1c45cfaa44be6d89", - strip_prefix = "cloud.google.com/go/functions@v1.13.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.13.0.zip", - ], - ) - go_repository( - name = "com_google_cloud_go_gaming", - build_file_proto_mode = "disable_global", - importpath = "cloud.google.com/go/gaming", - sha256 = "5a0680fb577f1ea1d3e815ff2e7fa22931e2c9e492e151087cdef34b1f9ece97", - strip_prefix = "cloud.google.com/go/gaming@v1.9.0", + sha256 = "34232aa309d00ffef25ac784e2e1a702c8aaf5b921152c257772f59dbe4234ee", + strip_prefix = "cloud.google.com/go/functions@v1.15.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gaming/com_google_cloud_go_gaming-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/gaming/com_google_cloud_go_gaming-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gaming/com_google_cloud_go_gaming-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gaming/com_google_cloud_go_gaming-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.15.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.15.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.15.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.15.2.zip", ], ) go_repository( name = "com_google_cloud_go_gkebackup", 
build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/gkebackup", - sha256 = "d7a06be74c96d73dc3f032431cffd1e01656c670ed85d70da916933b4a91d85d", - strip_prefix = "cloud.google.com/go/gkebackup@v0.4.0", + sha256 = "f1617ab86d537328e3f3c36790da6d432caf00df1c60d7f7c59e49b3552296bf", + strip_prefix = "cloud.google.com/go/gkebackup@v1.3.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v0.4.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v0.4.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v0.4.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v0.4.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v1.3.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v1.3.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v1.3.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v1.3.2.zip", ], ) go_repository( name = "com_google_cloud_go_gkeconnect", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/gkeconnect", - sha256 = "37fe8da6dd9a04e90a245093f72b30dae67d511ab13a6c24db25b3ee8c547d25", - strip_prefix = "cloud.google.com/go/gkeconnect@v0.7.0", + sha256 = "009e1bb490e0091744f0d5ff7b3b7cbe5085a7795b775204fe45e80535f452ce", + strip_prefix = "cloud.google.com/go/gkeconnect@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.7.0.zip", - 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_gkehub", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/gkehub", - sha256 = "e44073c24ed21976762f6a13f0adad46863eec5ac1dbaa20045fc0b63e1fd2ce", - strip_prefix = "cloud.google.com/go/gkehub@v0.12.0", + sha256 = "acef02e283a877fae6242895bea73e00c655a239b6a34e6c4f26dde75214e897", + strip_prefix = "cloud.google.com/go/gkehub@v0.14.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.12.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.12.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.12.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.12.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.14.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.14.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.14.2.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.14.2.zip", ], ) go_repository( name = "com_google_cloud_go_gkemulticloud", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/gkemulticloud", - sha256 = "9c851d037561d6cc67c20b247c505ca9c0697dc7e85251bd756f478f473483b1", - strip_prefix = "cloud.google.com/go/gkemulticloud@v0.5.0", + sha256 = "ad4f2be30a3e031aaec680b8f4548876b024e02a835a51b4418f04c1a0d45437", + strip_prefix = "cloud.google.com/go/gkemulticloud@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v0.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v0.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v0.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v0.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v1.0.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v1.0.1.zip", ], ) go_repository( name = "com_google_cloud_go_gsuiteaddons", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/gsuiteaddons", - sha256 = "911963d78ba7974bd3e807888fde1879a5c871cdf3c43369eebb9778a3fdc4c1", - strip_prefix = "cloud.google.com/go/gsuiteaddons@v1.5.0", + sha256 = "c31266cc003017a841473f2eaa162d0d4a58302ac6085153c8961b8673af1b6a", + strip_prefix = "cloud.google.com/go/gsuiteaddons@v1.6.2", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.6.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_iam", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/iam", - sha256 = "a8236c53eb06cc21c5c972fcfc4153fbce5a44eb7a1b7c88cadc307b8768328a", - strip_prefix = "cloud.google.com/go/iam@v0.13.0", + sha256 = "56e6aba936af03c61fc21eb58f562596cadd6bacc30a07a7fb2a2516c28764bb", + strip_prefix = "cloud.google.com/go/iam@v1.1.3", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v0.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v0.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v0.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v0.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v1.1.3.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v1.1.3.zip", + 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v1.1.3.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v1.1.3.zip", ], ) go_repository( name = "com_google_cloud_go_iap", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/iap", - sha256 = "c2e76b45c74ecebad179dca0398a5279bcf47d30c35d8c347c8d59d98f944f90", - strip_prefix = "cloud.google.com/go/iap@v1.7.1", + sha256 = "70dd5562de160017ea166cbd5a959eda628b025bc6dc93a269fc183d96eec8cf", + strip_prefix = "cloud.google.com/go/iap@v1.9.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.7.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.7.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.7.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.7.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.9.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.9.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.9.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.9.1.zip", ], ) go_repository( name = "com_google_cloud_go_ids", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/ids", - sha256 = "8a684da48da978ae35937cb3b9a84da1a7673789e8363501ccc317108b712913", - strip_prefix = "cloud.google.com/go/ids@v1.3.0", + sha256 = "5774a1cf5f3e09af43d38f37163c3ead590bb06119d4256e1a2670d40190094d", + strip_prefix = "cloud.google.com/go/ids@v1.4.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.3.0.zip", - 
"http://ats.apps.svc/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.3.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.3.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.3.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.4.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.4.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.4.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.4.2.zip", ], ) go_repository( name = "com_google_cloud_go_iot", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/iot", - sha256 = "960bf7d2c22c0c31d9d903343672d1e949d2bb1442264c15d9de57659b51e126", - strip_prefix = "cloud.google.com/go/iot@v1.6.0", + sha256 = "4dbe9fdf18ecd066a3986acd9a98680adac6a1cf50bbac6c04432e3059570fea", + strip_prefix = "cloud.google.com/go/iot@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_kms", 
build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/kms", - sha256 = "7f54a8218570636a93ea8b33843ed179b4b881f7d5aa8982912ddfdf7090ba38", - strip_prefix = "cloud.google.com/go/kms@v1.10.1", + sha256 = "89ef8ac2bde3827d875928c8911c3ac874b7f0f6d1396ba3f6f17b51448738c4", + strip_prefix = "cloud.google.com/go/kms@v1.15.3", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.10.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.10.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.10.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.10.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.15.3.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.15.3.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.15.3.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.15.3.zip", ], ) go_repository( name = "com_google_cloud_go_language", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/language", - sha256 = "c66908967b2558c00ca79b31f6788a1cd5f7ba9ee24ebe109ea3b4ac1ab372a1", - strip_prefix = "cloud.google.com/go/language@v1.9.0", + sha256 = "353e525423b6547a806aea28f63f401759fd090855a1544c1228f48337470201", + strip_prefix = "cloud.google.com/go/language@v1.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.9.0.zip", - 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.11.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.11.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.11.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.11.1.zip", ], ) go_repository( name = "com_google_cloud_go_lifesciences", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/lifesciences", - sha256 = "8638174541f6d1b8d03cce39e94d5ba7b85def5550151e69c4d54e61d60101e3", - strip_prefix = "cloud.google.com/go/lifesciences@v0.8.0", + sha256 = "32e38b08fcef1d06b7512261d1116fbac7e5e331942e4512a26d73f62625e5d6", + strip_prefix = "cloud.google.com/go/lifesciences@v0.9.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.9.2.zip", ], ) 
go_repository( name = "com_google_cloud_go_logging", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/logging", - sha256 = "1b56716e7440c5064ed17af2c40bbba0c2e0f1d628f9f4864e81b7bd2958a2f3", - strip_prefix = "cloud.google.com/go/logging@v1.7.0", + sha256 = "8b2275192caa4b3f260c23edcf2ae08a45e510573fca5487c7a21056fd88d3f9", + strip_prefix = "cloud.google.com/go/logging@v1.8.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.8.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.8.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.8.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.8.1.zip", ], ) go_repository( name = "com_google_cloud_go_longrunning", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/longrunning", - sha256 = "6cb4e4a6b80435cb12ab0192ca281893e750f20903cdf5f2432a6d61db190361", - strip_prefix = "cloud.google.com/go/longrunning@v0.4.1", + sha256 = "ba7fd6475a3e6f6335461d5a707232ccf8336397802e83c5401c2308906ee76b", + strip_prefix = "cloud.google.com/go/longrunning@v0.5.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.4.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.4.1.zip", - 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.4.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.4.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.5.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.5.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.5.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.5.2.zip", ], ) go_repository( name = "com_google_cloud_go_managedidentities", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/managedidentities", - sha256 = "6ca18f1a180e7ce3159b8c6fdf93ba66122775a112874d9ce9a7f9fca3150a95", - strip_prefix = "cloud.google.com/go/managedidentities@v1.5.0", + sha256 = "d81fe6c82e62b737a430b640c698220c61722b122b6ac9722e7b365eecd4e074", + strip_prefix = "cloud.google.com/go/managedidentities@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.6.2.zip", + 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_maps", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/maps", - sha256 = "9988ceccfc296bc154f5cbd0ae455131ddec336e93293b07d1c5f4948653dd93", - strip_prefix = "cloud.google.com/go/maps@v0.7.0", + sha256 = "c9cb6250a7ff92586fb2e212fc3b36437c9baa74e9b373461a0b33e40d359909", + strip_prefix = "cloud.google.com/go/maps@v1.4.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v1.4.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v1.4.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v1.4.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v1.4.1.zip", ], ) go_repository( name = "com_google_cloud_go_mediatranslation", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/mediatranslation", - sha256 = "e78d770431918e6653b61029adf076402e15875acaa165c0db216567abeb5e63", - strip_prefix = "cloud.google.com/go/mediatranslation@v0.7.0", + sha256 = "6805e80cc3a6615eb086c1efdbc57bd378779962d134e22509af1ef77db1eb7f", + strip_prefix = "cloud.google.com/go/mediatranslation@v0.8.2", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_memcache", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/memcache", - sha256 = "e01bca761af97779d7a4b0d632fd0463d324b80fac75662c594dd008270ed389", - strip_prefix = "cloud.google.com/go/memcache@v1.9.0", + sha256 = "80d1544a452cdb9a051cdd577a2dc018b56a9250c54ca5df194c65855a6cf7b5", + strip_prefix = "cloud.google.com/go/memcache@v1.10.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.9.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.10.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.10.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.10.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.10.2.zip", ], ) go_repository( name = "com_google_cloud_go_metastore", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/metastore", - sha256 = "6ec835f8d18b39056072b7814a51cd6c22179cbf97f2b0204dc73d94082f00a4", - strip_prefix = "cloud.google.com/go/metastore@v1.10.0", + sha256 = "fe4aa0aa4abd1bd6c1cb3c6d506c3acfb58e9851cdbe91b017360e4ce6533ff9", + strip_prefix = "cloud.google.com/go/metastore@v1.13.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.13.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.13.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.13.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.13.1.zip", ], ) go_repository( name = "com_google_cloud_go_monitoring", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/monitoring", - sha256 = 
"3ed009f1b492887939537dc59bea91ad78129eab5cba1fb4f090690a0f2a1f22", - strip_prefix = "cloud.google.com/go/monitoring@v1.13.0", + sha256 = "545af97f19cde57c99d37c8741d45f110a472f62e348313cef2054f8623661cd", + strip_prefix = "cloud.google.com/go/monitoring@v1.16.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.16.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.16.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.16.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.16.1.zip", ], ) go_repository( name = "com_google_cloud_go_networkconnectivity", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/networkconnectivity", - sha256 = "c2cd6ef6c8a4141ea70a20669000695559d3f3d41498de98c61878597cca05ea", - strip_prefix = "cloud.google.com/go/networkconnectivity@v1.11.0", + sha256 = "fa63079ab0dfcd34b074145057487d18d95e6b380b2b19c4a9a2113303333fdb", + strip_prefix = "cloud.google.com/go/networkconnectivity@v1.14.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.11.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.11.0.zip", - 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.11.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.11.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.14.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.14.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.14.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.14.1.zip", ], ) go_repository( name = "com_google_cloud_go_networkmanagement", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/networkmanagement", - sha256 = "4c74b55c69b73655d14d2198be6d6e8d4da240e7284c5c99eb2a7591bb95c187", - strip_prefix = "cloud.google.com/go/networkmanagement@v1.6.0", + sha256 = "bed1460ce979230c94121f814c62aee524f223568bfcf00962e8683379016c49", + strip_prefix = "cloud.google.com/go/networkmanagement@v1.9.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.9.1.zip", + 
"http://ats.apps.svc/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.9.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.9.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.9.1.zip", ], ) go_repository( name = "com_google_cloud_go_networksecurity", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/networksecurity", - sha256 = "1a358f55bb3daaba03ad22fe0ecbf67f334e829f3c7412de37f85b607572cb67", - strip_prefix = "cloud.google.com/go/networksecurity@v0.8.0", + sha256 = "b4e959bd1d9c97e7267c529ee023fa55a7bbcd7b5f2964b6c99f0fb51006dbcb", + strip_prefix = "cloud.google.com/go/networksecurity@v0.9.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_notebooks", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/notebooks", 
- sha256 = "24ca6efce18d2cb1001280ad2c3dc2a002279b258ecf5d20bf912b666b19d279", - strip_prefix = "cloud.google.com/go/notebooks@v1.8.0", + sha256 = "9f33be59f75e363bd6a90a465ada18ad8cc549b82facbb9541b81ae9b3ba7937", + strip_prefix = "cloud.google.com/go/notebooks@v1.10.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.10.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.10.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.10.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.10.1.zip", ], ) go_repository( name = "com_google_cloud_go_optimization", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/optimization", - sha256 = "a86473b6c76f5669e4c98ad4837a2ec77faab9bfabeb52c0f26b10019e039986", - strip_prefix = "cloud.google.com/go/optimization@v1.3.1", + sha256 = "feb9d564067168d48be5e2bde7f19032a7c27a779beefc09d3aa19f0c2b9eaf2", + strip_prefix = "cloud.google.com/go/optimization@v1.5.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.3.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.3.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.3.1.zip", - 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.3.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.5.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.5.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.5.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.5.1.zip", ], ) go_repository( name = "com_google_cloud_go_orchestration", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/orchestration", - sha256 = "9568ea88c1626f6d69ac48abcbd4dfab26aebe3be89a19f179bf3277bcda26e9", - strip_prefix = "cloud.google.com/go/orchestration@v1.6.0", + sha256 = "81f752cb325e335254f9c33be9d7bacdf0fbc8ef929828a0496e35b2787dc2df", + strip_prefix = "cloud.google.com/go/orchestration@v1.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.8.2.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_orgpolicy", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/orgpolicy", - sha256 = "6fa13831a918ac690ed1073967e210349a13c2cd9bf51f84ba5cd6522a052d32", - strip_prefix = "cloud.google.com/go/orgpolicy@v1.10.0", + sha256 = "2a4f68b17cf411c15e384e0bb1a7d5c823e9212129535075588cbd4c3e3da73d", + strip_prefix = "cloud.google.com/go/orgpolicy@v1.11.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.11.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.11.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.11.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.11.2.zip", ], ) go_repository( name = "com_google_cloud_go_osconfig", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/osconfig", - sha256 = "8f97d324f398aebb4af096041f8547a5b6b09cba754ba082fe3eca7f29a8b885", - strip_prefix = "cloud.google.com/go/osconfig@v1.11.0", + sha256 = "275f62c21ecf8371b55e778ab87e9ef588ced27cc63a9e985ab5029eddcdb843", + strip_prefix = "cloud.google.com/go/osconfig@v1.12.2", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.11.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.11.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.11.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.11.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.12.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.12.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.12.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.12.2.zip", ], ) go_repository( name = "com_google_cloud_go_oslogin", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/oslogin", - sha256 = "4e1f1ec2a64a8bb7f878185b3e618bb077df6fa94ed6704ab012e18c4ecd4fce", - strip_prefix = "cloud.google.com/go/oslogin@v1.9.0", + sha256 = "29ef72254fe0efd3778f6cc8fbc3df9c33f7ce7b6045d0f6d96eb876044b2237", + strip_prefix = "cloud.google.com/go/oslogin@v1.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.11.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.11.1.zip", + 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.11.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.11.1.zip", ], ) go_repository( name = "com_google_cloud_go_phishingprotection", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/phishingprotection", - sha256 = "7a3ce8e6b2c8f828fcd344b653849cf1e90abeca48a7eef81c75a72cb924d9e2", - strip_prefix = "cloud.google.com/go/phishingprotection@v0.7.0", + sha256 = "6244bb1f396e3cb42e513ddc934923f0e14ab55f40b828f16c90be57a4b94a84", + strip_prefix = "cloud.google.com/go/phishingprotection@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_policytroubleshooter", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/policytroubleshooter", - sha256 = 
"9d5fccfe01a31ec395ba3a26474168e5a8db09275dfbdfcd5dfd44923d9ac4bd", - strip_prefix = "cloud.google.com/go/policytroubleshooter@v1.6.0", + sha256 = "b46e74184e6b8ed9943f474b976fb6bedd6a4d0700ac696012300886922f9d98", + strip_prefix = "cloud.google.com/go/policytroubleshooter@v1.9.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.9.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.9.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.9.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.9.1.zip", ], ) go_repository( name = "com_google_cloud_go_privatecatalog", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/privatecatalog", - sha256 = "f475f487df7906e4e35bda4b69ce53f141ade7ea6463674eb9b57f5fa302c367", - strip_prefix = "cloud.google.com/go/privatecatalog@v0.8.0", + sha256 = "a43190e1dfba2ed7fcb63e5571937bdfc2ed97594fa9b2b7bd119678e977b0f4", + strip_prefix = "cloud.google.com/go/privatecatalog@v0.9.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.8.0.zip", - 
"http://ats.apps.svc/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_pubsub", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/pubsub", - sha256 = "9c15c75b6204fd3d42114006896a72d82827d01a756d2f78423c101102da4977", - strip_prefix = "cloud.google.com/go/pubsub@v1.30.0", + sha256 = "87f423671714647e1817126df5403c57f1d4627c3e4e83664213a678126e40ca", + strip_prefix = "cloud.google.com/go/pubsub@v1.33.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.30.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.30.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.30.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.30.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.33.0.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.33.0.zip", + 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.33.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.33.0.zip", ], ) go_repository( name = "com_google_cloud_go_pubsublite", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/pubsublite", - sha256 = "97b1c3637961faf18229a168a5811425b4e64ee6d81bb76e51ebbf93ff3622ba", - strip_prefix = "cloud.google.com/go/pubsublite@v1.7.0", + sha256 = "41933a60c5e0995025320fe1c155b31d636178e60838b04aca9eab0c8c9f3227", + strip_prefix = "cloud.google.com/go/pubsublite@v1.8.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.8.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.8.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.8.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.8.1.zip", ], ) go_repository( name = "com_google_cloud_go_recaptchaenterprise_v2", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/recaptchaenterprise/v2", - sha256 = "dbf218232a443651daa58869fb5e87845927c33d683f4fd4f6f4306e056bb7d0", - strip_prefix = "cloud.google.com/go/recaptchaenterprise/v2@v2.7.0", + sha256 = "8151e658e29acc0617fa5bc36f7d6f06a61e8b97558f79fd6137429538ad903f", + 
strip_prefix = "cloud.google.com/go/recaptchaenterprise/v2@v2.8.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.8.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.8.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.8.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.8.1.zip", ], ) go_repository( name = "com_google_cloud_go_recommendationengine", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/recommendationengine", - sha256 = "33cf95d20d5c036b5595c0f66005d82eb3ddb3ccebdcc69c120a1567b0f12f40", - strip_prefix = "cloud.google.com/go/recommendationengine@v0.7.0", + sha256 = "a2636073ab9bd418361f38332b4e922fcfe5ca6bc10aca96f6fcbdab7a37456d", + strip_prefix = "cloud.google.com/go/recommendationengine@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.7.0.zip", - 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_recommender", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/recommender", - sha256 = "8e9ccaf1167b4a7d3fd682581537f525f712af72c99b586aaea05832b82c86e8", - strip_prefix = "cloud.google.com/go/recommender@v1.9.0", + sha256 = "1f0585da517bd4163b8482c6810cf1c119c0ad5a4d038bdcaa6491a5b3d1417b", + strip_prefix = "cloud.google.com/go/recommender@v1.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.11.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.11.1.zip", + 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.11.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.11.1.zip", ], ) go_repository( name = "com_google_cloud_go_redis", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/redis", - sha256 = "51e5063e393d443f9d265b2aad809f45cee8af95a41ab8b532af38711ff451dc", - strip_prefix = "cloud.google.com/go/redis@v1.11.0", + sha256 = "53e5bd33b17517627ce13404b784a7c8b2b8c65719e8f70977616b061834ee87", + strip_prefix = "cloud.google.com/go/redis@v1.13.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.11.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.11.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.11.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.11.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.13.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.13.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.13.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.13.2.zip", ], ) go_repository( name = "com_google_cloud_go_resourcemanager", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/resourcemanager", - sha256 = "92bba6de5d69d3928378722537f0b76ec8f958cece23acb9336512f3407eb8e4", - strip_prefix = "cloud.google.com/go/resourcemanager@v1.7.0", + sha256 = "bb51f46e5a6a219191c258f1b395a1129fc96d4ea940eff412191522c0dbd043", + strip_prefix = "cloud.google.com/go/resourcemanager@v1.9.2", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_resourcesettings", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/resourcesettings", - sha256 = "9ff4470670ebcfa07f7964f85e312e41901afed236c14ecd10952d90e81f99f7", - strip_prefix = "cloud.google.com/go/resourcesettings@v1.5.0", + sha256 = "f2327ef037487c9f183e3a52e6456c087f8dc8325311bc6dcb77e5a8c030e360", + strip_prefix = "cloud.google.com/go/resourcesettings@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.5.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.6.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_retail", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/retail", - sha256 = "5e71739001223ca2cdf7a6fa0ff61673a407ec18503fdd772b96e91ce42b67fc", - strip_prefix = "cloud.google.com/go/retail@v1.12.0", + sha256 = "85c1df965d36c1449655ae20ff44c18eab4177babbc2a851764941073b623862", + strip_prefix = "cloud.google.com/go/retail@v1.14.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.12.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.12.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.12.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.12.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.14.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.14.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.14.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.14.2.zip", ], ) go_repository( name = "com_google_cloud_go_run", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/run", - sha256 = 
"7828480d028ff1b8496855bbd9dc264e772fae5f7866ceb5e1a7db6f18052edd", - strip_prefix = "cloud.google.com/go/run@v0.9.0", + sha256 = "5382527d044acc067f77f94001d094b1bd990fe91e68dd6de38d86b8eed9bc04", + strip_prefix = "cloud.google.com/go/run@v1.3.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v0.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v0.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v0.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v0.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v1.3.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v1.3.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v1.3.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v1.3.1.zip", ], ) go_repository( name = "com_google_cloud_go_scheduler", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/scheduler", - sha256 = "3e225392a86a45fa9b5144f18bd3ea418f0cd7fab270ab4524a2e897bae54416", - strip_prefix = "cloud.google.com/go/scheduler@v1.9.0", + sha256 = "171f330ba8477da1ab647cc6707f963300fec30750a37e5dd7935d2387c2116d", + strip_prefix = "cloud.google.com/go/scheduler@v1.10.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.9.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.10.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.10.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.10.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.10.2.zip", ], ) go_repository( name = "com_google_cloud_go_secretmanager", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/secretmanager", - sha256 = "d24cb4f507e9d531f7d75a4b070bff5f9dc548a2be1591337f4865cd8b084929", - strip_prefix = "cloud.google.com/go/secretmanager@v1.10.0", + sha256 = "4dc8dc1f3dc60a6aa5038ccc7ac988e53af136a544021391a82af303cf811e5a", + strip_prefix = "cloud.google.com/go/secretmanager@v1.11.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.11.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.11.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.11.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.11.2.zip", ], ) go_repository( name = "com_google_cloud_go_security", build_file_proto_mode = "disable_global", 
importpath = "cloud.google.com/go/security", - sha256 = "e74202ce5419ed745d1c8089a2e4ffb790c0bc045d4f4ab788129ea0f0f5576d", - strip_prefix = "cloud.google.com/go/security@v1.13.0", + sha256 = "8c32c5308500ac6dad8bf4ab42d84c18e5d19bcbfbdb1879a349a6a62d428a61", + strip_prefix = "cloud.google.com/go/security@v1.15.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.15.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.15.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.15.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.15.2.zip", ], ) go_repository( name = "com_google_cloud_go_securitycenter", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/securitycenter", - sha256 = "0f451a28499260a21edf268bb8b657fc55fb81a883ab47fb3d2ca472f8707afd", - strip_prefix = "cloud.google.com/go/securitycenter@v1.19.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.19.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.19.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.19.0.zip", - 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.19.0.zip", - ], - ) - go_repository( - name = "com_google_cloud_go_servicecontrol", - build_file_proto_mode = "disable_global", - importpath = "cloud.google.com/go/servicecontrol", - sha256 = "499ce8763d315e0ffdf3705549a507051a27eff9b8dec9debe43bca8d130fabb", - strip_prefix = "cloud.google.com/go/servicecontrol@v1.11.1", + sha256 = "ed0594a9ed6c492d125c67f490e3a94c135e870a98342e9216df12162fa6911e", + strip_prefix = "cloud.google.com/go/securitycenter@v1.23.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/servicecontrol/com_google_cloud_go_servicecontrol-v1.11.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/servicecontrol/com_google_cloud_go_servicecontrol-v1.11.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/servicecontrol/com_google_cloud_go_servicecontrol-v1.11.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/servicecontrol/com_google_cloud_go_servicecontrol-v1.11.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.23.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.23.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.23.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.23.1.zip", ], ) go_repository( name = "com_google_cloud_go_servicedirectory", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/servicedirectory", - sha256 = "4705df69c7e353bfa6a03dad8a50dde5066151b82528946b818df40547c79088", - strip_prefix = "cloud.google.com/go/servicedirectory@v1.9.0", + sha256 = "266651c6851b26c8047bec746633d5773d80463352f8e708da3cf2cd7fdfbe40", + 
strip_prefix = "cloud.google.com/go/servicedirectory@v1.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.9.0.zip", - ], - ) - go_repository( - name = "com_google_cloud_go_servicemanagement", - build_file_proto_mode = "disable_global", - importpath = "cloud.google.com/go/servicemanagement", - sha256 = "2e02a723d1c226c2ecba4e47892b96052efb941be2910fd7afc38197f5bc6083", - strip_prefix = "cloud.google.com/go/servicemanagement@v1.8.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/servicemanagement/com_google_cloud_go_servicemanagement-v1.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/servicemanagement/com_google_cloud_go_servicemanagement-v1.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/servicemanagement/com_google_cloud_go_servicemanagement-v1.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/servicemanagement/com_google_cloud_go_servicemanagement-v1.8.0.zip", - ], - ) - go_repository( - name = "com_google_cloud_go_serviceusage", - build_file_proto_mode = "disable_global", - importpath = "cloud.google.com/go/serviceusage", - sha256 = "377bad0176bbec558ddb55b1fe10318e2c034c9e87536aba1ba8216b57548f3f", - strip_prefix = "cloud.google.com/go/serviceusage@v1.6.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/serviceusage/com_google_cloud_go_serviceusage-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/serviceusage/com_google_cloud_go_serviceusage-v1.6.0.zip", - 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/serviceusage/com_google_cloud_go_serviceusage-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/serviceusage/com_google_cloud_go_serviceusage-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.11.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.11.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.11.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.11.1.zip", ], ) go_repository( name = "com_google_cloud_go_shell", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/shell", - sha256 = "f88e9c2ff25a5ea22d71a1125cc6e756845ec8221c821092d05e67859966ca48", - strip_prefix = "cloud.google.com/go/shell@v1.6.0", + sha256 = "f50cd0726dd7109c75b9775b6750b3316acb1f764d608ff02278e98cff327ecd", + strip_prefix = "cloud.google.com/go/shell@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.7.2.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_spanner", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/spanner", - sha256 = "e4f3951ea69d07ed383f41579c3a6af8e639558ecfa796421dc6cf3d268118ec", - strip_prefix = "cloud.google.com/go/spanner@v1.45.0", + sha256 = "eb0dd423ec976db7be0e6a709cab5d7ad2b9e20ca53cf9cd9663475bf896531a", + strip_prefix = "cloud.google.com/go/spanner@v1.50.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.45.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.45.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.45.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.45.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.50.0.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.50.0.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.50.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.50.0.zip", ], ) go_repository( name = "com_google_cloud_go_speech", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/speech", - sha256 = "27c7d30f3573b4d14a6096588fef65635bf7df8b98e921e934a0af1c7fcf7771", - strip_prefix = "cloud.google.com/go/speech@v1.15.0", + sha256 = "1c184b4698eea3072656dc4e4a0279befdd6d2fa31989f5006b0e46ee7ea5ceb", + strip_prefix = "cloud.google.com/go/speech@v1.19.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.15.0.zip", - 
"http://ats.apps.svc/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.15.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.15.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.15.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.19.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.19.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.19.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.19.1.zip", ], ) go_repository( @@ -9082,195 +8978,195 @@ def go_deps(): name = "com_google_cloud_go_storagetransfer", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/storagetransfer", - sha256 = "16e315b990875ac30d149de8b20f75338b178a9a4d34f03a7e181ed5fba7dd33", - strip_prefix = "cloud.google.com/go/storagetransfer@v1.8.0", + sha256 = "e5c5f002aa7c05a4702d1bb4568b0d63a3384e095402517afaae4147fd6169e8", + strip_prefix = "cloud.google.com/go/storagetransfer@v1.10.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.10.1.zip", + 
"http://ats.apps.svc/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.10.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.10.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.10.1.zip", ], ) go_repository( name = "com_google_cloud_go_talent", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/talent", - sha256 = "e6de9c5d91eb9c336fe36bc6c40c724f75773afe38f8719ec31add3a144328e6", - strip_prefix = "cloud.google.com/go/talent@v1.5.0", + sha256 = "b797a2106e3aca18898ea51144e2308574f49f840fe51fe06f03d1dea56646e1", + strip_prefix = "cloud.google.com/go/talent@v1.6.3", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.6.3.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.6.3.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.6.3.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.6.3.zip", ], ) go_repository( name = "com_google_cloud_go_texttospeech", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/texttospeech", - sha256 = "47fd557bca4ad5f4e8dff734c323a24a03253d19d2fcb693c9f3bd6ad3c15cd3", - strip_prefix = "cloud.google.com/go/texttospeech@v1.6.0", + sha256 = 
"4a897af8724879bf479f715a57d0894f6fa3b52706e35870c385bcaa799aef2f", + strip_prefix = "cloud.google.com/go/texttospeech@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_tpu", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/tpu", - sha256 = "631fdef221fa6e2374bc43fabd37de734b402e6cc04449d095a6ddc8a1f64303", - strip_prefix = "cloud.google.com/go/tpu@v1.5.0", + sha256 = "48e359c9edd853357bb8f157a4ead1601d9c926b1c539fde86b5531139f60647", + strip_prefix = "cloud.google.com/go/tpu@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.5.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.6.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_trace", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/trace", - sha256 = "8012eaad65d2aa6dca225c708e6b0b43eb91bfc1c7dc82573fe7d993eb2c4384", - strip_prefix = "cloud.google.com/go/trace@v1.9.0", + sha256 = "40dd16a13c80f021b31ee309e80f6ee21323b67b2d3aac8473717ac3b3efce08", + strip_prefix = "cloud.google.com/go/trace@v1.10.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.10.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.10.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.10.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.10.2.zip", ], ) go_repository( name = "com_google_cloud_go_translate", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/translate", - sha256 = "2bbf1bd793abf22ec8b0b200e8b49ea08821b1923ed24ffa668999f7330046fa", - strip_prefix = "cloud.google.com/go/translate@v1.7.0", + sha256 = 
"1776353be213f41195b9da35ae6f39cff060f9c163a0213711d7cb11e4f067ff", + strip_prefix = "cloud.google.com/go/translate@v1.9.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.9.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.9.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.9.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.9.1.zip", ], ) go_repository( name = "com_google_cloud_go_video", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/video", - sha256 = "fac96bb5bb2dafb9d19c6b3e70455999c65f2be1f4a0ee86c7772796fcbf660c", - strip_prefix = "cloud.google.com/go/video@v1.15.0", + sha256 = "758131934c35db8aa8d39efa5ce411785da78dd55d82edbb7a9fcb8e9518d2a9", + strip_prefix = "cloud.google.com/go/video@v1.20.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.15.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.15.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.15.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.15.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.20.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.20.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.20.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.20.1.zip", ], ) go_repository( name = "com_google_cloud_go_videointelligence", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/videointelligence", - sha256 = "d7a24a20e8f4c0b7dc088010263be03132f63f62dbfa9eb69447c229ef80626b", - strip_prefix = "cloud.google.com/go/videointelligence@v1.10.0", + sha256 = "0ca9d8c68825f07c208157bf24503f4a64aec960efe3ea2ff8c1ce2dac92b351", + strip_prefix = "cloud.google.com/go/videointelligence@v1.11.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.11.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.11.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.11.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.11.2.zip", ], ) go_repository( name = 
"com_google_cloud_go_vision_v2", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/vision/v2", - sha256 = "323f1c5e07ea11ee90bec85c0fdccbcf73c26ce28baa832528cf4a9c50d0b4f7", - strip_prefix = "cloud.google.com/go/vision/v2@v2.7.0", + sha256 = "89b8e20f0db42816208b1d9c5a6cb1abd276fee95fbdd563e750da588d110464", + strip_prefix = "cloud.google.com/go/vision/v2@v2.7.3", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.3.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.3.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.3.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.3.zip", ], ) go_repository( name = "com_google_cloud_go_vmmigration", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/vmmigration", - sha256 = "a289f09b2e6249b493e3ae8bb10225d77590f3823302e46a99ea51b732debb65", - strip_prefix = "cloud.google.com/go/vmmigration@v1.6.0", + sha256 = "8ef0ba7a5fa6b436593782de63111e4274cb61267008bff10c9dc90285405dce", + strip_prefix = "cloud.google.com/go/vmmigration@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.6.0.zip", - 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_vmwareengine", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/vmwareengine", - sha256 = "f6f5753bf4ee0c4264f78a78966f019fd200bb5bae79fad321093a439b08a2b6", - strip_prefix = "cloud.google.com/go/vmwareengine@v0.3.0", + sha256 = "906ad046857c81df8a0e8f30d09f3db9d2c13021a3374587d3acd2a734c60a13", + strip_prefix = "cloud.google.com/go/vmwareengine@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v0.3.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v0.3.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v0.3.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v0.3.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v1.0.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v1.0.1.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v1.0.1.zip", ], ) go_repository( name = "com_google_cloud_go_vpcaccess", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/vpcaccess", - sha256 = "8d0662362ec347afedf274930c139afd0c9cdb219646ceb58a07668c5c84278b", - strip_prefix = "cloud.google.com/go/vpcaccess@v1.6.0", + sha256 = "80207274d8a780413505c4efdd881c5798d988ade2bc647ac803d18831a42250", + strip_prefix = "cloud.google.com/go/vpcaccess@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_webrisk", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/webrisk", - sha256 = "8cc27cca95d2dd5efc58f335b085da8b46d6520a1963f6b2a33676f2837f3553", - strip_prefix = "cloud.google.com/go/webrisk@v1.8.0", + sha256 = "f1b45df15670274eae77a62a7ae243b5eafb4e10f8f04c852ca73a026b9d03f7", + strip_prefix = "cloud.google.com/go/webrisk@v1.9.2", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_websecurityscanner", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/websecurityscanner", - sha256 = "7f0774556cb41ac4acd16a386a9f8664c7f0ac11ed126d5d771fe07a217ef131", - strip_prefix = "cloud.google.com/go/websecurityscanner@v1.5.0", + sha256 = "ce37d83c1998f0dde1ca5b8e107a8654466271fda7c9b35614672da9d8a33144", + strip_prefix = "cloud.google.com/go/websecurityscanner@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.5.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.6.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_workflows", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/workflows", - sha256 = "e6e83869c5fbcccd3ee489128a300b75cb02a99b48b59bbb829b2e7d7ab81f9c", - strip_prefix = "cloud.google.com/go/workflows@v1.10.0", + sha256 = "b8de38a09b26fc4a98a10a8370f4780361c27a13cc84244fcf8840c2ca0f402a", + strip_prefix = "cloud.google.com/go/workflows@v1.12.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.12.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.12.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.12.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.12.1.zip", ], ) go_repository( @@ -9602,13 +9498,13 @@ def go_deps(): name = "io_etcd_go_bbolt", build_file_proto_mode = 
"disable_global", importpath = "go.etcd.io/bbolt", - sha256 = "a357fccd93e865dce3d3859ed857ce827f7a2f2dc5b90cfaa95202f5d76e4ac2", - strip_prefix = "go.etcd.io/bbolt@v1.3.6", + sha256 = "18babae67eccdd2982ad0bd44bb77a238e8b6c8da192b5ae6bd3c0dd48d5ba31", + strip_prefix = "go.etcd.io/bbolt@v1.3.8", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.6.zip", - "http://ats.apps.svc/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.6.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.6.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.6.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.8.zip", + "http://ats.apps.svc/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.8.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.8.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.8.zip", ], ) go_repository( @@ -9619,117 +9515,130 @@ def go_deps(): patches = [ "//build/patches:io_etcd_go_etcd_api_v3.patch", ], - sha256 = "bfd9ce626389c8a11c2d33eb3c823cc277898c51254a6e02ed967f948aec79f6", - strip_prefix = "go.etcd.io/etcd/api/v3@v3.5.2", + sha256 = "d05d41beae43dc75f0a6e7815a447c0e704cfdf94841e96ba661e0b1dbc4a10c", + strip_prefix = "go.etcd.io/etcd/api/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.10.zip", + 
"http://ats.apps.svc/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_client_pkg_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/client/pkg/v3", - sha256 = "b183c377b46eb622d80d77b14755acbdbba43b9b5882ed2a5e9975985eaacd25", - strip_prefix = "go.etcd.io/etcd/client/pkg/v3@v3.5.2", + sha256 = "d99c95cd67a6c27868368cb7d31f60f11894d9039bea0c81b9ab66540f01d524", + strip_prefix = "go.etcd.io/etcd/client/pkg/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_client_v2", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/client/v2", - sha256 = "25e0a2e179114cdc122e57dcee974cff927cbe2f04304d71575fe0dbf66d506b", - strip_prefix = "go.etcd.io/etcd/client/v2@v2.305.2", + sha256 = 
"cb78469abc82a73bf8116cae2d772791065a28662f19771fb3504804896f9cc3", + strip_prefix = "go.etcd.io/etcd/client/v2@v2.305.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_client_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/client/v3", - sha256 = "06aae6f25789a7dea98a2f7df67a4d65b660b81a8accd88ddced9ca8c335d99d", - strip_prefix = "go.etcd.io/etcd/client/v3@v3.5.2", + sha256 = "f35f571c1c46260bfed9222de88d7e87b1cd5de625465b4489f97af1b3a2c881", + strip_prefix = "go.etcd.io/etcd/client/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.10.zip", + 
"http://ats.apps.svc/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_etcdutl_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/etcdutl/v3", - sha256 = "9d694d9b204037b05d13c6897a3b81a8234cc444e9b9892846a79a3ade72aeab", - strip_prefix = "go.etcd.io/etcd/etcdutl/v3@v3.5.2", + sha256 = "fbd00834b99644e90ec3f1594bb9901ef2befc2e0b2e957be9605d7e12ca6743", + strip_prefix = "go.etcd.io/etcd/etcdutl/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_pkg_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/pkg/v3", - sha256 = "a1d96686d541509919732896d79e885e40147b5eeb8315db58dc07ad8c191226", - strip_prefix = "go.etcd.io/etcd/pkg/v3@v3.5.2", + sha256 = "8b7c52c59bd9e6b80df28816410846ec61b4318a551c55d9c8fa58b40c0da6f5", + strip_prefix = "go.etcd.io/etcd/pkg/v3@v3.5.10", 
urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_raft_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/raft/v3", - sha256 = "2b1fdd35d496af817cfe06ff74949e3cc77efac3473f817f998569107162d41a", - strip_prefix = "go.etcd.io/etcd/raft/v3@v3.5.2", + sha256 = "2ca38be08a7beb77633685d31e18631c0f57b403e41455f524e60a7f2549201f", + strip_prefix = "go.etcd.io/etcd/raft/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.10.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_server_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/server/v3", - sha256 = "7eac7dcb18c57f880830d363ab250f9b387c0cbed3e4910427b8e23b7d8e28d3", - strip_prefix = "go.etcd.io/etcd/server/v3@v3.5.2", + sha256 = "a1112d8570540017f35d9ed372ff1dde75c59ee1fd7f20074e377ffc90ebd103", + strip_prefix = "go.etcd.io/etcd/server/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_tests_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/tests/v3", - sha256 = "fc00d13163948f7633e1f53f08d05ee4e75930d02114754384a736f733d35148", - strip_prefix = "go.etcd.io/etcd/tests/v3@v3.5.2", + sha256 = "23bc94e86126c8909155ee770d0b9e42f6260fdc97a8f8355f365e9995c4c559", + strip_prefix = "go.etcd.io/etcd/tests/v3@v3.5.10", + urls = [ + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.10.zip", + 
"https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.10.zip", + ], + ) + go_repository( + name = "io_etcd_go_gofail", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/gofail", + sha256 = "4fd6977dd736aba56be58c0b16e96d73433688976a5b352578d3c54d0db9e803", + strip_prefix = "go.etcd.io/gofail@v0.1.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/gofail/io_etcd_go_gofail-v0.1.0.zip", + "http://ats.apps.svc/gomod/go.etcd.io/gofail/io_etcd_go_gofail-v0.1.0.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/gofail/io_etcd_go_gofail-v0.1.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/gofail/io_etcd_go_gofail-v0.1.0.zip", ], ) go_repository( @@ -9888,147 +9797,95 @@ def go_deps(): "https://storage.googleapis.com/pingcapmirror/gomod/contrib.go.opencensus.io/exporter/ocagent/io_opencensus_go_contrib_exporter_ocagent-v0.4.12.zip", ], ) - go_repository( - name = "io_opentelemetry_go_contrib", - build_file_proto_mode = "disable_global", - importpath = "go.opentelemetry.io/contrib", - sha256 = "b33252dafaa7884e1925ca052bfc32275bd69f7faa1a294ce2dbf05b7f62fda1", - strip_prefix = "go.opentelemetry.io/contrib@v0.20.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/contrib/io_opentelemetry_go_contrib-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/contrib/io_opentelemetry_go_contrib-v0.20.0.zip", - 
"https://cache.hawkingrei.com/gomod/go.opentelemetry.io/contrib/io_opentelemetry_go_contrib-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/contrib/io_opentelemetry_go_contrib-v0.20.0.zip", - ], - ) go_repository( name = "io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc", build_file_proto_mode = "disable_global", importpath = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", - sha256 = "5d75e50405735d05540a3cc59c3741cc43275ba9203bcc77ac85214ebd5212f8", - strip_prefix = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc@v0.20.0", + sha256 = "f7abd5e3fe79b49a398912f67f79d853e329bb4f653b98804a961d5178dadc5e", + strip_prefix = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc@v0.25.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.20.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.25.0.zip", + 
"http://ats.apps.svc/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.25.0.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.25.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.25.0.zip", ], ) go_repository( name = "io_opentelemetry_go_otel", build_file_proto_mode = "disable_global", importpath = "go.opentelemetry.io/otel", - sha256 = "8e55c823cde41ae4920f331e3b3999adca4c8729f0f096950454c996520972a3", - strip_prefix = "go.opentelemetry.io/otel@v0.20.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v0.20.0.zip", - ], - ) - go_repository( - name = "io_opentelemetry_go_otel_exporters_otlp", - build_file_proto_mode = "disable_global", - importpath = "go.opentelemetry.io/otel/exporters/otlp", - sha256 = "abd40ffff96f3caa01ee6854b52e69e6787b10d31a6c2023447d5106496c9b2e", - strip_prefix = "go.opentelemetry.io/otel/exporters/otlp@v0.20.0", + sha256 = "e8c4d785d6a230d5c954d7afbbb0df5c8a2ffb59aeb07bc4f7c731c6f55e0626", + strip_prefix = "go.opentelemetry.io/otel@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/exporters/otlp/io_opentelemetry_go_otel_exporters_otlp-v0.20.0.zip", - 
"http://ats.apps.svc/gomod/go.opentelemetry.io/otel/exporters/otlp/io_opentelemetry_go_otel_exporters_otlp-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/exporters/otlp/io_opentelemetry_go_otel_exporters_otlp-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/exporters/otlp/io_opentelemetry_go_otel_exporters_otlp-v0.20.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v1.0.1.zip", + "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v1.0.1.zip", ], ) go_repository( - name = "io_opentelemetry_go_otel_metric", + name = "io_opentelemetry_go_otel_exporters_otlp_otlptrace", build_file_proto_mode = "disable_global", - importpath = "go.opentelemetry.io/otel/metric", - sha256 = "d7ae3abbdcf9ea48ff23a477f324cb3595c77f3eb83f6acde5c0c9300e23fedb", - strip_prefix = "go.opentelemetry.io/otel/metric@v0.20.0", + importpath = "go.opentelemetry.io/otel/exporters/otlp/otlptrace", + sha256 = "c0b373451618d70053fcfad5acbdc243cbad8b6b9252e0a30303171b0b065499", + strip_prefix = "go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/metric/io_opentelemetry_go_otel_metric-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/metric/io_opentelemetry_go_otel_metric-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/metric/io_opentelemetry_go_otel_metric-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/metric/io_opentelemetry_go_otel_metric-v0.20.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/io_opentelemetry_go_otel_exporters_otlp_otlptrace-v1.0.1.zip", + "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/io_opentelemetry_go_otel_exporters_otlp_otlptrace-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/io_opentelemetry_go_otel_exporters_otlp_otlptrace-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/io_opentelemetry_go_otel_exporters_otlp_otlptrace-v1.0.1.zip", ], ) go_repository( - name = "io_opentelemetry_go_otel_oteltest", + name = "io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracegrpc", build_file_proto_mode = "disable_global", - importpath = "go.opentelemetry.io/otel/oteltest", - sha256 = "5773e674e2f095c2348d13133d2c5ed3019c3c4dc43c47dcae788a673f197d20", - strip_prefix = "go.opentelemetry.io/otel/oteltest@v0.20.0", + importpath = "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc", + sha256 = "7e99951c02fdc104a08bff9244de6f9129171ccde70761c61c9f4255ce81dc5d", + strip_prefix = "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/oteltest/io_opentelemetry_go_otel_oteltest-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/oteltest/io_opentelemetry_go_otel_oteltest-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/oteltest/io_opentelemetry_go_otel_oteltest-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/oteltest/io_opentelemetry_go_otel_oteltest-v0.20.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracegrpc-v1.0.1.zip", + 
"http://ats.apps.svc/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracegrpc-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracegrpc-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracegrpc-v1.0.1.zip", ], ) go_repository( name = "io_opentelemetry_go_otel_sdk", build_file_proto_mode = "disable_global", importpath = "go.opentelemetry.io/otel/sdk", - sha256 = "13c01e92ebcbde0b3d2efc4d3a4445c2cce8d505c823aeffff6398a7dabb3806", - strip_prefix = "go.opentelemetry.io/otel/sdk@v0.20.0", + sha256 = "760e9297c941b22cd3a5a2b217de46f8f2411cc7ef8dc1bab8ed02d75e10217d", + strip_prefix = "go.opentelemetry.io/otel/sdk@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v0.20.0.zip", - ], - ) - go_repository( - name = "io_opentelemetry_go_otel_sdk_export_metric", - build_file_proto_mode = "disable_global", - importpath = "go.opentelemetry.io/otel/sdk/export/metric", - sha256 = "e0037e543d27111d06904f8a2060b41fb40e960ddce5cec5e6f190490ae52f57", - strip_prefix = "go.opentelemetry.io/otel/sdk/export/metric@v0.20.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/sdk/export/metric/io_opentelemetry_go_otel_sdk_export_metric-v0.20.0.zip", - 
"http://ats.apps.svc/gomod/go.opentelemetry.io/otel/sdk/export/metric/io_opentelemetry_go_otel_sdk_export_metric-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/sdk/export/metric/io_opentelemetry_go_otel_sdk_export_metric-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/sdk/export/metric/io_opentelemetry_go_otel_sdk_export_metric-v0.20.0.zip", - ], - ) - go_repository( - name = "io_opentelemetry_go_otel_sdk_metric", - build_file_proto_mode = "disable_global", - importpath = "go.opentelemetry.io/otel/sdk/metric", - sha256 = "b0d5ffded967229eeee79bb9fb50320c68af812d5f2e6dcb9e44ddb7bd2afe16", - strip_prefix = "go.opentelemetry.io/otel/sdk/metric@v0.20.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/sdk/metric/io_opentelemetry_go_otel_sdk_metric-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/sdk/metric/io_opentelemetry_go_otel_sdk_metric-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/sdk/metric/io_opentelemetry_go_otel_sdk_metric-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/sdk/metric/io_opentelemetry_go_otel_sdk_metric-v0.20.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v1.0.1.zip", + "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v1.0.1.zip", ], ) go_repository( name = "io_opentelemetry_go_otel_trace", build_file_proto_mode = "disable_global", importpath = "go.opentelemetry.io/otel/trace", - sha256 = "fd6a9646a66f0fa98fc2b12eed1abe11220e5e6cc0cb4b8d9c5905631c87608d", - strip_prefix = "go.opentelemetry.io/otel/trace@v0.20.0", + 
sha256 = "d7f303c3e1a9d844121309c132fab5f99dc68e9ac6518ef1d9c44f92ae9e97ea", + strip_prefix = "go.opentelemetry.io/otel/trace@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v0.20.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v1.0.1.zip", + "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v1.0.1.zip", ], ) go_repository( name = "io_opentelemetry_go_proto_otlp", build_file_proto_mode = "disable_global", importpath = "go.opentelemetry.io/proto/otlp", - sha256 = "a7db0590bc4c5f0b9b99cc958decf644f1e5cc11e0b995dc20b3583a2215259b", - strip_prefix = "go.opentelemetry.io/proto/otlp@v0.7.0", + sha256 = "1a91376c923da07bee23439e8430c32736f6330532df85d3bd1ada90305097d7", + strip_prefix = "go.opentelemetry.io/proto/otlp@v0.9.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.7.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.7.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.9.0.zip", + "http://ats.apps.svc/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.9.0.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.9.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.9.0.zip", ], ) go_repository( @@ -10100,13 +9957,13 @@ def go_deps(): name = "org_golang_google_api", build_file_proto_mode = "disable_global", importpath = "google.golang.org/api", - sha256 = "42c62aaba1d76efede08c70d8aef7889c5c8ee9c9c4f1e7c455b07838cabb785", - strip_prefix = "google.golang.org/api@v0.114.0", + sha256 = "1c7547012d828329aa90dc77bfa7d826184b14229cc72c93eeca50cb9882158d", + strip_prefix = "google.golang.org/api@v0.128.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/api/org_golang_google_api-v0.114.0.zip", - "http://ats.apps.svc/gomod/google.golang.org/api/org_golang_google_api-v0.114.0.zip", - "https://cache.hawkingrei.com/gomod/google.golang.org/api/org_golang_google_api-v0.114.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/api/org_golang_google_api-v0.114.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/api/org_golang_google_api-v0.128.0.zip", + "http://ats.apps.svc/gomod/google.golang.org/api/org_golang_google_api-v0.128.0.zip", + "https://cache.hawkingrei.com/gomod/google.golang.org/api/org_golang_google_api-v0.128.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/api/org_golang_google_api-v0.128.0.zip", ], ) go_repository( @@ -10126,26 +9983,65 @@ def go_deps(): name = "org_golang_google_genproto", build_file_proto_mode = "disable_global", importpath = "google.golang.org/genproto", - sha256 = "28f0317e6948788a33c07698109005675062f0203ed06bc866350a575bc974bf", - strip_prefix = 
"google.golang.org/genproto@v0.0.0-20230410155749-daa745c078e1", + sha256 = "11c4f5d4c24c25c9dec4fb2d4e723dead4c558bea81ab3b2deb3b1f5f98f278a", + strip_prefix = "google.golang.org/genproto@v0.0.0-20231016165738-49dd2c1f3d0b", + urls = [ + "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "http://ats.apps.svc/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "https://cache.hawkingrei.com/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + ], + ) + go_repository( + name = "org_golang_google_genproto_googleapis_api", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/api", + sha256 = "26f62026bcd267b4fbde3bd16aa9334568af09e623fd64a8e7cd8ec99c0dbf5d", + strip_prefix = "google.golang.org/genproto/googleapis/api@v0.0.0-20231016165738-49dd2c1f3d0b", + urls = [ + "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/genproto/googleapis/api/org_golang_google_genproto_googleapis_api-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "http://ats.apps.svc/gomod/google.golang.org/genproto/googleapis/api/org_golang_google_genproto_googleapis_api-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "https://cache.hawkingrei.com/gomod/google.golang.org/genproto/googleapis/api/org_golang_google_genproto_googleapis_api-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/genproto/googleapis/api/org_golang_google_genproto_googleapis_api-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + ], + ) + go_repository( + name = "org_golang_google_genproto_googleapis_bytestream", + build_file_proto_mode = "disable_global", + importpath = 
"google.golang.org/genproto/googleapis/bytestream", + sha256 = "cab1c8c198b4c5a226590e8b5b1b847e505a7aaf10a0145ce8c29951eca86b6a", + strip_prefix = "google.golang.org/genproto/googleapis/bytestream@v0.0.0-20230530153820-e85fd2cbaebc", + urls = [ + "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/genproto/googleapis/bytestream/org_golang_google_genproto_googleapis_bytestream-v0.0.0-20230530153820-e85fd2cbaebc.zip", + "http://ats.apps.svc/gomod/google.golang.org/genproto/googleapis/bytestream/org_golang_google_genproto_googleapis_bytestream-v0.0.0-20230530153820-e85fd2cbaebc.zip", + "https://cache.hawkingrei.com/gomod/google.golang.org/genproto/googleapis/bytestream/org_golang_google_genproto_googleapis_bytestream-v0.0.0-20230530153820-e85fd2cbaebc.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/genproto/googleapis/bytestream/org_golang_google_genproto_googleapis_bytestream-v0.0.0-20230530153820-e85fd2cbaebc.zip", + ], + ) + go_repository( + name = "org_golang_google_genproto_googleapis_rpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/rpc", + sha256 = "b35528074783811faaaec1d36c8f42d88b30878e509c5f407c53cb83ec02af78", + strip_prefix = "google.golang.org/genproto/googleapis/rpc@v0.0.0-20231016165738-49dd2c1f3d0b", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20230410155749-daa745c078e1.zip", - "http://ats.apps.svc/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20230410155749-daa745c078e1.zip", - "https://cache.hawkingrei.com/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20230410155749-daa745c078e1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20230410155749-daa745c078e1.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/genproto/googleapis/rpc/org_golang_google_genproto_googleapis_rpc-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "http://ats.apps.svc/gomod/google.golang.org/genproto/googleapis/rpc/org_golang_google_genproto_googleapis_rpc-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "https://cache.hawkingrei.com/gomod/google.golang.org/genproto/googleapis/rpc/org_golang_google_genproto_googleapis_rpc-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/genproto/googleapis/rpc/org_golang_google_genproto_googleapis_rpc-v0.0.0-20231016165738-49dd2c1f3d0b.zip", ], ) go_repository( name = "org_golang_google_grpc", build_file_proto_mode = "disable_global", importpath = "google.golang.org/grpc", - sha256 = "8e279a7a36347098a00debb5f76ef75b981939c282cd7771cc22b9b576065d84", - strip_prefix = "google.golang.org/grpc@v1.54.0", + sha256 = "8d8be58b73bcbefd731939880edd32aa3a90c4b6937ce07d904075470fce3565", + strip_prefix = "google.golang.org/grpc@v1.59.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.54.0.zip", - "http://ats.apps.svc/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.54.0.zip", - "https://cache.hawkingrei.com/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.54.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.54.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.59.0.zip", + "http://ats.apps.svc/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.59.0.zip", + "https://cache.hawkingrei.com/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.59.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.59.0.zip", ], ) go_repository( @@ -10217,13 +10113,13 @@ def go_deps(): name = "org_golang_x_lint", build_file_proto_mode = 
"disable_global", importpath = "golang.org/x/lint", - sha256 = "0a4a5ebd2b1d79e7f480cbf5a54b45a257ae1ec9d11f01688efc5c35268d4603", - strip_prefix = "golang.org/x/lint@v0.0.0-20210508222113-6edffad5e616", + sha256 = "4620205ccd1fd5c5ced7ccbc264217f407c53924e847f4219e48c04c7480b294", + strip_prefix = "golang.org/x/lint@v0.0.0-20201208152925-83fdc39ff7b5", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20210508222113-6edffad5e616.zip", - "http://ats.apps.svc/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20210508222113-6edffad5e616.zip", - "https://cache.hawkingrei.com/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20210508222113-6edffad5e616.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20210508222113-6edffad5e616.zip", + "http://bazel-cache.pingcap.net:8080/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20201208152925-83fdc39ff7b5.zip", + "http://ats.apps.svc/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20201208152925-83fdc39ff7b5.zip", + "https://cache.hawkingrei.com/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20201208152925-83fdc39ff7b5.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20201208152925-83fdc39ff7b5.zip", ], ) go_repository( @@ -10269,13 +10165,13 @@ def go_deps(): name = "org_golang_x_oauth2", build_file_proto_mode = "disable_global", importpath = "golang.org/x/oauth2", - sha256 = "774ad761b3732b86eaa3d70c30bcaed6dd09e96eec3cdeb2c0a9c112ce168704", - strip_prefix = "golang.org/x/oauth2@v0.8.0", + sha256 = "06f9bc67776baba78ae443744f846c193e68d775b3339b630788cca03882dda7", + strip_prefix = "golang.org/x/oauth2@v0.11.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.8.0.zip", - "http://ats.apps.svc/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.8.0.zip", - 
"https://cache.hawkingrei.com/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.11.0.zip", + "http://ats.apps.svc/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.11.0.zip", + "https://cache.hawkingrei.com/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.11.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.11.0.zip", ], ) go_repository( diff --git a/br/pkg/streamhelper/integration_test.go b/br/pkg/streamhelper/integration_test.go index f856ca74d14a4..ff7c23b138fbe 100644 --- a/br/pkg/streamhelper/integration_test.go +++ b/br/pkg/streamhelper/integration_test.go @@ -50,8 +50,8 @@ func runEtcd(t *testing.T) (*embed.Etcd, *clientv3.Client) { cfg := embed.NewConfig() cfg.Dir = t.TempDir() clientURL := getRandomLocalAddr() - cfg.LCUrls = []url.URL{clientURL} - cfg.LPUrls = []url.URL{getRandomLocalAddr()} + cfg.ListenClientUrls = []url.URL{clientURL} + cfg.ListenPeerUrls = []url.URL{getRandomLocalAddr()} cfg.LogLevel = "fatal" etcd, err := embed.StartEtcd(cfg) if err != nil { diff --git a/build/patches/com_github_grpc_ecosystem_grpc_gateway.patch b/build/patches/com_github_grpc_ecosystem_grpc_gateway.patch index 304063a90cb27..db8e4120035a1 100644 --- a/build/patches/com_github_grpc_ecosystem_grpc_gateway.patch +++ b/build/patches/com_github_grpc_ecosystem_grpc_gateway.patch @@ -63,13 +63,13 @@ index c4d18f624..41d5319fb 100644 - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", -+ "@com_github_golang_protobuf//protoc-gen-go/descriptor:go_default_library", ++ "@com_github_golang_protobuf//protoc-gen-go/descriptor:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library", + 
"@com_github_golang_protobuf//ptypes/any:go_default_library", + "@com_github_golang_protobuf//ptypes/duration:go_default_library", + "@com_github_golang_protobuf//ptypes/timestamp:go_default_library", + "@com_github_golang_protobuf//ptypes/wrappers:go_default_library", -+ "@org_golang_google_genproto//googleapis/api/httpbody:go_default_library", ++ "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_genproto//protobuf/field_mask:go_default_library", "@org_golang_google_grpc//codes:go_default_library", "@org_golang_google_grpc//grpclog:go_default_library", diff --git a/go.mod b/go.mod index b7c94d83f5e00..4d5fef17877c1 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/cockroachdb/errors v1.8.1 github.com/cockroachdb/pebble v0.0.0-20220415182917-06c9d3be25b3 github.com/coocood/freecache v1.2.1 - github.com/coreos/go-semver v0.3.0 + github.com/coreos/go-semver v0.3.1 github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 github.com/daixiang0/gci v0.11.0 github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37 @@ -53,7 +53,7 @@ require ( github.com/google/btree v1.1.2 github.com/google/pprof v0.0.0-20211122183932-1daafda22083 github.com/google/skylark v0.0.0-20181101142754-a5f7082aabed - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.3.1 github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 github.com/gorilla/mux v1.8.0 github.com/gostaticanalysis/forcetypeassert v0.1.0 @@ -103,7 +103,7 @@ require ( github.com/stretchr/testify v1.8.4 github.com/tdakkota/asciicheck v0.2.0 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 - github.com/tikv/client-go/v2 v2.0.8-0.20231025022411-cad314220659 + github.com/tikv/client-go/v2 v2.0.8-0.20231030021533-3520f13fc074 github.com/tikv/pd/client v0.0.0-20230912103610-2f57a9f050eb github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 github.com/twmb/murmur3 v1.1.6 @@ -112,11 +112,11 @@ require ( 
github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f github.com/xitongsys/parquet-go v1.5.5-0.20201110004701-b09c49d6d457 github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0 - go.etcd.io/etcd/api/v3 v3.5.2 - go.etcd.io/etcd/client/pkg/v3 v3.5.2 - go.etcd.io/etcd/client/v3 v3.5.2 - go.etcd.io/etcd/server/v3 v3.5.2 - go.etcd.io/etcd/tests/v3 v3.5.2 + go.etcd.io/etcd/api/v3 v3.5.10 + go.etcd.io/etcd/client/pkg/v3 v3.5.10 + go.etcd.io/etcd/client/v3 v3.5.10 + go.etcd.io/etcd/server/v3 v3.5.10 + go.etcd.io/etcd/tests/v3 v3.5.10 go.opencensus.io v0.24.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 @@ -126,15 +126,15 @@ require ( go.uber.org/zap v1.26.0 golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 golang.org/x/net v0.17.0 - golang.org/x/oauth2 v0.8.0 + golang.org/x/oauth2 v0.11.0 golang.org/x/sync v0.3.0 golang.org/x/sys v0.13.0 golang.org/x/term v0.13.0 golang.org/x/text v0.13.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.10.0 - google.golang.org/api v0.114.0 - google.golang.org/grpc v1.54.0 + google.golang.org/api v0.128.0 + google.golang.org/grpc v1.59.0 gopkg.in/yaml.v2 v2.4.0 honnef.co/go/tools v0.4.5 k8s.io/api v0.27.2 @@ -143,20 +143,27 @@ require ( ) require ( + github.com/cenkalti/backoff/v4 v4.1.1 // indirect github.com/dolthub/maphash v0.1.0 // indirect + github.com/golang-jwt/jwt/v4 v4.4.2 // indirect + github.com/google/s2a-go v0.1.4 // indirect github.com/jfcg/sixb v1.3.8 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/genproto/googleapis/rpc 
v0.0.0-20231016165738-49dd2c1f3d0b // indirect k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect ) require ( - cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.19.0 // indirect + cloud.google.com/go v0.110.8 // indirect + cloud.google.com/go/compute v1.23.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.13.0 // indirect - cloud.google.com/go/pubsub v1.30.0 // indirect + cloud.google.com/go/iam v1.1.3 // indirect + cloud.google.com/go/pubsub v1.33.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 // indirect @@ -179,7 +186,7 @@ require ( github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect github.com/coocood/bbloom v0.0.0-20190830030839-58deb6228d64 // indirect github.com/coocood/rtutil v0.0.0-20190304133409-c84515f646f2 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect @@ -188,7 +195,6 @@ require ( github.com/eapache/queue v1.1.0 // indirect github.com/fatih/structtag v1.2.0 github.com/felixge/httpsnoop v1.0.2 // indirect - github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect github.com/go-kit/kit v0.9.0 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect @@ -196,14 +202,13 @@ require ( github.com/go-ole/go-ole v1.2.6 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/golang-jwt/jwt v3.2.1+incompatible // indirect - github.com/golang/glog v1.1.0 // indirect + github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - 
github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/google/licensecheck v0.3.1 // indirect github.com/google/renameio/v2 v2.0.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.4 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/websocket v1.4.2 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect @@ -274,27 +279,22 @@ require ( github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect - go.etcd.io/bbolt v1.3.6 // indirect - go.etcd.io/etcd/client/v2 v2.305.2 // indirect - go.etcd.io/etcd/pkg/v3 v3.5.2 // indirect - go.etcd.io/etcd/raft/v3 v3.5.2 // indirect - go.opentelemetry.io/contrib v0.20.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect - go.opentelemetry.io/otel v0.20.0 // indirect - go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect - go.opentelemetry.io/otel/metric v0.20.0 // indirect - go.opentelemetry.io/otel/sdk v0.20.0 // indirect - go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect - go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect - go.opentelemetry.io/otel/trace v0.20.0 // indirect - go.opentelemetry.io/proto/otlp v0.7.0 // indirect + go.etcd.io/bbolt v1.3.8 // indirect + go.etcd.io/etcd/client/v2 v2.305.10 // indirect + go.etcd.io/etcd/pkg/v3 v3.5.10 // indirect + go.etcd.io/etcd/raft/v3 v3.5.10 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0 // indirect + go.opentelemetry.io/otel v1.0.1 // indirect + go.opentelemetry.io/otel/sdk v1.0.1 // indirect + go.opentelemetry.io/otel/trace v1.0.1 // indirect + 
go.opentelemetry.io/proto/otlp v0.9.0 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 // indirect golang.org/x/mod v0.11.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.8.2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect diff --git a/go.sum b/go.sum index c602e5e330848..9cd42f0921827 100644 --- a/go.sum +++ b/go.sum @@ -13,33 +13,30 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= +cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.19.0 
h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.23.1 h1:V97tBoDaZHb6leicZ1G6DLK2BAaZLJ/7+9BB/En3hR0= +cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/kms v1.10.1 h1:7hm1bRqGCA1GBRQUrp831TwJ9TWhP+tvLuP497CQS2g= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/iam v1.1.3 h1:18tKG7DzydKWUnLjonWcJO6wjSCAtzh4GcRKlH/Hrzc= +cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= +cloud.google.com/go/kms v1.15.3 h1:RYsbxTRmk91ydKCzekI2YjryO4c5Y2M80Zwcs9/D/cI= +cloud.google.com/go/kms v1.15.3/go.mod h1:AJdXqHxS2GlPyduM99s9iGqi2nwbviBbhV/hdmt4iOQ= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= 
-cloud.google.com/go/pubsub v1.30.0 h1:vCge8m7aUKBJYOgrZp7EsNDf6QMd2CAlXZqWTn3yq6s= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -108,10 +105,7 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat6 github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= @@ -142,18 +136,14 @@ github.com/bazelbuild/buildtools v0.0.0-20230926111657-7d855c59baeb h1:4k69c5E7S github.com/bazelbuild/buildtools 
v0.0.0-20230926111657-7d855c59baeb/go.mod h1:689QdV3hBP7Vo9dJMmzhoYIyo/9iMhEmHkJcnaPRCbo= github.com/bazelbuild/rules_go v0.40.0 h1:i2HspGKiHMAnq2xIsp7sGJ7CiIlLlEKBtL1aogLJhEo= github.com/bazelbuild/rules_go v0.40.0/go.mod h1:TMHmtfpvyfsxaqfL9WnahCsXMWDMICTw7XeK9yVb+YU= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= -github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8/go.mod h1:Iev9Q3MErcn+w3UOJD/DkEzllvugfdx7bGcMOFhvr/4= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blacktear23/go-proxyprotocol v1.0.6 h1:eTt6UMpEnq59NjON49b3Cay8Dm0sCs1nDliwgkyEsRM= github.com/blacktear23/go-proxyprotocol v1.0.6/go.mod h1:FSCbgnRZrQXazBLL5snfBbrcFSMtcmUDhSRb9OfFA1o= github.com/bmatcuk/doublestar/v2 v2.0.4 h1:6I6oUiT/sU27eE2OFcWqBhL1SwjyvQuOssxT4a1yidI= @@ -163,11 +153,11 @@ github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7 github.com/carlmjohnson/flagext v0.21.0 h1:/c4uK3ie786Z7caXLcIMvePNSSiH3bQVGDvmGLMme60= github.com/carlmjohnson/flagext v0.21.0/go.mod h1:Eenv0epIUAr4NuedNmkzI8WmBmjIxZC239XcKxYS2ac= 
github.com/cenk/backoff v2.0.0+incompatible/go.mod h1:7FtoeaSnHoZnmZzz47cM35Y9nSW7tNyaidugnHTaFDE= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -190,14 +180,20 @@ github.com/cloudfoundry/gosigar v1.3.6 h1:gIc08FbB3QPb+nAQhINIK/qhf5REKkY0FTGgRG github.com/cloudfoundry/gosigar v1.3.6/go.mod h1:lNWstu5g5gw59O09Y+wsMNFzBSnU8a0u+Sfx4dq360E= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292/go.mod h1:qRiX68mZX1lGBkTWyp3CLcenw9I94W2dLeRvMzcn9N4= github.com/cockroachdb/cockroach v0.0.0-20170608034007-84bc9597164f/go.mod h1:xeT/CQ0qZHangbYbWShlCGAx31aV4AjGswDUjhKS6HQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/datadriven v1.0.0 h1:uhZrAfEayBecH2w2tZmhe20HJ7hDvrrA4x2Bg9YdZKM= github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= +github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= github.com/cockroachdb/errors v1.8.1 h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y= github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= @@ -218,24 +214,18 @@ github.com/coocood/freecache v1.2.1 h1:/v1CqMq45NFH9mp/Pt142reundeBM0dVUD3osQBeu github.com/coocood/freecache v1.2.1/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk= github.com/coocood/rtutil v0.0.0-20190304133409-c84515f646f2 
h1:NnLfQ77q0G4k2Of2c1ceQ0ec6MkLQyDp+IGdVM0D8XM= github.com/coocood/rtutil v0.0.0-20190304133409-c84515f646f2/go.mod h1:7qG7YFnOALvsx6tKTNmQot8d7cGFXM9TidzvRFLWYwM= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.12+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/daixiang0/gci v0.11.0 h1:XeQbFKkCRxvVyn06EOuNY6LPGBLVuB/W130c8FrnX6A= @@ -283,9 +273,12 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod 
h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= @@ -303,9 +296,6 @@ github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.6-0.20210809144907-32ab6a8243d7+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -321,7 +311,6 @@ github.com/fsouza/fake-gcs-server v1.44.0/go.mod h1:M02aKoTv9Tnlf+gmWnTok1PWVCUH github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/getsentry/raven-go v0.1.2/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod 
h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= @@ -338,10 +327,8 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -376,14 +363,13 @@ github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20180924190550-6f2cf27854a4/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -413,7 +399,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -435,7 +420,6 @@ github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bz github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -479,18 +463,20 @@ github.com/google/pprof v0.0.0-20211122183932-1daafda22083/go.mod h1:KgnwoLYCZ8I github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/skylark v0.0.0-20181101142754-a5f7082aabed h1:rZdD1GeRTHD1aG+VIvhQEYXurx6Wfg4QIT5YVl2tSC8= github.com/google/skylark v0.0.0-20181101142754-a5f7082aabed/go.mod h1:CKSX6SxHW1vp20ZNaeGe3TFFBIwCG6vaYrpAiOzX+NA= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/googleapis/enterprise-certificate-proxy v0.2.4 h1:uGy6JWR/uMIILU8wbf+OkstIrNiMjGpEIyhx8f6W7s4= +github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= -github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.0.0-20190301152420-fca40860790e/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -516,13 +502,11 @@ github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3 github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus 
v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -619,15 +603,12 @@ github.com/johannesboyne/gofakes3 v0.0.0-20230506070712-04da935ef877 h1:O7syWuYG github.com/johannesboyne/gofakes3 v0.0.0-20230506070712-04da935ef877/go.mod h1:AxgWC4DDX54O2WDoQO1Ceabtn6IbktjU/7bigor+66g= github.com/joho/sqltocsv v0.0.0-20210428211105-a6d6801d59df h1:Zrb0IbuLOGHL7nrO2WrcuNWgDTlzFv3zY69QMx4ggQE= github.com/joho/sqltocsv v0.0.0-20210428211105-a6d6801d59df/go.mod h1:mAVCUAYtW9NG31eB30umMSLKcDt6mCUWSjoSn5qBh0k= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod 
h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= @@ -638,7 +619,6 @@ github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0b github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= @@ -665,7 +645,6 @@ github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= github.com/knz/strtime v0.0.0-20181018220328-af2256ee352c/go.mod h1:4ZxfWkxwtc7dBeifERVVWRy9F9rTU9p0yCDgeCtlius= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -702,7 +681,6 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2 github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a 
h1:N9zuLhTvBSRt0gWSiJswwQ2HqDmtX/ZCDJURnKUt1Ik= github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -750,7 +728,6 @@ github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= @@ -869,15 +846,12 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod 
h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -885,16 +859,12 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= 
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/prometheus/prometheus v0.0.0-20190525122359-d20e84d0fb64 h1:3DyLm+sTAJkfLyR/1pJ3L+fU2lFufWbpcgMFlGtqeyA= @@ -923,7 +893,6 @@ github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDN github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rubyist/circuitbreaker v2.2.1+incompatible/go.mod h1:Ycs3JgJADPuzJDwffe12k6BZT8hxVi6lFK+gWYJLN4A= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -962,8 +931,6 @@ github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1K github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus 
v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -984,7 +951,6 @@ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= @@ -993,7 +959,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spkg/bom v1.0.0 h1:S939THe0ukL5WcTGiGqkgtaW5JW+O6ITaIlpJXTYY64= github.com/spkg/bom v1.0.0/go.mod h1:lAz2VbTuYNcvs7iaFF8WW0ufXrHShJ7ck1fYFFbVXJs= github.com/stathat/consistent v1.0.0 h1:ZFJ1QTRn8npNBKW065raSZ8xfOqhpb8vLOkfp4CcL/U= @@ -1016,7 +981,6 @@ github.com/stretchr/testify 
v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= @@ -1027,8 +991,8 @@ github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJf github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a h1:J/YdBZ46WKpXsxsW93SG+q0F8KI+yFrcIDT4c/RNoc4= github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a/go.mod h1:h4xBhSNtOeEosLJ4P7JyKXX7Cabg7AVkWCK5gV2vOrM= -github.com/tikv/client-go/v2 v2.0.8-0.20231025022411-cad314220659 h1:hAhyOHyCyuwEdSOdMOOc6EJ47JIDuvwYCVN+cxLduIU= -github.com/tikv/client-go/v2 v2.0.8-0.20231025022411-cad314220659/go.mod h1:SbuYRFuWsvtstvyzIjgbJkgDjVlthLlK1apa876+RpQ= +github.com/tikv/client-go/v2 v2.0.8-0.20231030021533-3520f13fc074 h1:raPY/P5nmIVGCUSQvLSMYJqxPv/qsFOmnXkVlu7/528= +github.com/tikv/client-go/v2 v2.0.8-0.20231030021533-3520f13fc074/go.mod h1:XiEHwWZfJqgafxW/VEgi1ltGWB9yjwCJBs2kW1xHMY4= github.com/tikv/pd/client v0.0.0-20230912103610-2f57a9f050eb h1:hAcH9tFjQzQ3+ofrAHm4ajOTLliYCOfXpj3+boKOtac= github.com/tikv/pd/client v0.0.0-20230912103610-2f57a9f050eb/go.mod h1:E+6qtPu8fJm5kNjvKWPVFqSgNAFPk07y2EjD03GWzuI= github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= @@ -1039,7 +1003,6 @@ github.com/tklauser/go-sysconf v0.3.11/go.mod 
h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7Am github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= @@ -1087,27 +1050,25 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd/api/v3 v3.5.2 h1:tXok5yLlKyuQ/SXSjtqHc4uzNaMqZi2XsoSPr/LlJXI= -go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/client/pkg/v3 v3.5.2 h1:4hzqQ6hIb3blLyQ8usCU4h3NghkqcsohEQ3o3VetYxE= -go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.2 h1:ymrVwTkefuqA/rPkSW7/B4ApijbPVefRumkY+stNfS0= -go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI= -go.etcd.io/etcd/client/v3 v3.5.2 
h1:WdnejrUtQC4nCxK0/dLTMqKOB+U5TP/2Ya0BJL+1otA= -go.etcd.io/etcd/client/v3 v3.5.2/go.mod h1:kOOaWFFgHygyT0WlSmL8TJiXmMysO/nNUlEsSsN6W4o= -go.etcd.io/etcd/etcdutl/v3 v3.5.2/go.mod h1:f+KEUNxRzqQGq1Y/SsaDN5cmlOGRWgfE3lXEDi5F1Ys= -go.etcd.io/etcd/pkg/v3 v3.5.2 h1:YZUojdoPhOyl5QILYnR8LTUbbNefu/sV4ma+ZMr2tto= -go.etcd.io/etcd/pkg/v3 v3.5.2/go.mod h1:zsXz+9D/kijzRiG/UnFGDTyHKcVp0orwiO8iMLAi+k0= -go.etcd.io/etcd/raft/v3 v3.5.2 h1:uCC37qOXqBvKqTGHGyhASsaCsnTuJugl1GvneJNwHWo= -go.etcd.io/etcd/raft/v3 v3.5.2/go.mod h1:G6pCP1sFgbjod7/KnEHY0vHUViqxjkdt6AiKsD0GRr8= -go.etcd.io/etcd/server/v3 v3.5.2 h1:B6ytJvS4Fmt8nkjzS2/8POf4tuPhFMluE0lWd4dx/7U= -go.etcd.io/etcd/server/v3 v3.5.2/go.mod h1:mlG8znIEz4N/28GABrohZCBM11FqgGVQcpbcyJgh0j0= -go.etcd.io/etcd/tests/v3 v3.5.2 h1:uk7/uMGVebpBDl+roivowHt6gJ5Fnqwik3syDkoSKdo= -go.etcd.io/etcd/tests/v3 v3.5.2/go.mod h1:Jdzbei4uFi9C3xDBfCwckRXjlX0UPooiP4g/zXgBMgQ= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= +go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= +go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= +go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= +go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= +go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= +go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= +go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= +go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= +go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= +go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= 
+go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= +go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= +go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= +go.etcd.io/etcd/tests/v3 v3.5.10 h1:F1pbXwKxwZ58aBT2+CSL/r8WUCAVhob0y1y8OVJ204s= +go.etcd.io/etcd/tests/v3 v3.5.10/go.mod h1:vVMWDv9OhopxfJCd+CMI4pih0zUDqlkJj6JcBNlUVXI= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1117,28 +1078,21 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8= -go.opentelemetry.io/otel/metric v0.20.0/go.mod 
h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0 h1:Wx7nFnvCaissIUZxPkBqDz2963Z+Cl+PkYbDKzTxDqQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= +go.opentelemetry.io/otel v1.0.1 h1:4XKyXmfqJLOQ7feyV5DB6gsBFZ0ltB8vLtp6pj4JIcc= +go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1 h1:ofMbch7i29qIUf7VtF+r0HRF6ac0SBaPSziSsKp7wkk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1 h1:CFMFNoz+CGprjFAFy+RJFrfEe4GBia3RRm2a4fREvCA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= 
+go.opentelemetry.io/otel/sdk v1.0.1 h1:wXxFEWGo7XfXupPwVJvTBOaPBC9FEg0wB8hMNrKk+cA= +go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= +go.opentelemetry.io/otel/trace v1.0.1 h1:StTeIH6Q3G4r0Fiw34LTokUFESZgIDUr0qIJ7mKmAfw= +go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= +go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= go.starlark.net v0.0.0-20210223155950-e043a3d3c984/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1168,7 +1122,6 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= @@ -1186,10 +1139,10 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= @@ -1227,7 +1180,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1264,7 +1216,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1308,8 +1259,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1319,7 +1270,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1357,7 +1307,6 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1372,10 +1321,7 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1383,8 +1329,8 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210909193231-528a39cd75f3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1423,9 +1369,9 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text 
v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= @@ -1436,7 +1382,6 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1467,7 +1412,6 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1499,7 +1443,6 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= @@ -1541,8 +1484,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg= +google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1584,9 +1527,12 @@ google.golang.org/genproto 
v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1607,10 +1553,12 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc 
v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1623,6 +1571,7 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= 
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= @@ -1640,7 +1589,6 @@ gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8 gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= @@ -1659,7 +1607,6 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -1702,7 +1649,6 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h6 sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod 
h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0 h1:ucqkfpjg9WzSUubAO62csmucvxl4/JeW3F4I4909XkM= diff --git a/pkg/executor/importer/precheck_test.go b/pkg/executor/importer/precheck_test.go index 802d3801893dd..9645365fbf055 100644 --- a/pkg/executor/importer/precheck_test.go +++ b/pkg/executor/importer/precheck_test.go @@ -50,9 +50,9 @@ func createMockETCD(t *testing.T) (string, *embed.Etcd) { randPort := int(rand.Int31n(40000)) + 20000 clientAddr := fmt.Sprintf(addrFmt, randPort) lcurl, _ := url.Parse(clientAddr) - cfg.LCUrls, cfg.ACUrls = []url.URL{*lcurl}, []url.URL{*lcurl} + cfg.ListenClientUrls, cfg.AdvertiseClientUrls = []url.URL{*lcurl}, []url.URL{*lcurl} lpurl, _ := url.Parse(fmt.Sprintf(addrFmt, randPort+1)) - cfg.LPUrls, cfg.APUrls = []url.URL{*lpurl}, []url.URL{*lpurl} + cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = []url.URL{*lpurl}, []url.URL{*lpurl} cfg.InitialCluster = "default=" + lpurl.String() cfg.Logger = "zap" embedEtcd, err := embed.StartEtcd(cfg) diff --git a/tests/realtikvtest/brietest/main_test.go b/tests/realtikvtest/brietest/main_test.go index cb7011d295488..91ad1d36f2ece 100644 --- a/tests/realtikvtest/brietest/main_test.go +++ b/tests/realtikvtest/brietest/main_test.go @@ -30,6 +30,7 @@ func TestMain(m *testing.M) { goleak.IgnoreTopFunction("google.golang.org/grpc.(*ccBalancerWrapper).watcher"), goleak.IgnoreTopFunction("google.golang.org/grpc/internal/transport.(*http2Client).keepalive"), goleak.IgnoreTopFunction("google.golang.org/grpc/internal/transport.(*controlBuffer).get"), + goleak.IgnoreTopFunction("google.golang.org/grpc/internal/grpcsync.(*CallbackSerializer).run"), goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"), goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"), } From 5503eb517f986bc704e2eb1639b9abedcaa20cb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E8=B6=85?= Date: Mon, 30 Oct 2023 15:05:06 +0800 Subject: [PATCH 32/33] 
types: always handle overflow error outside the types package (#47997) close pingcap/tidb#47517 --- pkg/expression/builtin_cast.go | 29 ++++++++++++++------- pkg/expression/builtin_cast_vec.go | 34 ++++++++++++------------ pkg/sessionctx/stmtctx/stmtctx_test.go | 4 +-- pkg/types/context/context.go | 18 +++---------- pkg/types/convert.go | 35 ++++++++++++------------- pkg/types/convert_test.go | 2 +- pkg/types/datum.go | 36 ++++++++++++-------------- pkg/types/datum_test.go | 2 +- 8 files changed, 77 insertions(+), 83 deletions(-) diff --git a/pkg/expression/builtin_cast.go b/pkg/expression/builtin_cast.go index 78e0829b87410..c4d0d39e0b864 100644 --- a/pkg/expression/builtin_cast.go +++ b/pkg/expression/builtin_cast.go @@ -541,7 +541,8 @@ func convertJSON2Tp(evalType types.EvalType) func(*stmtctx.StatementContext, typ if item.TypeCode != types.JSONTypeCodeInt64 && item.TypeCode != types.JSONTypeCodeUint64 { return nil, ErrInvalidJSONForFuncIndex } - jsonToInt, err := types.ConvertJSONToInt(sc, item, mysql.HasUnsignedFlag(tp.GetFlag()), tp.GetType()) + jsonToInt, err := types.ConvertJSONToInt(sc.TypeCtx(), item, mysql.HasUnsignedFlag(tp.GetFlag()), tp.GetType()) + err = sc.HandleOverflow(err, err) if mysql.HasUnsignedFlag(tp.GetFlag()) { return uint64(jsonToInt), err } @@ -702,7 +703,9 @@ func (b *builtinCastIntAsDecimalSig) evalDecimal(row chunk.Row) (res *types.MyDe } else { res = types.NewDecFromUint(uint64(val)) } - res, err = types.ProduceDecWithSpecifiedTp(res, b.tp, b.ctx.GetSessionVars().StmtCtx) + sc := b.ctx.GetSessionVars().StmtCtx + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), res, b.tp) + err = sc.HandleOverflow(err, err) return res, isNull, err } @@ -1018,7 +1021,9 @@ func (b *builtinCastRealAsDecimalSig) evalDecimal(row chunk.Row) (res *types.MyD return res, false, err } } - res, err = types.ProduceDecWithSpecifiedTp(res, b.tp, b.ctx.GetSessionVars().StmtCtx) + sc := b.ctx.GetSessionVars().StmtCtx + res, err = 
types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), res, b.tp) + err = sc.HandleOverflow(err, err) return res, false, err } @@ -1130,7 +1135,8 @@ func (b *builtinCastDecimalAsDecimalSig) evalDecimal(row chunk.Row) (res *types. *res = *evalDecimal } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ProduceDecWithSpecifiedTp(res, b.tp, sc) + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), res, b.tp) + err = sc.HandleOverflow(err, err) return res, false, err } @@ -1454,7 +1460,8 @@ func (b *builtinCastStringAsDecimalSig) evalDecimal(row chunk.Row) (res *types.M return res, false, err } } - res, err = types.ProduceDecWithSpecifiedTp(res, b.tp, sc) + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), res, b.tp) + err = sc.HandleOverflow(err, err) return res, false, err } @@ -1599,7 +1606,8 @@ func (b *builtinCastTimeAsDecimalSig) evalDecimal(row chunk.Row) (res *types.MyD return res, isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ProduceDecWithSpecifiedTp(val.ToNumber(), b.tp, sc) + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), val.ToNumber(), b.tp) + err = sc.HandleOverflow(err, err) return res, false, err } @@ -1732,7 +1740,8 @@ func (b *builtinCastDurationAsDecimalSig) evalDecimal(row chunk.Row) (res *types return res, false, err } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ProduceDecWithSpecifiedTp(val.ToNumber(), b.tp, sc) + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), val.ToNumber(), b.tp) + err = sc.HandleOverflow(err, err) return res, false, err } @@ -1834,7 +1843,8 @@ func (b *builtinCastJSONAsIntSig) evalInt(row chunk.Row) (res int64, isNull bool return res, isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ConvertJSONToInt64(sc, val, mysql.HasUnsignedFlag(b.tp.GetFlag())) + res, err = types.ConvertJSONToInt64(sc.TypeCtx(), val, mysql.HasUnsignedFlag(b.tp.GetFlag())) + err = sc.HandleOverflow(err, err) return } @@ -1878,7 +1888,8 @@ func (b 
*builtinCastJSONAsDecimalSig) evalDecimal(row chunk.Row) (res *types.MyD if err != nil { return res, false, err } - res, err = types.ProduceDecWithSpecifiedTp(res, b.tp, sc) + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), res, b.tp) + err = sc.HandleOverflow(err, err) return res, false, err } diff --git a/pkg/expression/builtin_cast_vec.go b/pkg/expression/builtin_cast_vec.go index 3a1f04347ea92..2af8f84bd97a3 100644 --- a/pkg/expression/builtin_cast_vec.go +++ b/pkg/expression/builtin_cast_vec.go @@ -307,8 +307,8 @@ func (b *builtinCastTimeAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, result } *dec = types.MyDecimal{} times[i].FillNumber(dec) - dec, err = types.ProduceDecWithSpecifiedTp(dec, b.tp, sc) - if err != nil { + dec, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), dec, b.tp) + if err = sc.HandleOverflow(err, err); err != nil { return err } decs[i] = *dec @@ -623,8 +623,8 @@ func (b *builtinCastDecimalAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, resu if !(conditionUnionAndUnsigned && decs[i].IsNegative()) { *dec = decs[i] } - dec, err := types.ProduceDecWithSpecifiedTp(dec, b.tp, sc) - if err != nil { + dec, err := types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), dec, b.tp) + if err = sc.HandleOverflow(err, err); err != nil { return err } decs[i] = *dec @@ -899,6 +899,7 @@ func (b *builtinCastRealAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, result result.MergeNulls(buf) bufreal := buf.Float64s() resdecimal := result.Decimals() + sc := b.ctx.GetSessionVars().StmtCtx for i := 0; i < n; i++ { if result.IsNull(i) { continue @@ -917,8 +918,8 @@ func (b *builtinCastRealAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, result } } } - dec, err := types.ProduceDecWithSpecifiedTp(&resdecimal[i], b.tp, b.ctx.GetSessionVars().StmtCtx) - if err != nil { + dec, err := types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), &resdecimal[i], b.tp) + if err = sc.HandleOverflow(err, err); err != nil { return err } resdecimal[i] = *dec @@ -1058,8 +1059,8 @@ func 
(b *builtinCastDurationAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, res } duration.Duration = ds[i] duration.Fsp = fsp - res, err := types.ProduceDecWithSpecifiedTp(duration.ToNumber(), b.tp, sc) - if err != nil { + res, err := types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), duration.ToNumber(), b.tp) + if err = sc.HandleOverflow(err, err); err != nil { return err } d64s[i] = *res @@ -1104,8 +1105,8 @@ func (b *builtinCastIntAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, result * dec.FromUint(uint64(nums[i])) } - dec, err = types.ProduceDecWithSpecifiedTp(dec, b.tp, sc) - if err != nil { + dec, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), dec, b.tp) + if err = sc.HandleOverflow(err, err); err != nil { return err } decs[i] = *dec @@ -1254,12 +1255,13 @@ func (b *builtinCastJSONAsIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.C result.MergeNulls(buf) i64s := result.Int64s() sc := b.ctx.GetSessionVars().StmtCtx + tc := sc.TypeCtx() for i := 0; i < n; i++ { if result.IsNull(i) { continue } - i64s[i], err = types.ConvertJSONToInt64(sc, buf.GetJSON(i), mysql.HasUnsignedFlag(b.tp.GetFlag())) - if err != nil { + i64s[i], err = types.ConvertJSONToInt64(tc, buf.GetJSON(i), mysql.HasUnsignedFlag(b.tp.GetFlag())) + if err = sc.HandleOverflow(err, err); err != nil { return err } } @@ -1643,8 +1645,8 @@ func (b *builtinCastJSONAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, result if err != nil { return err } - tempres, err = types.ProduceDecWithSpecifiedTp(tempres, b.tp, sc) - if err != nil { + tempres, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), tempres, b.tp) + if err = sc.HandleOverflow(err, err); err != nil { return err } res[i] = *tempres @@ -1733,8 +1735,8 @@ func (b *builtinCastStringAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, resul if err := stmtCtx.HandleTruncate(dec.FromString([]byte(val))); err != nil { return err } - dec, err := types.ProduceDecWithSpecifiedTp(dec, b.tp, stmtCtx) - if err != nil { + dec, err := 
types.ProduceDecWithSpecifiedTp(stmtCtx.TypeCtx(), dec, b.tp) + if err = stmtCtx.HandleOverflow(err, err); err != nil { return err } res[i] = *dec diff --git a/pkg/sessionctx/stmtctx/stmtctx_test.go b/pkg/sessionctx/stmtctx/stmtctx_test.go index 06c0ead1e2d54..80686718aa784 100644 --- a/pkg/sessionctx/stmtctx/stmtctx_test.go +++ b/pkg/sessionctx/stmtctx/stmtctx_test.go @@ -353,8 +353,8 @@ func TestSetStmtCtxTypeFlags(t *testing.T) { require.Equal(t, typectx.FlagAllowNegativeToUnsigned|typectx.FlagSkipASCIICheck, sc.TypeFlags()) require.Equal(t, sc.TypeFlags(), sc.TypeFlags()) - sc.SetTypeFlags(typectx.FlagSkipASCIICheck | typectx.FlagSkipUTF8Check | typectx.FlagInvalidDateAsWarning) - require.Equal(t, typectx.FlagSkipASCIICheck|typectx.FlagSkipUTF8Check|typectx.FlagInvalidDateAsWarning, sc.TypeFlags()) + sc.SetTypeFlags(typectx.FlagSkipASCIICheck | typectx.FlagSkipUTF8Check | typectx.FlagTruncateAsWarning) + require.Equal(t, typectx.FlagSkipASCIICheck|typectx.FlagSkipUTF8Check|typectx.FlagTruncateAsWarning, sc.TypeFlags()) require.Equal(t, sc.TypeFlags(), sc.TypeFlags()) } diff --git a/pkg/types/context/context.go b/pkg/types/context/context.go index 5a218497c23ac..d315cb5019c27 100644 --- a/pkg/types/context/context.go +++ b/pkg/types/context/context.go @@ -34,16 +34,10 @@ const ( FlagTruncateAsWarning // FlagAllowNegativeToUnsigned indicates to allow the casting from negative to unsigned int. // When this flag is not set by default, casting a negative value to unsigned results an overflow error. - // The overflow will also be controlled by `FlagIgnoreOverflowError` and `FlagOverflowAsWarning`. When any of them is set, - // a zero value is returned instead. - // Whe this flag is set, casting a negative value to unsigned will be allowed. And the negative value will be cast to - // a positive value by adding the max value of the unsigned type. + // Otherwise, a negative value will be cast to the corresponding unsigned value without any error. 
+ // For example, when casting -1 to an unsigned bigint with `FlagAllowNegativeToUnsigned` set, + // we will get `18446744073709551615` which is the biggest unsigned value. FlagAllowNegativeToUnsigned - // FlagIgnoreOverflowError indicates to ignore the overflow error. - // If this flag is set, `FlagOverflowAsWarning` will be ignored. - FlagIgnoreOverflowError - // FlagOverflowAsWarning indicates to append the overflow error to warnings instead of returning it to user. - FlagOverflowAsWarning // FlagIgnoreZeroDateErr indicates to ignore the zero-date error. // See: https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_no_zero_date for details about the "zero-date" error. // If this flag is set, `FlagZeroDateAsWarning` will be ignored. @@ -55,18 +49,12 @@ const ( // This flag is the reverse of `NoZeroDate` in #30507. It's set to `true` for most context, and is only set to // `false` for `alter` (and `create`) statements. FlagIgnoreZeroDateErr - // FlagZeroDateAsWarning indicates to append the zero-date error to warnings instead of returning it to user. - FlagZeroDateAsWarning // FlagIgnoreZeroInDateErr indicates to ignore the zero-in-date error. // See: https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_no_zero_in_date for details about the "zero-in-date" error. FlagIgnoreZeroInDateErr - // FlagZeroInDateAsWarning indicates to append the zero-in-date error to warnings instead of returning it to user. - FlagZeroInDateAsWarning // FlagIgnoreInvalidDateErr indicates to ignore the invalid-date error. // See: https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_allow_invalid_dates for details about the "invalid-date" error. FlagIgnoreInvalidDateErr - // FlagInvalidDateAsWarning indicates to append the invalid-date error to warnings instead of returning it to user. - FlagInvalidDateAsWarning // FlagSkipASCIICheck indicates to skip the ASCII check when converting the value to an ASCII string. 
FlagSkipASCIICheck // FlagSkipUTF8Check indicates to skip the UTF8 check when converting the value to an UTF8MB3 string. diff --git a/pkg/types/convert.go b/pkg/types/convert.go index a48bc48852aac..02435c0bed3b1 100644 --- a/pkg/types/convert.go +++ b/pkg/types/convert.go @@ -575,21 +575,21 @@ func StrToFloat(ctx Context, str string, isFuncCast bool) (float64, error) { } // ConvertJSONToInt64 casts JSON into int64. -func ConvertJSONToInt64(sc *stmtctx.StatementContext, j BinaryJSON, unsigned bool) (int64, error) { - return ConvertJSONToInt(sc, j, unsigned, mysql.TypeLonglong) +func ConvertJSONToInt64(ctx Context, j BinaryJSON, unsigned bool) (int64, error) { + return ConvertJSONToInt(ctx, j, unsigned, mysql.TypeLonglong) } // ConvertJSONToInt casts JSON into int by type. -func ConvertJSONToInt(sc *stmtctx.StatementContext, j BinaryJSON, unsigned bool, tp byte) (int64, error) { +func ConvertJSONToInt(ctx Context, j BinaryJSON, unsigned bool, tp byte) (int64, error) { switch j.TypeCode { case JSONTypeCodeObject, JSONTypeCodeArray, JSONTypeCodeOpaque, JSONTypeCodeDate, JSONTypeCodeDatetime, JSONTypeCodeTimestamp, JSONTypeCodeDuration: - return 0, sc.HandleTruncate(ErrTruncatedWrongVal.GenWithStackByArgs("INTEGER", j.String())) + return 0, ctx.HandleTruncate(ErrTruncatedWrongVal.GenWithStackByArgs("INTEGER", j.String())) case JSONTypeCodeLiteral: switch j.Value[0] { case JSONLiteralFalse: return 0, nil case JSONLiteralNil: - return 0, sc.HandleTruncate(ErrTruncatedWrongVal.GenWithStackByArgs("INTEGER", j.String())) + return 0, ctx.HandleTruncate(ErrTruncatedWrongVal.GenWithStackByArgs("INTEGER", j.String())) default: return 1, nil } @@ -597,36 +597,34 @@ func ConvertJSONToInt(sc *stmtctx.StatementContext, j BinaryJSON, unsigned bool, i := j.GetInt64() if unsigned { uBound := IntergerUnsignedUpperBound(tp) - u, err := ConvertIntToUint(sc.TypeFlags(), i, uBound, tp) - return int64(u), sc.HandleOverflow(err, err) + u, err := ConvertIntToUint(ctx.Flags(), i, uBound, tp) + 
return int64(u), err } lBound := IntergerSignedLowerBound(tp) uBound := IntergerSignedUpperBound(tp) - i, err := ConvertIntToInt(i, lBound, uBound, tp) - return i, sc.HandleOverflow(err, err) + return ConvertIntToInt(i, lBound, uBound, tp) case JSONTypeCodeUint64: u := j.GetUint64() if unsigned { uBound := IntergerUnsignedUpperBound(tp) u, err := ConvertUintToUint(u, uBound, tp) - return int64(u), sc.HandleOverflow(err, err) + return int64(u), err } uBound := IntergerSignedUpperBound(tp) - i, err := ConvertUintToInt(u, uBound, tp) - return i, sc.HandleOverflow(err, err) + return ConvertUintToInt(u, uBound, tp) case JSONTypeCodeFloat64: f := j.GetFloat64() if !unsigned { lBound := IntergerSignedLowerBound(tp) uBound := IntergerSignedUpperBound(tp) u, e := ConvertFloatToInt(f, lBound, uBound, tp) - return u, sc.HandleOverflow(e, e) + return u, e } bound := IntergerUnsignedUpperBound(tp) - u, err := ConvertFloatToUint(sc.TypeFlags(), f, bound, tp) - return int64(u), sc.HandleOverflow(err, err) + u, err := ConvertFloatToUint(ctx.Flags(), f, bound, tp) + return int64(u), err case JSONTypeCodeString: str := string(hack.String(j.GetString())) // The behavior of casting json string as an integer is consistent with casting a string as an integer. @@ -634,12 +632,11 @@ func ConvertJSONToInt(sc *stmtctx.StatementContext, j BinaryJSON, unsigned bool, // doesn't append any warning. This behavior is compatible with MySQL. 
isNegative := len(str) > 1 && str[0] == '-' if !isNegative { - r, err := StrToUint(sc.TypeCtxOrDefault(), str, false) - return int64(r), sc.HandleOverflow(err, err) + r, err := StrToUint(ctx, str, false) + return int64(r), err } - r, err := StrToInt(sc.TypeCtxOrDefault(), str, false) - return r, sc.HandleOverflow(err, err) + return StrToInt(ctx, str, false) } return 0, errors.New("Unknown type code in JSON") } diff --git a/pkg/types/convert_test.go b/pkg/types/convert_test.go index 3a633a44c9ecb..8578671d7de18 100644 --- a/pkg/types/convert_test.go +++ b/pkg/types/convert_test.go @@ -1084,7 +1084,7 @@ func TestConvertJSONToInt(t *testing.T) { j, err := ParseBinaryJSONFromString(tt.in) require.NoError(t, err) - casted, err := ConvertJSONToInt64(stmtctx.NewStmtCtx(), j, false) + casted, err := ConvertJSONToInt64(stmtctx.NewStmtCtx().TypeCtx(), j, false) if tt.err { require.Error(t, err, tt) } else { diff --git a/pkg/types/datum.go b/pkg/types/datum.go index 1e7ff4968dd88..2408669b9604e 100644 --- a/pkg/types/datum.go +++ b/pkg/types/datum.go @@ -924,7 +924,7 @@ func (d *Datum) ConvertTo(sc *stmtctx.StatementContext, target *FieldType) (Datu } return d.convertToInt(sc, target) case mysql.TypeFloat, mysql.TypeDouble: - return d.convertToFloat(sc, target) + return d.convertToFloat(sc.TypeCtxOrDefault(), target) case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob, mysql.TypeString, mysql.TypeVarchar, mysql.TypeVarString: return d.convertToString(sc, target) @@ -935,7 +935,7 @@ func (d *Datum) ConvertTo(sc *stmtctx.StatementContext, target *FieldType) (Datu case mysql.TypeDuration: return d.convertToMysqlDuration(sc, target) case mysql.TypeNewDecimal: - return d.convertToMysqlDecimal(sc, target) + return d.convertToMysqlDecimal(sc.TypeCtxOrDefault(), target) case mysql.TypeYear: return d.ConvertToMysqlYear(sc, target) case mysql.TypeEnum: @@ -953,7 +953,7 @@ func (d *Datum) ConvertTo(sc *stmtctx.StatementContext, target *FieldType) (Datu } } 
-func (d *Datum) convertToFloat(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToFloat(ctx Context, target *FieldType) (Datum, error) { var ( f float64 ret Datum @@ -969,7 +969,7 @@ func (d *Datum) convertToFloat(sc *stmtctx.StatementContext, target *FieldType) case KindFloat32, KindFloat64: f = d.GetFloat64() case KindString, KindBytes: - f, err = StrToFloat(sc.TypeCtxOrDefault(), d.GetString(), false) + f, err = StrToFloat(ctx, d.GetString(), false) case KindMysqlTime: f, err = d.GetMysqlTime().ToNumber().ToFloat64() case KindMysqlDuration: @@ -981,10 +981,10 @@ func (d *Datum) convertToFloat(sc *stmtctx.StatementContext, target *FieldType) case KindMysqlEnum: f = d.GetMysqlEnum().ToNumber() case KindBinaryLiteral, KindMysqlBit: - val, err1 := d.GetBinaryLiteral().ToInt(sc.TypeCtxOrDefault()) + val, err1 := d.GetBinaryLiteral().ToInt(ctx) f, err = float64(val), err1 case KindMysqlJSON: - f, err = ConvertJSONToFloat(sc.TypeCtxOrDefault(), d.GetMysqlJSON()) + f, err = ConvertJSONToFloat(ctx, d.GetMysqlJSON()) default: return invalidConv(d, target.GetType()) } @@ -1029,7 +1029,7 @@ func ProduceFloatWithSpecifiedTp(f float64, target *FieldType) (_ float64, err e } return -math.MaxFloat32, overflow(f, target.GetType()) } - return f, nil + return f, errors.Trace(err) } func (d *Datum) convertToString(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { @@ -1242,7 +1242,7 @@ func (d *Datum) convertToUint(sc *stmtctx.StatementContext, target *FieldType) ( } case KindMysqlJSON: var i64 int64 - i64, err = ConvertJSONToInt(sc, d.GetMysqlJSON(), true, tp) + i64, err = ConvertJSONToInt(sc.TypeCtxOrDefault(), d.GetMysqlJSON(), true, tp) val = uint64(i64) default: return invalidConv(d, target.GetType()) @@ -1443,7 +1443,7 @@ func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie return ret, nil } -func (d *Datum) convertToMysqlDecimal(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) 
{ +func (d *Datum) convertToMysqlDecimal(ctx Context, target *FieldType) (Datum, error) { var ret Datum ret.SetLength(target.GetFlen()) ret.SetFrac(target.GetDecimal()) @@ -1469,11 +1469,11 @@ func (d *Datum) convertToMysqlDecimal(sc *stmtctx.StatementContext, target *Fiel case KindMysqlSet: err = dec.FromFloat64(d.GetMysqlSet().ToNumber()) case KindBinaryLiteral, KindMysqlBit: - val, err1 := d.GetBinaryLiteral().ToInt(sc.TypeCtxOrDefault()) + val, err1 := d.GetBinaryLiteral().ToInt(ctx) err = err1 dec.FromUint(val) case KindMysqlJSON: - f, err1 := ConvertJSONToDecimal(sc.TypeCtxOrDefault(), d.GetMysqlJSON()) + f, err1 := ConvertJSONToDecimal(ctx, d.GetMysqlJSON()) if err1 != nil { return ret, errors.Trace(err1) } @@ -1481,7 +1481,7 @@ func (d *Datum) convertToMysqlDecimal(sc *stmtctx.StatementContext, target *Fiel default: return invalidConv(d, target.GetType()) } - dec1, err1 := ProduceDecWithSpecifiedTp(dec, target, sc) + dec1, err1 := ProduceDecWithSpecifiedTp(ctx, dec, target) // If there is a error, dec1 may be nil. if dec1 != nil { dec = dec1 @@ -1500,7 +1500,7 @@ func (d *Datum) convertToMysqlDecimal(sc *stmtctx.StatementContext, target *Fiel } // ProduceDecWithSpecifiedTp produces a new decimal according to `flen` and `decimal`. -func ProduceDecWithSpecifiedTp(dec *MyDecimal, tp *FieldType, sc *stmtctx.StatementContext) (_ *MyDecimal, err error) { +func ProduceDecWithSpecifiedTp(ctx Context, dec *MyDecimal, tp *FieldType) (_ *MyDecimal, err error) { flen, decimal := tp.GetFlen(), tp.GetDecimal() if flen != UnspecifiedLength && decimal != UnspecifiedLength { if flen < decimal { @@ -1528,14 +1528,10 @@ func ProduceDecWithSpecifiedTp(dec *MyDecimal, tp *FieldType, sc *stmtctx.Statem // select cast(111 as decimal(1)) causes a warning in MySQL. 
err = ErrOverflow.GenWithStackByArgs("DECIMAL", fmt.Sprintf("(%d, %d)", flen, decimal)) } else if old != nil && dec.Compare(old) != 0 { - sc.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("DECIMAL", old)) + ctx.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("DECIMAL", old)) } } - if ErrOverflow.Equal(err) { - // TODO: warnErr need to be ErrWarnDataOutOfRange - err = sc.HandleOverflow(err, err) - } unsigned := mysql.HasUnsignedFlag(tp.GetFlag()) if unsigned && dec.IsNegative() { dec = dec.FromUint(0) @@ -1568,7 +1564,7 @@ func (d *Datum) ConvertToMysqlYear(sc *stmtctx.StatementContext, target *FieldTy case KindMysqlTime: y = int64(d.GetMysqlTime().Year()) case KindMysqlJSON: - y, err = ConvertJSONToInt64(sc, d.GetMysqlJSON(), false) + y, err = ConvertJSONToInt64(sc.TypeCtxOrDefault(), d.GetMysqlJSON(), false) if err != nil { ret.SetInt64(0) return ret, errors.Trace(err) @@ -1921,7 +1917,7 @@ func (d *Datum) toSignedInteger(sc *stmtctx.StatementContext, tp byte) (int64, e fval := d.GetMysqlSet().ToNumber() return ConvertFloatToInt(fval, lowerBound, upperBound, tp) case KindMysqlJSON: - return ConvertJSONToInt(sc, d.GetMysqlJSON(), false, tp) + return ConvertJSONToInt(sc.TypeCtxOrDefault(), d.GetMysqlJSON(), false, tp) case KindBinaryLiteral, KindMysqlBit: val, err := d.GetBinaryLiteral().ToInt(sc.TypeCtxOrDefault()) if err != nil { diff --git a/pkg/types/datum_test.go b/pkg/types/datum_test.go index 5abfda17cc064..dc931779fd5a9 100644 --- a/pkg/types/datum_test.go +++ b/pkg/types/datum_test.go @@ -654,7 +654,7 @@ func TestProduceDecWithSpecifiedTp(t *testing.T) { for _, tt := range tests { tp := NewFieldTypeBuilder().SetType(mysql.TypeNewDecimal).SetFlen(tt.flen).SetDecimal(tt.frac).BuildP() dec := NewDecFromStringForTest(tt.dec) - newDec, err := ProduceDecWithSpecifiedTp(dec, tp, sc) + newDec, err := ProduceDecWithSpecifiedTp(sc.TypeCtx(), dec, tp) if tt.isOverflow { if !ErrOverflow.Equal(err) { assert.FailNow(t, "Error is not overflow", "err: %v 
before: %v after: %v", err, tt.dec, dec) From f9f6bb35c2e43d4a0e0a86bf03946e0d92ddbb68 Mon Sep 17 00:00:00 2001 From: jiyfhust Date: Mon, 30 Oct 2023 15:59:06 +0800 Subject: [PATCH 33/33] ddl: fix recover table by JobID bug when JobID is set to 0 tidb-server panic (#46343) close pingcap/tidb#46296 --- pkg/executor/ddl.go | 7 ++++--- pkg/executor/recover_test.go | 5 +++++ pkg/parser/ast/ddl.go | 8 ++++---- pkg/parser/parser_test.go | 1 + 4 files changed, 14 insertions(+), 7 deletions(-) diff --git a/pkg/executor/ddl.go b/pkg/executor/ddl.go index 9cf577ea31688..3a447d1be551c 100644 --- a/pkg/executor/ddl.go +++ b/pkg/executor/ddl.go @@ -393,10 +393,11 @@ func (e *DDLExec) executeRecoverTable(s *ast.RecoverTableStmt) error { var job *model.Job var err error var tblInfo *model.TableInfo - if s.JobID != 0 { - job, tblInfo, err = e.getRecoverTableByJobID(s, dom) - } else { + // Let check table first. Related isssue #46296. + if s.Table != nil { job, tblInfo, err = e.getRecoverTableByTableName(s.Table) + } else { + job, tblInfo, err = e.getRecoverTableByJobID(s, dom) } if err != nil { return err diff --git a/pkg/executor/recover_test.go b/pkg/executor/recover_test.go index 7c678510d28bb..f56de7988bfb9 100644 --- a/pkg/executor/recover_test.go +++ b/pkg/executor/recover_test.go @@ -92,6 +92,11 @@ func TestRecoverTable(t *testing.T) { err := tk.ExecToErr(fmt.Sprintf("recover table by job %d", 10000000)) require.Error(t, err) + // recover table by zero JobID. + // related issue: https://github.com/pingcap/tidb/issues/46296 + err = tk.ExecToErr(fmt.Sprintf("recover table by job %d", 0)) + require.Error(t, err) + // Disable GC by manual first, then after recover table, the GC enable status should also be disabled. 
require.NoError(t, gcutil.DisableGC(tk.Session())) diff --git a/pkg/parser/ast/ddl.go b/pkg/parser/ast/ddl.go index 8d67dbc51e34e..2ed5135215208 100644 --- a/pkg/parser/ast/ddl.go +++ b/pkg/parser/ast/ddl.go @@ -4381,16 +4381,16 @@ type RecoverTableStmt struct { // Restore implements Node interface. func (n *RecoverTableStmt) Restore(ctx *format.RestoreCtx) error { ctx.WriteKeyWord("RECOVER TABLE ") - if n.JobID != 0 { - ctx.WriteKeyWord("BY JOB ") - ctx.WritePlainf("%d", n.JobID) - } else { + if n.Table != nil { if err := n.Table.Restore(ctx); err != nil { return errors.Annotate(err, "An error occurred while splicing RecoverTableStmt Table") } if n.JobNum > 0 { ctx.WritePlainf(" %d", n.JobNum) } + } else { + ctx.WriteKeyWord("BY JOB ") + ctx.WritePlainf("%d", n.JobID) } return nil } diff --git a/pkg/parser/parser_test.go b/pkg/parser/parser_test.go index f9b8fb8681096..4c5e5ee39f461 100644 --- a/pkg/parser/parser_test.go +++ b/pkg/parser/parser_test.go @@ -3373,6 +3373,7 @@ func TestDDL(t *testing.T) { {"recover table by job 11", true, "RECOVER TABLE BY JOB 11"}, {"recover table by job 11,12,13", false, ""}, {"recover table by job", false, ""}, + {"recover table by job 0", true, "RECOVER TABLE BY JOB 0"}, {"recover table t1", true, "RECOVER TABLE `t1`"}, {"recover table t1,t2", false, ""}, {"recover table ", false, ""},