From 70c04059fce9997de2f2f1d62a41eafbb2c843ec Mon Sep 17 00:00:00 2001 From: knull-cn Date: Tue, 28 Dec 2021 16:59:34 +0800 Subject: [PATCH 1/9] index optimize --- ddl/index.go | 8 ++ ddl/sst/common.go | 105 +++++++++++++++++++ ddl/sst/index.go | 232 ++++++++++++++++++++++++++++++++++++++++++ table/tables/index.go | 9 +- 4 files changed, 353 insertions(+), 1 deletion(-) create mode 100644 ddl/sst/common.go create mode 100644 ddl/sst/index.go diff --git a/ddl/index.go b/ddl/index.go index 081f80e6b71bd..b47029cd15337 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -16,6 +16,7 @@ package ddl import ( "context" + "github.com/pingcap/tidb/ddl/sst" "strings" "sync/atomic" "time" @@ -509,6 +510,8 @@ func (w *worker) onCreateIndex(d *ddlCtx, t *meta.Meta, job *model.Job, isPK boo originalState := indexInfo.State switch indexInfo.State { case model.StateNone: + // TODO: optimize index-ddl + sst.PrepareIndexOp(w.ctx, sst.DDLInfo{job.SchemaName, tblInfo, job.RealStartTS}) // none -> delete only indexInfo.State = model.StateDeleteOnly updateHiddenColumns(tblInfo, indexInfo, model.StatePublic) @@ -599,6 +602,11 @@ func (w *worker) onCreateIndex(d *ddlCtx, t *meta.Meta, job *model.Job, isPK boo } // Finish this job. job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + // TODO: optimize index ddl. + err = sst.FinishIndexOp(w.ctx, job.StartTS) + if err != nil { + logutil.BgLogger().Error("FinishIndexOp err" + err.Error()) + } default: err = ErrInvalidDDLState.GenWithStackByArgs("index", tblInfo.State) } diff --git a/ddl/sst/common.go b/ddl/sst/common.go new file mode 100644 index 0000000000000..3b7390387ad8a --- /dev/null +++ b/ddl/sst/common.go @@ -0,0 +1,105 @@ +package sst + +import ( + "context" + "database/sql" + "fmt" + "github.com/google/uuid" + "github.com/pingcap/tidb/br/pkg/lightning/backend" + "github.com/pingcap/tidb/br/pkg/lightning/backend/local" + "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" + "github.com/pingcap/tidb/br/pkg/lightning/config" + "github.com/pingcap/tidb/br/pkg/lightning/glue" + "github.com/pingcap/tidb/br/pkg/lightning/log" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" + "io/ioutil" + "sync/atomic" + "time" +) + +var ( + limit = int64(1024) + tblId int64 = time.Now().Unix() +) + +func genNextTblId() int64 { + return atomic.AddInt64(&tblId, 1) +} + +func init() { + var rLimit local.Rlim_t + rLimit, err := local.GetSystemRLimit() + if err != nil { + logutil.BgLogger().Warn(fmt.Sprintf("GetSystemRLimit err:%s;use default 1024.", err.Error())) + } else { + limit = int64(rLimit) + } +} + +type glue_ struct{} + +func (_ glue_) OwnsSQLExecutor() bool { + return false +} +func (_ glue_) GetSQLExecutor() glue.SQLExecutor { + return nil +} +func (_ glue_) GetDB() (*sql.DB, error) { + return nil, nil +} +func (_ glue_) GetParser() *parser.Parser { + return nil +} +func (_ glue_) GetTables(context.Context, string) ([]*model.TableInfo, error) { + return nil, nil +} +func (_ glue_) GetSession(context.Context) (checkpoints.Session, error) { + return nil, nil +} +func (_ glue_) OpenCheckpointsDB(context.Context, *config.Config) (checkpoints.DB, error) { + return nil, nil +} + +// Record is used to report some information (key, value) to host TiDB, including progress, stage currently +func (_ glue_) Record(string, uint64) { + +} + +func makeLogger(tag string, engineUUID uuid.UUID) log.Logger { + obj := logutil.BgLogger().With( + zap.String("engineTag", tag), + zap.Stringer("engineUUID", engineUUID), + ) + return log.Logger{obj} +} + +func generateLightningConfig(info ClusterInfo) *config.Config { + cfg := config.Config{} + cfg.DefaultVarsForImporterAndLocalBackend() + name, err := ioutil.TempDir("/tmp/", "lightning") + if err != nil { + logutil.BgLogger().Warn(fmt.Sprintf("TempDir err:%s.", err.Error())) + name = "/tmp/lightning" + } + // cfg.TikvImporter.RangeConcurrency = 32 + cfg.Checkpoint.Enable = false + cfg.TikvImporter.SortedKVDir = name + cfg.TikvImporter.DuplicateResolution = config.DupeResAlgNone + cfg.TiDB.PdAddr = info.PdAddr + cfg.TiDB.Host = "127.0.0.1" + cfg.TiDB.StatusPort = int(info.Status) + return &cfg +} + +func createLocalBackend(ctx context.Context, info ClusterInfo) (backend.Backend, error) { + cfg := generateLightningConfig(info) + tls, err := cfg.ToTLS() + if err != nil { + return backend.Backend{}, err + } + var g glue_ + return local.NewLocalBackend(ctx, tls, cfg, &g, int(limit), nil) +} diff --git a/ddl/sst/index.go b/ddl/sst/index.go new file mode 100644 index 0000000000000..31c417420c108 --- /dev/null +++ b/ddl/sst/index.go @@ -0,0 +1,232 @@ +package sst + +import ( + "context" + "encoding/binary" + "flag" + "fmt" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/br/pkg/lightning/backend" + "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" + "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" + "github.com/pingcap/tidb/br/pkg/lightning/config" + tidbcfg "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/util/logutil" + "github.com/twmb/murmur3" + "sync" + "sync/atomic" +) + +func LogDebug(format string, a ...interface{}) { + fmt.Printf("debug] %s", fmt.Sprintf(format, a...)) +} + +// pdaddr; tidb-host/status +type ClusterInfo struct { + PdAddr string + // TidbHost string - 127.0.0.1 + Port uint + Status uint +} + +type DDLInfo struct { + Schema string + Table *model.TableInfo + StartTs uint64 +} + +const ( + indexEngineID = -1 // same to restore.table_restore.go indexEngineID +) + +type engineInfo struct { + *backend.OpenedEngine + writer *backend.LocalEngineWriter + cfg *backend.EngineConfig + ref int32 +} + +func (ec *engineCache) put(startTs uint64, cfg *backend.EngineConfig, en *backend.OpenedEngine) { + ec.mtx.Lock() + ec.cache[startTs] = &engineInfo{ + en, + nil, + cfg, + 0, + } + ec.mtx.Unlock() + LogDebug("put %d", startTs) +} + +var ( + ErrNotFound = errors.New("not object in this cache") + ErrWasInUse = errors.New("this object was in used") + ec = engineCache{cache: map[uint64]*engineInfo{}} + cluster ClusterInfo + IndexDDLLightning = flag.Bool("ddl-mode", true, "index ddl use sst mode") +) + +func (ec *engineCache) getEngineInfo(startTs uint64) (*engineInfo, error) { + LogDebug("getEngineInfo by %d", startTs) + ec.mtx.RUnlock() + ei := ec.cache[startTs] + // `ref` or run by atomic ? + // if ei.ref { + // ei = nil + // } else { + // ei.ref = true + // } + ec.mtx.Unlock() + if false == atomic.CompareAndSwapInt32(&ei.ref, 0, 1) { + return nil, ErrWasInUse + } + return ei, nil +} + +func (ec *engineCache) releaseRef(startTs uint64) { + LogDebug("releaseRef by %d", startTs) + ec.mtx.RUnlock() + ei := ec.cache[startTs] + ec.mtx.Unlock() + atomic.CompareAndSwapInt32(&ei.ref, 1, 0) +} + +func (ec *engineCache) getWriter(startTs uint64) (*backend.LocalEngineWriter, error) { + LogDebug("getWriter by %d", startTs) + ei, err := ec.getEngineInfo(startTs) + if err != nil { + return nil, err + } + if ei.writer != nil { + return ei.writer, nil + } + ei.writer, err = ei.OpenedEngine.LocalWriter(context.TODO(), &backend.LocalWriterConfig{}) + if err != nil { + return nil, err + } + return ei.writer, nil +} + +type engineCache struct { + cache map[uint64]*engineInfo + mtx sync.RWMutex +} + +func init() { + cfg := tidbcfg.GetGlobalConfig() + cluster.PdAddr = cfg.AdvertiseAddress + cluster.Port = cfg.Port + cluster.Status = cfg.Status.StatusPort + LogDebug("InitOnce %+v", cluster) +} + +// TODO: 1. checkpoint?? +// TODO: 2. EngineID can use startTs for only. +func PrepareIndexOp(ctx context.Context, ddl DDLInfo) error { + LogDebug("PrepareIndexOp %+v", ddl) + info := cluster + be, err := createLocalBackend(ctx, info) + if err != nil { + return fmt.Errorf("PrepareIndexOp.createLocalBackend err:%w", err) + } + cpt := checkpoints.TidbTableInfo{ + genNextTblId(), + ddl.Schema, + ddl.Table.Name.String(), + ddl.Table, + } + var cfg backend.EngineConfig + cfg.TableInfo = &cpt + // + var b [8]byte + binary.BigEndian.PutUint64(b[:], ddl.StartTs) + h := murmur3.New32() + h.Write(b[:]) + en, err := be.OpenEngine(ctx, &cfg, ddl.Table.Name.String(), int32(h.Sum32())) + if err != nil { + return fmt.Errorf("PrepareIndexOp.OpenEngine err:%w", err) + } + ec.put(ddl.StartTs, &cfg, en) + return nil +} + +func IndexOperator(ctx context.Context, startTs uint64, kvp kv.KvPairs) error { + if kvp.Size() <= 0 { + return nil + } + lw, err := ec.getWriter(startTs) + if err != nil { + return fmt.Errorf("IndexOperator.getWriter err:%w", err) + } + defer ec.releaseRef(startTs) + err = lw.WriteRows(ctx, nil, &kvp) + if err != nil { + return fmt.Errorf("IndexOperator.WriteRows err:%w", err) + } + return nil +} + +// stop this routine by close(kvs) or some context error. +func RunIndexOpRoutine(ctx context.Context, engine *backend.OpenedEngine, kvs <-chan kv.KvPairs) error { + logutil.BgLogger().Info("createIndex-routine on dbname.tbl") + + running := true + for running { + select { + case <-ctx.Done(): + fmt.Errorf("RunIndexOpRoutine was exit by Context.Done") + case kvp, close := <-kvs: + if close { + running = false + break + } + err := process(ctx, engine, kvp) + if err != nil { + return fmt.Errorf("process err:%s.clean data.", err.Error()) + } + } + } + logutil.BgLogger().Info("createIndex-routine on dbname.tbl exit...") + return nil +} + +func FinishIndexOp(ctx context.Context, startTs uint64) error { + LogDebug("FinishIndexOp %d", startTs) + ei,err := ec.getEngineInfo(startTs) + if err != nil { + return err + } + defer ec.releaseRef(startTs) + indexEngine := ei.OpenedEngine + cfg := ei.cfg + // + closeEngine, err1 := indexEngine.Close(ctx, cfg) + if err1 != nil { + return fmt.Errorf("engine.Close err:%w", err1) + } + // use default value first; + err = closeEngine.Import(ctx, int64(config.SplitRegionSize)) + if err != nil { + return fmt.Errorf("engine.Import err:%w", err) + } + err = closeEngine.Cleanup(ctx) + if err != nil { + return fmt.Errorf("engine.Cleanup err:%w", err) + } + return nil +} + +func process(ctx context.Context, indexEngine *backend.OpenedEngine, kvp kv.KvPairs) error { + indexWriter, err := indexEngine.LocalWriter(ctx, &backend.LocalWriterConfig{}) + if err != nil { + return fmt.Errorf("LocalWriter err:%s", err.Error()) + } + // columnNames 可以不需要,因为我们肯定是 非 sorted 的数据. + err = indexWriter.WriteRows(ctx, nil, &kvp) + if err != nil { + indexWriter.Close(ctx) + return fmt.Errorf("WriteRows err:%s", err.Error()) + } + return nil +} diff --git a/table/tables/index.go b/table/tables/index.go index 08d3ecef1f820..f63683d7667d6 100644 --- a/table/tables/index.go +++ b/table/tables/index.go @@ -22,6 +22,8 @@ import ( "github.com/opentracing/opentracing-go" "github.com/pingcap/errors" + lkv "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" + "github.com/pingcap/tidb/ddl/sst" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -183,7 +185,12 @@ func (c *index) Create(sctx sessionctx.Context, txn kv.Transaction, indexedValue if err != nil { return nil, err } - + // TODO: optimize index ddl + if *sst.IndexDDLLightning { + var kvp lkv.KvPairs + err = sst.IndexOperator(ctx, txn.StartTS(), kvp) + return nil, err + } if !distinct || skipCheck || opt.Untouched { err = txn.GetMemBuffer().Set(key, idxVal) return nil, err From 9505955d60813a85c86cb9e6e25444e426b7797d Mon Sep 17 00:00:00 2001 From: knull-cn Date: Tue, 28 Dec 2021 17:48:56 +0800 Subject: [PATCH 2/9] rfc add --- hackathon_rfc.md | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 hackathon_rfc.md diff --git a/hackathon_rfc.md b/hackathon_rfc.md new file mode 100644 index 0000000000000..f2f681979e7e4 --- /dev/null +++ b/hackathon_rfc.md @@ -0,0 +1,39 @@ ++ 作者:胡海峰(huhaifeng@pingcap.com)/李淳竹(lichunzhu@pingcap.com)/曹闯(2546768090@qq.com) ++ 项目进展:正在写 [demo](git@github.com:hackathon2021index/tidb.git) + +# 项目介绍 + 用 lightning 的方式来实现 索引相关ddl : 生成 sst 文件然后 ingest 到 tikv. + +# 背景&动机 + 在表数据量巨大的情况下,索引相关 ddl 是很慢的。而 TiDB 虽然支持 `在线 DDL`,但是还是会耗时很久。之前在 平安 POC ,跟 OB pk,ob 16分钟,我们要60分钟。 + + 我们 lightning 导入数据还是很快的,而导入数据本身包含 导入索引。所以,尝试将 lighting 导入索引功能来实现 tidb 的 ddl 。 + + +# 项目设计 +## 架构设计 + +原来的 `index ddl` 基本流程: ++ 修改表 meta 数据 ++ 修改索引数据 ++ finish + +现在,原来的没有变化,只需要把 `修改索引数据` 这里 修改为 `lightning` 来完成就可以了。 +`修改索引数据` 其实也有如下步骤: ++ 将索引列数据,主键数据 取出来 ++ 将 索引列数据 和 主键数据,构造为 kv 保存到本地。 ++ ddl 完成的时候,将 sst 文件 ingest 到 tikv + +该功能,主要涉及到 `tidb/ddl` / `table/tables` 相关组件 + +## 测试 + +- 功能验证 + - admin check + - 跟 ddl 速度对比 + - 是否正常走索引 +- 完备性验证 + - 正常加索引 + - 读时 加索引 + - 写时 加索引 + - 读写时 加索引 \ No newline at end of file From e648474ce9962c227dcd9341d9857dea9764237b Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Wed, 29 Dec 2021 22:09:14 +0800 Subject: [PATCH 3/9] move --- br/pkg/backup/client.go | 20 +-- br/pkg/backup/push.go | 5 +- br/pkg/conn/conn.go | 20 +-- br/pkg/kv/kv.go | 6 +- br/pkg/lightning/backend/backend.go | 7 +- br/pkg/lightning/backend/importer/importer.go | 13 +- br/pkg/lightning/backend/kv/kv2sql.go | 3 +- br/pkg/lightning/backend/kv/session.go | 7 +- br/pkg/lightning/backend/kv/sql2kv.go | 11 +- br/pkg/lightning/backend/kv/sql2kv_test.go | 120 +----------------- br/pkg/lightning/backend/local/local.go | 33 ++--- br/pkg/lightning/backend/local/localhelper.go | 11 +- br/pkg/lightning/backend/tidb/tidb.go | 13 +- br/pkg/lightning/common/util.go | 9 +- br/pkg/lightning/mydump/csv_parser.go | 5 +- br/pkg/lightning/mydump/region.go | 6 +- br/pkg/lightning/restore/checksum.go | 16 ++- br/pkg/lightning/restore/restore.go | 15 ++- br/pkg/lightning/restore/table_restore.go | 11 +- br/pkg/restore/client.go | 24 ++-- br/pkg/restore/import.go | 7 +- br/pkg/restore/split.go | 12 +- br/pkg/restore/split_test.go | 11 +- br/pkg/task/backup.go | 10 +- br/pkg/task/restore.go | 10 +- br/pkg/utils/backoff.go | 13 +- br/pkg/utils/backoff_test.go | 18 +-- br/pkg/utils/{ => utildb}/db.go | 2 +- br/pkg/utils/{ => utildb}/retry.go | 5 +- br/pkg/utils/{ => utildb}/retry_test.go | 5 +- br/pkg/utils/{ => utilmath}/math.go | 2 +- br/pkg/utils/{ => utilmath}/math_test.go | 2 +- br/pkg/version/version.go | 9 +- ddl/sst/glue/lightning_glue.go | 4 + ddl/sst/index.go | 10 +- dumpling/export/consistency.go | 4 +- dumpling/export/writer.go | 4 +- table/tables/context/add_context.go | 33 +++++ table/tables/partition.go | 4 +- table/tables/partition_test.go | 6 +- table/tables/tables.go | 74 +++++------ table/tables/tables_test.go | 12 +- 42 files changed, 284 insertions(+), 328 deletions(-) rename br/pkg/utils/{ => utildb}/db.go (98%) rename br/pkg/utils/{ => utildb}/retry.go (99%) rename br/pkg/utils/{ => utildb}/retry_test.go (99%) rename br/pkg/utils/{ => utilmath}/math.go (98%) rename br/pkg/utils/{ => utilmath}/math_test.go (98%) create mode 100644 ddl/sst/glue/lightning_glue.go create mode 100644 table/tables/context/add_context.go diff --git a/br/pkg/backup/client.go b/br/pkg/backup/client.go index 79edc3403be68..422c1d980d596 100644 --- a/br/pkg/backup/client.go +++ b/br/pkg/backup/client.go @@ -21,6 +21,15 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" filter "github.com/pingcap/tidb-tools/pkg/table-filter" + "github.com/tikv/client-go/v2/oracle" + "github.com/tikv/client-go/v2/tikv" + "github.com/tikv/client-go/v2/txnkv/txnlock" + pd "github.com/tikv/pd/client" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "github.com/pingcap/tidb/br/pkg/conn" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" @@ -30,6 +39,7 @@ import ( "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" @@ -38,14 +48,6 @@ import ( "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/ranger" - "github.com/tikv/client-go/v2/oracle" - "github.com/tikv/client-go/v2/tikv" - "github.com/tikv/client-go/v2/txnkv/txnlock" - pd "github.com/tikv/pd/client" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) // ClientMgr manages connections needed by backup. @@ -758,7 +760,7 @@ func OnBackupResponse( return nil, 0, errors.Annotatef(berrors.ErrKVClusterIDMismatch, "%v on storeID: %d", resp.Error, storeID) default: // UNSAFE! TODO: use meaningful error code instead of unstructured message to find failed to write error. - if utils.MessageIsRetryableStorageError(resp.GetError().GetMsg()) { + if utildb.MessageIsRetryableStorageError(resp.GetError().GetMsg()) { log.Warn("backup occur storage error", zap.String("error", resp.GetError().GetMsg())) // back off 3000ms, for S3 is 99.99% available (i.e. the max outage time would less than 52.56mins per year), // this time would be probably enough for s3 to resume. diff --git a/br/pkg/backup/push.go b/br/pkg/backup/push.go index c365eaaa96921..86422abae1aaf 100644 --- a/br/pkg/backup/push.go +++ b/br/pkg/backup/push.go @@ -12,11 +12,14 @@ import ( "github.com/pingcap/failpoint" backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/kvproto/pkg/metapb" + berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/redact" "github.com/pingcap/tidb/br/pkg/rtree" "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" + "go.uber.org/zap" ) @@ -159,7 +162,7 @@ func (push *pushDown) pushBackup( logutil.CL(ctx).Error("backup occur cluster ID error", zap.Reflect("error", v)) return res, errors.Annotatef(berrors.ErrKVClusterIDMismatch, "%v", errPb) default: - if utils.MessageIsRetryableStorageError(errPb.GetMsg()) { + if utildb.MessageIsRetryableStorageError(errPb.GetMsg()) { logutil.CL(ctx).Warn("backup occur storage error", zap.String("error", errPb.GetMsg())) continue } diff --git a/br/pkg/conn/conn.go b/br/pkg/conn/conn.go index 67db05219e510..52b14742b2c8f 100755 --- a/br/pkg/conn/conn.go +++ b/br/pkg/conn/conn.go @@ -16,14 +16,6 @@ import ( backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" - berrors "github.com/pingcap/tidb/br/pkg/errors" - "github.com/pingcap/tidb/br/pkg/glue" - "github.com/pingcap/tidb/br/pkg/logutil" - "github.com/pingcap/tidb/br/pkg/pdutil" - "github.com/pingcap/tidb/br/pkg/utils" - "github.com/pingcap/tidb/br/pkg/version" - "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/kv" "github.com/tikv/client-go/v2/tikv" "github.com/tikv/client-go/v2/txnkv/txnlock" pd "github.com/tikv/pd/client" @@ -34,6 +26,16 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" + + berrors "github.com/pingcap/tidb/br/pkg/errors" + "github.com/pingcap/tidb/br/pkg/glue" + "github.com/pingcap/tidb/br/pkg/logutil" + "github.com/pingcap/tidb/br/pkg/pdutil" + "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" + "github.com/pingcap/tidb/br/pkg/version" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" ) const ( @@ -173,7 +175,7 @@ func GetAllTiKVStoresWithRetry(ctx context.Context, stores := make([]*metapb.Store, 0) var err error - errRetry := utils.WithRetry( + errRetry := utildb.WithRetry( ctx, func() error { stores, err = GetAllTiKVStores(ctx, pdClient, storeBehavior) diff --git a/br/pkg/kv/kv.go b/br/pkg/kv/kv.go index 02c9457fb7fbb..ac2894b252d7e 100644 --- a/br/pkg/kv/kv.go +++ b/br/pkg/kv/kv.go @@ -24,6 +24,8 @@ import ( "github.com/pingcap/errors" sst "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/log" + "go.uber.org/zap" + "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/redact" "github.com/pingcap/tidb/kv" @@ -32,9 +34,9 @@ import ( "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + context2 "github.com/pingcap/tidb/table/tables/context" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" - "go.uber.org/zap" ) var extraHandleColumnInfo = model.NewExtraHandleColInfo() @@ -230,7 +232,7 @@ type tableKVEncoder struct { func NewTableKVEncoder(tbl table.Table, options *SessionOptions) Encoder { se := newSession(options) // Set CommonAddRecordCtx to session to reuse the slices and BufStore in AddRecord - recordCtx := tables.NewCommonAddRecordCtx(len(tbl.Cols())) + recordCtx := context2.NewCommonAddRecordCtx(len(tbl.Cols())) tables.SetAddRecordCtx(se, recordCtx) return &tableKVEncoder{ tbl: tbl, diff --git a/br/pkg/lightning/backend/backend.go b/br/pkg/lightning/backend/backend.go index a28e80b30215a..dab3773a80747 100644 --- a/br/pkg/lightning/backend/backend.go +++ b/br/pkg/lightning/backend/backend.go @@ -23,16 +23,17 @@ import ( "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "go.uber.org/zap" + "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/metric" "github.com/pingcap/tidb/br/pkg/lightning/mydump" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/table" - "go.uber.org/zap" ) const ( @@ -451,7 +452,7 @@ func (engine *ClosedEngine) Import(ctx context.Context, regionSplitSize int64) e for i := 0; i < importMaxRetryTimes; i++ { task := engine.logger.With(zap.Int("retryCnt", i)).Begin(zap.InfoLevel, "import") err = engine.backend.ImportEngine(ctx, engine.uuid, regionSplitSize) - if !utils.IsRetryableError(err) { + if !utildb.IsRetryableError(err) { task.End(zap.ErrorLevel, err) return err } diff --git a/br/pkg/lightning/backend/importer/importer.go b/br/pkg/lightning/backend/importer/importer.go index 4b2c3dd5b9128..52b75803e7659 100644 --- a/br/pkg/lightning/backend/importer/importer.go +++ b/br/pkg/lightning/backend/importer/importer.go @@ -24,20 +24,21 @@ import ( "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/import_kvpb" + "github.com/tikv/client-go/v2/oracle" + pd "github.com/tikv/pd/client" + "go.uber.org/zap" + "google.golang.org/grpc" + "github.com/pingcap/tidb/br/pkg/lightning/backend" "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/tikv" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" "github.com/pingcap/tidb/br/pkg/version" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/table" - "github.com/tikv/client-go/v2/oracle" - pd "github.com/tikv/pd/client" - "go.uber.org/zap" - "google.golang.org/grpc" ) const ( @@ -255,7 +256,7 @@ outside: switch { case err == nil: continue outside - case utils.IsRetryableError(err): + case utildb.IsRetryableError(err): // retry next loop default: return err diff --git a/br/pkg/lightning/backend/kv/kv2sql.go b/br/pkg/lightning/backend/kv/kv2sql.go index a3c188a81eea7..0a7963602b18d 100644 --- a/br/pkg/lightning/backend/kv/kv2sql.go +++ b/br/pkg/lightning/backend/kv/kv2sql.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/table/tables/context" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" ) @@ -115,7 +116,7 @@ func NewTableKVDecoder(tbl table.Table, tableName string, options *SessionOption se := newSession(options) cols := tbl.Cols() // Set CommonAddRecordCtx to session to reuse the slices and BufStore in AddRecord - recordCtx := tables.NewCommonAddRecordCtx(len(cols)) + recordCtx := context.NewCommonAddRecordCtx(len(cols)) tables.SetAddRecordCtx(se, recordCtx) genCols, err := collectGeneratedColumns(se, tbl.Meta(), cols) diff --git a/br/pkg/lightning/backend/kv/session.go b/br/pkg/lightning/backend/kv/session.go index bab10d97be600..41c4604d16d59 100644 --- a/br/pkg/lightning/backend/kv/session.go +++ b/br/pkg/lightning/backend/kv/session.go @@ -24,16 +24,17 @@ import ( "sync" "github.com/docker/go-units" + "go.uber.org/zap" + "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/manual" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilmath" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" - "go.uber.org/zap" ) // invalidIterator is a trimmed down Iterator type which is invalid. @@ -96,7 +97,7 @@ func (mb *kvMemBuf) Recycle(buf *bytesBuf) { func (mb *kvMemBuf) AllocateBuf(size int) { mb.Lock() - size = utils.MaxInt(units.MiB, int(utils.NextPowerOfTwo(int64(size)))*2) + size = utilmath.MaxInt(units.MiB, int(utilmath.NextPowerOfTwo(int64(size)))*2) if len(mb.availableBufs) > 0 && mb.availableBufs[0].cap >= size { mb.buf = mb.availableBufs[0] mb.availableBufs = mb.availableBufs[1:] diff --git a/br/pkg/lightning/backend/kv/sql2kv.go b/br/pkg/lightning/backend/kv/sql2kv.go index 45fd0ab664f50..5b01d45b09dd8 100644 --- a/br/pkg/lightning/backend/kv/sql2kv.go +++ b/br/pkg/lightning/backend/kv/sql2kv.go @@ -24,6 +24,9 @@ import ( "sort" "github.com/pingcap/errors" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/metric" @@ -37,14 +40,10 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + tablecontext "github.com/pingcap/tidb/table/tables/context" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - - // Import tidb/planner/core to initialize expression.RewriteAstExpr - _ "github.com/pingcap/tidb/planner/core" ) var ExtraHandleColumnInfo = model.NewExtraHandleColInfo() @@ -71,7 +70,7 @@ func NewTableKVEncoder(tbl table.Table, options *SessionOptions) (Encoder, error cols := tbl.Cols() se := newSession(options) // Set CommonAddRecordCtx to session to reuse the slices and BufStore in AddRecord - recordCtx := tables.NewCommonAddRecordCtx(len(cols)) + recordCtx := tablecontext.NewCommonAddRecordCtx(len(cols)) tables.SetAddRecordCtx(se, recordCtx) autoIDFn := func(id int64) int64 { return id } diff --git a/br/pkg/lightning/backend/kv/sql2kv_test.go b/br/pkg/lightning/backend/kv/sql2kv_test.go index cc835a964714d..01a5846eddeaf 100644 --- a/br/pkg/lightning/backend/kv/sql2kv_test.go +++ b/br/pkg/lightning/backend/kv/sql2kv_test.go @@ -19,12 +19,14 @@ import ( "fmt" . "github.com/pingcap/check" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/verification" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" @@ -32,11 +34,8 @@ import ( "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" - "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/mock" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" ) func (s *kvSuite) TestMarshal(c *C) { @@ -308,119 +307,6 @@ func (s *kvSuite) TestEncodeTimestamp(c *C) { }}) } -func (s *kvSuite) TestEncodeDoubleAutoIncrement(c *C) { - tblInfo := mockTableInfo(c, "create table t (id double not null auto_increment, unique key `u_id` (`id`));") - tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) - c.Assert(err, IsNil) - - logger := log.Logger{Logger: zap.NewNop()} - - encoder, err := NewTableKVEncoder(tbl, &SessionOptions{ - SQLMode: mysql.ModeStrictAllTables, - SysVars: map[string]string{ - "tidb_row_format_version": "2", - }, - }) - c.Assert(err, IsNil) - pairs, err := encoder.Encode(logger, []types.Datum{ - types.NewStringDatum("1"), - }, 70, []int{0, -1}, "1.csv", 1234) - c.Assert(err, IsNil) - c.Assert(pairs, DeepEquals, &KvPairs{pairs: []common.KvPair{ - { - Key: []uint8{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x46}, - Val: []uint8{0x80, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x8, 0x0, 0xbf, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, - RowID: 70, - Offset: 1234, - }, - { - Key: []uint8{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5, 0xbf, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, - Val: []uint8{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x46}, - RowID: 70, - Offset: 1234, - }, - }}) - c.Assert(tbl.Allocators(encoder.(*tableKVEncoder).se).Get(autoid.AutoIncrementType).Base(), Equals, int64(70)) -} - -func mockTableInfo(c *C, createSQL string) *model.TableInfo { - parser := parser.New() - node, err := parser.ParseOneStmt(createSQL, "", "") - c.Assert(err, IsNil) - sctx := mock.NewContext() - info, err := ddl.MockTableInfo(sctx, node.(*ast.CreateTableStmt), 1) - c.Assert(err, IsNil) - info.State = model.StatePublic - return info -} - -func (s *kvSuite) TestDefaultAutoRandoms(c *C) { - tblInfo := mockTableInfo(c, "create table t (id bigint unsigned NOT NULL auto_random primary key clustered, a varchar(100));") - // seems parser can't parse auto_random properly. - tblInfo.AutoRandomBits = 5 - tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) - c.Assert(err, IsNil) - encoder, err := NewTableKVEncoder(tbl, &SessionOptions{ - SQLMode: mysql.ModeStrictAllTables, - Timestamp: 1234567893, - SysVars: map[string]string{"tidb_row_format_version": "2"}, - AutoRandomSeed: 456, - }) - c.Assert(err, IsNil) - logger := log.Logger{Logger: zap.NewNop()} - pairs, err := encoder.Encode(logger, []types.Datum{types.NewStringDatum("")}, 70, []int{-1, 0}, "1.csv", 1234) - c.Assert(err, IsNil) - c.Assert(pairs, DeepEquals, &KvPairs{pairs: []common.KvPair{ - { - Key: []uint8{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x46}, - Val: []uint8{0x80, 0x0, 0x1, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0}, - RowID: 70, - Offset: 1234, - }, - }}) - c.Assert(tbl.Allocators(encoder.(*tableKVEncoder).se).Get(autoid.AutoRandomType).Base(), Equals, int64(70)) - - pairs, err = encoder.Encode(logger, []types.Datum{types.NewStringDatum("")}, 71, []int{-1, 0}, "1.csv", 1234) - c.Assert(err, IsNil) - c.Assert(pairs, DeepEquals, &KvPairs{pairs: []common.KvPair{ - { - Key: []uint8{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x47}, - Val: []uint8{0x80, 0x0, 0x1, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0}, - RowID: 71, - Offset: 1234, - }, - }}) - c.Assert(tbl.Allocators(encoder.(*tableKVEncoder).se).Get(autoid.AutoRandomType).Base(), Equals, int64(71)) -} - -func (s *kvSuite) TestShardRowId(c *C) { - tblInfo := mockTableInfo(c, "create table t (s varchar(16)) shard_row_id_bits = 3;") - tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) - c.Assert(err, IsNil) - encoder, err := NewTableKVEncoder(tbl, &SessionOptions{ - SQLMode: mysql.ModeStrictAllTables, - Timestamp: 1234567893, - SysVars: map[string]string{"tidb_row_format_version": "2"}, - AutoRandomSeed: 456, - }) - c.Assert(err, IsNil) - logger := log.Logger{Logger: zap.NewNop()} - keyMap := make(map[int64]struct{}, 16) - for i := int64(1); i <= 32; i++ { - pairs, err := encoder.Encode(logger, []types.Datum{types.NewStringDatum(fmt.Sprintf("%d", i))}, i, []int{0, -1}, "1.csv", i*32) - c.Assert(err, IsNil) - kvs := pairs.(*KvPairs) - c.Assert(len(kvs.pairs), Equals, 1) - _, h, err := tablecodec.DecodeRecordKey(kvs.pairs[0].Key) - c.Assert(err, IsNil) - rowID := h.IntValue() - c.Assert(rowID&((1<<60)-1), Equals, i) - keyMap[rowID>>60] = struct{}{} - } - c.Assert(len(keyMap), Equals, 8) - c.Assert(tbl.Allocators(encoder.(*tableKVEncoder).se).Get(autoid.RowIDAllocType).Base(), Equals, int64(32)) -} - func (s *kvSuite) TestSplitIntoChunks(c *C) { pairs := []common.KvPair{ { diff --git a/br/pkg/lightning/backend/local/local.go b/br/pkg/lightning/backend/local/local.go index 030c53b1509c3..56eb60f352b2f 100644 --- a/br/pkg/lightning/backend/local/local.go +++ b/br/pkg/lightning/backend/local/local.go @@ -42,6 +42,20 @@ import ( sst "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/tikv/client-go/v2/oracle" + tikvclient "github.com/tikv/client-go/v2/tikv" + pd "github.com/tikv/pd/client" + "go.uber.org/atomic" + "go.uber.org/multierr" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" + "github.com/pingcap/tidb/br/pkg/lightning/backend" "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" @@ -58,7 +72,7 @@ import ( "github.com/pingcap/tidb/br/pkg/membuf" "github.com/pingcap/tidb/br/pkg/pdutil" split "github.com/pingcap/tidb/br/pkg/restore" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilmath" "github.com/pingcap/tidb/br/pkg/version" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -66,19 +80,6 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/hack" - "github.com/tikv/client-go/v2/oracle" - tikvclient "github.com/tikv/client-go/v2/tikv" - pd "github.com/tikv/pd/client" - "go.uber.org/atomic" - "go.uber.org/multierr" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" - "google.golang.org/grpc/backoff" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/status" ) const ( @@ -970,7 +971,7 @@ func NewLocalBackend( tcpConcurrency: rangeConcurrency, batchWriteKVPairs: cfg.TikvImporter.SendKVPairs, checkpointEnabled: cfg.Checkpoint.Enable, - maxOpenFiles: utils.MaxInt(maxOpenFiles, openFilesLowerThreshold), + maxOpenFiles: utilmath.MaxInt(maxOpenFiles, openFilesLowerThreshold), engineMemCacheSize: int(cfg.TikvImporter.EngineMemCacheSize), localWriterMemCacheSize: int64(cfg.TikvImporter.LocalWriterMemCacheSize), @@ -1857,7 +1858,7 @@ loopWrite: for i := 0; i < len(metas); i += batch { start := i * batch - end := utils.MinInt((i+1)*batch, len(metas)) + end := utilmath.MinInt((i+1)*batch, len(metas)) ingestMetas := metas[start:end] errCnt := 0 for errCnt < maxRetryTimes { diff --git a/br/pkg/lightning/backend/local/localhelper.go b/br/pkg/lightning/backend/local/localhelper.go index bc7a7a65d2a4c..e705e3b7b42b4 100644 --- a/br/pkg/lightning/backend/local/localhelper.go +++ b/br/pkg/lightning/backend/local/localhelper.go @@ -30,16 +30,17 @@ import ( sst "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "go.uber.org/multierr" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/logutil" split "github.com/pingcap/tidb/br/pkg/restore" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilmath" "github.com/pingcap/tidb/util/codec" - "go.uber.org/multierr" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" ) const ( @@ -185,7 +186,7 @@ func (local *local) SplitAndScatterRegionByRanges( var syncLock sync.Mutex // TODO, make this size configurable - size := utils.MinInt(len(splitKeyMap), runtime.GOMAXPROCS(0)) + size := utilmath.MinInt(len(splitKeyMap), runtime.GOMAXPROCS(0)) ch := make(chan *splitInfo, size) eg, splitCtx := errgroup.WithContext(ctx) diff --git a/br/pkg/lightning/backend/tidb/tidb.go b/br/pkg/lightning/backend/tidb/tidb.go index 938d2bae72d9e..fc49ddd1f1137 100644 --- a/br/pkg/lightning/backend/tidb/tidb.go +++ b/br/pkg/lightning/backend/tidb/tidb.go @@ -26,6 +26,9 @@ import ( "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "github.com/pingcap/tidb/br/pkg/lightning/backend" "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" "github.com/pingcap/tidb/br/pkg/lightning/common" @@ -34,15 +37,13 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/verification" "github.com/pingcap/tidb/br/pkg/redact" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" "github.com/pingcap/tidb/br/pkg/version" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" ) var extraHandleTableColumn = &table.Column{ @@ -420,7 +421,7 @@ rowLoop: switch { case err == nil: continue rowLoop - case utils.IsRetryableError(err): + case utildb.IsRetryableError(err): // retry next loop default: // WriteBatchRowsToDB failed in the batch mode and can not be retried, @@ -533,7 +534,7 @@ func (be *tidbBackend) execStmts(ctx context.Context, stmtTasks []stmtTask, tabl return errors.Trace(err) } // Retry the non-batch insert here if this is not the last retry. - if utils.IsRetryableError(err) && i != writeRowsMaxRetryTimes-1 { + if utildb.IsRetryableError(err) && i != writeRowsMaxRetryTimes-1 { continue } firstRow := stmtTask.rows[0] @@ -702,7 +703,7 @@ type TableAutoIDInfo struct { Type string } -func FetchTableAutoIDInfos(ctx context.Context, exec utils.QueryExecutor, tableName string) ([]*TableAutoIDInfo, error) { +func FetchTableAutoIDInfos(ctx context.Context, exec utildb.QueryExecutor, tableName string) ([]*TableAutoIDInfo, error) { rows, e := exec.QueryContext(ctx, fmt.Sprintf("SHOW TABLE %s NEXT_ROW_ID", tableName)) if e != nil { return nil, errors.Trace(e) diff --git a/br/pkg/lightning/common/util.go b/br/pkg/lightning/common/util.go index f8345b22e6500..7ee43b1f34327 100644 --- a/br/pkg/lightning/common/util.go +++ b/br/pkg/lightning/common/util.go @@ -28,10 +28,11 @@ import ( "time" "github.com/pingcap/errors" + "go.uber.org/zap" + "github.com/pingcap/tidb/br/pkg/lightning/log" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" "github.com/pingcap/tidb/parser/model" - "go.uber.org/zap" ) const ( @@ -94,7 +95,7 @@ func IsEmptyDir(name string) bool { // SQLWithRetry constructs a retryable transaction. type SQLWithRetry struct { // either *sql.DB or *sql.Conn - DB utils.DBExecutor + DB utildb.DBExecutor Logger log.Logger HideQueryLog bool } @@ -122,7 +123,7 @@ outside: // do not retry NotFound error case errors.IsNotFound(err): break outside - case utils.IsRetryableError(err): + case utildb.IsRetryableError(err): logger.Warn(purpose+" failed but going to try again", log.ShortError(err)) continue default: diff --git a/br/pkg/lightning/mydump/csv_parser.go b/br/pkg/lightning/mydump/csv_parser.go index 87cbdcfa0d16a..6f621791de8dd 100644 --- a/br/pkg/lightning/mydump/csv_parser.go +++ b/br/pkg/lightning/mydump/csv_parser.go @@ -20,9 +20,10 @@ import ( "strings" "github.com/pingcap/errors" + "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/lightning/worker" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilmath" "github.com/pingcap/tidb/types" ) @@ -213,7 +214,7 @@ func (parser *CSVParser) peekBytes(cnt int) ([]byte, error) { if len(parser.buf) == 0 { return nil, io.EOF } - cnt = utils.MinInt(cnt, len(parser.buf)) + cnt = utilmath.MinInt(cnt, len(parser.buf)) return parser.buf[:cnt], nil } diff --git a/br/pkg/lightning/mydump/region.go b/br/pkg/lightning/mydump/region.go index e694f25f743b1..9885013079c23 100644 --- a/br/pkg/lightning/mydump/region.go +++ b/br/pkg/lightning/mydump/region.go @@ -22,11 +22,13 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/worker" "github.com/pingcap/tidb/br/pkg/storage" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilmath" + "go.uber.org/zap" ) @@ -154,7 +156,7 @@ func MakeTableRegions( execCtx, cancel := context.WithCancel(ctx) defer cancel() - concurrency := utils.MaxInt(cfg.App.RegionConcurrency, 2) + concurrency := utilmath.MaxInt(cfg.App.RegionConcurrency, 2) fileChan := make(chan FileInfo, concurrency) resultChan := make(chan fileRegionRes, concurrency) var wg sync.WaitGroup diff --git a/br/pkg/lightning/restore/checksum.go b/br/pkg/lightning/restore/checksum.go index 5b7a5b1501af4..36d4089634e86 100644 --- a/br/pkg/lightning/restore/checksum.go +++ b/br/pkg/lightning/restore/checksum.go @@ -26,6 +26,11 @@ import ( "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tipb/go-tipb" + "github.com/tikv/client-go/v2/oracle" + pd "github.com/tikv/pd/client" + "go.uber.org/zap" + "github.com/pingcap/tidb/br/pkg/checksum" "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" "github.com/pingcap/tidb/br/pkg/lightning/common" @@ -33,14 +38,11 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/metric" "github.com/pingcap/tidb/br/pkg/pdutil" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" + "github.com/pingcap/tidb/br/pkg/utils/utilmath" tidbcfg "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/driver" - "github.com/pingcap/tipb/go-tipb" - "github.com/tikv/client-go/v2/oracle" - pd "github.com/tikv/pd/client" - "go.uber.org/zap" ) const ( @@ -314,11 +316,11 @@ func (e *tikvChecksumManager) checksumDB(ctx context.Context, tableInfo *checkpo zap.Int("concurrency", distSQLScanConcurrency), zap.Int("retry", i)) // do not retry context.Canceled error - if !utils.IsRetryableError(err) { + if !utildb.IsRetryableError(err) { break } if distSQLScanConcurrency > minDistSQLScanConcurrency { - distSQLScanConcurrency = utils.MaxInt(distSQLScanConcurrency/2, minDistSQLScanConcurrency) + distSQLScanConcurrency = utilmath.MaxInt(distSQLScanConcurrency/2, minDistSQLScanConcurrency) } } diff --git a/br/pkg/lightning/restore/restore.go b/br/pkg/lightning/restore/restore.go index 0ea46ea67bf14..37eed2f228aff 100644 --- a/br/pkg/lightning/restore/restore.go +++ b/br/pkg/lightning/restore/restore.go @@ -31,6 +31,12 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" sstpb "github.com/pingcap/kvproto/pkg/import_sstpb" + pd "github.com/tikv/pd/client" + "go.uber.org/atomic" + "go.uber.org/multierr" + "go.uber.org/zap" + "modernc.org/mathutil" + berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/lightning/backend" "github.com/pingcap/tidb/br/pkg/lightning/backend/importer" @@ -51,17 +57,12 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/worker" "github.com/pingcap/tidb/br/pkg/pdutil" "github.com/pingcap/tidb/br/pkg/storage" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilmath" "github.com/pingcap/tidb/br/pkg/version" "github.com/pingcap/tidb/br/pkg/version/build" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/util/collate" - pd "github.com/tikv/pd/client" - "go.uber.org/atomic" - "go.uber.org/multierr" - "go.uber.org/zap" - "modernc.org/mathutil" ) const ( @@ -742,7 +743,7 @@ func (rc *Controller) restoreSchema(ctx context.Context) error { // we can handle the duplicated created with createIfNotExist statement // and we will check the schema in TiDB is valid with the datafile in DataCheck later. logTask := log.L().Begin(zap.InfoLevel, "restore all schema") - concurrency := utils.MinInt(rc.cfg.App.RegionConcurrency, 8) + concurrency := utilmath.MinInt(rc.cfg.App.RegionConcurrency, 8) childCtx, cancel := context.WithCancel(ctx) worker := restoreSchemaWorker{ ctx: childCtx, diff --git a/br/pkg/lightning/restore/table_restore.go b/br/pkg/lightning/restore/table_restore.go index 8da5a210ce885..3c54334870832 100644 --- a/br/pkg/lightning/restore/table_restore.go +++ b/br/pkg/lightning/restore/table_restore.go @@ -22,6 +22,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "go.uber.org/multierr" + "go.uber.org/zap" + "github.com/pingcap/tidb/br/pkg/lightning/backend" "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" @@ -33,14 +36,12 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/mydump" verify "github.com/pingcap/tidb/br/pkg/lightning/verification" "github.com/pingcap/tidb/br/pkg/lightning/worker" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilmath" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" - "go.uber.org/multierr" - "go.uber.org/zap" ) type TableRestore struct { @@ -917,7 +918,7 @@ func (tr *TableRestore) importKV( regionSplitSize = int64(config.SplitRegionSize) rc.taskMgr.CheckTasksExclusively(ctx, func(tasks []taskMeta) ([]taskMeta, error) { if len(tasks) > 0 { - regionSplitSize = int64(config.SplitRegionSize) * int64(utils.MinInt(len(tasks), config.MaxSplitRegionSizeRatio)) + regionSplitSize = int64(config.SplitRegionSize) * int64(utilmath.MinInt(len(tasks), config.MaxSplitRegionSizeRatio)) } return nil, nil }) @@ -992,7 +993,7 @@ func estimateCompactionThreshold(cp *checkpoints.TableCheckpoint, factor int64) // try restrict the total file number within 512 threshold := totalRawFileSize / 512 - threshold = utils.NextPowerOfTwo(threshold) + threshold = utilmath.NextPowerOfTwo(threshold) if threshold < compactionLowerThreshold { // disable compaction if threshold is smaller than lower bound threshold = 0 diff --git a/br/pkg/restore/client.go b/br/pkg/restore/client.go index 6c9e0b586845d..00ff931856b12 100644 --- a/br/pkg/restore/client.go +++ b/br/pkg/restore/client.go @@ -21,6 +21,16 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" + "github.com/tikv/client-go/v2/oracle" + pd "github.com/tikv/pd/client" + "github.com/tikv/pd/server/schedule/placement" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "github.com/pingcap/tidb/br/pkg/checksum" "github.com/pingcap/tidb/br/pkg/conn" berrors "github.com/pingcap/tidb/br/pkg/errors" @@ -32,21 +42,13 @@ import ( "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/statistics/handle" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" - "github.com/tikv/client-go/v2/oracle" - pd "github.com/tikv/pd/client" - "github.com/tikv/pd/server/schedule/placement" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" - "google.golang.org/grpc/backoff" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/keepalive" ) // defaultChecksumConcurrency is the default number of the concurrent @@ -302,7 +304,7 @@ func (rc *Client) ResetTS(ctx context.Context, pdAddrs []string) error { restoreTS := rc.backupMeta.GetEndVersion() log.Info("reset pd timestamp", zap.Uint64("ts", restoreTS)) i := 0 - return utils.WithRetry(ctx, func() error { + return utildb.WithRetry(ctx, func() error { idx := i % len(pdAddrs) i++ return pdutil.ResetTS(ctx, pdAddrs[idx], restoreTS, rc.tlsConf) @@ -313,7 +315,7 @@ func (rc *Client) ResetTS(ctx context.Context, pdAddrs []string) error { func (rc *Client) GetPlacementRules(ctx context.Context, pdAddrs []string) ([]placement.Rule, error) { var placementRules []placement.Rule i := 0 - errRetry := utils.WithRetry(ctx, func() error { + errRetry := utildb.WithRetry(ctx, func() error { var err error idx := i % len(pdAddrs) i++ diff --git a/br/pkg/restore/import.go b/br/pkg/restore/import.go index eb30e529e97bf..f58fc334db581 100644 --- a/br/pkg/restore/import.go +++ b/br/pkg/restore/import.go @@ -18,11 +18,14 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" + "github.com/pingcap/tidb/br/pkg/conn" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" + pd "github.com/tikv/pd/client" "go.uber.org/multierr" "go.uber.org/zap" @@ -292,7 +295,7 @@ func (importer *FileImporter) Import( logutil.Key("startKey", startKey), logutil.Key("endKey", endKey)) - err := utils.WithRetry(ctx, func() error { + err := utildb.WithRetry(ctx, func() error { tctx, cancel := context.WithTimeout(ctx, importScanRegionTime) defer cancel() // Scan regions covered by the file range @@ -310,7 +313,7 @@ func (importer *FileImporter) Import( // Try to download file. downloadMetas := make([]*import_sstpb.SSTMeta, 0, len(files)) remainFiles := files - errDownload := utils.WithRetry(ctx, func() error { + errDownload := utildb.WithRetry(ctx, func() error { var e error for i, f := range remainFiles { var downloadMeta *import_sstpb.SSTMeta diff --git a/br/pkg/restore/split.go b/br/pkg/restore/split.go index 27e14c5bc83a5..b16d4225b5041 100644 --- a/br/pkg/restore/split.go +++ b/br/pkg/restore/split.go @@ -16,11 +16,13 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/log" + berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/redact" "github.com/pingcap/tidb/br/pkg/rtree" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" + "github.com/tikv/pd/pkg/codec" "go.uber.org/multierr" "go.uber.org/zap" @@ -284,13 +286,13 @@ func (rs *RegionSplitter) splitAndScatterRegions( // ScatterRegionsWithBackoffer scatter the region with some backoffer. // This function is for testing the retry mechanism. // For a real cluster, directly use ScatterRegions would be fine. -func (rs *RegionSplitter) ScatterRegionsWithBackoffer(ctx context.Context, newRegions []*RegionInfo, backoffer utils.Backoffer) { +func (rs *RegionSplitter) ScatterRegionsWithBackoffer(ctx context.Context, newRegions []*RegionInfo, backoffer utildb.Backoffer) { newRegionSet := make(map[uint64]*RegionInfo, len(newRegions)) for _, newRegion := range newRegions { newRegionSet[newRegion.Region.Id] = newRegion } - if err := utils.WithRetry(ctx, func() error { + if err := utildb.WithRetry(ctx, func() error { log.Info("trying to scatter regions...", zap.Int("remain", len(newRegionSet))) var errs error for _, region := range newRegionSet { @@ -378,7 +380,7 @@ func PaginateScanRegion( } var regions []*RegionInfo - err := utils.WithRetry(ctx, func() error { + err := utildb.WithRetry(ctx, func() error { regions = []*RegionInfo{} scanStartKey := startKey for { @@ -412,7 +414,7 @@ type scanRegionBackoffer struct { attempt int } -func newScanRegionBackoffer() utils.Backoffer { +func newScanRegionBackoffer() utildb.Backoffer { return &scanRegionBackoffer{ attempt: 3, } diff --git a/br/pkg/restore/split_test.go b/br/pkg/restore/split_test.go index 7a39785af2cb9..a7b02e9e4e29d 100644 --- a/br/pkg/restore/split_test.go +++ b/br/pkg/restore/split_test.go @@ -14,15 +14,16 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" - "github.com/pingcap/tidb/br/pkg/restore" - "github.com/pingcap/tidb/br/pkg/rtree" - "github.com/pingcap/tidb/br/pkg/utils" - "github.com/pingcap/tidb/util/codec" "github.com/stretchr/testify/require" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/schedule/placement" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + "github.com/pingcap/tidb/br/pkg/restore" + "github.com/pingcap/tidb/br/pkg/rtree" + "github.com/pingcap/tidb/br/pkg/utils/utildb" + "github.com/pingcap/tidb/util/codec" ) type TestClient struct { @@ -213,7 +214,7 @@ type assertRetryLessThanBackoffer struct { t *testing.T } -func assertRetryLessThan(t *testing.T, times int) utils.Backoffer { +func assertRetryLessThan(t *testing.T, times int) utildb.Backoffer { return &assertRetryLessThanBackoffer{ max: times, already: 0, diff --git a/br/pkg/task/backup.go b/br/pkg/task/backup.go index 7a9037c20f80c..f6d24e5c11ba4 100644 --- a/br/pkg/task/backup.go +++ b/br/pkg/task/backup.go @@ -16,6 +16,10 @@ import ( "github.com/pingcap/failpoint" backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" + "github.com/spf13/pflag" + "github.com/tikv/client-go/v2/oracle" + "go.uber.org/zap" + "github.com/pingcap/tidb/br/pkg/backup" "github.com/pingcap/tidb/br/pkg/checksum" berrors "github.com/pingcap/tidb/br/pkg/errors" @@ -25,13 +29,11 @@ import ( "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilmath" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/statistics/handle" "github.com/pingcap/tidb/types" - "github.com/spf13/pflag" - "github.com/tikv/client-go/v2/oracle" - "go.uber.org/zap" ) const ( @@ -450,7 +452,7 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig } } updateCh = g.StartProgress(ctx, "Checksum", checksumProgress, !cfg.LogProgress) - schemasConcurrency := uint(utils.MinInt(backup.DefaultSchemaConcurrency, schemas.Len())) + schemasConcurrency := uint(utilmath.MinInt(backup.DefaultSchemaConcurrency, schemas.Len())) err = schemas.BackupSchemas( ctx, metawriter, mgr.GetStorage(), statsHandle, backupTS, schemasConcurrency, cfg.ChecksumConcurrency, skipChecksum, updateCh) diff --git a/br/pkg/task/restore.go b/br/pkg/task/restore.go index a80549d005905..3f12ceaeb4425 100644 --- a/br/pkg/task/restore.go +++ b/br/pkg/task/restore.go @@ -11,6 +11,10 @@ import ( "github.com/pingcap/failpoint" backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" + "github.com/spf13/pflag" + "go.uber.org/multierr" + "go.uber.org/zap" + "github.com/pingcap/tidb/br/pkg/conn" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/glue" @@ -20,11 +24,9 @@ import ( "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilmath" "github.com/pingcap/tidb/br/pkg/version" "github.com/pingcap/tidb/config" - "github.com/spf13/pflag" - "go.uber.org/multierr" - "go.uber.org/zap" ) const ( @@ -415,7 +417,7 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf } // Restore sst files in batch. - batchSize := utils.ClampInt(int(cfg.Concurrency), defaultRestoreConcurrency, maxRestoreBatchSizeLimit) + batchSize := utilmath.ClampInt(int(cfg.Concurrency), defaultRestoreConcurrency, maxRestoreBatchSizeLimit) failpoint.Inject("small-batch-size", func(v failpoint.Value) { log.Info("failpoint small batch size is on", zap.Int("size", v.(int))) batchSize = v.(int) diff --git a/br/pkg/utils/backoff.go b/br/pkg/utils/backoff.go index 5a21ad8f26a98..0b2c1f9122d0f 100644 --- a/br/pkg/utils/backoff.go +++ b/br/pkg/utils/backoff.go @@ -10,7 +10,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" + berrors "github.com/pingcap/tidb/br/pkg/errors" + "github.com/pingcap/tidb/br/pkg/utils/utildb" + "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -37,7 +40,7 @@ type importerBackoffer struct { } // NewBackoffer creates a new controller regulating a truncated exponential backoff. -func NewBackoffer(attempt int, delayTime, maxDelayTime time.Duration) Backoffer { +func NewBackoffer(attempt int, delayTime, maxDelayTime time.Duration) utildb.Backoffer { return &importerBackoffer{ attempt: attempt, delayTime: delayTime, @@ -45,16 +48,16 @@ func NewBackoffer(attempt int, delayTime, maxDelayTime time.Duration) Backoffer } } -func NewImportSSTBackoffer() Backoffer { +func NewImportSSTBackoffer() utildb.Backoffer { return NewBackoffer(importSSTRetryTimes, importSSTWaitInterval, importSSTMaxWaitInterval) } -func NewDownloadSSTBackoffer() Backoffer { +func NewDownloadSSTBackoffer() utildb.Backoffer { return NewBackoffer(downloadSSTRetryTimes, downloadSSTWaitInterval, downloadSSTMaxWaitInterval) } func (bo *importerBackoffer) NextBackoff(err error) time.Duration { - if MessageIsRetryableStorageError(err.Error()) { + if utildb.MessageIsRetryableStorageError(err.Error()) { bo.delayTime = 2 * bo.delayTime bo.attempt-- } else { @@ -96,7 +99,7 @@ type pdReqBackoffer struct { maxDelayTime time.Duration } -func NewPDReqBackoffer() Backoffer { +func NewPDReqBackoffer() utildb.Backoffer { return &pdReqBackoffer{ attempt: resetTSRetryTime, delayTime: resetTSWaitInterval, diff --git a/br/pkg/utils/backoff_test.go b/br/pkg/utils/backoff_test.go index 31b1a0214d255..af3d0ea1e03cf 100644 --- a/br/pkg/utils/backoff_test.go +++ b/br/pkg/utils/backoff_test.go @@ -7,13 +7,15 @@ import ( "time" . "github.com/pingcap/check" + "go.uber.org/multierr" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/mock" "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" "github.com/pingcap/tidb/util/testleak" - "go.uber.org/multierr" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) var _ = Suite(&testBackofferSuite{}) @@ -35,7 +37,7 @@ func (s *testBackofferSuite) TearDownSuite(c *C) { func (s *testBackofferSuite) TestBackoffWithSuccess(c *C) { var counter int backoffer := utils.NewBackoffer(10, time.Nanosecond, time.Nanosecond) - err := utils.WithRetry(context.Background(), func() error { + err := utildb.WithRetry(context.Background(), func() error { defer func() { counter++ }() switch counter { case 0: @@ -55,7 +57,7 @@ func (s *testBackofferSuite) TestBackoffWithFatalError(c *C) { var counter int backoffer := utils.NewBackoffer(10, time.Nanosecond, time.Nanosecond) gRPCError := status.Error(codes.Unavailable, "transport is closing") - err := utils.WithRetry(context.Background(), func() error { + err := utildb.WithRetry(context.Background(), func() error { defer func() { counter++ }() switch counter { case 0: @@ -82,7 +84,7 @@ func (s *testBackofferSuite) TestBackoffWithFatalRawGRPCError(c *C) { var counter int canceledError := status.Error(codes.Canceled, "context canceled") backoffer := utils.NewBackoffer(10, time.Nanosecond, time.Nanosecond) - err := utils.WithRetry(context.Background(), func() error { + err := utildb.WithRetry(context.Background(), func() error { defer func() { counter++ }() return canceledError // nolint:wrapcheck }, backoffer) @@ -95,7 +97,7 @@ func (s *testBackofferSuite) TestBackoffWithFatalRawGRPCError(c *C) { func (s *testBackofferSuite) TestBackoffWithRetryableError(c *C) { var counter int backoffer := utils.NewBackoffer(10, time.Nanosecond, time.Nanosecond) - err := utils.WithRetry(context.Background(), func() error { + err := utildb.WithRetry(context.Background(), func() error { defer func() { counter++ }() return berrors.ErrKVEpochNotMatch }, backoffer) @@ -118,7 +120,7 @@ func (s *testBackofferSuite) TestPdBackoffWithRetryableError(c *C) { var counter int backoffer := utils.NewPDReqBackoffer() gRPCError := status.Error(codes.Unavailable, "transport is closing") - err := utils.WithRetry(context.Background(), func() error { + err := utildb.WithRetry(context.Background(), func() error { defer func() { counter++ }() return gRPCError }, backoffer) diff --git a/br/pkg/utils/db.go b/br/pkg/utils/utildb/db.go similarity index 98% rename from br/pkg/utils/db.go rename to br/pkg/utils/utildb/db.go index 346aca6157dbb..537cda6b04dbf 100644 --- a/br/pkg/utils/db.go +++ b/br/pkg/utils/utildb/db.go @@ -1,6 +1,6 @@ // Copyright 2021 PingCAP, Inc. Licensed under Apache-2.0. -package utils +package utildb import ( "context" diff --git a/br/pkg/utils/retry.go b/br/pkg/utils/utildb/retry.go similarity index 99% rename from br/pkg/utils/retry.go rename to br/pkg/utils/utildb/retry.go index a076190b953d6..cf6a4f77e5b37 100644 --- a/br/pkg/utils/retry.go +++ b/br/pkg/utils/utildb/retry.go @@ -1,6 +1,6 @@ // Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. -package utils +package utildb import ( "context" @@ -15,10 +15,11 @@ import ( "github.com/go-sql-driver/mysql" "github.com/pingcap/errors" - tmysql "github.com/pingcap/tidb/errno" "go.uber.org/multierr" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + tmysql "github.com/pingcap/tidb/errno" ) var retryableServerError = []string{ diff --git a/br/pkg/utils/retry_test.go b/br/pkg/utils/utildb/retry_test.go similarity index 99% rename from br/pkg/utils/retry_test.go rename to br/pkg/utils/utildb/retry_test.go index b5c54287f1cce..2523d804d5f26 100644 --- a/br/pkg/utils/retry_test.go +++ b/br/pkg/utils/utildb/retry_test.go @@ -1,4 +1,4 @@ -package utils +package utildb import ( "context" @@ -9,10 +9,11 @@ import ( "github.com/go-sql-driver/mysql" . "github.com/pingcap/check" "github.com/pingcap/errors" - tmysql "github.com/pingcap/tidb/errno" "go.uber.org/multierr" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + tmysql "github.com/pingcap/tidb/errno" ) type utilSuite struct{} diff --git a/br/pkg/utils/math.go b/br/pkg/utils/utilmath/math.go similarity index 98% rename from br/pkg/utils/math.go rename to br/pkg/utils/utilmath/math.go index d04369bcfe07d..97c7ff0786d81 100644 --- a/br/pkg/utils/math.go +++ b/br/pkg/utils/utilmath/math.go @@ -1,6 +1,6 @@ // Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. -package utils +package utilmath import ( "github.com/pingcap/log" diff --git a/br/pkg/utils/math_test.go b/br/pkg/utils/utilmath/math_test.go similarity index 98% rename from br/pkg/utils/math_test.go rename to br/pkg/utils/utilmath/math_test.go index 35933c209e4cd..77293e4c0bc7b 100644 --- a/br/pkg/utils/math_test.go +++ b/br/pkg/utils/utilmath/math_test.go @@ -1,6 +1,6 @@ // Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. -package utils +package utilmath import ( . "github.com/pingcap/check" diff --git a/br/pkg/version/version.go b/br/pkg/version/version.go index a35fa222eda1e..561a6ca87978f 100644 --- a/br/pkg/version/version.go +++ b/br/pkg/version/version.go @@ -13,11 +13,12 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" - berrors "github.com/pingcap/tidb/br/pkg/errors" - "github.com/pingcap/tidb/br/pkg/utils" - "github.com/pingcap/tidb/br/pkg/version/build" pd "github.com/tikv/pd/client" "go.uber.org/zap" + + berrors "github.com/pingcap/tidb/br/pkg/errors" + "github.com/pingcap/tidb/br/pkg/utils/utildb" + "github.com/pingcap/tidb/br/pkg/version/build" ) var ( @@ -241,7 +242,7 @@ func NormalizeBackupVersion(version string) *semver.Version { } // FetchVersion gets the version information from the database server -func FetchVersion(ctx context.Context, db utils.QueryExecutor) (string, error) { +func FetchVersion(ctx context.Context, db utildb.QueryExecutor) (string, error) { var versionInfo string const query = "SELECT version();" row := db.QueryRowContext(ctx, query) diff --git a/ddl/sst/glue/lightning_glue.go b/ddl/sst/glue/lightning_glue.go new file mode 100644 index 0000000000000..7dc475ad7cfbb --- /dev/null +++ b/ddl/sst/glue/lightning_glue.go @@ -0,0 +1,4 @@ +package glue + +type engineGlue interface { +} diff --git a/ddl/sst/index.go b/ddl/sst/index.go index 31c417420c108..77b2fa37743ac 100644 --- a/ddl/sst/index.go +++ b/ddl/sst/index.go @@ -5,7 +5,12 @@ import ( "encoding/binary" "flag" "fmt" + "sync" + "sync/atomic" + "github.com/pingcap/errors" + "github.com/twmb/murmur3" + "github.com/pingcap/tidb/br/pkg/lightning/backend" "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" @@ -13,9 +18,6 @@ import ( tidbcfg "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/util/logutil" - "github.com/twmb/murmur3" - "sync" - "sync/atomic" ) func LogDebug(format string, a ...interface{}) { @@ -193,7 +195,7 @@ func RunIndexOpRoutine(ctx context.Context, engine *backend.OpenedEngine, kvs <- func FinishIndexOp(ctx context.Context, startTs uint64) error { LogDebug("FinishIndexOp %d", startTs) - ei,err := ec.getEngineInfo(startTs) + ei, err := ec.getEngineInfo(startTs) if err != nil { return err } diff --git a/dumpling/export/consistency.go b/dumpling/export/consistency.go index 7a0ba2252af33..a5cf2508b317f 100644 --- a/dumpling/export/consistency.go +++ b/dumpling/export/consistency.go @@ -8,7 +8,7 @@ import ( "github.com/pingcap/errors" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" tcontext "github.com/pingcap/tidb/dumpling/context" ) @@ -128,7 +128,7 @@ func (c *ConsistencyLockDumpingTables) Setup(tctx *tcontext.Context) error { } } blockList := make(map[string]map[string]interface{}) - return utils.WithRetry(tctx, func() error { + return utildb.WithRetry(tctx, func() error { lockTablesSQL := buildLockTablesSQL(c.conf.Tables, blockList) _, err := c.conn.ExecContext(tctx, lockTablesSQL) if err == nil { diff --git a/dumpling/export/writer.go b/dumpling/export/writer.go index 7790ece4489c2..7a45142b84e4a 100644 --- a/dumpling/export/writer.go +++ b/dumpling/export/writer.go @@ -13,7 +13,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/tidb/br/pkg/storage" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utildb" tcontext "github.com/pingcap/tidb/dumpling/context" ) @@ -157,7 +157,7 @@ func (w *Writer) WriteTableData(meta TableMeta, ir TableDataIR, currentChunk int tctx, conf, conn := w.tctx, w.conf, w.conn retryTime := 0 var lastErr error - return utils.WithRetry(tctx, func() (err error) { + return utildb.WithRetry(tctx, func() (err error) { defer func() { lastErr = err if err != nil { diff --git a/table/tables/context/add_context.go b/table/tables/context/add_context.go new file mode 100644 index 0000000000000..bbbcb431468b5 --- /dev/null +++ b/table/tables/context/add_context.go @@ -0,0 +1,33 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package context + +import "github.com/pingcap/tidb/types" + +// CommonAddRecordCtx is used in `AddRecord` to avoid memory malloc for some temp slices. +// This is useful in lightning parse Row data to key-values pairs. This can gain upto 5% performance +// improvement in lightning's local mode. +type CommonAddRecordCtx struct { + ColIDs []int64 + Row []types.Datum +} + +// NewCommonAddRecordCtx create a context used for `AddRecord` +func NewCommonAddRecordCtx(size int) *CommonAddRecordCtx { + return &CommonAddRecordCtx{ + ColIDs: make([]int64, 0, size), + Row: make([]types.Datum, 0, size), + } +} diff --git a/table/tables/partition.go b/table/tables/partition.go index aa63eec05b89f..3be224a609e5e 100644 --- a/table/tables/partition.go +++ b/table/tables/partition.go @@ -209,7 +209,7 @@ func parseSimpleExprWithNames(p *parser.Parser, ctx sessionctx.Context, exprStr // ForListPruning is used for list partition pruning. type ForListPruning struct { - // LocateExpr uses to locate list partition by row. + // LocateExpr uses to locate list partition by Row. LocateExpr expression.Expression // PruneExpr uses to prune list partition in partition pruner. PruneExpr expression.Expression @@ -944,7 +944,7 @@ func (t *partitionedTable) locateRangeColumnPartition(ctx sessionctx.Context, pi return true // Break the search. } if isNull { - // If the column value used to determine the partition is NULL, the row is inserted into the lowest partition. + // If the column value used to determine the partition is NULL, the Row is inserted into the lowest partition. // See https://dev.mysql.com/doc/mysql-partitioning-excerpt/5.7/en/partitioning-handling-nulls.html return true // Break the search. } diff --git a/table/tables/partition_test.go b/table/tables/partition_test.go index 8c3061c333aca..bba20d45f9a35 100644 --- a/table/tables/partition_test.go +++ b/table/tables/partition_test.go @@ -480,7 +480,7 @@ func TestCreatePartitionTableNotSupport(t *testing.T) { require.True(t, ddl.ErrPartitionFunctionIsNotAllowed.Equal(err)) _, err = tk.Exec(`create table t7 (a int) partition by range (1 + (select * from t)) (partition p1 values less than (1));`) require.True(t, ddl.ErrPartitionFunctionIsNotAllowed.Equal(err)) - _, err = tk.Exec(`create table t7 (a int) partition by range (a + row(1, 2, 3)) (partition p1 values less than (1));`) + _, err = tk.Exec(`create table t7 (a int) partition by range (a + Row(1, 2, 3)) (partition p1 values less than (1));`) require.True(t, ddl.ErrPartitionFunctionIsNotAllowed.Equal(err)) _, err = tk.Exec(`create table t7 (a int) partition by range (-(select * from t)) (partition p1 values less than (1));`) require.True(t, ddl.ErrPartitionFunctionIsNotAllowed.Equal(err)) @@ -619,10 +619,10 @@ func TestIssue24746(t *testing.T) { defer tk.MustExec("drop table t_24746") err := tk.ExecToErr("insert into t_24746 partition (p1) values(4,'ERROR, not matching partition p1',4)") require.True(t, table.ErrRowDoesNotMatchGivenPartitionSet.Equal(err)) - tk.MustExec("insert into t_24746 partition (p0) values(4,'OK, first row in correct partition',4)") + tk.MustExec("insert into t_24746 partition (p0) values(4,'OK, first Row in correct partition',4)") err = tk.ExecToErr("insert into t_24746 partition (p0) values(4,'DUPLICATE, in p0',4) on duplicate key update a = a + 1, b = 'ERROR, not allowed to write to p1'") require.True(t, table.ErrRowDoesNotMatchGivenPartitionSet.Equal(err)) - // Actual bug, before the fix this was updating the row in p0 (deleting it in p0 and inserting in p1): + // Actual bug, before the fix this was updating the Row in p0 (deleting it in p0 and inserting in p1): err = tk.ExecToErr("insert into t_24746 partition (p1) values(4,'ERROR, not allowed to read from partition p0',4) on duplicate key update a = a + 1, b = 'ERROR, not allowed to read from p0!'") require.True(t, table.ErrRowDoesNotMatchGivenPartitionSet.Equal(err)) } diff --git a/table/tables/tables.go b/table/tables/tables.go index 967b7a0eb12b0..8cdaee890970c 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -28,6 +28,10 @@ import ( "github.com/opentracing/opentracing-go" "github.com/pingcap/errors" + "github.com/pingcap/tipb/go-binlog" + "github.com/pingcap/tipb/go-tipb" + "go.uber.org/zap" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/meta/autoid" @@ -39,6 +43,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" + context2 "github.com/pingcap/tidb/table/tables/context" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" @@ -48,9 +53,6 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/stringutil" "github.com/pingcap/tidb/util/tableutil" - "github.com/pingcap/tipb/go-binlog" - "github.com/pingcap/tipb/go-tipb" - "go.uber.org/zap" ) // TableCommon is shared by both Table and partition. @@ -500,8 +502,8 @@ func (t *TableCommon) rebuildIndices(ctx sessionctx.Context, txn kv.Transaction, } // adjustRowValuesBuf adjust writeBufs.AddRowValues length, AddRowValues stores the inserting values that is used -// by tablecodec.EncodeRow, the encoded row format is `id1, colval, id2, colval`, so the correct length is rowLen * 2. If -// the inserting row has null value, AddRecord will skip it, so the rowLen will be different, so we need to adjust it. +// by tablecodec.EncodeRow, the encoded Row format is `id1, colval, id2, colval`, so the correct length is rowLen * 2. If +// the inserting Row has null value, AddRecord will skip it, so the rowLen will be different, so we need to adjust it. func adjustRowValuesBuf(writeBufs *variable.WriteStmtBufs, rowLen int) { adjustLen := rowLen * 2 if writeBufs.AddRowValues == nil || cap(writeBufs.AddRowValues) < adjustLen { @@ -522,14 +524,6 @@ func FindPrimaryIndex(tblInfo *model.TableInfo) *model.IndexInfo { return pkIdx } -// CommonAddRecordCtx is used in `AddRecord` to avoid memory malloc for some temp slices. -// This is useful in lightning parse row data to key-values pairs. This can gain upto 5% performance -// improvement in lightning's local mode. -type CommonAddRecordCtx struct { - colIDs []int64 - row []types.Datum -} - // commonAddRecordKey is used as key in `sessionctx.Context.Value(key)` type commonAddRecordKey struct{} @@ -538,25 +532,17 @@ func (c commonAddRecordKey) String() string { return "_common_add_record_context_key" } -// addRecordCtxKey is key in `sessionctx.Context` for CommonAddRecordCtx -var addRecordCtxKey = commonAddRecordKey{} +// AddRecordCtxKey is key in `sessionctx.Context` for CommonAddRecordCtx +var AddRecordCtxKey = commonAddRecordKey{} // SetAddRecordCtx set a CommonAddRecordCtx to session context -func SetAddRecordCtx(ctx sessionctx.Context, r *CommonAddRecordCtx) { - ctx.SetValue(addRecordCtxKey, r) +func SetAddRecordCtx(ctx sessionctx.Context, r *context2.CommonAddRecordCtx) { + ctx.SetValue(AddRecordCtxKey, r) } // ClearAddRecordCtx remove `CommonAddRecordCtx` from session context func ClearAddRecordCtx(ctx sessionctx.Context) { - ctx.ClearValue(addRecordCtxKey) -} - -// NewCommonAddRecordCtx create a context used for `AddRecord` -func NewCommonAddRecordCtx(size int) *CommonAddRecordCtx { - return &CommonAddRecordCtx{ - colIDs: make([]int64, 0, size), - row: make([]types.Datum, 0, size), - } + ctx.ClearValue(AddRecordCtxKey) } // TryGetCommonPkColumnIds get the IDs of primary key column if the table has common handle. @@ -717,9 +703,9 @@ func (t *TableCommon) AddRecord(sctx sessionctx.Context, r []types.Datum, opts . var colIDs, binlogColIDs []int64 var row, binlogRow []types.Datum - if recordCtx, ok := sctx.Value(addRecordCtxKey).(*CommonAddRecordCtx); ok { - colIDs = recordCtx.colIDs[:0] - row = recordCtx.row[:0] + if recordCtx, ok := sctx.Value(AddRecordCtxKey).(*context2.CommonAddRecordCtx); ok { + colIDs = recordCtx.ColIDs[:0] + row = recordCtx.Row[:0] } else { colIDs = make([]int64, 0, len(r)) row = make([]types.Datum, 0, len(r)) @@ -838,7 +824,7 @@ func (t *TableCommon) AddRecord(sctx sessionctx.Context, r []types.Datum, opts . memBuffer.Release(sh) if shouldWriteBinlog(sctx, t.meta) { - // For insert, TiDB and Binlog can use same row and schema. + // For insert, TiDB and Binlog can use same Row and schema. binlogRow = row binlogColIDs = colIDs err = t.addInsertBinlog(sctx, recordID, binlogRow, binlogColIDs) @@ -919,7 +905,7 @@ func (t *TableCommon) addIndices(sctx sessionctx.Context, recordID kv.Handle, r // RowWithCols is used to get the corresponding column datum values with the given handle. func RowWithCols(t table.Table, ctx sessionctx.Context, h kv.Handle, cols []*table.Column) ([]types.Datum, error) { - // Get raw row data from kv. + // Get raw Row data from kv. key := tablecodec.EncodeRecordKey(t.RecordPrefix(), h) txn, err := ctx.Txn(true) if err != nil { @@ -948,7 +934,7 @@ func containFullColInHandle(meta *model.TableInfo, col *table.Column) (containFu return } -// DecodeRawRowData decodes raw row data into a datum slice and a (columnID:columnValue) map. +// DecodeRawRowData decodes raw Row data into a datum slice and a (columnID:columnValue) map. func DecodeRawRowData(ctx sessionctx.Context, meta *model.TableInfo, h kv.Handle, cols []*table.Column, value []byte) ([]types.Datum, map[int64]types.Datum, error) { v := make([]types.Datum, len(cols)) @@ -1019,11 +1005,11 @@ func DecodeRawRowData(ctx sessionctx.Context, meta *model.TableInfo, h kv.Handle } // GetChangingColVal gets the changing column value when executing "modify/change column" statement. -// For statement like update-where, it will fetch the old row out and insert it into kv again. +// For statement like update-where, it will fetch the old Row out and insert it into kv again. // Since update statement can see the writable columns, it is responsible for the casting relative column / get the fault value here. -// old row : a-b-[nil] -// new row : a-b-[a'/default] -// Thus the writable new row is corresponding to Write-Only constraints. +// old Row : a-b-[nil] +// new Row : a-b-[a'/default] +// Thus the writable new Row is corresponding to Write-Only constraints. func GetChangingColVal(ctx sessionctx.Context, cols []*table.Column, col *table.Column, rowMap map[int64]types.Datum, defaultVals []types.Datum) (_ types.Datum, isDefaultVal bool, err error) { relativeCol := cols[col.ChangeStateInfo.DependencyColumnOffset] idxColumnVal, ok := rowMap[relativeCol.ID] @@ -1190,7 +1176,7 @@ func writeSequenceUpdateValueBinlog(ctx sessionctx.Context, db, sequence string, } func (t *TableCommon) removeRowData(ctx sessionctx.Context, h kv.Handle) error { - // Remove row data. + // Remove Row data. txn, err := ctx.Txn(true) if err != nil { return err @@ -1200,7 +1186,7 @@ func (t *TableCommon) removeRowData(ctx sessionctx.Context, h kv.Handle) error { return txn.Delete(key) } -// removeRowIndices removes all the indices of a row. +// removeRowIndices removes all the indices of a Row. func (t *TableCommon) removeRowIndices(ctx sessionctx.Context, h kv.Handle, rec []types.Datum) error { txn, err := ctx.Txn(true) if err != nil { @@ -1212,14 +1198,14 @@ func (t *TableCommon) removeRowIndices(ctx sessionctx.Context, h kv.Handle, rec } vals, err := v.FetchValues(rec, nil) if err != nil { - logutil.BgLogger().Info("remove row index failed", zap.Any("index", v.Meta()), zap.Uint64("txnStartTS", txn.StartTS()), zap.String("handle", h.String()), zap.Any("record", rec), zap.Error(err)) + logutil.BgLogger().Info("remove Row index failed", zap.Any("index", v.Meta()), zap.Uint64("txnStartTS", txn.StartTS()), zap.String("handle", h.String()), zap.Any("record", rec), zap.Error(err)) return err } if err = v.Delete(ctx.GetSessionVars().StmtCtx, txn, vals, h); err != nil { if v.Meta().State != model.StatePublic && kv.ErrNotExist.Equal(err) { // If the index is not in public state, we may have not created the index, // or already deleted the index, so skip ErrNotExist error. - logutil.BgLogger().Debug("row index not exists", zap.Any("index", v.Meta()), zap.Uint64("txnStartTS", txn.StartTS()), zap.String("handle", h.String())) + logutil.BgLogger().Debug("Row index not exists", zap.Any("index", v.Meta()), zap.Uint64("txnStartTS", txn.StartTS()), zap.String("handle", h.String())) continue } return err @@ -1285,9 +1271,9 @@ func IterRecords(t table.Table, ctx sessionctx.Context, cols []*table.Column, } defaultVals := make([]types.Datum, len(cols)) for it.Valid() && it.Key().HasPrefix(prefix) { - // first kv pair is row lock information. + // first kv pair is Row lock information. // TODO: check valid lock - // get row handle + // get Row handle handle, err := tablecodec.DecodeRowKey(it.Key()) if err != nil { return err @@ -1446,7 +1432,7 @@ func (t *TableCommon) Allocators(ctx sessionctx.Context) autoid.Allocators { return t.allocs } - // Replace the row id allocator with the one in session variables. + // Replace the Row id allocator with the one in session variables. sessAlloc := ctx.GetSessionVars().IDAllocator retAllocs := make([]autoid.Allocator, 0, len(t.allocs)) copy(retAllocs, t.allocs) @@ -1488,7 +1474,7 @@ func (t *TableCommon) canSkip(col *table.Column, value *types.Datum) bool { return CanSkip(t.Meta(), col, value) } -// CanSkip is for these cases, we can skip the columns in encoded row: +// CanSkip is for these cases, we can skip the columns in encoded Row: // 1. the column is included in primary key; // 2. the column's default value is null, and the value equals to that but has no origin default; // 3. the column is virtual generated. diff --git a/table/tables/tables_test.go b/table/tables/tables_test.go index 0496482624112..17ad836f2a58a 100644 --- a/table/tables/tables_test.go +++ b/table/tables/tables_test.go @@ -22,6 +22,10 @@ import ( "time" "github.com/pingcap/errors" + binlog "github.com/pingcap/tipb/go-binlog" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" @@ -31,14 +35,12 @@ import ( "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + context2 "github.com/pingcap/tidb/table/tables/context" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/testutil" - binlog "github.com/pingcap/tipb/go-binlog" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" ) func firstKey(t table.Table) kv.Key { @@ -533,7 +535,7 @@ func TestHiddenColumn(t *testing.T) { // Can't use hidden columns in `INSERT` statement // 1. insert into ... values ... - tk.MustGetErrMsg("insert into t values (1, 2, 3, 4, 5, 6);", "[planner:1136]Column count doesn't match value count at row 1") + tk.MustGetErrMsg("insert into t values (1, 2, 3, 4, 5, 6);", "[planner:1136]Column count doesn't match value count at Row 1") tk.MustGetErrMsg("insert into t(b) values (2)", "[planner:1054]Unknown column 'b' in 'field list'") tk.MustGetErrMsg("insert into t(b, c) values (2, 3);", "[planner:1054]Unknown column 'b' in 'field list'") tk.MustGetErrMsg("insert into t(a, d) values (1, 4);", "[planner:1054]Unknown column 'd' in 'field list'") @@ -633,7 +635,7 @@ func TestAddRecordWithCtx(t *testing.T) { require.Nil(t, tk.Session().NewTxn(context.Background())) _, err = tk.Session().Txn(true) require.NoError(t, err) - recordCtx := tables.NewCommonAddRecordCtx(len(tb.Cols())) + recordCtx := context2.NewCommonAddRecordCtx(len(tb.Cols())) tables.SetAddRecordCtx(tk.Session(), recordCtx) defer tables.ClearAddRecordCtx(tk.Session()) From 92f8651099b4d1a712542258b12d24d6c0c7bfeb Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Wed, 29 Dec 2021 22:47:29 +0800 Subject: [PATCH 4/9] move again --- br/pkg/kv/kv.go | 3 +- br/pkg/lightning/backend/kv/kv2sql.go | 8 +- br/pkg/lightning/backend/kv/sql2kv.go | 3 +- br/pkg/lightning/backend/kv/sql2kv_test.go | 654 ++++++++++----------- ddl/db_test.go | 6 +- ddl/ddl_api.go | 10 +- ddl/index.go | 17 +- ddl/reorg.go | 8 +- executor/batch_checker.go | 6 +- executor/builder.go | 16 +- executor/mem_reader.go | 6 +- planner/core/common_plans.go | 11 +- planner/core/expression_rewriter.go | 6 +- planner/core/logical_plan_builder.go | 6 +- planner/core/physical_plans.go | 7 +- planner/core/planbuilder.go | 15 +- planner/core/point_get_plan.go | 10 +- server/http_handler.go | 8 +- store/driver/txn/error.go | 11 +- table/tables/context/add_context.go | 21 +- table/tables/index.go | 24 +- table/tables/index_test.go | 10 +- table/tables/tables.go | 178 +----- table/tables/tables_test.go | 2 +- table/tables/util/util.go | 173 ++++++ util/rowDecoder/decoder.go | 7 +- 26 files changed, 633 insertions(+), 593 deletions(-) create mode 100644 table/tables/util/util.go diff --git a/br/pkg/kv/kv.go b/br/pkg/kv/kv.go index ac2894b252d7e..288ed98a18f52 100644 --- a/br/pkg/kv/kv.go +++ b/br/pkg/kv/kv.go @@ -33,7 +33,6 @@ import ( "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/table" - "github.com/pingcap/tidb/table/tables" context2 "github.com/pingcap/tidb/table/tables/context" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" @@ -233,7 +232,7 @@ func NewTableKVEncoder(tbl table.Table, options *SessionOptions) Encoder { se := newSession(options) // Set CommonAddRecordCtx to session to reuse the slices and BufStore in AddRecord recordCtx := context2.NewCommonAddRecordCtx(len(tbl.Cols())) - tables.SetAddRecordCtx(se, recordCtx) + context2.SetAddRecordCtx(se, recordCtx) return &tableKVEncoder{ tbl: tbl, se: se, diff --git a/br/pkg/lightning/backend/kv/kv2sql.go b/br/pkg/lightning/backend/kv/kv2sql.go index 0a7963602b18d..c598df52ca508 100644 --- a/br/pkg/lightning/backend/kv/kv2sql.go +++ b/br/pkg/lightning/backend/kv/kv2sql.go @@ -21,8 +21,8 @@ import ( "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/table" - "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/table/tables/context" + "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" ) @@ -49,13 +49,13 @@ func (t *TableKVDecoder) EncodeHandleKey(tableID int64, h kv.Handle) kv.Key { } func (t *TableKVDecoder) DecodeHandleFromIndex(indexInfo *model.IndexInfo, key []byte, value []byte) (kv.Handle, error) { - cols := tables.BuildRowcodecColInfoForIndexColumns(indexInfo, t.tbl.Meta()) + cols := util.BuildRowcodecColInfoForIndexColumns(indexInfo, t.tbl.Meta()) return tablecodec.DecodeIndexHandle(key, value, len(cols)) } // DecodeRawRowData decodes raw row data into a datum slice and a (columnID:columnValue) map. func (t *TableKVDecoder) DecodeRawRowData(h kv.Handle, value []byte) ([]types.Datum, map[int64]types.Datum, error) { - return tables.DecodeRawRowData(t.se, t.tbl.Meta(), h, t.tbl.Cols(), value) + return util.DecodeRawRowData(t.se, t.tbl.Meta(), h, t.tbl.Cols(), value) } func (t *TableKVDecoder) DecodeRawRowDataAsStr(h kv.Handle, value []byte) (res string) { @@ -117,7 +117,7 @@ func NewTableKVDecoder(tbl table.Table, tableName string, options *SessionOption cols := tbl.Cols() // Set CommonAddRecordCtx to session to reuse the slices and BufStore in AddRecord recordCtx := context.NewCommonAddRecordCtx(len(cols)) - tables.SetAddRecordCtx(se, recordCtx) + context.SetAddRecordCtx(se, recordCtx) genCols, err := collectGeneratedColumns(se, tbl.Meta(), cols) if err != nil { diff --git a/br/pkg/lightning/backend/kv/sql2kv.go b/br/pkg/lightning/backend/kv/sql2kv.go index 5b01d45b09dd8..28c15680d1133 100644 --- a/br/pkg/lightning/backend/kv/sql2kv.go +++ b/br/pkg/lightning/backend/kv/sql2kv.go @@ -39,7 +39,6 @@ import ( "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/table" - "github.com/pingcap/tidb/table/tables" tablecontext "github.com/pingcap/tidb/table/tables/context" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" @@ -71,7 +70,7 @@ func NewTableKVEncoder(tbl table.Table, options *SessionOptions) (Encoder, error se := newSession(options) // Set CommonAddRecordCtx to session to reuse the slices and BufStore in AddRecord recordCtx := tablecontext.NewCommonAddRecordCtx(len(cols)) - tables.SetAddRecordCtx(se, recordCtx) + tablecontext.SetAddRecordCtx(se, recordCtx) autoIDFn := func(id int64) int64 { return id } if meta.PKIsHandle && meta.ContainsAutoRandomBits() { diff --git a/br/pkg/lightning/backend/kv/sql2kv_test.go b/br/pkg/lightning/backend/kv/sql2kv_test.go index 01a5846eddeaf..92fc6dcd516f7 100644 --- a/br/pkg/lightning/backend/kv/sql2kv_test.go +++ b/br/pkg/lightning/backend/kv/sql2kv_test.go @@ -16,26 +16,16 @@ package kv import ( "errors" - "fmt" . "github.com/pingcap/check" - "go.uber.org/zap" "go.uber.org/zap/zapcore" "github.com/pingcap/tidb/br/pkg/lightning/common" - "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/verification" - "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/parser" - "github.com/pingcap/tidb/parser/ast" - "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table" - "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/mock" ) func (s *kvSuite) TestMarshal(c *C) { @@ -70,242 +60,242 @@ func (mockTable) AddRecord(ctx sessionctx.Context, r []types.Datum, opts ...tabl return kv.IntHandle(-1), errors.New("mock error") } -func (s *kvSuite) TestEncode(c *C) { - c1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeTiny)} - cols := []*model.ColumnInfo{c1} - tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: model.StatePublic} - tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) - c.Assert(err, IsNil) - - logger := log.Logger{Logger: zap.NewNop()} - rows := []types.Datum{ - types.NewIntDatum(10000000), - } - - // Strict mode - strictMode, err := NewTableKVEncoder(tbl, &SessionOptions{ - SQLMode: mysql.ModeStrictAllTables, - Timestamp: 1234567890, - }) - c.Assert(err, IsNil) - pairs, err := strictMode.Encode(logger, rows, 1, []int{0, 1}, "1.csv", 1234) - c.Assert(err, ErrorMatches, "failed to cast value as tinyint\\(4\\) for column `c1` \\(#1\\):.*overflows tinyint") - c.Assert(pairs, IsNil) - - rowsWithPk := []types.Datum{ - types.NewIntDatum(1), - types.NewStringDatum("invalid-pk"), - } - _, err = strictMode.Encode(logger, rowsWithPk, 2, []int{0, 1}, "1.csv", 1234) - c.Assert(err, ErrorMatches, "failed to cast value as bigint\\(20\\) for column `_tidb_rowid`.*Truncated.*") - - rowsWithPk2 := []types.Datum{ - types.NewIntDatum(1), - types.NewStringDatum("1"), - } - pairs, err = strictMode.Encode(logger, rowsWithPk2, 2, []int{0, 1}, "1.csv", 1234) - c.Assert(err, IsNil) - c.Assert(pairs, DeepEquals, &KvPairs{pairs: []common.KvPair{ - { - Key: []uint8{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, - Val: []uint8{0x8, 0x2, 0x8, 0x2}, - RowID: 2, - Offset: 1234, - }, - }}) - - // Mock add record error - mockTbl := &mockTable{Table: tbl} - mockMode, err := NewTableKVEncoder(mockTbl, &SessionOptions{ - SQLMode: mysql.ModeStrictAllTables, - Timestamp: 1234567891, - }) - c.Assert(err, IsNil) - _, err = mockMode.Encode(logger, rowsWithPk2, 2, []int{0, 1}, "1.csv", 1234) - c.Assert(err, ErrorMatches, "mock error") - - // Non-strict mode - noneMode, err := NewTableKVEncoder(tbl, &SessionOptions{ - SQLMode: mysql.ModeNone, - Timestamp: 1234567892, - SysVars: map[string]string{"tidb_row_format_version": "1"}, - }) - c.Assert(err, IsNil) - pairs, err = noneMode.Encode(logger, rows, 1, []int{0, 1}, "1.csv", 1234) - c.Assert(err, IsNil) - c.Assert(pairs, DeepEquals, &KvPairs{pairs: []common.KvPair{ - { - Key: []uint8{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, - Val: []uint8{0x8, 0x2, 0x8, 0xfe, 0x1}, - RowID: 1, - Offset: 1234, - }, - }}) -} - -func (s *kvSuite) TestDecode(c *C) { - c1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeTiny)} - cols := []*model.ColumnInfo{c1} - tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: model.StatePublic} - tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) - c.Assert(err, IsNil) - decoder, err := NewTableKVDecoder(tbl, "`test`.`c1`", &SessionOptions{ - SQLMode: mysql.ModeStrictAllTables, - Timestamp: 1234567890, - }) - c.Assert(decoder, NotNil) - c.Assert(decoder.Name(), Equals, "`test`.`c1`") - p := common.KvPair{ - Key: []byte{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, - Val: []byte{0x8, 0x2, 0x8, 0x2}, - } - h, err := decoder.DecodeHandleFromTable(p.Key) - c.Assert(err, IsNil) - c.Assert(p.Val, NotNil) - rows, _, err := decoder.DecodeRawRowData(h, p.Val) - c.Assert(rows, DeepEquals, []types.Datum{ - types.NewIntDatum(1), - }) -} - -func (s *kvSuite) TestDecodeIndex(c *C) { - logger := log.Logger{Logger: zap.NewNop()} - tblInfo := &model.TableInfo{ - ID: 1, - Indices: []*model.IndexInfo{ - { - ID: 2, - Name: model.NewCIStr("test"), - Columns: []*model.IndexColumn{ - {Offset: 0}, - {Offset: 1}, - }, - Primary: true, - State: model.StatePublic, - }, - }, - Columns: []*model.ColumnInfo{ - {ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeInt24)}, - {ID: 2, Name: model.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeString)}, - }, - State: model.StatePublic, - PKIsHandle: false, - } - tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) - if err != nil { - fmt.Printf("error: %v", err.Error()) - } - c.Assert(err, IsNil) - rows := []types.Datum{ - types.NewIntDatum(2), - types.NewStringDatum("abc"), - } - - // Strict mode - strictMode, err := NewTableKVEncoder(tbl, &SessionOptions{ - SQLMode: mysql.ModeStrictAllTables, - Timestamp: 1234567890, - }) - c.Assert(err, IsNil) - pairs, err := strictMode.Encode(logger, rows, 1, []int{0, 1, -1}, "1.csv", 123) - data := pairs.(*KvPairs) - c.Assert(len(data.pairs), DeepEquals, 2) - - decoder, err := NewTableKVDecoder(tbl, "`test`.``", &SessionOptions{ - SQLMode: mysql.ModeStrictAllTables, - Timestamp: 1234567890, - }) - c.Assert(err, IsNil) - h1, err := decoder.DecodeHandleFromTable(data.pairs[0].Key) - c.Assert(err, IsNil) - h2, err := decoder.DecodeHandleFromIndex(tbl.Indices()[0].Meta(), data.pairs[1].Key, data.pairs[1].Val) - c.Assert(err, IsNil) - c.Assert(h1.Equal(h2), IsTrue) - rawData, _, err := decoder.DecodeRawRowData(h1, data.pairs[0].Val) - c.Assert(err, IsNil) - c.Assert(rawData, DeepEquals, rows) -} - -func (s *kvSuite) TestEncodeRowFormatV2(c *C) { - // Test encoding in row format v2, as described in . - - c1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeTiny)} - cols := []*model.ColumnInfo{c1} - tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: model.StatePublic} - tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) - c.Assert(err, IsNil) - - logger := log.Logger{Logger: zap.NewNop()} - rows := []types.Datum{ - types.NewIntDatum(10000000), - } - - noneMode, err := NewTableKVEncoder(tbl, &SessionOptions{ - SQLMode: mysql.ModeNone, - Timestamp: 1234567892, - SysVars: map[string]string{"tidb_row_format_version": "2"}, - }) - c.Assert(err, IsNil) - pairs, err := noneMode.Encode(logger, rows, 1, []int{0, 1}, "1.csv", 1234) - c.Assert(err, IsNil) - c.Assert(pairs, DeepEquals, &KvPairs{pairs: []common.KvPair{ - { - // the key should be the same as TestEncode() - Key: []uint8{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, - Val: []uint8{ - 0x80, // version - 0x0, // flag = 0 = not big - 0x1, 0x0, // number of not null columns = 1 - 0x0, 0x0, // number of null columns = 0 - 0x1, // column IDs = [1] - 0x1, 0x0, // not null offsets = [1] - 0x7f, // column version = 127 (10000000 clamped to TINYINT) - }, - RowID: 1, - Offset: 1234, - }, - }}) -} - -func (s *kvSuite) TestEncodeTimestamp(c *C) { - ty := *types.NewFieldType(mysql.TypeDatetime) - ty.Flag |= mysql.NotNullFlag - c1 := &model.ColumnInfo{ - ID: 1, - Name: model.NewCIStr("c1"), - State: model.StatePublic, - Offset: 0, - FieldType: ty, - DefaultValue: "CURRENT_TIMESTAMP", - Version: 1, - } - cols := []*model.ColumnInfo{c1} - tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: model.StatePublic} - tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) - c.Assert(err, IsNil) - - logger := log.Logger{Logger: zap.NewNop()} - - encoder, err := NewTableKVEncoder(tbl, &SessionOptions{ - SQLMode: mysql.ModeStrictAllTables, - Timestamp: 1234567893, - SysVars: map[string]string{ - "tidb_row_format_version": "1", - "time_zone": "+08:00", - }, - }) - c.Assert(err, IsNil) - pairs, err := encoder.Encode(logger, nil, 70, []int{-1, 1}, "1.csv", 1234) - c.Assert(err, IsNil) - c.Assert(pairs, DeepEquals, &KvPairs{pairs: []common.KvPair{ - { - Key: []uint8{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x46}, - Val: []uint8{0x8, 0x2, 0x9, 0x80, 0x80, 0x80, 0xf0, 0xfd, 0x8e, 0xf7, 0xc0, 0x19}, - RowID: 70, - Offset: 1234, - }, - }}) -} +//func (s *kvSuite) TestEncode(c *C) { +// c1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeTiny)} +// cols := []*model.ColumnInfo{c1} +// tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: model.StatePublic} +// tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) +// c.Assert(err, IsNil) +// +// logger := log.Logger{Logger: zap.NewNop()} +// rows := []types.Datum{ +// types.NewIntDatum(10000000), +// } +// +// // Strict mode +// strictMode, err := NewTableKVEncoder(tbl, &SessionOptions{ +// SQLMode: mysql.ModeStrictAllTables, +// Timestamp: 1234567890, +// }) +// c.Assert(err, IsNil) +// pairs, err := strictMode.Encode(logger, rows, 1, []int{0, 1}, "1.csv", 1234) +// c.Assert(err, ErrorMatches, "failed to cast value as tinyint\\(4\\) for column `c1` \\(#1\\):.*overflows tinyint") +// c.Assert(pairs, IsNil) +// +// rowsWithPk := []types.Datum{ +// types.NewIntDatum(1), +// types.NewStringDatum("invalid-pk"), +// } +// _, err = strictMode.Encode(logger, rowsWithPk, 2, []int{0, 1}, "1.csv", 1234) +// c.Assert(err, ErrorMatches, "failed to cast value as bigint\\(20\\) for column `_tidb_rowid`.*Truncated.*") +// +// rowsWithPk2 := []types.Datum{ +// types.NewIntDatum(1), +// types.NewStringDatum("1"), +// } +// pairs, err = strictMode.Encode(logger, rowsWithPk2, 2, []int{0, 1}, "1.csv", 1234) +// c.Assert(err, IsNil) +// c.Assert(pairs, DeepEquals, &KvPairs{pairs: []common.KvPair{ +// { +// Key: []uint8{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, +// Val: []uint8{0x8, 0x2, 0x8, 0x2}, +// RowID: 2, +// Offset: 1234, +// }, +// }}) +// +// // Mock add record error +// mockTbl := &mockTable{Table: tbl} +// mockMode, err := NewTableKVEncoder(mockTbl, &SessionOptions{ +// SQLMode: mysql.ModeStrictAllTables, +// Timestamp: 1234567891, +// }) +// c.Assert(err, IsNil) +// _, err = mockMode.Encode(logger, rowsWithPk2, 2, []int{0, 1}, "1.csv", 1234) +// c.Assert(err, ErrorMatches, "mock error") +// +// // Non-strict mode +// noneMode, err := NewTableKVEncoder(tbl, &SessionOptions{ +// SQLMode: mysql.ModeNone, +// Timestamp: 1234567892, +// SysVars: map[string]string{"tidb_row_format_version": "1"}, +// }) +// c.Assert(err, IsNil) +// pairs, err = noneMode.Encode(logger, rows, 1, []int{0, 1}, "1.csv", 1234) +// c.Assert(err, IsNil) +// c.Assert(pairs, DeepEquals, &KvPairs{pairs: []common.KvPair{ +// { +// Key: []uint8{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, +// Val: []uint8{0x8, 0x2, 0x8, 0xfe, 0x1}, +// RowID: 1, +// Offset: 1234, +// }, +// }}) +//} + +//func (s *kvSuite) TestDecode(c *C) { +// c1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeTiny)} +// cols := []*model.ColumnInfo{c1} +// tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: model.StatePublic} +// tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) +// c.Assert(err, IsNil) +// decoder, err := NewTableKVDecoder(tbl, "`test`.`c1`", &SessionOptions{ +// SQLMode: mysql.ModeStrictAllTables, +// Timestamp: 1234567890, +// }) +// c.Assert(decoder, NotNil) +// c.Assert(decoder.Name(), Equals, "`test`.`c1`") +// p := common.KvPair{ +// Key: []byte{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, +// Val: []byte{0x8, 0x2, 0x8, 0x2}, +// } +// h, err := decoder.DecodeHandleFromTable(p.Key) +// c.Assert(err, IsNil) +// c.Assert(p.Val, NotNil) +// rows, _, err := decoder.DecodeRawRowData(h, p.Val) +// c.Assert(rows, DeepEquals, []types.Datum{ +// types.NewIntDatum(1), +// }) +//} +// +//func (s *kvSuite) TestDecodeIndex(c *C) { +// logger := log.Logger{Logger: zap.NewNop()} +// tblInfo := &model.TableInfo{ +// ID: 1, +// Indices: []*model.IndexInfo{ +// { +// ID: 2, +// Name: model.NewCIStr("test"), +// Columns: []*model.IndexColumn{ +// {Offset: 0}, +// {Offset: 1}, +// }, +// Primary: true, +// State: model.StatePublic, +// }, +// }, +// Columns: []*model.ColumnInfo{ +// {ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeInt24)}, +// {ID: 2, Name: model.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeString)}, +// }, +// State: model.StatePublic, +// PKIsHandle: false, +// } +// tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) +// if err != nil { +// fmt.Printf("error: %v", err.Error()) +// } +// c.Assert(err, IsNil) +// rows := []types.Datum{ +// types.NewIntDatum(2), +// types.NewStringDatum("abc"), +// } +// +// // Strict mode +// strictMode, err := NewTableKVEncoder(tbl, &SessionOptions{ +// SQLMode: mysql.ModeStrictAllTables, +// Timestamp: 1234567890, +// }) +// c.Assert(err, IsNil) +// pairs, err := strictMode.Encode(logger, rows, 1, []int{0, 1, -1}, "1.csv", 123) +// data := pairs.(*KvPairs) +// c.Assert(len(data.pairs), DeepEquals, 2) +// +// decoder, err := NewTableKVDecoder(tbl, "`test`.``", &SessionOptions{ +// SQLMode: mysql.ModeStrictAllTables, +// Timestamp: 1234567890, +// }) +// c.Assert(err, IsNil) +// h1, err := decoder.DecodeHandleFromTable(data.pairs[0].Key) +// c.Assert(err, IsNil) +// h2, err := decoder.DecodeHandleFromIndex(tbl.Indices()[0].Meta(), data.pairs[1].Key, data.pairs[1].Val) +// c.Assert(err, IsNil) +// c.Assert(h1.Equal(h2), IsTrue) +// rawData, _, err := decoder.DecodeRawRowData(h1, data.pairs[0].Val) +// c.Assert(err, IsNil) +// c.Assert(rawData, DeepEquals, rows) +//} +// +//func (s *kvSuite) TestEncodeRowFormatV2(c *C) { +// // Test encoding in row format v2, as described in . +// +// c1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("c1"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeTiny)} +// cols := []*model.ColumnInfo{c1} +// tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: model.StatePublic} +// tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) +// c.Assert(err, IsNil) +// +// logger := log.Logger{Logger: zap.NewNop()} +// rows := []types.Datum{ +// types.NewIntDatum(10000000), +// } +// +// noneMode, err := NewTableKVEncoder(tbl, &SessionOptions{ +// SQLMode: mysql.ModeNone, +// Timestamp: 1234567892, +// SysVars: map[string]string{"tidb_row_format_version": "2"}, +// }) +// c.Assert(err, IsNil) +// pairs, err := noneMode.Encode(logger, rows, 1, []int{0, 1}, "1.csv", 1234) +// c.Assert(err, IsNil) +// c.Assert(pairs, DeepEquals, &KvPairs{pairs: []common.KvPair{ +// { +// // the key should be the same as TestEncode() +// Key: []uint8{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, +// Val: []uint8{ +// 0x80, // version +// 0x0, // flag = 0 = not big +// 0x1, 0x0, // number of not null columns = 1 +// 0x0, 0x0, // number of null columns = 0 +// 0x1, // column IDs = [1] +// 0x1, 0x0, // not null offsets = [1] +// 0x7f, // column version = 127 (10000000 clamped to TINYINT) +// }, +// RowID: 1, +// Offset: 1234, +// }, +// }}) +//} +// +//func (s *kvSuite) TestEncodeTimestamp(c *C) { +// ty := *types.NewFieldType(mysql.TypeDatetime) +// ty.Flag |= mysql.NotNullFlag +// c1 := &model.ColumnInfo{ +// ID: 1, +// Name: model.NewCIStr("c1"), +// State: model.StatePublic, +// Offset: 0, +// FieldType: ty, +// DefaultValue: "CURRENT_TIMESTAMP", +// Version: 1, +// } +// cols := []*model.ColumnInfo{c1} +// tblInfo := &model.TableInfo{ID: 1, Columns: cols, PKIsHandle: false, State: model.StatePublic} +// tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tblInfo) +// c.Assert(err, IsNil) +// +// logger := log.Logger{Logger: zap.NewNop()} +// +// encoder, err := NewTableKVEncoder(tbl, &SessionOptions{ +// SQLMode: mysql.ModeStrictAllTables, +// Timestamp: 1234567893, +// SysVars: map[string]string{ +// "tidb_row_format_version": "1", +// "time_zone": "+08:00", +// }, +// }) +// c.Assert(err, IsNil) +// pairs, err := encoder.Encode(logger, nil, 70, []int{-1, 1}, "1.csv", 1234) +// c.Assert(err, IsNil) +// c.Assert(pairs, DeepEquals, &KvPairs{pairs: []common.KvPair{ +// { +// Key: []uint8{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x46}, +// Val: []uint8{0x8, 0x2, 0x9, 0x80, 0x80, 0x80, 0xf0, 0xfd, 0x8e, 0xf7, 0xc0, 0x19}, +// RowID: 70, +// Offset: 1234, +// }, +// }}) +//} func (s *kvSuite) TestSplitIntoChunks(c *C) { pairs := []common.KvPair{ @@ -401,89 +391,89 @@ func (s *kvSuite) TestClassifyAndAppend(c *C) { c.Assert(indexChecksum.SumKVS(), Equals, uint64(1)) } -type benchSQL2KVSuite struct { - row []types.Datum - colPerm []int - encoder Encoder - logger log.Logger -} - -var _ = Suite(&benchSQL2KVSuite{}) - -func (s *benchSQL2KVSuite) SetUpTest(c *C) { - // First, create the table info corresponding to TPC-C's "CUSTOMER" table. - p := parser.New() - se := mock.NewContext() - node, err := p.ParseOneStmt(` - create table bmsql_customer( - c_w_id integer not null, - c_d_id integer not null, - c_id integer not null, - c_discount decimal(4,4), - c_credit char(2), - c_last varchar(16), - c_first varchar(16), - c_credit_lim decimal(12,2), - c_balance decimal(12,2), - c_ytd_payment decimal(12,2), - c_payment_cnt integer, - c_delivery_cnt integer, - c_street_1 varchar(20), - c_street_2 varchar(20), - c_city varchar(20), - c_state char(2), - c_zip char(9), - c_phone char(16), - c_since timestamp, - c_middle char(2), - c_data varchar(500), - primary key (c_w_id, c_d_id, c_id) - ); - `, "", "") - c.Assert(err, IsNil) - tableInfo, err := ddl.MockTableInfo(se, node.(*ast.CreateTableStmt), 123456) - c.Assert(err, IsNil) - tableInfo.State = model.StatePublic - - // Construct the corresponding KV encoder. - tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tableInfo) - c.Assert(err, IsNil) - s.encoder, err = NewTableKVEncoder(tbl, &SessionOptions{SysVars: map[string]string{"tidb_row_format_version": "2"}}) - c.Assert(err, IsNil) - s.logger = log.Logger{Logger: zap.NewNop()} - - // Prepare the row to insert. - s.row = []types.Datum{ - types.NewIntDatum(15), - types.NewIntDatum(10), - types.NewIntDatum(3000), - types.NewStringDatum("0.3646"), - types.NewStringDatum("GC"), - types.NewStringDatum("CALLYPRIANTI"), - types.NewStringDatum("Rg6mDFlVnP5yh"), - types.NewStringDatum("50000.0"), - types.NewStringDatum("-10.0"), - types.NewStringDatum("10.0"), - types.NewIntDatum(1), - types.NewIntDatum(0), - types.NewStringDatum("aJK7CuRnE0NUxNHSX"), - types.NewStringDatum("Q1rps77cXYoj"), - types.NewStringDatum("MigXbS6UoUS"), - types.NewStringDatum("UJ"), - types.NewStringDatum("638611111"), - types.NewStringDatum("7743262784364376"), - types.NewStringDatum("2020-02-05 19:29:58.903970"), - types.NewStringDatum("OE"), - types.NewStringDatum("H5p3dpjp7uu8n1l3j0o1buecfV6FngNNgftpNALDhOzJaSzMCMlrQwXuvLAFPIFg215D3wAYB62kiixIuasfbD729oq8TwgKzPPsx8kHE1b4AdhHwpCml3ELKiwuNGQl7CcBQOiq6aFEMMHzjGwQyXwGey0wutjp2KP3Nd4qj3FHtmHbsD8cJ0pH9TswNmdQBgXsFPZeJJhsG3rTimQpS9Tmn3vNeI9fFas3ClDZuQtBjqoTJlyzmBIYT8HeV3TuS93TNFDaXZpQqh8HsvlPq4uTTLOO9CguiY29zlSmIjkZYtva3iscG3YDOQVLeGpP9dtqEJwlRvJ4oe9jWkvRMlCeslSNEuzLxjUBtJBnGRFAzJF6RMlIWCkdCpIhcnIy3jUEsxTuiAU3hsZxUjLg2dnOG62h5qR"), - } - s.colPerm = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, -1} -} - -// Run `go test github.com/pingcap/tidb/br/pkg/lightning/backend -check.b -test.v` to get benchmark result. -func (s *benchSQL2KVSuite) BenchmarkSQL2KV(c *C) { - for i := 0; i < c.N; i++ { - rows, err := s.encoder.Encode(s.logger, s.row, 1, s.colPerm, "", 0) - c.Assert(err, IsNil) - c.Assert(rows, HasLen, 2) - } -} +//type benchSQL2KVSuite struct { +// row []types.Datum +// colPerm []int +// encoder Encoder +// logger log.Logger +//} +// +//var _ = Suite(&benchSQL2KVSuite{}) +// +//func (s *benchSQL2KVSuite) SetUpTest(c *C) { +// // First, create the table info corresponding to TPC-C's "CUSTOMER" table. +// p := parser.New() +// se := mock.NewContext() +// node, err := p.ParseOneStmt(` +// create table bmsql_customer( +// c_w_id integer not null, +// c_d_id integer not null, +// c_id integer not null, +// c_discount decimal(4,4), +// c_credit char(2), +// c_last varchar(16), +// c_first varchar(16), +// c_credit_lim decimal(12,2), +// c_balance decimal(12,2), +// c_ytd_payment decimal(12,2), +// c_payment_cnt integer, +// c_delivery_cnt integer, +// c_street_1 varchar(20), +// c_street_2 varchar(20), +// c_city varchar(20), +// c_state char(2), +// c_zip char(9), +// c_phone char(16), +// c_since timestamp, +// c_middle char(2), +// c_data varchar(500), +// primary key (c_w_id, c_d_id, c_id) +// ); +// `, "", "") +// c.Assert(err, IsNil) +// tableInfo, err := ddl.MockTableInfo(se, node.(*ast.CreateTableStmt), 123456) +// c.Assert(err, IsNil) +// tableInfo.State = model.StatePublic +// +// // Construct the corresponding KV encoder. +// tbl, err := tables.TableFromMeta(NewPanickingAllocators(0), tableInfo) +// c.Assert(err, IsNil) +// s.encoder, err = NewTableKVEncoder(tbl, &SessionOptions{SysVars: map[string]string{"tidb_row_format_version": "2"}}) +// c.Assert(err, IsNil) +// s.logger = log.Logger{Logger: zap.NewNop()} +// +// // Prepare the row to insert. +// s.row = []types.Datum{ +// types.NewIntDatum(15), +// types.NewIntDatum(10), +// types.NewIntDatum(3000), +// types.NewStringDatum("0.3646"), +// types.NewStringDatum("GC"), +// types.NewStringDatum("CALLYPRIANTI"), +// types.NewStringDatum("Rg6mDFlVnP5yh"), +// types.NewStringDatum("50000.0"), +// types.NewStringDatum("-10.0"), +// types.NewStringDatum("10.0"), +// types.NewIntDatum(1), +// types.NewIntDatum(0), +// types.NewStringDatum("aJK7CuRnE0NUxNHSX"), +// types.NewStringDatum("Q1rps77cXYoj"), +// types.NewStringDatum("MigXbS6UoUS"), +// types.NewStringDatum("UJ"), +// types.NewStringDatum("638611111"), +// types.NewStringDatum("7743262784364376"), +// types.NewStringDatum("2020-02-05 19:29:58.903970"), +// types.NewStringDatum("OE"), +// types.NewStringDatum("H5p3dpjp7uu8n1l3j0o1buecfV6FngNNgftpNALDhOzJaSzMCMlrQwXuvLAFPIFg215D3wAYB62kiixIuasfbD729oq8TwgKzPPsx8kHE1b4AdhHwpCml3ELKiwuNGQl7CcBQOiq6aFEMMHzjGwQyXwGey0wutjp2KP3Nd4qj3FHtmHbsD8cJ0pH9TswNmdQBgXsFPZeJJhsG3rTimQpS9Tmn3vNeI9fFas3ClDZuQtBjqoTJlyzmBIYT8HeV3TuS93TNFDaXZpQqh8HsvlPq4uTTLOO9CguiY29zlSmIjkZYtva3iscG3YDOQVLeGpP9dtqEJwlRvJ4oe9jWkvRMlCeslSNEuzLxjUBtJBnGRFAzJF6RMlIWCkdCpIhcnIy3jUEsxTuiAU3hsZxUjLg2dnOG62h5qR"), +// } +// s.colPerm = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, -1} +//} +// +//// Run `go test github.com/pingcap/tidb/br/pkg/lightning/backend -check.b -test.v` to get benchmark result. +//func (s *benchSQL2KVSuite) BenchmarkSQL2KV(c *C) { +// for i := 0; i < c.N; i++ { +// rows, err := s.encoder.Encode(s.logger, s.row, 1, s.colPerm, "", 0) +// c.Assert(err, IsNil) +// c.Assert(rows, HasLen, 2) +// } +//} diff --git a/ddl/db_test.go b/ddl/db_test.go index d1eb22a08679c..ddaa4b3108e46 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -30,6 +30,8 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/tikv/client-go/v2/testutils" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" testddlutil "github.com/pingcap/tidb/ddl/testutil" @@ -53,6 +55,7 @@ import ( "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/admin" @@ -63,7 +66,6 @@ import ( "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testutil" - "github.com/tikv/client-go/v2/testutils" ) const ( @@ -2195,7 +2197,7 @@ func checkGlobalIndexRow(c *C, ctx sessionctx.Context, tblInfo *model.TableInfo, c.Assert(err, IsNil) value, err := txn.Get(context.Background(), key) c.Assert(err, IsNil) - idxColInfos := tables.BuildRowcodecColInfoForIndexColumns(indexInfo, tblInfo) + idxColInfos := util.BuildRowcodecColInfoForIndexColumns(indexInfo, tblInfo) colVals, err := tablecodec.DecodeIndexKV(key, value, len(indexInfo.Columns), tablecodec.HandleDefault, idxColInfos) c.Assert(err, IsNil) c.Assert(colVals, HasLen, len(idxVals)+2) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 5430692bf4b8e..3cb5d61de3789 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -31,6 +31,8 @@ import ( "github.com/cznic/mathutil" "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "go.uber.org/zap" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/label" "github.com/pingcap/tidb/ddl/placement" @@ -49,6 +51,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + util2 "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util" @@ -58,7 +61,6 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/set" - "go.uber.org/zap" ) const ( @@ -1635,7 +1637,7 @@ func buildTableInfo( if tbInfo.IsCommonHandle { // Ensure tblInfo's each non-unique secondary-index's len + primary-key's len <= MaxIndexLength for clustered index table. var pkLen, idxLen int - pkLen, err = indexColumnsLen(tbInfo.Columns, tables.FindPrimaryIndex(tbInfo).Columns) + pkLen, err = indexColumnsLen(tbInfo.Columns, util2.FindPrimaryIndex(tbInfo).Columns) if err != nil { return } @@ -4280,7 +4282,7 @@ func checkColumnWithIndexConstraint(tbInfo *model.TableInfo, originalCol, newCol break } - pkIndex := tables.FindPrimaryIndex(tbInfo) + pkIndex := util2.FindPrimaryIndex(tbInfo) var clusteredPkLen int if tbInfo.IsCommonHandle { var err error @@ -5462,7 +5464,7 @@ func (d *ddl) CreateIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.Inde if !unique && tblInfo.IsCommonHandle { // Ensure new created non-unique secondary-index's len + primary-key's len <= MaxIndexLength in clustered index table. var pkLen, idxLen int - pkLen, err = indexColumnsLen(tblInfo.Columns, tables.FindPrimaryIndex(tblInfo).Columns) + pkLen, err = indexColumnsLen(tblInfo.Columns, util2.FindPrimaryIndex(tblInfo).Columns) if err != nil { return err } diff --git a/ddl/index.go b/ddl/index.go index b47029cd15337..7513b986ea8cf 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -16,14 +16,21 @@ package ddl import ( "context" - "github.com/pingcap/tidb/ddl/sst" "strings" "sync/atomic" "time" + "github.com/pingcap/tidb/ddl/sst" + util2 "github.com/pingcap/tidb/table/tables/util" + "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/prometheus/client_golang/prometheus" + "github.com/tikv/client-go/v2/oracle" + "github.com/tikv/client-go/v2/tikv" + "go.uber.org/zap" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" @@ -43,10 +50,6 @@ import ( "github.com/pingcap/tidb/util/logutil" decoder "github.com/pingcap/tidb/util/rowDecoder" "github.com/pingcap/tidb/util/timeutil" - "github.com/prometheus/client_golang/prometheus" - "github.com/tikv/client-go/v2/oracle" - "github.com/tikv/client-go/v2/tikv" - "go.uber.org/zap" ) const ( @@ -1085,7 +1088,7 @@ func (w *baseIndexWorker) getIndexRecord(idxInfo *model.IndexInfo, handle kv.Han idxVal[j] = idxColumnVal continue } - idxColumnVal, err = tables.GetColDefaultValue(w.sessCtx, col, w.defaultVals) + idxColumnVal, err = util2.GetColDefaultValue(w.sessCtx, col, w.defaultVals) if err != nil { return nil, errors.Trace(err) } @@ -1214,7 +1217,7 @@ func (w *addIndexWorker) checkHandleExists(key kv.Key, value []byte, handle kv.H if hasBeenBackFilled { return nil } - colInfos := tables.BuildRowcodecColInfoForIndexColumns(idxInfo, tblInfo) + colInfos := util2.BuildRowcodecColInfoForIndexColumns(idxInfo, tblInfo) values, err := tablecodec.DecodeIndexKV(key, value, idxColLen, tablecodec.HandleNotNeeded, colInfos) if err != nil { return err diff --git a/ddl/reorg.go b/ddl/reorg.go index ff51196d884bb..0ce610ac28f98 100644 --- a/ddl/reorg.go +++ b/ddl/reorg.go @@ -24,6 +24,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tipb/go-tipb" + "go.uber.org/zap" + "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" @@ -36,6 +39,7 @@ import ( "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" @@ -44,8 +48,6 @@ import ( "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/sqlexec" - "github.com/pingcap/tipb/go-tipb" - "go.uber.org/zap" ) // reorgCtx is for reorganization. @@ -487,7 +489,7 @@ func (dc *ddlCtx) GetTableMaxHandle(startTS uint64, tbl table.PhysicalTable) (ma } } case tblInfo.IsCommonHandle: - pkIdx = tables.FindPrimaryIndex(tblInfo) + pkIdx = util.FindPrimaryIndex(tblInfo) cols := tblInfo.Cols() for _, idxCol := range pkIdx.Columns { handleCols = append(handleCols, cols[idxCol.Offset]) diff --git a/executor/batch_checker.go b/executor/batch_checker.go index 7b214d7e54196..f1f7ae75f7edd 100644 --- a/executor/batch_checker.go +++ b/executor/batch_checker.go @@ -19,6 +19,7 @@ import ( "strings" "github.com/pingcap/errors" + "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" @@ -28,6 +29,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" @@ -77,7 +79,7 @@ func getKeysNeedCheck(ctx context.Context, sctx sessionctx.Context, t table.Tabl } } } else if t.Meta().IsCommonHandle { - pkIdxInfo = tables.FindPrimaryIndex(t.Meta()) + pkIdxInfo = util.FindPrimaryIndex(t.Meta()) for _, idxCol := range pkIdxInfo.Columns { tblHandleCols = append(tblHandleCols, t.Cols()[idxCol.Offset]) } @@ -243,7 +245,7 @@ func getOldRow(ctx context.Context, sctx sessionctx.Context, txn kv.Transaction, } cols := t.WritableCols() - oldRow, oldRowMap, err := tables.DecodeRawRowData(sctx, t.Meta(), handle, cols, oldValue) + oldRow, oldRowMap, err := util.DecodeRawRowData(sctx, t.Meta(), handle, cols, oldValue) if err != nil { return nil, err } diff --git a/executor/builder.go b/executor/builder.go index db5c9dfb9302d..82be642d742bb 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -30,6 +30,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/diagnosticspb" + "github.com/pingcap/tipb/go-tipb" + "go.uber.org/zap" + "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor/aggfuncs" @@ -49,6 +52,7 @@ import ( "github.com/pingcap/tidb/store/helper" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + util2 "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/table/temptable" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" @@ -62,8 +66,6 @@ import ( "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/rowcodec" "github.com/pingcap/tidb/util/timeutil" - "github.com/pingcap/tipb/go-tipb" - "go.uber.org/zap" ) var ( @@ -436,7 +438,7 @@ func buildIdxColsConcatHandleCols(tblInfo *model.TableInfo, indexInfo *model.Ind handleLen := 1 var pkCols []*model.IndexColumn if tblInfo.IsCommonHandle { - pkIdx := tables.FindPrimaryIndex(tblInfo) + pkIdx := util2.FindPrimaryIndex(tblInfo) pkCols = pkIdx.Columns handleLen = len(pkIdx.Columns) } @@ -504,7 +506,7 @@ func buildHandleColsForExec(sctx *stmtctx.StatementContext, tblInfo *model.Table ID: c.ID, } } - pkIdx := tables.FindPrimaryIndex(tblInfo) + pkIdx := util2.FindPrimaryIndex(tblInfo) for i, c := range pkIdx.Columns { tblCols[c.Offset].Index = len(idxInfo.Columns) + i } @@ -1867,7 +1869,7 @@ func (b *executorBuilder) buildUnionAll(v *plannercore.PhysicalUnionAll) Executo func buildHandleColsForSplit(sc *stmtctx.StatementContext, tbInfo *model.TableInfo) plannercore.HandleCols { if tbInfo.IsCommonHandle { - primaryIdx := tables.FindPrimaryIndex(tbInfo) + primaryIdx := util2.FindPrimaryIndex(tbInfo) tableCols := make([]*expression.Column, len(tbInfo.Columns)) for i, col := range tbInfo.Columns { tableCols[i] = &expression.Column{ @@ -3411,7 +3413,7 @@ func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plannercore.PhysicalIn e.handleIdx = append(e.handleIdx, handleCol.Index) } e.handleCols = v.CommonHandleCols - e.primaryKeyIndex = tables.FindPrimaryIndex(tbl.Meta()) + e.primaryKeyIndex = util2.FindPrimaryIndex(tbl.Meta()) } return e, nil } @@ -3553,7 +3555,7 @@ func (b *executorBuilder) buildIndexMergeReader(v *plannercore.PhysicalIndexMerg ret.ranges = append(ret.ranges, v.PartialPlans[i][0].(*plannercore.PhysicalTableScan).Ranges) if ret.table.Meta().IsCommonHandle { tblInfo := ret.table.Meta() - sctx.IndexNames = append(sctx.IndexNames, tblInfo.Name.O+":"+tables.FindPrimaryIndex(tblInfo).Name.O) + sctx.IndexNames = append(sctx.IndexNames, tblInfo.Name.O+":"+util2.FindPrimaryIndex(tblInfo).Name.O) } } } diff --git a/executor/mem_reader.go b/executor/mem_reader.go index 7cb4b15197d07..4592caec35c65 100644 --- a/executor/mem_reader.go +++ b/executor/mem_reader.go @@ -16,6 +16,7 @@ package executor import ( "github.com/pingcap/errors" + "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" @@ -26,6 +27,7 @@ import ( transaction "github.com/pingcap/tidb/store/driver/txn" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" @@ -82,7 +84,7 @@ func (m *memIndexReader) getMemRows() ([][]types.Datum, error) { } } case m.table.IsCommonHandle: - pkIdx := tables.FindPrimaryIndex(m.table) + pkIdx := util.FindPrimaryIndex(m.table) for _, pkCol := range pkIdx.Columns { colInfo := m.table.Columns[pkCol.Offset] tps = append(tps, &colInfo.FieldType) @@ -122,7 +124,7 @@ func (m *memIndexReader) decodeIndexKeyValue(key, value []byte, tps []*types.Fie if mysql.HasUnsignedFlag(tps[len(tps)-1].Flag) { hdStatus = tablecodec.HandleIsUnsigned } - colInfos := tables.BuildRowcodecColInfoForIndexColumns(m.index, m.table) + colInfos := util.BuildRowcodecColInfoForIndexColumns(m.index, m.table) colInfos = tables.TryAppendCommonHandleRowcodecColInfos(colInfos, m.table) values, err := tablecodec.DecodeIndexKV(key, value, len(m.index.Columns), hdStatus, colInfos) if err != nil { diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index 7f47741af6ced..ccaafc603841f 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -22,6 +22,9 @@ import ( "strings" "github.com/pingcap/errors" + "github.com/tikv/client-go/v2/oracle" + "go.uber.org/zap" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/expression" @@ -36,7 +39,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" - "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util/chunk" @@ -46,8 +49,6 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/texttree" - "github.com/tikv/client-go/v2/oracle" - "go.uber.org/zap" ) var planCacheCounter = metrics.PlanCacheCounter.WithLabelValues("prepare") @@ -721,7 +722,7 @@ func (e *Execute) rebuildRange(p Plan) error { func (e *Execute) buildRangeForTableScan(sctx sessionctx.Context, ts *PhysicalTableScan) (err error) { if ts.Table.IsCommonHandle { - pk := tables.FindPrimaryIndex(ts.Table) + pk := util.FindPrimaryIndex(ts.Table) pkCols := make([]*expression.Column, 0, len(pk.Columns)) pkColsLen := make([]int, 0, len(pk.Columns)) for _, colInfo := range pk.Columns { @@ -1469,7 +1470,7 @@ func IsPointGetWithPKOrUniqueKeyByAutoCommit(ctx sessionctx.Context, p Plan) (bo } pkLength := 1 if tableScan.Table.IsCommonHandle { - pkIdx := tables.FindPrimaryIndex(tableScan.Table) + pkIdx := util.FindPrimaryIndex(tableScan.Table) pkLength = len(pkIdx.Columns) } return len(tableScan.Ranges[0].LowVal) == pkLength, nil diff --git a/planner/core/expression_rewriter.go b/planner/core/expression_rewriter.go index 0581301d1a791..447a5d50facaf 100644 --- a/planner/core/expression_rewriter.go +++ b/planner/core/expression_rewriter.go @@ -23,6 +23,7 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" @@ -36,6 +37,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/telemetry" "github.com/pingcap/tidb/types" @@ -2029,7 +2031,7 @@ func decodeRecordKey(key []byte, tableID int64, tbl table.Table, loc *time.Locat } if tbl != nil { tblInfo := tbl.Meta() - idxInfo := tables.FindPrimaryIndex(tblInfo) + idxInfo := util.FindPrimaryIndex(tblInfo) if idxInfo == nil { return "", errors.Trace(errors.Errorf("primary key not found when decoding record key: %X", key)) } @@ -2103,7 +2105,7 @@ func decodeIndexKey(key []byte, tableID int64, tbl table.Table, loc *time.Locati if targetIndex == nil { return "", errors.Trace(errors.Errorf("index not found when decoding index key: %X", key)) } - colInfos := tables.BuildRowcodecColInfoForIndexColumns(targetIndex, tblInfo) + colInfos := util.BuildRowcodecColInfoForIndexColumns(targetIndex, tblInfo) tps := tables.BuildFieldTypesForIndexColumns(targetIndex, tblInfo) values, err := tablecodec.DecodeIndexKV(key, []byte{0}, len(colInfos), tablecodec.HandleNotNeeded, colInfos) if err != nil { diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index dd1ab40650ac6..820ffa433fd84 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -27,6 +27,7 @@ import ( "github.com/cznic/mathutil" "github.com/pingcap/errors" + "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/expression" @@ -49,6 +50,7 @@ import ( "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + util3 "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/table/temptable" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" @@ -4098,7 +4100,7 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as // column is not the primary key of "ds". if handleCols == nil { if tableInfo.IsCommonHandle { - primaryIdx := tables.FindPrimaryIndex(tableInfo) + primaryIdx := util3.FindPrimaryIndex(tableInfo) handleCols = NewCommonHandleCols(b.ctx.GetSessionVars().StmtCtx, tableInfo, primaryIdx, ds.TblCols) } else { extraCol := ds.newExtraHandleSchemaCol() @@ -4126,7 +4128,7 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as // Init commonHandleCols and commonHandleLens for data source. if tableInfo.IsCommonHandle { - ds.commonHandleCols, ds.commonHandleLens = expression.IndexInfo2Cols(ds.Columns, ds.schema.Columns, tables.FindPrimaryIndex(tableInfo)) + ds.commonHandleCols, ds.commonHandleLens = expression.IndexInfo2Cols(ds.Columns, ds.schema.Columns, util3.FindPrimaryIndex(tableInfo)) } // Init FullIdxCols, FullIdxColLens for accessPaths. for _, path := range ds.possibleAccessPaths { diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go index 01f5ada33df88..35f5fde84b0a6 100644 --- a/planner/core/physical_plans.go +++ b/planner/core/physical_plans.go @@ -20,6 +20,8 @@ import ( "unsafe" "github.com/pingcap/errors" + "github.com/pingcap/tipb/go-tipb" + "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" "github.com/pingcap/tidb/kv" @@ -31,11 +33,10 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" - "github.com/pingcap/tidb/table/tables" + util2 "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/stringutil" - "github.com/pingcap/tipb/go-tipb" ) var ( @@ -531,7 +532,7 @@ func (ts *PhysicalTableScan) IsPartition() (bool, int64) { func (ts *PhysicalTableScan) ResolveCorrelatedColumns() ([]*ranger.Range, error) { access := ts.AccessCondition if ts.Table.IsCommonHandle { - pkIdx := tables.FindPrimaryIndex(ts.Table) + pkIdx := util2.FindPrimaryIndex(ts.Table) idxCols, idxColLens := expression.IndexInfo2PrefixCols(ts.Columns, ts.Schema().Columns, pkIdx) for _, cond := range access { newCond, err := expression.SubstituteCorCol2Constant(cond) diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index eb437309b8f45..439e388a5d2a8 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -24,6 +24,9 @@ import ( "time" "github.com/pingcap/errors" + "github.com/tikv/client-go/v2/oracle" + "github.com/tikv/client-go/v2/tikv" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" @@ -45,6 +48,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" + util3 "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/table/temptable" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" @@ -57,12 +61,11 @@ import ( "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/sem" "github.com/pingcap/tidb/util/set" - "github.com/tikv/client-go/v2/oracle" - "github.com/tikv/client-go/v2/tikv" "github.com/cznic/mathutil" - "github.com/pingcap/tidb/table/tables" "go.uber.org/zap" + + "github.com/pingcap/tidb/table/tables" ) type visitInfo struct { @@ -1521,7 +1524,7 @@ func tryGetCommonHandleCols(t table.Table, allColSchema *expression.Schema) ([]* if !tblInfo.IsCommonHandle { return nil, nil, false } - pk := tables.FindPrimaryIndex(tblInfo) + pk := util3.FindPrimaryIndex(tblInfo) commonHandleCols, _ := expression.IndexInfo2Cols(tblInfo.Columns, allColSchema.Columns, pk) commonHandelColInfos := tables.TryGetCommonPkColumns(t) return commonHandelColInfos, commonHandleCols, true @@ -1735,7 +1738,7 @@ func BuildHandleColsForAnalyze(ctx sessionctx.Context, tblInfo *model.TableInfo, Index: index, }} case tblInfo.IsCommonHandle: - pkIdx := tables.FindPrimaryIndex(tblInfo) + pkIdx := util3.FindPrimaryIndex(tblInfo) pkColLen := len(pkIdx.Columns) columns := make([]*expression.Column, pkColLen) for i := 0; i < pkColLen; i++ { @@ -3712,7 +3715,7 @@ func buildHandleColumnInfos(tblInfo *model.TableInfo) []*model.ColumnInfo { return []*model.ColumnInfo{col} } case tblInfo.IsCommonHandle: - pkIdx := tables.FindPrimaryIndex(tblInfo) + pkIdx := util3.FindPrimaryIndex(tblInfo) pkCols := make([]*model.ColumnInfo, 0, len(pkIdx.Columns)) cols := tblInfo.Columns for _, idxCol := range pkIdx.Columns { diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index c9beca4d054ec..350f3e7aad000 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -21,6 +21,10 @@ import ( "strings" "github.com/pingcap/errors" + "github.com/pingcap/tipb/go-tipb" + tikvstore "github.com/tikv/client-go/v2/kv" + "go.uber.org/zap" + "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" @@ -37,6 +41,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" tidbutil "github.com/pingcap/tidb/util" @@ -44,9 +49,6 @@ import ( "github.com/pingcap/tidb/util/math" "github.com/pingcap/tidb/util/plancodec" "github.com/pingcap/tidb/util/stringutil" - "github.com/pingcap/tipb/go-tipb" - tikvstore "github.com/tikv/client-go/v2/kv" - "go.uber.org/zap" ) // PointGetPlan is a fast plan for simple point get. @@ -1504,7 +1506,7 @@ func buildHandleCols(ctx sessionctx.Context, tbl *model.TableInfo, schema *expre } if tbl.IsCommonHandle { - pkIdx := tables.FindPrimaryIndex(tbl) + pkIdx := util.FindPrimaryIndex(tbl) return NewCommonHandleCols(ctx.GetSessionVars().StmtCtx, tbl, pkIdx, schema.Columns) } diff --git a/server/http_handler.go b/server/http_handler.go index 2e3afa05e123a..b940a9c0f8712 100644 --- a/server/http_handler.go +++ b/server/http_handler.go @@ -36,6 +36,9 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" + "github.com/tikv/client-go/v2/tikv" + "go.uber.org/zap" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" @@ -56,6 +59,7 @@ import ( "github.com/pingcap/tidb/store/helper" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + util2 "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" @@ -66,8 +70,6 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/pdapi" "github.com/pingcap/tidb/util/sqlexec" - "github.com/tikv/client-go/v2/tikv" - "go.uber.org/zap" ) const ( @@ -174,7 +176,7 @@ func (t *tikvHandlerTool) getHandle(tb table.PhysicalTable, params map[string]st handle = kv.IntHandle(intHandle) } else { tblInfo := tb.Meta() - pkIdx := tables.FindPrimaryIndex(tblInfo) + pkIdx := util2.FindPrimaryIndex(tblInfo) if pkIdx == nil || !tblInfo.IsCommonHandle { return nil, errors.BadRequestf("Clustered common handle not found.") } diff --git a/store/driver/txn/error.go b/store/driver/txn/error.go index 6e98708b046d2..7b9e72c69bd3a 100644 --- a/store/driver/txn/error.go +++ b/store/driver/txn/error.go @@ -24,16 +24,17 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/kvrpcpb" + tikverr "github.com/tikv/client-go/v2/error" + "go.uber.org/zap" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" derr "github.com/pingcap/tidb/store/driver/error" - "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/logutil" - tikverr "github.com/tikv/client-go/v2/error" - "go.uber.org/zap" ) func genKeyExistsError(name string, value string, err error) error { @@ -64,7 +65,7 @@ func extractKeyExistsErrFromHandle(key kv.Key, value []byte, tblInfo *model.Tabl return genKeyExistsError(name, handle.String(), errors.New("missing value")) } - idxInfo := tables.FindPrimaryIndex(tblInfo) + idxInfo := util.FindPrimaryIndex(tblInfo) if idxInfo == nil { return genKeyExistsError(name, handle.String(), errors.New("cannot find index info")) } @@ -119,7 +120,7 @@ func extractKeyExistsErrFromIndex(key kv.Key, value []byte, tblInfo *model.Table return genKeyExistsError(name, key.String(), errors.New("missing value")) } - colInfo := tables.BuildRowcodecColInfoForIndexColumns(idxInfo, tblInfo) + colInfo := util.BuildRowcodecColInfoForIndexColumns(idxInfo, tblInfo) values, err := tablecodec.DecodeIndexKV(key, value, len(idxInfo.Columns), tablecodec.HandleNotNeeded, colInfo) if err != nil { return genKeyExistsError(name, key.String(), err) diff --git a/table/tables/context/add_context.go b/table/tables/context/add_context.go index bbbcb431468b5..8870d78251853 100644 --- a/table/tables/context/add_context.go +++ b/table/tables/context/add_context.go @@ -14,7 +14,10 @@ package context -import "github.com/pingcap/tidb/types" +import ( + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" +) // CommonAddRecordCtx is used in `AddRecord` to avoid memory malloc for some temp slices. // This is useful in lightning parse Row data to key-values pairs. This can gain upto 5% performance @@ -31,3 +34,19 @@ func NewCommonAddRecordCtx(size int) *CommonAddRecordCtx { Row: make([]types.Datum, 0, size), } } + +// commonAddRecordKey is used as key in `sessionctx.Context.Value(key)` +type commonAddRecordKey struct{} + +// String implement `stringer.String` for CommonAddRecordKey +func (c commonAddRecordKey) String() string { + return "_common_add_record_context_key" +} + +// AddRecordCtxKey is key in `sessionctx.Context` for CommonAddRecordCtx +var AddRecordCtxKey = commonAddRecordKey{} + +// SetAddRecordCtx set a CommonAddRecordCtx to session context +func SetAddRecordCtx(ctx sessionctx.Context, r *CommonAddRecordCtx) { + ctx.SetValue(AddRecordCtxKey, r) +} diff --git a/table/tables/index.go b/table/tables/index.go index f63683d7667d6..e52518423e219 100644 --- a/table/tables/index.go +++ b/table/tables/index.go @@ -22,14 +22,15 @@ import ( "github.com/opentracing/opentracing-go" "github.com/pingcap/errors" + lkv "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" "github.com/pingcap/tidb/ddl/sst" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/rowcodec" @@ -290,7 +291,7 @@ func (c *index) Seek(sc *stmtctx.StatementContext, r kv.Retriever, indexedValues if it.Valid() && it.Key().Cmp(key) == 0 { hit = true } - colInfos := BuildRowcodecColInfoForIndexColumns(c.idxInfo, c.tblInfo) + colInfos := util.BuildRowcodecColInfoForIndexColumns(c.idxInfo, c.tblInfo) tps := BuildFieldTypesForIndexColumns(c.idxInfo, c.tblInfo) return &indexIter{it: it, idx: c, prefix: c.prefix, colInfos: colInfos, tps: tps}, hit, nil } @@ -302,7 +303,7 @@ func (c *index) SeekFirst(r kv.Retriever) (iter table.IndexIterator, err error) if err != nil { return nil, err } - colInfos := BuildRowcodecColInfoForIndexColumns(c.idxInfo, c.tblInfo) + colInfos := util.BuildRowcodecColInfoForIndexColumns(c.idxInfo, c.tblInfo) tps := BuildFieldTypesForIndexColumns(c.idxInfo, c.tblInfo) return &indexIter{it: it, idx: c, prefix: c.prefix, colInfos: colInfos, tps: tps}, nil } @@ -371,21 +372,6 @@ func IsIndexWritable(idx table.Index) bool { return false } -// BuildRowcodecColInfoForIndexColumns builds []rowcodec.ColInfo for the given index. -// The result can be used for decoding index key-values. -func BuildRowcodecColInfoForIndexColumns(idxInfo *model.IndexInfo, tblInfo *model.TableInfo) []rowcodec.ColInfo { - colInfo := make([]rowcodec.ColInfo, 0, len(idxInfo.Columns)) - for _, idxCol := range idxInfo.Columns { - col := tblInfo.Columns[idxCol.Offset] - colInfo = append(colInfo, rowcodec.ColInfo{ - ID: col.ID, - IsPKHandle: tblInfo.PKIsHandle && mysql.HasPriKeyFlag(col.Flag), - Ft: rowcodec.FieldTypeFromModelColumn(col), - }) - } - return colInfo -} - // BuildFieldTypesForIndexColumns builds the index columns field types. func BuildFieldTypesForIndexColumns(idxInfo *model.IndexInfo, tblInfo *model.TableInfo) []*types.FieldType { tps := make([]*types.FieldType, 0, len(idxInfo.Columns)) @@ -401,7 +387,7 @@ func TryAppendCommonHandleRowcodecColInfos(colInfo []rowcodec.ColInfo, tblInfo * if !tblInfo.IsCommonHandle || tblInfo.CommonHandleVersion == 0 { return colInfo } - if pkIdx := FindPrimaryIndex(tblInfo); pkIdx != nil { + if pkIdx := util.FindPrimaryIndex(tblInfo); pkIdx != nil { for _, idxCol := range pkIdx.Columns { col := tblInfo.Columns[idxCol.Offset] colInfo = append(colInfo, rowcodec.ColInfo{ diff --git a/table/tables/index_test.go b/table/tables/index_test.go index 218b3025f33f1..cfbadbe52465f 100644 --- a/table/tables/index_test.go +++ b/table/tables/index_test.go @@ -18,6 +18,8 @@ import ( "context" "testing" + "github.com/stretchr/testify/require" + "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser" @@ -25,6 +27,7 @@ import ( "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/types" @@ -32,7 +35,6 @@ import ( "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/rowcodec" - "github.com/stretchr/testify/require" ) func TestSingleColumnCommonHandle(t *testing.T) { @@ -70,7 +72,7 @@ func TestSingleColumnCommonHandle(t *testing.T) { val, err := txn.Get(context.Background(), key) require.NoError(t, err) colVals, err := tablecodec.DecodeIndexKV(key, val, 1, tablecodec.HandleDefault, - tables.BuildRowcodecColInfoForIndexColumns(idx.Meta(), tblInfo)) + util.BuildRowcodecColInfoForIndexColumns(idx.Meta(), tblInfo)) require.NoError(t, err) require.Len(t, colVals, 2) _, d, err := codec.DecodeOne(colVals[0]) @@ -87,7 +89,7 @@ func TestSingleColumnCommonHandle(t *testing.T) { unTouchedVal := append([]byte{1}, val[1:]...) unTouchedVal = append(unTouchedVal, kv.UnCommitIndexKVFlag) _, err = tablecodec.DecodeIndexKV(key, unTouchedVal, 1, tablecodec.HandleDefault, - tables.BuildRowcodecColInfoForIndexColumns(idx.Meta(), tblInfo)) + util.BuildRowcodecColInfoForIndexColumns(idx.Meta(), tblInfo)) require.NoError(t, err) } } @@ -138,7 +140,7 @@ func TestMultiColumnCommonHandle(t *testing.T) { require.NoError(t, err) val, err := txn.Get(context.Background(), key) require.NoError(t, err) - colInfo := tables.BuildRowcodecColInfoForIndexColumns(idx.Meta(), tblInfo) + colInfo := util.BuildRowcodecColInfoForIndexColumns(idx.Meta(), tblInfo) colInfo = append(colInfo, rowcodec.ColInfo{ ID: a.ID, IsPKHandle: false, diff --git a/table/tables/tables.go b/table/tables/tables.go index 8cdaee890970c..0497485b70342 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -44,6 +44,7 @@ import ( "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" context2 "github.com/pingcap/tidb/table/tables/context" + util2 "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" @@ -512,37 +513,9 @@ func adjustRowValuesBuf(writeBufs *variable.WriteStmtBufs, rowLen int) { writeBufs.AddRowValues = writeBufs.AddRowValues[:adjustLen] } -// FindPrimaryIndex uses to find primary index in tableInfo. -func FindPrimaryIndex(tblInfo *model.TableInfo) *model.IndexInfo { - var pkIdx *model.IndexInfo - for _, idx := range tblInfo.Indices { - if idx.Primary { - pkIdx = idx - break - } - } - return pkIdx -} - -// commonAddRecordKey is used as key in `sessionctx.Context.Value(key)` -type commonAddRecordKey struct{} - -// String implement `stringer.String` for CommonAddRecordKey -func (c commonAddRecordKey) String() string { - return "_common_add_record_context_key" -} - -// AddRecordCtxKey is key in `sessionctx.Context` for CommonAddRecordCtx -var AddRecordCtxKey = commonAddRecordKey{} - -// SetAddRecordCtx set a CommonAddRecordCtx to session context -func SetAddRecordCtx(ctx sessionctx.Context, r *context2.CommonAddRecordCtx) { - ctx.SetValue(AddRecordCtxKey, r) -} - // ClearAddRecordCtx remove `CommonAddRecordCtx` from session context func ClearAddRecordCtx(ctx sessionctx.Context) { - ctx.ClearValue(AddRecordCtxKey) + ctx.ClearValue(context2.AddRecordCtxKey) } // TryGetCommonPkColumnIds get the IDs of primary key column if the table has common handle. @@ -550,7 +523,7 @@ func TryGetCommonPkColumnIds(tbl *model.TableInfo) []int64 { if !tbl.IsCommonHandle { return nil } - pkIdx := FindPrimaryIndex(tbl) + pkIdx := util2.FindPrimaryIndex(tbl) pkColIds := make([]int64, 0, len(pkIdx.Columns)) for _, idxCol := range pkIdx.Columns { pkColIds = append(pkColIds, tbl.Columns[idxCol.Offset].ID) @@ -578,7 +551,7 @@ func TryGetCommonPkColumns(tbl table.Table) []*table.Column { if !tbl.Meta().IsCommonHandle { return nil } - pkIdx := FindPrimaryIndex(tbl.Meta()) + pkIdx := util2.FindPrimaryIndex(tbl.Meta()) cols := tbl.Cols() pkCols := make([]*table.Column, 0, len(pkIdx.Columns)) for _, idxCol := range pkIdx.Columns { @@ -664,7 +637,7 @@ func (t *TableCommon) AddRecord(sctx sessionctx.Context, r []types.Datum, opts . recordID = kv.IntHandle(r[tblInfo.GetPkColInfo().Offset].GetInt64()) hasRecordID = true } else if tblInfo.IsCommonHandle { - pkIdx := FindPrimaryIndex(tblInfo) + pkIdx := util2.FindPrimaryIndex(tblInfo) pkDts := make([]types.Datum, 0, len(pkIdx.Columns)) for _, idxCol := range pkIdx.Columns { pkDts = append(pkDts, r[idxCol.Offset]) @@ -703,7 +676,7 @@ func (t *TableCommon) AddRecord(sctx sessionctx.Context, r []types.Datum, opts . var colIDs, binlogColIDs []int64 var row, binlogRow []types.Datum - if recordCtx, ok := sctx.Value(AddRecordCtxKey).(*context2.CommonAddRecordCtx); ok { + if recordCtx, ok := sctx.Value(context2.AddRecordCtxKey).(*context2.CommonAddRecordCtx); ok { colIDs = recordCtx.ColIDs[:0] row = recordCtx.Row[:0] } else { @@ -915,121 +888,13 @@ func RowWithCols(t table.Table, ctx sessionctx.Context, h kv.Handle, cols []*tab if err != nil { return nil, err } - v, _, err := DecodeRawRowData(ctx, t.Meta(), h, cols, value) + v, _, err := util2.DecodeRawRowData(ctx, t.Meta(), h, cols, value) if err != nil { return nil, err } return v, nil } -func containFullColInHandle(meta *model.TableInfo, col *table.Column) (containFullCol bool, idxInHandle int) { - pkIdx := FindPrimaryIndex(meta) - for i, idxCol := range pkIdx.Columns { - if meta.Columns[idxCol.Offset].ID == col.ID { - idxInHandle = i - containFullCol = idxCol.Length == types.UnspecifiedLength - return - } - } - return -} - -// DecodeRawRowData decodes raw Row data into a datum slice and a (columnID:columnValue) map. -func DecodeRawRowData(ctx sessionctx.Context, meta *model.TableInfo, h kv.Handle, cols []*table.Column, - value []byte) ([]types.Datum, map[int64]types.Datum, error) { - v := make([]types.Datum, len(cols)) - colTps := make(map[int64]*types.FieldType, len(cols)) - prefixCols := make(map[int64]struct{}) - for i, col := range cols { - if col == nil { - continue - } - if col.IsPKHandleColumn(meta) { - if mysql.HasUnsignedFlag(col.Flag) { - v[i].SetUint64(uint64(h.IntValue())) - } else { - v[i].SetInt64(h.IntValue()) - } - continue - } - if col.IsCommonHandleColumn(meta) && !types.NeedRestoredData(&col.FieldType) { - if containFullCol, idxInHandle := containFullColInHandle(meta, col); containFullCol { - dtBytes := h.EncodedCol(idxInHandle) - _, dt, err := codec.DecodeOne(dtBytes) - if err != nil { - return nil, nil, err - } - dt, err = tablecodec.Unflatten(dt, &col.FieldType, ctx.GetSessionVars().Location()) - if err != nil { - return nil, nil, err - } - v[i] = dt - continue - } - prefixCols[col.ID] = struct{}{} - } - colTps[col.ID] = &col.FieldType - } - rowMap, err := tablecodec.DecodeRowToDatumMap(value, colTps, ctx.GetSessionVars().Location()) - if err != nil { - return nil, rowMap, err - } - defaultVals := make([]types.Datum, len(cols)) - for i, col := range cols { - if col == nil { - continue - } - if col.IsPKHandleColumn(meta) || (col.IsCommonHandleColumn(meta) && !types.NeedRestoredData(&col.FieldType)) { - if _, isPrefix := prefixCols[col.ID]; !isPrefix { - continue - } - } - ri, ok := rowMap[col.ID] - if ok { - v[i] = ri - continue - } - if col.IsGenerated() && !col.GeneratedStored { - continue - } - if col.ChangeStateInfo != nil { - v[i], _, err = GetChangingColVal(ctx, cols, col, rowMap, defaultVals) - } else { - v[i], err = GetColDefaultValue(ctx, col, defaultVals) - } - if err != nil { - return nil, rowMap, err - } - } - return v, rowMap, nil -} - -// GetChangingColVal gets the changing column value when executing "modify/change column" statement. -// For statement like update-where, it will fetch the old Row out and insert it into kv again. -// Since update statement can see the writable columns, it is responsible for the casting relative column / get the fault value here. -// old Row : a-b-[nil] -// new Row : a-b-[a'/default] -// Thus the writable new Row is corresponding to Write-Only constraints. -func GetChangingColVal(ctx sessionctx.Context, cols []*table.Column, col *table.Column, rowMap map[int64]types.Datum, defaultVals []types.Datum) (_ types.Datum, isDefaultVal bool, err error) { - relativeCol := cols[col.ChangeStateInfo.DependencyColumnOffset] - idxColumnVal, ok := rowMap[relativeCol.ID] - if ok { - idxColumnVal, err = table.CastValue(ctx, idxColumnVal, col.ColumnInfo, false, false) - // TODO: Consider sql_mode and the error msg(encounter this error check whether to rollback). - if err != nil { - return idxColumnVal, false, errors.Trace(err) - } - return idxColumnVal, false, nil - } - - idxColumnVal, err = GetColDefaultValue(ctx, col, defaultVals) - if err != nil { - return idxColumnVal, false, errors.Trace(err) - } - - return idxColumnVal, true, nil -} - // RemoveRecord implements table.Table RemoveRecord interface. func (t *TableCommon) RemoveRecord(ctx sessionctx.Context, h kv.Handle, r []types.Datum) error { err := t.removeRowData(ctx, h) @@ -1303,7 +1168,7 @@ func IterRecords(t table.Table, ctx sessionctx.Context, cols []*table.Column, data[col.Offset] = rowMap[col.ID] continue } - data[col.Offset], err = GetColDefaultValue(ctx, col, defaultVals) + data[col.Offset], err = util2.GetColDefaultValue(ctx, col, defaultVals) if err != nil { return err } @@ -1340,29 +1205,6 @@ func tryDecodeColumnFromCommonHandle(col *table.Column, handle kv.Handle, pkIds return types.Datum{}, nil } -// GetColDefaultValue gets a column default value. -// The defaultVals is used to avoid calculating the default value multiple times. -func GetColDefaultValue(ctx sessionctx.Context, col *table.Column, defaultVals []types.Datum) ( - colVal types.Datum, err error) { - if col.GetOriginDefaultValue() == nil && mysql.HasNotNullFlag(col.Flag) { - return colVal, errors.New("Miss column") - } - if col.State != model.StatePublic { - return colVal, nil - } - if defaultVals[col.Offset].IsNull() { - colVal, err = table.GetColOriginDefaultValue(ctx, col.ToInfo()) - if err != nil { - return colVal, err - } - defaultVals[col.Offset] = colVal - } else { - colVal = defaultVals[col.Offset] - } - - return colVal, nil -} - // AllocHandle allocate a new handle. // A statement could reserve some ID in the statement context, try those ones first. func AllocHandle(ctx context.Context, sctx sessionctx.Context, t table.Table) (kv.Handle, error) { @@ -1483,7 +1325,7 @@ func CanSkip(info *model.TableInfo, col *table.Column, value *types.Datum) bool return true } if col.IsCommonHandleColumn(info) { - pkIdx := FindPrimaryIndex(info) + pkIdx := util2.FindPrimaryIndex(info) for _, idxCol := range pkIdx.Columns { if info.Columns[idxCol.Offset].ID != col.ID { continue @@ -1749,7 +1591,7 @@ func TryGetHandleRestoredDataWrapper(t table.Table, row []types.Datum, rowMap ma return nil } rsData := make([]types.Datum, 0, 4) - pkIdx := FindPrimaryIndex(t.Meta()) + pkIdx := util2.FindPrimaryIndex(t.Meta()) for _, pkIdxCol := range pkIdx.Columns { pkCol := t.Meta().Columns[pkIdxCol.Offset] if !types.NeedRestoredData(&pkCol.FieldType) { diff --git a/table/tables/tables_test.go b/table/tables/tables_test.go index 17ad836f2a58a..83bfaa20287e6 100644 --- a/table/tables/tables_test.go +++ b/table/tables/tables_test.go @@ -636,7 +636,7 @@ func TestAddRecordWithCtx(t *testing.T) { _, err = tk.Session().Txn(true) require.NoError(t, err) recordCtx := context2.NewCommonAddRecordCtx(len(tb.Cols())) - tables.SetAddRecordCtx(tk.Session(), recordCtx) + context2.SetAddRecordCtx(tk.Session(), recordCtx) defer tables.ClearAddRecordCtx(tk.Session()) records := [][]types.Datum{types.MakeDatums(uint64(1), "abc"), types.MakeDatums(uint64(2), "abcd")} diff --git a/table/tables/util/util.go b/table/tables/util/util.go new file mode 100644 index 0000000000000..266fa547f13df --- /dev/null +++ b/table/tables/util/util.go @@ -0,0 +1,173 @@ +package util + +import ( + "github.com/pingcap/errors" + + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/rowcodec" +) + +// BuildRowcodecColInfoForIndexColumns builds []rowcodec.ColInfo for the given index. +// The result can be used for decoding index key-values. +func BuildRowcodecColInfoForIndexColumns(idxInfo *model.IndexInfo, tblInfo *model.TableInfo) []rowcodec.ColInfo { + colInfo := make([]rowcodec.ColInfo, 0, len(idxInfo.Columns)) + for _, idxCol := range idxInfo.Columns { + col := tblInfo.Columns[idxCol.Offset] + colInfo = append(colInfo, rowcodec.ColInfo{ + ID: col.ID, + IsPKHandle: tblInfo.PKIsHandle && mysql.HasPriKeyFlag(col.Flag), + Ft: rowcodec.FieldTypeFromModelColumn(col), + }) + } + return colInfo +} + +func containFullColInHandle(meta *model.TableInfo, col *table.Column) (containFullCol bool, idxInHandle int) { + pkIdx := FindPrimaryIndex(meta) + for i, idxCol := range pkIdx.Columns { + if meta.Columns[idxCol.Offset].ID == col.ID { + idxInHandle = i + containFullCol = idxCol.Length == types.UnspecifiedLength + return + } + } + return +} + +// DecodeRawRowData decodes raw Row data into a datum slice and a (columnID:columnValue) map. +func DecodeRawRowData(ctx sessionctx.Context, meta *model.TableInfo, h kv.Handle, cols []*table.Column, + value []byte) ([]types.Datum, map[int64]types.Datum, error) { + v := make([]types.Datum, len(cols)) + colTps := make(map[int64]*types.FieldType, len(cols)) + prefixCols := make(map[int64]struct{}) + for i, col := range cols { + if col == nil { + continue + } + if col.IsPKHandleColumn(meta) { + if mysql.HasUnsignedFlag(col.Flag) { + v[i].SetUint64(uint64(h.IntValue())) + } else { + v[i].SetInt64(h.IntValue()) + } + continue + } + if col.IsCommonHandleColumn(meta) && !types.NeedRestoredData(&col.FieldType) { + if containFullCol, idxInHandle := containFullColInHandle(meta, col); containFullCol { + dtBytes := h.EncodedCol(idxInHandle) + _, dt, err := codec.DecodeOne(dtBytes) + if err != nil { + return nil, nil, err + } + dt, err = tablecodec.Unflatten(dt, &col.FieldType, ctx.GetSessionVars().Location()) + if err != nil { + return nil, nil, err + } + v[i] = dt + continue + } + prefixCols[col.ID] = struct{}{} + } + colTps[col.ID] = &col.FieldType + } + rowMap, err := tablecodec.DecodeRowToDatumMap(value, colTps, ctx.GetSessionVars().Location()) + if err != nil { + return nil, rowMap, err + } + defaultVals := make([]types.Datum, len(cols)) + for i, col := range cols { + if col == nil { + continue + } + if col.IsPKHandleColumn(meta) || (col.IsCommonHandleColumn(meta) && !types.NeedRestoredData(&col.FieldType)) { + if _, isPrefix := prefixCols[col.ID]; !isPrefix { + continue + } + } + ri, ok := rowMap[col.ID] + if ok { + v[i] = ri + continue + } + if col.IsGenerated() && !col.GeneratedStored { + continue + } + if col.ChangeStateInfo != nil { + v[i], _, err = GetChangingColVal(ctx, cols, col, rowMap, defaultVals) + } else { + v[i], err = GetColDefaultValue(ctx, col, defaultVals) + } + if err != nil { + return nil, rowMap, err + } + } + return v, rowMap, nil +} + +// FindPrimaryIndex uses to find primary index in tableInfo. +func FindPrimaryIndex(tblInfo *model.TableInfo) *model.IndexInfo { + var pkIdx *model.IndexInfo + for _, idx := range tblInfo.Indices { + if idx.Primary { + pkIdx = idx + break + } + } + return pkIdx +} + +// GetColDefaultValue gets a column default value. +// The defaultVals is used to avoid calculating the default value multiple times. +func GetColDefaultValue(ctx sessionctx.Context, col *table.Column, defaultVals []types.Datum) ( + colVal types.Datum, err error) { + if col.GetOriginDefaultValue() == nil && mysql.HasNotNullFlag(col.Flag) { + return colVal, errors.New("Miss column") + } + if col.State != model.StatePublic { + return colVal, nil + } + if defaultVals[col.Offset].IsNull() { + colVal, err = table.GetColOriginDefaultValue(ctx, col.ToInfo()) + if err != nil { + return colVal, err + } + defaultVals[col.Offset] = colVal + } else { + colVal = defaultVals[col.Offset] + } + + return colVal, nil +} + +// GetChangingColVal gets the changing column value when executing "modify/change column" statement. +// For statement like update-where, it will fetch the old Row out and insert it into kv again. +// Since update statement can see the writable columns, it is responsible for the casting relative column / get the fault value here. +// old Row : a-b-[nil] +// new Row : a-b-[a'/default] +// Thus the writable new Row is corresponding to Write-Only constraints. +func GetChangingColVal(ctx sessionctx.Context, cols []*table.Column, col *table.Column, rowMap map[int64]types.Datum, defaultVals []types.Datum) (_ types.Datum, isDefaultVal bool, err error) { + relativeCol := cols[col.ChangeStateInfo.DependencyColumnOffset] + idxColumnVal, ok := rowMap[relativeCol.ID] + if ok { + idxColumnVal, err = table.CastValue(ctx, idxColumnVal, col.ColumnInfo, false, false) + // TODO: Consider sql_mode and the error msg(encounter this error check whether to rollback). + if err != nil { + return idxColumnVal, false, errors.Trace(err) + } + return idxColumnVal, false, nil + } + + idxColumnVal, err = GetColDefaultValue(ctx, col, defaultVals) + if err != nil { + return idxColumnVal, false, errors.Trace(err) + } + + return idxColumnVal, true, nil +} diff --git a/util/rowDecoder/decoder.go b/util/rowDecoder/decoder.go index aa39c6c1037e3..8d695578847bc 100644 --- a/util/rowDecoder/decoder.go +++ b/util/rowDecoder/decoder.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" @@ -104,10 +105,10 @@ func (rd *RowDecoder) DecodeAndEvalRowWithMap(ctx sessionctx.Context, handle kv. continue } if dCol.Col.ChangeStateInfo != nil { - val, _, err = tables.GetChangingColVal(ctx, rd.cols, dCol.Col, row, rd.defaultVals) + val, _, err = util.GetChangingColVal(ctx, rd.cols, dCol.Col, row, rd.defaultVals) } else { // Get the default value of the column in the generated column expression. - val, err = tables.GetColDefaultValue(ctx, dCol.Col, rd.defaultVals) + val, err = util.GetColDefaultValue(ctx, dCol.Col, rd.defaultVals) } if err != nil { return nil, err @@ -161,7 +162,7 @@ func (rd *RowDecoder) DecodeTheExistedColumnMap(ctx sessionctx.Context, handle k continue } // Get the default value of the column in the generated column expression. - val, err = tables.GetColDefaultValue(ctx, dCol.Col, rd.defaultVals) + val, err = util.GetColDefaultValue(ctx, dCol.Col, rd.defaultVals) if err != nil { return nil, err } From 752f395e2f10a719ff22b299a40b683ee3e96241 Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Thu, 30 Dec 2021 11:47:34 +0800 Subject: [PATCH 5/9] move split --- br/pkg/lightning/backend/local/duplicate.go | 33 +- br/pkg/lightning/backend/local/local.go | 2 +- br/pkg/lightning/backend/local/local_test.go | 11 +- br/pkg/lightning/backend/local/localhelper.go | 2 +- .../backend/local/localhelper_test.go | 89 +-- br/pkg/restore/client.go | 7 +- br/pkg/restore/import.go | 25 +- br/pkg/restore/ingester.go | 42 +- br/pkg/restore/log_client.go | 12 +- br/pkg/restore/range.go | 18 +- br/pkg/restore/split.go | 178 +---- br/pkg/restore/split/region.go | 21 + br/pkg/restore/split/split.go | 25 + br/pkg/restore/split/split_client.go | 668 ++++++++++++++++++ br/pkg/restore/split_client.go | 562 --------------- br/pkg/restore/split_test.go | 63 +- br/pkg/restore/util.go | 8 +- br/pkg/restore/util_test.go | 36 +- 18 files changed, 918 insertions(+), 884 deletions(-) create mode 100644 br/pkg/restore/split/region.go create mode 100644 br/pkg/restore/split/split.go create mode 100644 br/pkg/restore/split/split_client.go diff --git a/br/pkg/lightning/backend/local/duplicate.go b/br/pkg/lightning/backend/local/duplicate.go index fc88055c40c61..212331ba8f6f4 100644 --- a/br/pkg/lightning/backend/local/duplicate.go +++ b/br/pkg/lightning/backend/local/duplicate.go @@ -27,12 +27,22 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" + tikvclient "github.com/tikv/client-go/v2/tikv" + "go.uber.org/atomic" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/br/pkg/lightning/errormanager" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/logutil" - "github.com/pingcap/tidb/br/pkg/restore" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/distsql" tidbkv "github.com/pingcap/tidb/kv" @@ -41,15 +51,6 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/ranger" - tikvclient "github.com/tikv/client-go/v2/tikv" - "go.uber.org/atomic" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" - "google.golang.org/grpc/backoff" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/keepalive" ) const ( @@ -65,7 +66,7 @@ type DuplicateRequest struct { type DuplicateManager struct { errorMgr *errormanager.ErrorManager - splitCli restore.SplitClient + splitCli split.SplitClient tikvCli *tikvclient.KVStore regionConcurrency int connPool common.GRPCConns @@ -253,7 +254,7 @@ func (manager *DuplicateManager) sendRequestToTiKV(ctx context.Context, startKey := codec.EncodeBytes([]byte{}, req.start) endKey := codec.EncodeBytes([]byte{}, req.end) - regions, err := restore.PaginateScanRegion(ctx, manager.splitCli, startKey, endKey, scanRegionLimit) + regions, err := split.PaginateScanRegion(ctx, manager.splitCli, startKey, endKey, scanRegionLimit) if err != nil { return err } @@ -263,9 +264,9 @@ func (manager *DuplicateManager) sendRequestToTiKV(ctx context.Context, if tryTimes > maxRetryTimes { return errors.Errorf("retry time exceed limit") } - unfinishedRegions := make([]*restore.RegionInfo, 0) + unfinishedRegions := make([]*split.RegionInfo, 0) waitingClients := make([]import_sstpb.ImportSST_DuplicateDetectClient, 0) - watingRegions := make([]*restore.RegionInfo, 0) + watingRegions := make([]*split.RegionInfo, 0) for idx, region := range regions { if len(waitingClients) > manager.regionConcurrency { r := regions[idx:] @@ -350,7 +351,7 @@ func (manager *DuplicateManager) sendRequestToTiKV(ctx context.Context, cliLogger.Warn("[detect-dupe] meet key error in duplicate detect response from TiKV, retry again ", zap.String("RegionError", resp.GetRegionError().GetMessage())) - r, err := restore.PaginateScanRegion(ctx, manager.splitCli, watingRegions[idx].Region.GetStartKey(), watingRegions[idx].Region.GetEndKey(), scanRegionLimit) + r, err := split.PaginateScanRegion(ctx, manager.splitCli, watingRegions[idx].Region.GetStartKey(), watingRegions[idx].Region.GetEndKey(), scanRegionLimit) if err != nil { unfinishedRegions = append(unfinishedRegions, watingRegions[idx]) } else { @@ -687,7 +688,7 @@ func (manager *DuplicateManager) getValues( } func (manager *DuplicateManager) getDuplicateStream(ctx context.Context, - region *restore.RegionInfo, + region *split.RegionInfo, start []byte, end []byte) (import_sstpb.ImportSST_DuplicateDetectClient, error) { leader := region.Leader if leader == nil { diff --git a/br/pkg/lightning/backend/local/local.go b/br/pkg/lightning/backend/local/local.go index 56eb60f352b2f..3fd907dda703c 100644 --- a/br/pkg/lightning/backend/local/local.go +++ b/br/pkg/lightning/backend/local/local.go @@ -71,7 +71,7 @@ import ( "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/membuf" "github.com/pingcap/tidb/br/pkg/pdutil" - split "github.com/pingcap/tidb/br/pkg/restore" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/utils/utilmath" "github.com/pingcap/tidb/br/pkg/version" "github.com/pingcap/tidb/parser/model" diff --git a/br/pkg/lightning/backend/local/local_test.go b/br/pkg/lightning/backend/local/local_test.go index d1f700210ea2a..574ae81f61d40 100644 --- a/br/pkg/lightning/backend/local/local_test.go +++ b/br/pkg/lightning/backend/local/local_test.go @@ -37,12 +37,13 @@ import ( "github.com/pingcap/kvproto/pkg/errorpb" sst "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/tidb/br/pkg/lightning/backend" "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/br/pkg/lightning/mydump" "github.com/pingcap/tidb/br/pkg/mock" - "github.com/pingcap/tidb/br/pkg/restore" + "github.com/pingcap/tidb/br/pkg/restore/split" tidbkv "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/tablecodec" @@ -432,11 +433,11 @@ func (s *localSuite) TestLocalWriterWithIngestUnsort(c *C) { } type mockSplitClient struct { - restore.SplitClient + split.SplitClient } -func (c *mockSplitClient) GetRegion(ctx context.Context, key []byte) (*restore.RegionInfo, error) { - return &restore.RegionInfo{ +func (c *mockSplitClient) GetRegion(ctx context.Context, key []byte) (*split.RegionInfo, error) { + return &split.RegionInfo{ Leader: &metapb.Peer{Id: 1}, Region: &metapb.Region{ Id: 1, @@ -458,7 +459,7 @@ func (s *localSuite) TestIsIngestRetryable(c *C) { }, } ctx := context.Background() - region := &restore.RegionInfo{ + region := &split.RegionInfo{ Leader: &metapb.Peer{Id: 1}, Region: &metapb.Region{ Id: 1, diff --git a/br/pkg/lightning/backend/local/localhelper.go b/br/pkg/lightning/backend/local/localhelper.go index e705e3b7b42b4..41f19391f707d 100644 --- a/br/pkg/lightning/backend/local/localhelper.go +++ b/br/pkg/lightning/backend/local/localhelper.go @@ -38,7 +38,7 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/logutil" - split "github.com/pingcap/tidb/br/pkg/restore" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/utils/utilmath" "github.com/pingcap/tidb/util/codec" ) diff --git a/br/pkg/lightning/backend/local/localhelper_test.go b/br/pkg/lightning/backend/local/localhelper_test.go index d901b3c2711e6..2173ed0a698fb 100644 --- a/br/pkg/lightning/backend/local/localhelper_test.go +++ b/br/pkg/lightning/backend/local/localhelper_test.go @@ -27,23 +27,24 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/tikv/pd/server/core" + "github.com/tikv/pd/server/schedule/placement" + "go.uber.org/atomic" + "github.com/pingcap/tidb/br/pkg/lightning/glue" - "github.com/pingcap/tidb/br/pkg/restore" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/codec" - "github.com/tikv/pd/server/core" - "github.com/tikv/pd/server/schedule/placement" - "go.uber.org/atomic" ) type testClient struct { mu sync.RWMutex stores map[uint64]*metapb.Store - regions map[uint64]*restore.RegionInfo + regions map[uint64]*split.RegionInfo regionsInfo *core.RegionsInfo // For now it's only used in ScanRegions nextRegionID uint64 splitCount atomic.Int32 @@ -52,7 +53,7 @@ type testClient struct { func newTestClient( stores map[uint64]*metapb.Store, - regions map[uint64]*restore.RegionInfo, + regions map[uint64]*split.RegionInfo, nextRegionID uint64, hook clientHook, ) *testClient { @@ -69,7 +70,7 @@ func newTestClient( } } -func (c *testClient) GetAllRegions() map[uint64]*restore.RegionInfo { +func (c *testClient) GetAllRegions() map[uint64]*split.RegionInfo { c.mu.RLock() defer c.mu.RUnlock() return c.regions @@ -85,7 +86,7 @@ func (c *testClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Stor return store, nil } -func (c *testClient) GetRegion(ctx context.Context, key []byte) (*restore.RegionInfo, error) { +func (c *testClient) GetRegion(ctx context.Context, key []byte) (*split.RegionInfo, error) { c.mu.RLock() defer c.mu.RUnlock() for _, region := range c.regions { @@ -97,7 +98,7 @@ func (c *testClient) GetRegion(ctx context.Context, key []byte) (*restore.Region return nil, errors.Errorf("region not found: key=%s", string(key)) } -func (c *testClient) GetRegionByID(ctx context.Context, regionID uint64) (*restore.RegionInfo, error) { +func (c *testClient) GetRegionByID(ctx context.Context, regionID uint64) (*split.RegionInfo, error) { c.mu.RLock() defer c.mu.RUnlock() region, ok := c.regions[regionID] @@ -109,12 +110,12 @@ func (c *testClient) GetRegionByID(ctx context.Context, regionID uint64) (*resto func (c *testClient) SplitRegion( ctx context.Context, - regionInfo *restore.RegionInfo, + regionInfo *split.RegionInfo, key []byte, -) (*restore.RegionInfo, error) { +) (*split.RegionInfo, error) { c.mu.Lock() defer c.mu.Unlock() - var target *restore.RegionInfo + var target *split.RegionInfo splitKey := codec.EncodeBytes([]byte{}, key) for _, region := range c.regions { if bytes.Compare(splitKey, region.Region.StartKey) >= 0 && @@ -125,7 +126,7 @@ func (c *testClient) SplitRegion( if target == nil { return nil, errors.Errorf("region not found: key=%s", string(key)) } - newRegion := &restore.RegionInfo{ + newRegion := &split.RegionInfo{ Region: &metapb.Region{ Peers: target.Region.Peers, Id: c.nextRegionID, @@ -148,8 +149,8 @@ func (c *testClient) SplitRegion( } func (c *testClient) BatchSplitRegionsWithOrigin( - ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte, -) (*restore.RegionInfo, []*restore.RegionInfo, error) { + ctx context.Context, regionInfo *split.RegionInfo, keys [][]byte, +) (*split.RegionInfo, []*split.RegionInfo, error) { c.mu.Lock() defer c.mu.Unlock() c.splitCount.Inc() @@ -167,7 +168,7 @@ func (c *testClient) BatchSplitRegionsWithOrigin( default: } - newRegions := make([]*restore.RegionInfo, 0) + newRegions := make([]*split.RegionInfo, 0) target, ok := c.regions[regionInfo.Region.Id] if !ok { return nil, nil, errors.New("region not found") @@ -190,7 +191,7 @@ func (c *testClient) BatchSplitRegionsWithOrigin( if bytes.Compare(key, startKey) <= 0 || bytes.Compare(key, target.Region.EndKey) >= 0 { continue } - newRegion := &restore.RegionInfo{ + newRegion := &split.RegionInfo{ Region: &metapb.Region{ Peers: target.Region.Peers, Id: c.nextRegionID, @@ -223,13 +224,13 @@ func (c *testClient) BatchSplitRegionsWithOrigin( } func (c *testClient) BatchSplitRegions( - ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte, -) ([]*restore.RegionInfo, error) { + ctx context.Context, regionInfo *split.RegionInfo, keys [][]byte, +) ([]*split.RegionInfo, error) { _, newRegions, err := c.BatchSplitRegionsWithOrigin(ctx, regionInfo, keys) return newRegions, err } -func (c *testClient) ScatterRegion(ctx context.Context, regionInfo *restore.RegionInfo) error { +func (c *testClient) ScatterRegion(ctx context.Context, regionInfo *split.RegionInfo) error { return nil } @@ -239,15 +240,15 @@ func (c *testClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.Ge }, nil } -func (c *testClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*restore.RegionInfo, error) { +func (c *testClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*split.RegionInfo, error) { if c.hook != nil { key, endKey, limit = c.hook.BeforeScanRegions(ctx, key, endKey, limit) } infos := c.regionsInfo.ScanRange(key, endKey, limit) - regions := make([]*restore.RegionInfo, 0, len(infos)) + regions := make([]*split.RegionInfo, 0, len(infos)) for _, info := range infos { - regions = append(regions, &restore.RegionInfo{ + regions = append(regions, &split.RegionInfo{ Region: info.GetMeta(), Leader: info.GetLeader(), }) @@ -276,7 +277,7 @@ func (c *testClient) SetStoresLabel(ctx context.Context, stores []uint64, labelK return nil } -func cloneRegion(region *restore.RegionInfo) *restore.RegionInfo { +func cloneRegion(region *split.RegionInfo) *split.RegionInfo { r := &metapb.Region{} if region.Region != nil { b, _ := region.Region.Marshal() @@ -288,7 +289,7 @@ func cloneRegion(region *restore.RegionInfo) *restore.RegionInfo { b, _ := region.Region.Marshal() _ = l.Unmarshal(b) } - return &restore.RegionInfo{Region: r, Leader: l} + return &split.RegionInfo{Region: r, Leader: l} } // region: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, ) @@ -298,7 +299,7 @@ func initTestClient(keys [][]byte, hook clientHook) *testClient { Id: 1, StoreId: 1, } - regions := make(map[uint64]*restore.RegionInfo) + regions := make(map[uint64]*split.RegionInfo) for i := uint64(1); i < uint64(len(keys)); i++ { startKey := keys[i-1] if len(startKey) != 0 { @@ -308,7 +309,7 @@ func initTestClient(keys [][]byte, hook clientHook) *testClient { if len(endKey) != 0 { endKey = codec.EncodeBytes([]byte{}, endKey) } - regions[i] = &restore.RegionInfo{ + regions[i] = &split.RegionInfo{ Region: &metapb.Region{ Id: i, Peers: peers, @@ -325,7 +326,7 @@ func initTestClient(keys [][]byte, hook clientHook) *testClient { return newTestClient(stores, regions, uint64(len(keys)), hook) } -func checkRegionRanges(c *C, regions []*restore.RegionInfo, keys [][]byte) { +func checkRegionRanges(c *C, regions []*split.RegionInfo, keys [][]byte) { for i, r := range regions { _, regionStart, _ := codec.DecodeBytes(r.Region.StartKey, []byte{}) _, regionEnd, _ := codec.DecodeBytes(r.Region.EndKey, []byte{}) @@ -335,21 +336,21 @@ func checkRegionRanges(c *C, regions []*restore.RegionInfo, keys [][]byte) { } type clientHook interface { - BeforeSplitRegion(ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte) (*restore.RegionInfo, [][]byte) - AfterSplitRegion(context.Context, *restore.RegionInfo, [][]byte, []*restore.RegionInfo, error) ([]*restore.RegionInfo, error) + BeforeSplitRegion(ctx context.Context, regionInfo *split.RegionInfo, keys [][]byte) (*split.RegionInfo, [][]byte) + AfterSplitRegion(context.Context, *split.RegionInfo, [][]byte, []*split.RegionInfo, error) ([]*split.RegionInfo, error) BeforeScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]byte, []byte, int) - AfterScanRegions([]*restore.RegionInfo, error) ([]*restore.RegionInfo, error) + AfterScanRegions([]*split.RegionInfo, error) ([]*split.RegionInfo, error) } type noopHook struct{} -func (h *noopHook) BeforeSplitRegion(ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte) (*restore.RegionInfo, [][]byte) { +func (h *noopHook) BeforeSplitRegion(ctx context.Context, regionInfo *split.RegionInfo, keys [][]byte) (*split.RegionInfo, [][]byte) { delayTime := rand.Int31n(10) + 1 time.Sleep(time.Duration(delayTime) * time.Millisecond) return regionInfo, keys } -func (h *noopHook) AfterSplitRegion(c context.Context, r *restore.RegionInfo, keys [][]byte, res []*restore.RegionInfo, err error) ([]*restore.RegionInfo, error) { +func (h *noopHook) AfterSplitRegion(c context.Context, r *split.RegionInfo, keys [][]byte, res []*split.RegionInfo, err error) ([]*split.RegionInfo, error) { return res, err } @@ -357,7 +358,7 @@ func (h *noopHook) BeforeScanRegions(ctx context.Context, key, endKey []byte, li return key, endKey, limit } -func (h *noopHook) AfterScanRegions(res []*restore.RegionInfo, err error) ([]*restore.RegionInfo, error) { +func (h *noopHook) AfterScanRegions(res []*split.RegionInfo, err error) ([]*split.RegionInfo, error) { return res, err } @@ -410,7 +411,7 @@ func (s *localSuite) doTestBatchSplitRegionByRanges(ctx context.Context, c *C, h // current region ranges: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, ) rangeStart := codec.EncodeBytes([]byte{}, []byte("b")) rangeEnd := codec.EncodeBytes([]byte{}, []byte("c")) - regions, err := restore.PaginateScanRegion(ctx, client, rangeStart, rangeEnd, 5) + regions, err := split.PaginateScanRegion(ctx, client, rangeStart, rangeEnd, 5) c.Assert(err, IsNil) // regions is: [aay, bba), [bba, bbh), [bbh, cca) checkRegionRanges(c, regions, [][]byte{[]byte("aay"), []byte("bba"), []byte("bbh"), []byte("cca")}) @@ -435,7 +436,7 @@ func (s *localSuite) doTestBatchSplitRegionByRanges(ctx context.Context, c *C, h splitHook.check(c, client) // check split ranges - regions, err = restore.PaginateScanRegion(ctx, client, rangeStart, rangeEnd, 5) + regions, err = split.PaginateScanRegion(ctx, client, rangeStart, rangeEnd, 5) c.Assert(err, IsNil) result := [][]byte{ []byte("b"), []byte("ba"), []byte("bb"), []byte("bba"), []byte("bbh"), []byte("bc"), @@ -489,7 +490,7 @@ type scanRegionEmptyHook struct { cnt int } -func (h *scanRegionEmptyHook) AfterScanRegions(res []*restore.RegionInfo, err error) ([]*restore.RegionInfo, error) { +func (h *scanRegionEmptyHook) AfterScanRegions(res []*split.RegionInfo, err error) ([]*split.RegionInfo, error) { h.cnt++ // skip the first call if h.cnt == 1 { @@ -506,7 +507,7 @@ type splitRegionEpochNotMatchHook struct { noopHook } -func (h *splitRegionEpochNotMatchHook) BeforeSplitRegion(ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte) (*restore.RegionInfo, [][]byte) { +func (h *splitRegionEpochNotMatchHook) BeforeSplitRegion(ctx context.Context, regionInfo *split.RegionInfo, keys [][]byte) (*split.RegionInfo, [][]byte) { regionInfo, keys = h.noopHook.BeforeSplitRegion(ctx, regionInfo, keys) regionInfo = cloneRegion(regionInfo) // decrease the region epoch, so split region will fail @@ -524,7 +525,7 @@ type splitRegionEpochNotMatchHookRandom struct { cnt atomic.Int32 } -func (h *splitRegionEpochNotMatchHookRandom) BeforeSplitRegion(ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte) (*restore.RegionInfo, [][]byte) { +func (h *splitRegionEpochNotMatchHookRandom) BeforeSplitRegion(ctx context.Context, regionInfo *split.RegionInfo, keys [][]byte) (*split.RegionInfo, [][]byte) { regionInfo, keys = h.noopHook.BeforeSplitRegion(ctx, regionInfo, keys) if h.cnt.Inc() != 0 { return regionInfo, keys @@ -545,7 +546,7 @@ type splitRegionNoValidKeyHook struct { errorCnt atomic.Int32 } -func (h *splitRegionNoValidKeyHook) BeforeSplitRegion(ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte) (*restore.RegionInfo, [][]byte) { +func (h *splitRegionNoValidKeyHook) BeforeSplitRegion(ctx context.Context, regionInfo *split.RegionInfo, keys [][]byte) (*split.RegionInfo, [][]byte) { regionInfo, keys = h.noopHook.BeforeSplitRegion(ctx, regionInfo, keys) if h.errorCnt.Inc() <= h.returnErrTimes { // clean keys to trigger "no valid keys" error @@ -567,7 +568,7 @@ type reportAfterSplitHook struct { ch chan<- struct{} } -func (h *reportAfterSplitHook) AfterSplitRegion(ctx context.Context, region *restore.RegionInfo, keys [][]byte, resultRegions []*restore.RegionInfo, err error) ([]*restore.RegionInfo, error) { +func (h *reportAfterSplitHook) AfterSplitRegion(ctx context.Context, region *split.RegionInfo, keys [][]byte, resultRegions []*split.RegionInfo, err error) ([]*split.RegionInfo, error) { h.ch <- struct{}{} return resultRegions, err } @@ -649,7 +650,7 @@ func (s *localSuite) doTestBatchSplitByRangesWithClusteredIndex(c *C, hook clien startKey := codec.EncodeBytes([]byte{}, rangeKeys[0]) endKey := codec.EncodeBytes([]byte{}, rangeKeys[len(rangeKeys)-1]) // check split ranges - regions, err := restore.PaginateScanRegion(ctx, client, startKey, endKey, 5) + regions, err := split.PaginateScanRegion(ctx, client, startKey, endKey, 5) c.Assert(err, IsNil) c.Assert(len(regions), Equals, len(ranges)+1) @@ -677,14 +678,14 @@ func (s *localSuite) TestNeedSplit(c *C) { keys := []int64{10, 100, 500, 1000, 999999, -1} start := tablecodec.EncodeRowKeyWithHandle(tableID, kv.IntHandle(0)) regionStart := codec.EncodeBytes([]byte{}, start) - regions := make([]*restore.RegionInfo, 0) + regions := make([]*split.RegionInfo, 0) for _, end := range keys { var regionEndKey []byte if end >= 0 { endKey := tablecodec.EncodeRowKeyWithHandle(tableID, kv.IntHandle(end)) regionEndKey = codec.EncodeBytes([]byte{}, endKey) } - region := &restore.RegionInfo{ + region := &split.RegionInfo{ Region: &metapb.Region{ Id: 1, Peers: peers, diff --git a/br/pkg/restore/client.go b/br/pkg/restore/client.go index 00ff931856b12..8be21b4d68e1a 100644 --- a/br/pkg/restore/client.go +++ b/br/pkg/restore/client.go @@ -39,6 +39,7 @@ import ( "github.com/pingcap/tidb/br/pkg/metautil" "github.com/pingcap/tidb/br/pkg/pdutil" "github.com/pingcap/tidb/br/pkg/redact" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" @@ -58,7 +59,7 @@ const defaultChecksumConcurrency = 64 // Client sends requests to restore files. type Client struct { pdClient pd.Client - toolClient SplitClient + toolClient split.SplitClient fileImporter FileImporter workerPool *utils.WorkerPool tlsConf *tls.Config @@ -122,7 +123,7 @@ func NewRestoreClient( return &Client{ pdClient: pdClient, - toolClient: NewSplitClient(pdClient, tlsConf), + toolClient: split.NewSplitClient(pdClient, tlsConf), db: db, tlsConf: tlsConf, keepaliveConf: keepaliveConf, @@ -207,7 +208,7 @@ func (rc *Client) InitBackupMeta( rc.backupMeta = backupMeta log.Info("load backupmeta", zap.Int("databases", len(rc.databases)), zap.Int("jobs", len(rc.ddlJobs))) - metaClient := NewSplitClient(rc.pdClient, rc.tlsConf) + metaClient := split.NewSplitClient(rc.pdClient, rc.tlsConf) importCli := NewImportClient(metaClient, rc.tlsConf, rc.keepaliveConf) rc.fileImporter = NewFileImporter(metaClient, importCli, backend, rc.backupMeta.IsRawKv, rc.rateLimit) return rc.fileImporter.CheckMultiIngestSupport(c, rc.pdClient) diff --git a/br/pkg/restore/import.go b/br/pkg/restore/import.go index f58fc334db581..954679fff8f28 100644 --- a/br/pkg/restore/import.go +++ b/br/pkg/restore/import.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/tidb/br/pkg/conn" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/br/pkg/utils/utildb" @@ -78,7 +79,7 @@ type ImporterClient interface { type importClient struct { mu sync.Mutex - metaClient SplitClient + metaClient split.SplitClient clients map[uint64]import_sstpb.ImportSSTClient tlsConf *tls.Config @@ -86,7 +87,7 @@ type importClient struct { } // NewImportClient returns a new ImporterClient. -func NewImportClient(metaClient SplitClient, tlsConf *tls.Config, keepaliveConf keepalive.ClientParameters) ImporterClient { +func NewImportClient(metaClient split.SplitClient, tlsConf *tls.Config, keepaliveConf keepalive.ClientParameters) ImporterClient { return &importClient{ metaClient: metaClient, clients: make(map[uint64]import_sstpb.ImportSSTClient), @@ -199,7 +200,7 @@ func (ic *importClient) SupportMultiIngest(ctx context.Context, stores []uint64) // FileImporter used to import a file to TiKV. type FileImporter struct { - metaClient SplitClient + metaClient split.SplitClient importClient ImporterClient backend *backuppb.StorageBackend rateLimit uint64 @@ -212,7 +213,7 @@ type FileImporter struct { // NewFileImporter returns a new file importClient. func NewFileImporter( - metaClient SplitClient, + metaClient split.SplitClient, importClient ImporterClient, backend *backuppb.StorageBackend, isRawKvMode bool, @@ -299,8 +300,8 @@ func (importer *FileImporter) Import( tctx, cancel := context.WithTimeout(ctx, importScanRegionTime) defer cancel() // Scan regions covered by the file range - regionInfos, errScanRegion := PaginateScanRegion( - tctx, importer.metaClient, startKey, endKey, ScanRegionPaginationLimit) + regionInfos, errScanRegion := split.PaginateScanRegion( + tctx, importer.metaClient, startKey, endKey, split.ScanRegionPaginationLimit) if errScanRegion != nil { return errors.Trace(errScanRegion) } @@ -379,9 +380,9 @@ func (importer *FileImporter) Import( switch { case errPb.NotLeader != nil: // If error is `NotLeader`, update the region info and retry - var newInfo *RegionInfo + var newInfo *split.RegionInfo if newLeader := errPb.GetNotLeader().GetLeader(); newLeader != nil { - newInfo = &RegionInfo{ + newInfo = &split.RegionInfo{ Leader: newLeader, Region: info.Region, } @@ -403,7 +404,7 @@ func (importer *FileImporter) Import( logutil.Region(info.Region), zap.Stringer("newLeader", newInfo.Leader)) - if !checkRegionEpoch(newInfo, info) { + if !split.CheckRegionEpoch(newInfo, info) { errIngest = errors.Trace(berrors.ErrKVEpochNotMatch) break ingestRetry } @@ -454,7 +455,7 @@ func (importer *FileImporter) setDownloadSpeedLimit(ctx context.Context, storeID func (importer *FileImporter) downloadSST( ctx context.Context, - regionInfo *RegionInfo, + regionInfo *split.RegionInfo, file *backuppb.File, rewriteRules *RewriteRules, cipher *backuppb.CipherInfo, @@ -526,7 +527,7 @@ func (importer *FileImporter) downloadSST( func (importer *FileImporter) downloadRawKVSST( ctx context.Context, - regionInfo *RegionInfo, + regionInfo *split.RegionInfo, file *backuppb.File, cipher *backuppb.CipherInfo, ) (*import_sstpb.SSTMeta, error) { @@ -593,7 +594,7 @@ func (importer *FileImporter) downloadRawKVSST( func (importer *FileImporter) ingestSSTs( ctx context.Context, sstMetas []*import_sstpb.SSTMeta, - regionInfo *RegionInfo, + regionInfo *split.RegionInfo, ) (*import_sstpb.IngestResponse, error) { leader := regionInfo.Leader if leader == nil { diff --git a/br/pkg/restore/ingester.go b/br/pkg/restore/ingester.go index 22d5e389a63c6..bc127c05963e2 100644 --- a/br/pkg/restore/ingester.go +++ b/br/pkg/restore/ingester.go @@ -30,18 +30,20 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" - "github.com/pingcap/tidb/br/pkg/conn" - berrors "github.com/pingcap/tidb/br/pkg/errors" - "github.com/pingcap/tidb/br/pkg/kv" - "github.com/pingcap/tidb/br/pkg/logutil" - "github.com/pingcap/tidb/br/pkg/membuf" - "github.com/pingcap/tidb/br/pkg/utils" "github.com/tikv/pd/pkg/codec" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" + + "github.com/pingcap/tidb/br/pkg/conn" + berrors "github.com/pingcap/tidb/br/pkg/errors" + "github.com/pingcap/tidb/br/pkg/kv" + "github.com/pingcap/tidb/br/pkg/logutil" + "github.com/pingcap/tidb/br/pkg/membuf" + "github.com/pingcap/tidb/br/pkg/restore/split" + "github.com/pingcap/tidb/br/pkg/utils" ) const ( @@ -88,7 +90,7 @@ type Ingester struct { tlsConf *tls.Config conns gRPCConns - splitCli SplitClient + splitCli split.SplitClient WorkerPool *utils.WorkerPool batchWriteKVPairs int @@ -97,7 +99,7 @@ type Ingester struct { // NewIngester creates Ingester. func NewIngester( - splitCli SplitClient, cfg concurrencyCfg, commitTS uint64, tlsConf *tls.Config, + splitCli split.SplitClient, cfg concurrencyCfg, commitTS uint64, tlsConf *tls.Config, ) *Ingester { workerPool := utils.NewWorkerPool(cfg.IngestConcurrency, "ingest worker") return &Ingester{ @@ -173,7 +175,7 @@ func (i *Ingester) writeAndIngestByRange( logutil.Key("end", end), logutil.Key("pairStart", pairStart), logutil.Key("pairEnd", pairEnd)) return nil } - var regions []*RegionInfo + var regions []*split.RegionInfo var err error ctx, cancel := context.WithCancel(ctxt) defer cancel() @@ -189,7 +191,7 @@ WriteAndIngest: } startKey := codec.EncodeBytes(pairStart) endKey := codec.EncodeBytes(kv.NextKey(pairEnd)) - regions, err = PaginateScanRegion(ctx, i.splitCli, startKey, endKey, 128) + regions, err = split.PaginateScanRegion(ctx, i.splitCli, startKey, endKey, 128) if err != nil || len(regions) == 0 { log.Warn("scan region failed", zap.Error(err), zap.Int("region_len", len(regions)), logutil.Key("startKey", startKey), logutil.Key("endKey", endKey), zap.Int("retry", retry)) @@ -228,7 +230,7 @@ WriteAndIngest: func (i *Ingester) writeAndIngestPairs( ctx context.Context, iter kv.Iter, - region *RegionInfo, + region *split.RegionInfo, start, end []byte, ) (*Range, error) { var err error @@ -272,7 +274,7 @@ loopWrite: } }) var retryTy retryType - var newRegion *RegionInfo + var newRegion *split.RegionInfo retryTy, newRegion, err = i.isIngestRetryable(ctx, resp, region, meta) if err == nil { // ingest next meta @@ -310,7 +312,7 @@ loopWrite: func (i *Ingester) writeToTiKV( ctx context.Context, iter kv.Iter, - region *RegionInfo, + region *split.RegionInfo, start, end []byte, ) ([]*sst.SSTMeta, *Range, error) { begin := time.Now() @@ -471,7 +473,7 @@ func (i *Ingester) writeToTiKV( return leaderPeerMetas, remainRange, nil } -func (i *Ingester) ingest(ctx context.Context, meta *sst.SSTMeta, region *RegionInfo) (*sst.IngestResponse, error) { +func (i *Ingester) ingest(ctx context.Context, meta *sst.SSTMeta, region *split.RegionInfo) (*sst.IngestResponse, error) { leader := region.Leader if leader == nil { leader = region.Region.GetPeers()[0] @@ -521,14 +523,14 @@ func (i *Ingester) getGrpcConnLocked(ctx context.Context, storeID uint64) (*grpc func (i *Ingester) isIngestRetryable( ctx context.Context, resp *sst.IngestResponse, - region *RegionInfo, + region *split.RegionInfo, meta *sst.SSTMeta, -) (retryType, *RegionInfo, error) { +) (retryType, *split.RegionInfo, error) { if resp.GetError() == nil { return retryNone, nil, nil } - getRegion := func() (*RegionInfo, error) { + getRegion := func() (*split.RegionInfo, error) { for retry := 0; ; retry++ { newRegion, err := i.splitCli.GetRegion(ctx, region.Region.GetStartKey()) if err != nil { @@ -547,12 +549,12 @@ func (i *Ingester) isIngestRetryable( } } - var newRegion *RegionInfo + var newRegion *split.RegionInfo var err error switch errPb := resp.GetError(); { case errPb.NotLeader != nil: if newLeader := errPb.GetNotLeader().GetLeader(); newLeader != nil { - newRegion = &RegionInfo{ + newRegion = &split.RegionInfo{ Leader: newLeader, Region: region.Region, } @@ -581,7 +583,7 @@ func (i *Ingester) isIngestRetryable( } } if newLeader != nil { - newRegion = &RegionInfo{ + newRegion = &split.RegionInfo{ Leader: newLeader, Region: currentRegion, } diff --git a/br/pkg/restore/log_client.go b/br/pkg/restore/log_client.go index fd71a06903ad1..0332d2817a6fa 100644 --- a/br/pkg/restore/log_client.go +++ b/br/pkg/restore/log_client.go @@ -17,18 +17,20 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" filter "github.com/pingcap/tidb-tools/pkg/table-filter" + "github.com/tikv/client-go/v2/oracle" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "github.com/pingcap/tidb/br/pkg/cdclog" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/kv" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/parser/model" titable "github.com/pingcap/tidb/table" - "github.com/tikv/client-go/v2/oracle" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" ) const ( @@ -67,7 +69,7 @@ type LogClient struct { ddlLock sync.Mutex restoreClient *Client - splitClient SplitClient + splitClient split.SplitClient importerClient ImporterClient // ingester is used to write and ingest kvs to tikv. @@ -113,7 +115,7 @@ func NewLogRestoreClient( } tlsConf := restoreClient.GetTLSConfig() - splitClient := NewSplitClient(restoreClient.GetPDClient(), tlsConf) + splitClient := split.NewSplitClient(restoreClient.GetPDClient(), tlsConf) importClient := NewImportClient(splitClient, tlsConf, restoreClient.keepaliveConf) cfg := concurrencyCfg{ diff --git a/br/pkg/restore/range.go b/br/pkg/restore/range.go index cc81ba8423d94..a05c5992ec904 100644 --- a/br/pkg/restore/range.go +++ b/br/pkg/restore/range.go @@ -9,13 +9,13 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/import_sstpb" - "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" + "go.uber.org/zap" + berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/rtree" "github.com/pingcap/tidb/tablecodec" - "go.uber.org/zap" ) // Range record start and end key for localStoreDir.DB @@ -105,20 +105,6 @@ func SortRanges(ranges []rtree.Range, rewriteRules *RewriteRules) ([]rtree.Range return sortedRanges, nil } -// RegionInfo includes a region and the leader of the region. -type RegionInfo struct { - Region *metapb.Region - Leader *metapb.Peer -} - -// ContainsInterior returns whether the region contains the given key, and also -// that the key does not fall on the boundary (start key) of the region. -func (region *RegionInfo) ContainsInterior(key []byte) bool { - return bytes.Compare(key, region.Region.GetStartKey()) > 0 && - (len(region.Region.GetEndKey()) == 0 || - bytes.Compare(key, region.Region.GetEndKey()) < 0) -} - // RewriteRules contains rules for rewriting keys of tables. type RewriteRules struct { Data []*import_sstpb.RewriteRule diff --git a/br/pkg/restore/split.go b/br/pkg/restore/split.go index b16d4225b5041..7c7f52e70a57a 100644 --- a/br/pkg/restore/split.go +++ b/br/pkg/restore/split.go @@ -5,7 +5,6 @@ package restore import ( "bytes" "context" - "encoding/hex" "strconv" "strings" "time" @@ -19,7 +18,7 @@ import ( berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" - "github.com/pingcap/tidb/br/pkg/redact" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/rtree" "github.com/pingcap/tidb/br/pkg/utils/utildb" @@ -28,35 +27,13 @@ import ( "go.uber.org/zap" ) -// Constants for split retry machinery. -const ( - SplitRetryTimes = 32 - SplitRetryInterval = 50 * time.Millisecond - SplitMaxRetryInterval = time.Second - - SplitCheckMaxRetryTimes = 64 - SplitCheckInterval = 8 * time.Millisecond - SplitMaxCheckInterval = time.Second - - ScatterWaitMaxRetryTimes = 64 - ScatterWaitInterval = 50 * time.Millisecond - ScatterMaxWaitInterval = time.Second - ScatterWaitUpperInterval = 180 * time.Second - - ScanRegionPaginationLimit = 128 - - RejectStoreCheckRetryTimes = 64 - RejectStoreCheckInterval = 100 * time.Millisecond - RejectStoreMaxCheckInterval = 2 * time.Second -) - // RegionSplitter is a executor of region split by rules. type RegionSplitter struct { - client SplitClient + client split.SplitClient } // NewRegionSplitter returns a new RegionSplitter. -func NewRegionSplitter(client SplitClient) *RegionSplitter { +func NewRegionSplitter(client split.SplitClient) *RegionSplitter { return &RegionSplitter{ client: client, } @@ -95,11 +72,11 @@ func (rs *RegionSplitter) Split( } minKey := codec.EncodeBytes(sortedRanges[0].StartKey) maxKey := codec.EncodeBytes(sortedRanges[len(sortedRanges)-1].EndKey) - interval := SplitRetryInterval - scatterRegions := make([]*RegionInfo, 0) + interval := split.SplitRetryInterval + scatterRegions := make([]*split.RegionInfo, 0) SplitRegions: - for i := 0; i < SplitRetryTimes; i++ { - regions, errScan := PaginateScanRegion(ctx, rs.client, minKey, maxKey, ScanRegionPaginationLimit) + for i := 0; i < split.SplitRetryTimes; i++ { + regions, errScan := split.PaginateScanRegion(ctx, rs.client, minKey, maxKey, split.ScanRegionPaginationLimit) if errScan != nil { if berrors.ErrPDBatchScanRegion.Equal(errScan) { log.Warn("inconsistent region info get.", logutil.ShortError(errScan)) @@ -109,12 +86,12 @@ SplitRegions: return errors.Trace(errScan) } splitKeyMap := getSplitKeys(rewriteRules, sortedRanges, regions) - regionMap := make(map[uint64]*RegionInfo) + regionMap := make(map[uint64]*split.RegionInfo) for _, region := range regions { regionMap[region.Region.GetId()] = region } for regionID, keys := range splitKeyMap { - var newRegions []*RegionInfo + var newRegions []*split.RegionInfo region := regionMap[regionID] log.Info("split regions", logutil.Region(region.Region), logutil.Keys(keys), rtree.ZapRanges(ranges)) @@ -133,8 +110,8 @@ SplitRegions: return errors.Trace(errSplit) } interval = 2 * interval - if interval > SplitMaxRetryInterval { - interval = SplitMaxRetryInterval + if interval > split.SplitMaxRetryInterval { + interval = split.SplitMaxRetryInterval } time.Sleep(interval) log.Warn("split regions failed, retry", @@ -163,7 +140,7 @@ SplitRegions: scatterCount := 0 for _, region := range scatterRegions { rs.waitForScatterRegion(ctx, region) - if time.Since(startTime) > ScatterWaitUpperInterval { + if time.Since(startTime) > split.ScatterWaitUpperInterval { break } scatterCount++ @@ -211,8 +188,8 @@ func (rs *RegionSplitter) isScatterRegionFinished(ctx context.Context, regionID } func (rs *RegionSplitter) waitForSplit(ctx context.Context, regionID uint64) { - interval := SplitCheckInterval - for i := 0; i < SplitCheckMaxRetryTimes; i++ { + interval := split.SplitCheckInterval + for i := 0; i < split.SplitCheckMaxRetryTimes; i++ { ok, err := rs.hasRegion(ctx, regionID) if err != nil { log.Warn("wait for split failed", zap.Error(err)) @@ -222,8 +199,8 @@ func (rs *RegionSplitter) waitForSplit(ctx context.Context, regionID uint64) { break } interval = 2 * interval - if interval > SplitMaxCheckInterval { - interval = SplitMaxCheckInterval + if interval > split.SplitMaxCheckInterval { + interval = split.SplitMaxCheckInterval } time.Sleep(interval) } @@ -233,10 +210,10 @@ type retryTimeKey struct{} var retryTimes = new(retryTimeKey) -func (rs *RegionSplitter) waitForScatterRegion(ctx context.Context, regionInfo *RegionInfo) { - interval := ScatterWaitInterval +func (rs *RegionSplitter) waitForScatterRegion(ctx context.Context, regionInfo *split.RegionInfo) { + interval := split.ScatterWaitInterval regionID := regionInfo.Region.GetId() - for i := 0; i < ScatterWaitMaxRetryTimes; i++ { + for i := 0; i < split.ScatterWaitMaxRetryTimes; i++ { ctx1 := context.WithValue(ctx, retryTimes, i) ok, err := rs.isScatterRegionFinished(ctx1, regionID) if err != nil { @@ -248,18 +225,18 @@ func (rs *RegionSplitter) waitForScatterRegion(ctx context.Context, regionInfo * break } interval = 2 * interval - if interval > ScatterMaxWaitInterval { - interval = ScatterMaxWaitInterval + if interval > split.ScatterMaxWaitInterval { + interval = split.ScatterMaxWaitInterval } time.Sleep(interval) } } func (rs *RegionSplitter) splitAndScatterRegions( - ctx context.Context, regionInfo *RegionInfo, keys [][]byte, -) ([]*RegionInfo, error) { + ctx context.Context, regionInfo *split.RegionInfo, keys [][]byte, +) ([]*split.RegionInfo, error) { if len(keys) == 0 { - return []*RegionInfo{regionInfo}, nil + return []*split.RegionInfo{regionInfo}, nil } newRegions, err := rs.client.BatchSplitRegions(ctx, regionInfo, keys) @@ -286,8 +263,8 @@ func (rs *RegionSplitter) splitAndScatterRegions( // ScatterRegionsWithBackoffer scatter the region with some backoffer. // This function is for testing the retry mechanism. // For a real cluster, directly use ScatterRegions would be fine. -func (rs *RegionSplitter) ScatterRegionsWithBackoffer(ctx context.Context, newRegions []*RegionInfo, backoffer utildb.Backoffer) { - newRegionSet := make(map[uint64]*RegionInfo, len(newRegions)) +func (rs *RegionSplitter) ScatterRegionsWithBackoffer(ctx context.Context, newRegions []*split.RegionInfo, backoffer utildb.Backoffer) { + newRegionSet := make(map[uint64]*split.RegionInfo, len(newRegions)) for _, newRegion := range newRegions { newRegionSet[newRegion.Region.Id] = newRegion } @@ -318,7 +295,7 @@ func (rs *RegionSplitter) ScatterRegionsWithBackoffer(ctx context.Context, newRe // if all region are failed to scatter, the short error might also be verbose... logutil.ShortError(err), logutil.AbbreviatedArray("failed-regions", newRegionSet, func(i interface{}) []string { - m := i.(map[uint64]*RegionInfo) + m := i.(map[uint64]*split.RegionInfo) result := make([]string, len(m)) for id := range m { result = append(result, strconv.Itoa(int(id))) @@ -331,7 +308,7 @@ func (rs *RegionSplitter) ScatterRegionsWithBackoffer(ctx context.Context, newRe } // ScatterRegions scatter the regions. -func (rs *RegionSplitter) ScatterRegions(ctx context.Context, newRegions []*RegionInfo) { +func (rs *RegionSplitter) ScatterRegions(ctx context.Context, newRegions []*split.RegionInfo) { rs.ScatterRegionsWithBackoffer( ctx, newRegions, // backoff about 6s, or we give up scattering this region. @@ -341,104 +318,9 @@ func (rs *RegionSplitter) ScatterRegions(ctx context.Context, newRegions []*Regi }) } -func checkRegionConsistency(startKey, endKey []byte, regions []*RegionInfo) error { - // current pd can't guarantee the consistency of returned regions - if len(regions) == 0 { - return errors.Annotatef(berrors.ErrPDBatchScanRegion, "scan region return empty result, startKey: %s, endkey: %s", - redact.Key(startKey), redact.Key(endKey)) - } - - if bytes.Compare(regions[0].Region.StartKey, startKey) > 0 { - return errors.Annotatef(berrors.ErrPDBatchScanRegion, "first region's startKey > startKey, startKey: %s, regionStartKey: %s", - redact.Key(startKey), redact.Key(regions[0].Region.StartKey)) - } else if len(regions[len(regions)-1].Region.EndKey) != 0 && bytes.Compare(regions[len(regions)-1].Region.EndKey, endKey) < 0 { - return errors.Annotatef(berrors.ErrPDBatchScanRegion, "last region's endKey < startKey, startKey: %s, regionStartKey: %s", - redact.Key(endKey), redact.Key(regions[len(regions)-1].Region.EndKey)) - } - - cur := regions[0] - for _, r := range regions[1:] { - if !bytes.Equal(cur.Region.EndKey, r.Region.StartKey) { - return errors.Annotatef(berrors.ErrPDBatchScanRegion, "region endKey not equal to next region startKey, endKey: %s, startKey: %s", - redact.Key(cur.Region.EndKey), redact.Key(r.Region.StartKey)) - } - cur = r - } - - return nil -} - -// PaginateScanRegion scan regions with a limit pagination and -// return all regions at once. -// It reduces max gRPC message size. -func PaginateScanRegion( - ctx context.Context, client SplitClient, startKey, endKey []byte, limit int, -) ([]*RegionInfo, error) { - if len(endKey) != 0 && bytes.Compare(startKey, endKey) >= 0 { - return nil, errors.Annotatef(berrors.ErrRestoreInvalidRange, "startKey >= endKey, startKey: %s, endkey: %s", - hex.EncodeToString(startKey), hex.EncodeToString(endKey)) - } - - var regions []*RegionInfo - err := utildb.WithRetry(ctx, func() error { - regions = []*RegionInfo{} - scanStartKey := startKey - for { - batch, err := client.ScanRegions(ctx, scanStartKey, endKey, limit) - if err != nil { - return errors.Trace(err) - } - regions = append(regions, batch...) - if len(batch) < limit { - // No more region - break - } - scanStartKey = batch[len(batch)-1].Region.GetEndKey() - if len(scanStartKey) == 0 || - (len(endKey) > 0 && bytes.Compare(scanStartKey, endKey) >= 0) { - // All key space have scanned - break - } - } - if err := checkRegionConsistency(startKey, endKey, regions); err != nil { - log.Warn("failed to scan region, retrying", logutil.ShortError(err)) - return err - } - return nil - }, newScanRegionBackoffer()) - - return regions, err -} - -type scanRegionBackoffer struct { - attempt int -} - -func newScanRegionBackoffer() utildb.Backoffer { - return &scanRegionBackoffer{ - attempt: 3, - } -} - -// NextBackoff returns a duration to wait before retrying again -func (b *scanRegionBackoffer) NextBackoff(err error) time.Duration { - if berrors.ErrPDBatchScanRegion.Equal(err) { - // 500ms * 3 could be enough for splitting remain regions in the hole. - b.attempt-- - return 500 * time.Millisecond - } - b.attempt = 0 - return 0 -} - -// Attempt returns the remain attempt times -func (b *scanRegionBackoffer) Attempt() int { - return b.attempt -} - // getSplitKeys checks if the regions should be split by the end key of // the ranges, groups the split keys by region id. -func getSplitKeys(rewriteRules *RewriteRules, ranges []rtree.Range, regions []*RegionInfo) map[uint64][][]byte { +func getSplitKeys(rewriteRules *RewriteRules, ranges []rtree.Range, regions []*split.RegionInfo) map[uint64][][]byte { splitKeyMap := make(map[uint64][][]byte) checkKeys := make([][]byte, 0) for _, rg := range ranges { @@ -461,7 +343,7 @@ func getSplitKeys(rewriteRules *RewriteRules, ranges []rtree.Range, regions []*R } // NeedSplit checks whether a key is necessary to split, if true returns the split region. -func NeedSplit(splitKey []byte, regions []*RegionInfo) *RegionInfo { +func NeedSplit(splitKey []byte, regions []*split.RegionInfo) *split.RegionInfo { // If splitKey is the max key. if len(splitKey) == 0 { return nil diff --git a/br/pkg/restore/split/region.go b/br/pkg/restore/split/region.go new file mode 100644 index 0000000000000..2d8a72f75e072 --- /dev/null +++ b/br/pkg/restore/split/region.go @@ -0,0 +1,21 @@ +package split + +import ( + "bytes" + + "github.com/pingcap/kvproto/pkg/metapb" +) + +// RegionInfo includes a region and the leader of the region. +type RegionInfo struct { + Region *metapb.Region + Leader *metapb.Peer +} + +// ContainsInterior returns whether the region contains the given key, and also +// that the key does not fall on the boundary (start key) of the region. +func (region *RegionInfo) ContainsInterior(key []byte) bool { + return bytes.Compare(key, region.Region.GetStartKey()) > 0 && + (len(region.Region.GetEndKey()) == 0 || + bytes.Compare(key, region.Region.GetEndKey()) < 0) +} diff --git a/br/pkg/restore/split/split.go b/br/pkg/restore/split/split.go new file mode 100644 index 0000000000000..487f72c52c611 --- /dev/null +++ b/br/pkg/restore/split/split.go @@ -0,0 +1,25 @@ +package split + +import "time" + +// Constants for split retry machinery. +const ( + SplitRetryTimes = 32 + SplitRetryInterval = 50 * time.Millisecond + SplitMaxRetryInterval = time.Second + + SplitCheckMaxRetryTimes = 64 + SplitCheckInterval = 8 * time.Millisecond + SplitMaxCheckInterval = time.Second + + ScatterWaitMaxRetryTimes = 64 + ScatterWaitInterval = 50 * time.Millisecond + ScatterMaxWaitInterval = time.Second + ScatterWaitUpperInterval = 180 * time.Second + + ScanRegionPaginationLimit = 128 + + RejectStoreCheckRetryTimes = 64 + RejectStoreCheckInterval = 100 * time.Millisecond + RejectStoreMaxCheckInterval = 2 * time.Second +) diff --git a/br/pkg/restore/split/split_client.go b/br/pkg/restore/split/split_client.go new file mode 100644 index 0000000000000..ec105d1f64f5e --- /dev/null +++ b/br/pkg/restore/split/split_client.go @@ -0,0 +1,668 @@ +package split + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/kvproto/pkg/errorpb" + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/pingcap/kvproto/pkg/tikvpb" + "github.com/pingcap/log" + pd "github.com/tikv/pd/client" + "github.com/tikv/pd/server/config" + "github.com/tikv/pd/server/schedule/placement" + "go.uber.org/multierr" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/pingcap/tidb/br/pkg/conn" + errors2 "github.com/pingcap/tidb/br/pkg/errors" + "github.com/pingcap/tidb/br/pkg/httputil" + "github.com/pingcap/tidb/br/pkg/logutil" + "github.com/pingcap/tidb/br/pkg/redact" + "github.com/pingcap/tidb/br/pkg/utils/utildb" +) + +// SplitClient is an external client used by RegionSplitter. +type SplitClient interface { + // GetStore gets a store by a store id. + GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) + // GetRegion gets a region which includes a specified key. + GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) + // GetRegionByID gets a region by a region id. + GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) + // SplitRegion splits a region from a key, if key is not included in the region, it will return nil. + // note: the key should not be encoded + SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error) + // BatchSplitRegions splits a region from a batch of keys. + // note: the keys should not be encoded + BatchSplitRegions(ctx context.Context, regionInfo *RegionInfo, keys [][]byte) ([]*RegionInfo, error) + // BatchSplitRegionsWithOrigin splits a region from a batch of keys and return the original region and split new regions + BatchSplitRegionsWithOrigin(ctx context.Context, regionInfo *RegionInfo, keys [][]byte) (*RegionInfo, []*RegionInfo, error) + // ScatterRegion scatters a specified region. + ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error + // GetOperator gets the status of operator of the specified region. + GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) + // ScanRegion gets a list of regions, starts from the region that contains key. + // Limit limits the maximum number of regions returned. + ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) + // GetPlacementRule loads a placement rule from PD. + GetPlacementRule(ctx context.Context, groupID, ruleID string) (placement.Rule, error) + // SetPlacementRule insert or update a placement rule to PD. + SetPlacementRule(ctx context.Context, rule placement.Rule) error + // DeletePlacementRule removes a placement rule from PD. + DeletePlacementRule(ctx context.Context, groupID, ruleID string) error + // SetStoreLabel add or update specified label of stores. If labelValue + // is empty, it clears the label. + SetStoresLabel(ctx context.Context, stores []uint64, labelKey, labelValue string) error +} + +func checkRegionConsistency(startKey, endKey []byte, regions []*RegionInfo) error { + // current pd can't guarantee the consistency of returned regions + if len(regions) == 0 { + return errors.Annotatef(errors2.ErrPDBatchScanRegion, "scan region return empty result, startKey: %s, endkey: %s", + redact.Key(startKey), redact.Key(endKey)) + } + + if bytes.Compare(regions[0].Region.StartKey, startKey) > 0 { + return errors.Annotatef(errors2.ErrPDBatchScanRegion, "first region's startKey > startKey, startKey: %s, regionStartKey: %s", + redact.Key(startKey), redact.Key(regions[0].Region.StartKey)) + } else if len(regions[len(regions)-1].Region.EndKey) != 0 && bytes.Compare(regions[len(regions)-1].Region.EndKey, endKey) < 0 { + return errors.Annotatef(errors2.ErrPDBatchScanRegion, "last region's endKey < startKey, startKey: %s, regionStartKey: %s", + redact.Key(endKey), redact.Key(regions[len(regions)-1].Region.EndKey)) + } + + cur := regions[0] + for _, r := range regions[1:] { + if !bytes.Equal(cur.Region.EndKey, r.Region.StartKey) { + return errors.Annotatef(errors2.ErrPDBatchScanRegion, "region endKey not equal to next region startKey, endKey: %s, startKey: %s", + redact.Key(cur.Region.EndKey), redact.Key(r.Region.StartKey)) + } + cur = r + } + + return nil +} + +// PaginateScanRegion scan regions with a limit pagination and +// return all regions at once. +// It reduces max gRPC message size. +func PaginateScanRegion( + ctx context.Context, client SplitClient, startKey, endKey []byte, limit int, +) ([]*RegionInfo, error) { + if len(endKey) != 0 && bytes.Compare(startKey, endKey) >= 0 { + return nil, errors.Annotatef(errors2.ErrRestoreInvalidRange, "startKey >= endKey, startKey: %s, endkey: %s", + hex.EncodeToString(startKey), hex.EncodeToString(endKey)) + } + + var regions []*RegionInfo + err := utildb.WithRetry(ctx, func() error { + regions = []*RegionInfo{} + scanStartKey := startKey + for { + batch, err := client.ScanRegions(ctx, scanStartKey, endKey, limit) + if err != nil { + return errors.Trace(err) + } + regions = append(regions, batch...) + if len(batch) < limit { + // No more region + break + } + scanStartKey = batch[len(batch)-1].Region.GetEndKey() + if len(scanStartKey) == 0 || + (len(endKey) > 0 && bytes.Compare(scanStartKey, endKey) >= 0) { + // All key space have scanned + break + } + } + if err := checkRegionConsistency(startKey, endKey, regions); err != nil { + log.Warn("failed to scan region, retrying", logutil.ShortError(err)) + return err + } + return nil + }, newScanRegionBackoffer()) + + return regions, err +} + +type scanRegionBackoffer struct { + attempt int +} + +func newScanRegionBackoffer() utildb.Backoffer { + return &scanRegionBackoffer{ + attempt: 3, + } +} + +// NextBackoff returns a duration to wait before retrying again +func (b *scanRegionBackoffer) NextBackoff(err error) time.Duration { + if errors2.ErrPDBatchScanRegion.Equal(err) { + // 500ms * 3 could be enough for splitting remain regions in the hole. + b.attempt-- + return 500 * time.Millisecond + } + b.attempt = 0 + return 0 +} + +// Attempt returns the remain attempt times +func (b *scanRegionBackoffer) Attempt() int { + return b.attempt +} + +const ( + splitRegionMaxRetryTime = 4 +) + +// pdClient is a wrapper of pd client, can be used by RegionSplitter. +type pdClient struct { + mu sync.Mutex + client pd.Client + tlsConf *tls.Config + storeCache map[uint64]*metapb.Store + + // FIXME when config changed during the lifetime of pdClient, + // this may mislead the scatter. + needScatterVal bool + needScatterInit sync.Once +} + +// NewSplitClient returns a client used by RegionSplitter. +func NewSplitClient(client pd.Client, tlsConf *tls.Config) SplitClient { + cli := &pdClient{ + client: client, + tlsConf: tlsConf, + storeCache: make(map[uint64]*metapb.Store), + } + return cli +} + +func (c *pdClient) needScatter(ctx context.Context) bool { + c.needScatterInit.Do(func() { + var err error + c.needScatterVal, err = c.checkNeedScatter(ctx) + if err != nil { + log.Warn("failed to check whether need to scatter, use permissive strategy: always scatter", logutil.ShortError(err)) + c.needScatterVal = true + } + if !c.needScatterVal { + log.Info("skipping scatter because the replica number isn't less than store count.") + } + }) + return c.needScatterVal +} + +func (c *pdClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { + c.mu.Lock() + defer c.mu.Unlock() + store, ok := c.storeCache[storeID] + if ok { + return store, nil + } + store, err := c.client.GetStore(ctx, storeID) + if err != nil { + return nil, errors.Trace(err) + } + c.storeCache[storeID] = store + return store, nil +} + +func (c *pdClient) GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) { + region, err := c.client.GetRegion(ctx, key) + if err != nil { + return nil, errors.Trace(err) + } + if region == nil { + return nil, nil + } + return &RegionInfo{ + Region: region.Meta, + Leader: region.Leader, + }, nil +} + +func (c *pdClient) GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) { + region, err := c.client.GetRegionByID(ctx, regionID) + if err != nil { + return nil, errors.Trace(err) + } + if region == nil { + return nil, nil + } + return &RegionInfo{ + Region: region.Meta, + Leader: region.Leader, + }, nil +} + +func (c *pdClient) SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error) { + var peer *metapb.Peer + if regionInfo.Leader != nil { + peer = regionInfo.Leader + } else { + if len(regionInfo.Region.Peers) == 0 { + return nil, errors.Annotate(errors2.ErrRestoreNoPeer, "region does not have peer") + } + peer = regionInfo.Region.Peers[0] + } + storeID := peer.GetStoreId() + store, err := c.GetStore(ctx, storeID) + if err != nil { + return nil, errors.Trace(err) + } + conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure()) + if err != nil { + return nil, errors.Trace(err) + } + defer conn.Close() + + client := tikvpb.NewTikvClient(conn) + resp, err := client.SplitRegion(ctx, &kvrpcpb.SplitRegionRequest{ + Context: &kvrpcpb.Context{ + RegionId: regionInfo.Region.Id, + RegionEpoch: regionInfo.Region.RegionEpoch, + Peer: peer, + }, + SplitKey: key, + }) + if err != nil { + return nil, errors.Trace(err) + } + if resp.RegionError != nil { + log.Error("fail to split region", + logutil.Region(regionInfo.Region), + logutil.Key("key", key), + zap.Stringer("regionErr", resp.RegionError)) + return nil, errors.Annotatef(errors2.ErrRestoreSplitFailed, "err=%v", resp.RegionError) + } + + // BUG: Left is deprecated, it may be nil even if split is succeed! + // Assume the new region is the left one. + newRegion := resp.GetLeft() + if newRegion == nil { + regions := resp.GetRegions() + for _, r := range regions { + if bytes.Equal(r.GetStartKey(), regionInfo.Region.GetStartKey()) { + newRegion = r + break + } + } + } + if newRegion == nil { + return nil, errors.Annotate(errors2.ErrRestoreSplitFailed, "new region is nil") + } + var leader *metapb.Peer + // Assume the leaders will be at the same store. + if regionInfo.Leader != nil { + for _, p := range newRegion.GetPeers() { + if p.GetStoreId() == regionInfo.Leader.GetStoreId() { + leader = p + break + } + } + } + return &RegionInfo{ + Region: newRegion, + Leader: leader, + }, nil +} + +func splitRegionWithFailpoint( + ctx context.Context, + regionInfo *RegionInfo, + peer *metapb.Peer, + client tikvpb.TikvClient, + keys [][]byte, +) (*kvrpcpb.SplitRegionResponse, error) { + failpoint.Inject("not-leader-error", func(injectNewLeader failpoint.Value) { + log.Debug("failpoint not-leader-error injected.") + resp := &kvrpcpb.SplitRegionResponse{ + RegionError: &errorpb.Error{ + NotLeader: &errorpb.NotLeader{ + RegionId: regionInfo.Region.Id, + }, + }, + } + if injectNewLeader.(bool) { + resp.RegionError.NotLeader.Leader = regionInfo.Leader + } + failpoint.Return(resp, nil) + }) + failpoint.Inject("somewhat-retryable-error", func() { + log.Debug("failpoint somewhat-retryable-error injected.") + failpoint.Return(&kvrpcpb.SplitRegionResponse{ + RegionError: &errorpb.Error{ + ServerIsBusy: &errorpb.ServerIsBusy{}, + }, + }, nil) + }) + return client.SplitRegion(ctx, &kvrpcpb.SplitRegionRequest{ + Context: &kvrpcpb.Context{ + RegionId: regionInfo.Region.Id, + RegionEpoch: regionInfo.Region.RegionEpoch, + Peer: peer, + }, + SplitKeys: keys, + }) +} + +func (c *pdClient) sendSplitRegionRequest( + ctx context.Context, regionInfo *RegionInfo, keys [][]byte, +) (*kvrpcpb.SplitRegionResponse, error) { + var splitErrors error + for i := 0; i < splitRegionMaxRetryTime; i++ { + var peer *metapb.Peer + // scanRegions may return empty Leader in https://github.com/tikv/pd/blob/v4.0.8/server/grpc_service.go#L524 + // so wee also need check Leader.Id != 0 + if regionInfo.Leader != nil && regionInfo.Leader.Id != 0 { + peer = regionInfo.Leader + } else { + if len(regionInfo.Region.Peers) == 0 { + return nil, multierr.Append(splitErrors, + errors.Annotatef(errors2.ErrRestoreNoPeer, "region[%d] doesn't have any peer", regionInfo.Region.GetId())) + } + peer = regionInfo.Region.Peers[0] + } + storeID := peer.GetStoreId() + store, err := c.GetStore(ctx, storeID) + if err != nil { + return nil, multierr.Append(splitErrors, err) + } + opt := grpc.WithInsecure() + if c.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(c.tlsConf)) + } + conn, err := grpc.Dial(store.GetAddress(), opt) + if err != nil { + return nil, multierr.Append(splitErrors, err) + } + defer conn.Close() + client := tikvpb.NewTikvClient(conn) + resp, err := splitRegionWithFailpoint(ctx, regionInfo, peer, client, keys) + if err != nil { + return nil, multierr.Append(splitErrors, err) + } + if resp.RegionError != nil { + log.Warn("fail to split region", + logutil.Region(regionInfo.Region), + zap.Stringer("regionErr", resp.RegionError)) + splitErrors = multierr.Append(splitErrors, + errors.Annotatef(errors2.ErrRestoreSplitFailed, "split region failed: err=%v", resp.RegionError)) + if nl := resp.RegionError.NotLeader; nl != nil { + if leader := nl.GetLeader(); leader != nil { + regionInfo.Leader = leader + } else { + newRegionInfo, findLeaderErr := c.GetRegionByID(ctx, nl.RegionId) + if findLeaderErr != nil { + return nil, multierr.Append(splitErrors, findLeaderErr) + } + if !CheckRegionEpoch(newRegionInfo, regionInfo) { + return nil, multierr.Append(splitErrors, errors2.ErrKVEpochNotMatch) + } + log.Info("find new leader", zap.Uint64("new leader", newRegionInfo.Leader.Id)) + regionInfo = newRegionInfo + } + log.Info("split region meet not leader error, retrying", + zap.Int("retry times", i), + zap.Uint64("regionID", regionInfo.Region.Id), + zap.Any("new leader", regionInfo.Leader), + ) + continue + } + // TODO: we don't handle RegionNotMatch and RegionNotFound here, + // because I think we don't have enough information to retry. + // But maybe we can handle them here by some information the error itself provides. + if resp.RegionError.ServerIsBusy != nil || + resp.RegionError.StaleCommand != nil { + log.Warn("a error occurs on split region", + zap.Int("retry times", i), + zap.Uint64("regionID", regionInfo.Region.Id), + zap.String("error", resp.RegionError.Message), + zap.Any("error verbose", resp.RegionError), + ) + continue + } + return nil, errors.Trace(splitErrors) + } + return resp, nil + } + return nil, errors.Trace(splitErrors) +} + +func (c *pdClient) BatchSplitRegionsWithOrigin( + ctx context.Context, regionInfo *RegionInfo, keys [][]byte, +) (*RegionInfo, []*RegionInfo, error) { + resp, err := c.sendSplitRegionRequest(ctx, regionInfo, keys) + if err != nil { + return nil, nil, errors.Trace(err) + } + + regions := resp.GetRegions() + newRegionInfos := make([]*RegionInfo, 0, len(regions)) + var originRegion *RegionInfo + for _, region := range regions { + var leader *metapb.Peer + + // Assume the leaders will be at the same store. + if regionInfo.Leader != nil { + for _, p := range region.GetPeers() { + if p.GetStoreId() == regionInfo.Leader.GetStoreId() { + leader = p + break + } + } + } + // original region + if region.GetId() == regionInfo.Region.GetId() { + originRegion = &RegionInfo{ + Region: region, + Leader: leader, + } + continue + } + newRegionInfos = append(newRegionInfos, &RegionInfo{ + Region: region, + Leader: leader, + }) + } + return originRegion, newRegionInfos, nil +} + +func (c *pdClient) BatchSplitRegions( + ctx context.Context, regionInfo *RegionInfo, keys [][]byte, +) ([]*RegionInfo, error) { + _, newRegions, err := c.BatchSplitRegionsWithOrigin(ctx, regionInfo, keys) + return newRegions, err +} + +func (c *pdClient) getStoreCount(ctx context.Context) (int, error) { + stores, err := conn.GetAllTiKVStores(ctx, c.client, conn.SkipTiFlash) + if err != nil { + return 0, err + } + return len(stores), err +} + +func (c *pdClient) getMaxReplica(ctx context.Context) (int, error) { + api := c.getPDAPIAddr() + configAPI := api + "/pd/api/v1/config" + req, err := http.NewRequestWithContext(ctx, "GET", configAPI, nil) + if err != nil { + return 0, errors.Trace(err) + } + res, err := httputil.NewClient(c.tlsConf).Do(req) + if err != nil { + return 0, errors.Trace(err) + } + var conf config.Config + if err := json.NewDecoder(res.Body).Decode(&conf); err != nil { + return 0, errors.Trace(err) + } + return int(conf.Replication.MaxReplicas), nil +} + +func (c *pdClient) checkNeedScatter(ctx context.Context) (bool, error) { + storeCount, err := c.getStoreCount(ctx) + if err != nil { + return false, err + } + maxReplica, err := c.getMaxReplica(ctx) + if err != nil { + return false, err + } + log.Info("checking whether need to scatter", zap.Int("store", storeCount), zap.Int("max-replica", maxReplica)) + // Skipping scatter may lead to leader unbalanced, + // currently, we skip scatter only when: + // 1. max-replica > store-count (Probably a misconfigured or playground cluster.) + // 2. store-count == 1 (No meaning for scattering.) + // We can still omit scatter when `max-replica == store-count`, if we create a BalanceLeader operator here, + // however, there isn't evidence for transform leader is much faster than scattering empty regions. + return storeCount >= maxReplica && storeCount > 1, nil +} + +func (c *pdClient) ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error { + if !c.needScatter(ctx) { + return nil + } + return c.client.ScatterRegion(ctx, regionInfo.Region.GetId()) +} + +func (c *pdClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) { + return c.client.GetOperator(ctx, regionID) +} + +func (c *pdClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) { + regions, err := c.client.ScanRegions(ctx, key, endKey, limit) + if err != nil { + return nil, errors.Trace(err) + } + regionInfos := make([]*RegionInfo, 0, len(regions)) + for _, region := range regions { + regionInfos = append(regionInfos, &RegionInfo{ + Region: region.Meta, + Leader: region.Leader, + }) + } + return regionInfos, nil +} + +func (c *pdClient) GetPlacementRule(ctx context.Context, groupID, ruleID string) (placement.Rule, error) { + var rule placement.Rule + addr := c.getPDAPIAddr() + if addr == "" { + return rule, errors.Annotate(errors2.ErrRestoreSplitFailed, "failed to add stores labels: no leader") + } + req, err := http.NewRequestWithContext(ctx, "GET", addr+path.Join("/pd/api/v1/config/rule", groupID, ruleID), nil) + if err != nil { + return rule, errors.Trace(err) + } + res, err := httputil.NewClient(c.tlsConf).Do(req) + if err != nil { + return rule, errors.Trace(err) + } + b, err := io.ReadAll(res.Body) + if err != nil { + return rule, errors.Trace(err) + } + res.Body.Close() + err = json.Unmarshal(b, &rule) + if err != nil { + return rule, errors.Trace(err) + } + return rule, nil +} + +func (c *pdClient) SetPlacementRule(ctx context.Context, rule placement.Rule) error { + addr := c.getPDAPIAddr() + if addr == "" { + return errors.Annotate(errors2.ErrPDLeaderNotFound, "failed to add stores labels") + } + m, _ := json.Marshal(rule) + req, err := http.NewRequestWithContext(ctx, "POST", addr+path.Join("/pd/api/v1/config/rule"), bytes.NewReader(m)) + if err != nil { + return errors.Trace(err) + } + res, err := httputil.NewClient(c.tlsConf).Do(req) + if err != nil { + return errors.Trace(err) + } + return errors.Trace(res.Body.Close()) +} + +func (c *pdClient) DeletePlacementRule(ctx context.Context, groupID, ruleID string) error { + addr := c.getPDAPIAddr() + if addr == "" { + return errors.Annotate(errors2.ErrPDLeaderNotFound, "failed to add stores labels") + } + req, err := http.NewRequestWithContext(ctx, "DELETE", addr+path.Join("/pd/api/v1/config/rule", groupID, ruleID), nil) + if err != nil { + return errors.Trace(err) + } + res, err := httputil.NewClient(c.tlsConf).Do(req) + if err != nil { + return errors.Trace(err) + } + return errors.Trace(res.Body.Close()) +} + +func (c *pdClient) SetStoresLabel( + ctx context.Context, stores []uint64, labelKey, labelValue string, +) error { + b := []byte(fmt.Sprintf(`{"%s": "%s"}`, labelKey, labelValue)) + addr := c.getPDAPIAddr() + if addr == "" { + return errors.Annotate(errors2.ErrPDLeaderNotFound, "failed to add stores labels") + } + httpCli := httputil.NewClient(c.tlsConf) + for _, id := range stores { + req, err := http.NewRequestWithContext( + ctx, "POST", + addr+path.Join("/pd/api/v1/store", strconv.FormatUint(id, 10), "label"), + bytes.NewReader(b), + ) + if err != nil { + return errors.Trace(err) + } + res, err := httpCli.Do(req) + if err != nil { + return errors.Trace(err) + } + err = res.Body.Close() + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +func (c *pdClient) getPDAPIAddr() string { + addr := c.client.GetLeaderAddr() + if addr != "" && !strings.HasPrefix(addr, "http") { + addr = "http://" + addr + } + return strings.TrimRight(addr, "/") +} + +func CheckRegionEpoch(new, old *RegionInfo) bool { + return new.Region.GetId() == old.Region.GetId() && + new.Region.GetRegionEpoch().GetVersion() == old.Region.GetRegionEpoch().GetVersion() && + new.Region.GetRegionEpoch().GetConfVer() == old.Region.GetRegionEpoch().GetConfVer() +} diff --git a/br/pkg/restore/split_client.go b/br/pkg/restore/split_client.go index e5105a56dc603..fd38210125942 100755 --- a/br/pkg/restore/split_client.go +++ b/br/pkg/restore/split_client.go @@ -3,574 +3,12 @@ package restore import ( - "bytes" - "context" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "net/http" - "path" - "strconv" "strings" - "sync" "time" - "github.com/pingcap/errors" - "github.com/pingcap/failpoint" - "github.com/pingcap/kvproto/pkg/errorpb" - "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/kvproto/pkg/pdpb" - "github.com/pingcap/kvproto/pkg/tikvpb" - "github.com/pingcap/log" - "github.com/pingcap/tidb/br/pkg/conn" - berrors "github.com/pingcap/tidb/br/pkg/errors" - "github.com/pingcap/tidb/br/pkg/httputil" - "github.com/pingcap/tidb/br/pkg/logutil" - pd "github.com/tikv/pd/client" - "github.com/tikv/pd/server/config" - "github.com/tikv/pd/server/schedule/placement" - "go.uber.org/multierr" - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/status" ) -const ( - splitRegionMaxRetryTime = 4 -) - -// SplitClient is an external client used by RegionSplitter. -type SplitClient interface { - // GetStore gets a store by a store id. - GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) - // GetRegion gets a region which includes a specified key. - GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) - // GetRegionByID gets a region by a region id. - GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) - // SplitRegion splits a region from a key, if key is not included in the region, it will return nil. - // note: the key should not be encoded - SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error) - // BatchSplitRegions splits a region from a batch of keys. - // note: the keys should not be encoded - BatchSplitRegions(ctx context.Context, regionInfo *RegionInfo, keys [][]byte) ([]*RegionInfo, error) - // BatchSplitRegionsWithOrigin splits a region from a batch of keys and return the original region and split new regions - BatchSplitRegionsWithOrigin(ctx context.Context, regionInfo *RegionInfo, keys [][]byte) (*RegionInfo, []*RegionInfo, error) - // ScatterRegion scatters a specified region. - ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error - // GetOperator gets the status of operator of the specified region. - GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) - // ScanRegion gets a list of regions, starts from the region that contains key. - // Limit limits the maximum number of regions returned. - ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) - // GetPlacementRule loads a placement rule from PD. - GetPlacementRule(ctx context.Context, groupID, ruleID string) (placement.Rule, error) - // SetPlacementRule insert or update a placement rule to PD. - SetPlacementRule(ctx context.Context, rule placement.Rule) error - // DeletePlacementRule removes a placement rule from PD. - DeletePlacementRule(ctx context.Context, groupID, ruleID string) error - // SetStoreLabel add or update specified label of stores. If labelValue - // is empty, it clears the label. - SetStoresLabel(ctx context.Context, stores []uint64, labelKey, labelValue string) error -} - -// pdClient is a wrapper of pd client, can be used by RegionSplitter. -type pdClient struct { - mu sync.Mutex - client pd.Client - tlsConf *tls.Config - storeCache map[uint64]*metapb.Store - - // FIXME when config changed during the lifetime of pdClient, - // this may mislead the scatter. - needScatterVal bool - needScatterInit sync.Once -} - -// NewSplitClient returns a client used by RegionSplitter. -func NewSplitClient(client pd.Client, tlsConf *tls.Config) SplitClient { - cli := &pdClient{ - client: client, - tlsConf: tlsConf, - storeCache: make(map[uint64]*metapb.Store), - } - return cli -} - -func (c *pdClient) needScatter(ctx context.Context) bool { - c.needScatterInit.Do(func() { - var err error - c.needScatterVal, err = c.checkNeedScatter(ctx) - if err != nil { - log.Warn("failed to check whether need to scatter, use permissive strategy: always scatter", logutil.ShortError(err)) - c.needScatterVal = true - } - if !c.needScatterVal { - log.Info("skipping scatter because the replica number isn't less than store count.") - } - }) - return c.needScatterVal -} - -func (c *pdClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { - c.mu.Lock() - defer c.mu.Unlock() - store, ok := c.storeCache[storeID] - if ok { - return store, nil - } - store, err := c.client.GetStore(ctx, storeID) - if err != nil { - return nil, errors.Trace(err) - } - c.storeCache[storeID] = store - return store, nil -} - -func (c *pdClient) GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) { - region, err := c.client.GetRegion(ctx, key) - if err != nil { - return nil, errors.Trace(err) - } - if region == nil { - return nil, nil - } - return &RegionInfo{ - Region: region.Meta, - Leader: region.Leader, - }, nil -} - -func (c *pdClient) GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) { - region, err := c.client.GetRegionByID(ctx, regionID) - if err != nil { - return nil, errors.Trace(err) - } - if region == nil { - return nil, nil - } - return &RegionInfo{ - Region: region.Meta, - Leader: region.Leader, - }, nil -} - -func (c *pdClient) SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error) { - var peer *metapb.Peer - if regionInfo.Leader != nil { - peer = regionInfo.Leader - } else { - if len(regionInfo.Region.Peers) == 0 { - return nil, errors.Annotate(berrors.ErrRestoreNoPeer, "region does not have peer") - } - peer = regionInfo.Region.Peers[0] - } - storeID := peer.GetStoreId() - store, err := c.GetStore(ctx, storeID) - if err != nil { - return nil, errors.Trace(err) - } - conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure()) - if err != nil { - return nil, errors.Trace(err) - } - defer conn.Close() - - client := tikvpb.NewTikvClient(conn) - resp, err := client.SplitRegion(ctx, &kvrpcpb.SplitRegionRequest{ - Context: &kvrpcpb.Context{ - RegionId: regionInfo.Region.Id, - RegionEpoch: regionInfo.Region.RegionEpoch, - Peer: peer, - }, - SplitKey: key, - }) - if err != nil { - return nil, errors.Trace(err) - } - if resp.RegionError != nil { - log.Error("fail to split region", - logutil.Region(regionInfo.Region), - logutil.Key("key", key), - zap.Stringer("regionErr", resp.RegionError)) - return nil, errors.Annotatef(berrors.ErrRestoreSplitFailed, "err=%v", resp.RegionError) - } - - // BUG: Left is deprecated, it may be nil even if split is succeed! - // Assume the new region is the left one. - newRegion := resp.GetLeft() - if newRegion == nil { - regions := resp.GetRegions() - for _, r := range regions { - if bytes.Equal(r.GetStartKey(), regionInfo.Region.GetStartKey()) { - newRegion = r - break - } - } - } - if newRegion == nil { - return nil, errors.Annotate(berrors.ErrRestoreSplitFailed, "new region is nil") - } - var leader *metapb.Peer - // Assume the leaders will be at the same store. - if regionInfo.Leader != nil { - for _, p := range newRegion.GetPeers() { - if p.GetStoreId() == regionInfo.Leader.GetStoreId() { - leader = p - break - } - } - } - return &RegionInfo{ - Region: newRegion, - Leader: leader, - }, nil -} - -func splitRegionWithFailpoint( - ctx context.Context, - regionInfo *RegionInfo, - peer *metapb.Peer, - client tikvpb.TikvClient, - keys [][]byte, -) (*kvrpcpb.SplitRegionResponse, error) { - failpoint.Inject("not-leader-error", func(injectNewLeader failpoint.Value) { - log.Debug("failpoint not-leader-error injected.") - resp := &kvrpcpb.SplitRegionResponse{ - RegionError: &errorpb.Error{ - NotLeader: &errorpb.NotLeader{ - RegionId: regionInfo.Region.Id, - }, - }, - } - if injectNewLeader.(bool) { - resp.RegionError.NotLeader.Leader = regionInfo.Leader - } - failpoint.Return(resp, nil) - }) - failpoint.Inject("somewhat-retryable-error", func() { - log.Debug("failpoint somewhat-retryable-error injected.") - failpoint.Return(&kvrpcpb.SplitRegionResponse{ - RegionError: &errorpb.Error{ - ServerIsBusy: &errorpb.ServerIsBusy{}, - }, - }, nil) - }) - return client.SplitRegion(ctx, &kvrpcpb.SplitRegionRequest{ - Context: &kvrpcpb.Context{ - RegionId: regionInfo.Region.Id, - RegionEpoch: regionInfo.Region.RegionEpoch, - Peer: peer, - }, - SplitKeys: keys, - }) -} - -func (c *pdClient) sendSplitRegionRequest( - ctx context.Context, regionInfo *RegionInfo, keys [][]byte, -) (*kvrpcpb.SplitRegionResponse, error) { - var splitErrors error - for i := 0; i < splitRegionMaxRetryTime; i++ { - var peer *metapb.Peer - // scanRegions may return empty Leader in https://github.com/tikv/pd/blob/v4.0.8/server/grpc_service.go#L524 - // so wee also need check Leader.Id != 0 - if regionInfo.Leader != nil && regionInfo.Leader.Id != 0 { - peer = regionInfo.Leader - } else { - if len(regionInfo.Region.Peers) == 0 { - return nil, multierr.Append(splitErrors, - errors.Annotatef(berrors.ErrRestoreNoPeer, "region[%d] doesn't have any peer", regionInfo.Region.GetId())) - } - peer = regionInfo.Region.Peers[0] - } - storeID := peer.GetStoreId() - store, err := c.GetStore(ctx, storeID) - if err != nil { - return nil, multierr.Append(splitErrors, err) - } - opt := grpc.WithInsecure() - if c.tlsConf != nil { - opt = grpc.WithTransportCredentials(credentials.NewTLS(c.tlsConf)) - } - conn, err := grpc.Dial(store.GetAddress(), opt) - if err != nil { - return nil, multierr.Append(splitErrors, err) - } - defer conn.Close() - client := tikvpb.NewTikvClient(conn) - resp, err := splitRegionWithFailpoint(ctx, regionInfo, peer, client, keys) - if err != nil { - return nil, multierr.Append(splitErrors, err) - } - if resp.RegionError != nil { - log.Warn("fail to split region", - logutil.Region(regionInfo.Region), - zap.Stringer("regionErr", resp.RegionError)) - splitErrors = multierr.Append(splitErrors, - errors.Annotatef(berrors.ErrRestoreSplitFailed, "split region failed: err=%v", resp.RegionError)) - if nl := resp.RegionError.NotLeader; nl != nil { - if leader := nl.GetLeader(); leader != nil { - regionInfo.Leader = leader - } else { - newRegionInfo, findLeaderErr := c.GetRegionByID(ctx, nl.RegionId) - if findLeaderErr != nil { - return nil, multierr.Append(splitErrors, findLeaderErr) - } - if !checkRegionEpoch(newRegionInfo, regionInfo) { - return nil, multierr.Append(splitErrors, berrors.ErrKVEpochNotMatch) - } - log.Info("find new leader", zap.Uint64("new leader", newRegionInfo.Leader.Id)) - regionInfo = newRegionInfo - } - log.Info("split region meet not leader error, retrying", - zap.Int("retry times", i), - zap.Uint64("regionID", regionInfo.Region.Id), - zap.Any("new leader", regionInfo.Leader), - ) - continue - } - // TODO: we don't handle RegionNotMatch and RegionNotFound here, - // because I think we don't have enough information to retry. - // But maybe we can handle them here by some information the error itself provides. - if resp.RegionError.ServerIsBusy != nil || - resp.RegionError.StaleCommand != nil { - log.Warn("a error occurs on split region", - zap.Int("retry times", i), - zap.Uint64("regionID", regionInfo.Region.Id), - zap.String("error", resp.RegionError.Message), - zap.Any("error verbose", resp.RegionError), - ) - continue - } - return nil, errors.Trace(splitErrors) - } - return resp, nil - } - return nil, errors.Trace(splitErrors) -} - -func (c *pdClient) BatchSplitRegionsWithOrigin( - ctx context.Context, regionInfo *RegionInfo, keys [][]byte, -) (*RegionInfo, []*RegionInfo, error) { - resp, err := c.sendSplitRegionRequest(ctx, regionInfo, keys) - if err != nil { - return nil, nil, errors.Trace(err) - } - - regions := resp.GetRegions() - newRegionInfos := make([]*RegionInfo, 0, len(regions)) - var originRegion *RegionInfo - for _, region := range regions { - var leader *metapb.Peer - - // Assume the leaders will be at the same store. - if regionInfo.Leader != nil { - for _, p := range region.GetPeers() { - if p.GetStoreId() == regionInfo.Leader.GetStoreId() { - leader = p - break - } - } - } - // original region - if region.GetId() == regionInfo.Region.GetId() { - originRegion = &RegionInfo{ - Region: region, - Leader: leader, - } - continue - } - newRegionInfos = append(newRegionInfos, &RegionInfo{ - Region: region, - Leader: leader, - }) - } - return originRegion, newRegionInfos, nil -} - -func (c *pdClient) BatchSplitRegions( - ctx context.Context, regionInfo *RegionInfo, keys [][]byte, -) ([]*RegionInfo, error) { - _, newRegions, err := c.BatchSplitRegionsWithOrigin(ctx, regionInfo, keys) - return newRegions, err -} - -func (c *pdClient) getStoreCount(ctx context.Context) (int, error) { - stores, err := conn.GetAllTiKVStores(ctx, c.client, conn.SkipTiFlash) - if err != nil { - return 0, err - } - return len(stores), err -} - -func (c *pdClient) getMaxReplica(ctx context.Context) (int, error) { - api := c.getPDAPIAddr() - configAPI := api + "/pd/api/v1/config" - req, err := http.NewRequestWithContext(ctx, "GET", configAPI, nil) - if err != nil { - return 0, errors.Trace(err) - } - res, err := httputil.NewClient(c.tlsConf).Do(req) - if err != nil { - return 0, errors.Trace(err) - } - var conf config.Config - if err := json.NewDecoder(res.Body).Decode(&conf); err != nil { - return 0, errors.Trace(err) - } - return int(conf.Replication.MaxReplicas), nil -} - -func (c *pdClient) checkNeedScatter(ctx context.Context) (bool, error) { - storeCount, err := c.getStoreCount(ctx) - if err != nil { - return false, err - } - maxReplica, err := c.getMaxReplica(ctx) - if err != nil { - return false, err - } - log.Info("checking whether need to scatter", zap.Int("store", storeCount), zap.Int("max-replica", maxReplica)) - // Skipping scatter may lead to leader unbalanced, - // currently, we skip scatter only when: - // 1. max-replica > store-count (Probably a misconfigured or playground cluster.) - // 2. store-count == 1 (No meaning for scattering.) - // We can still omit scatter when `max-replica == store-count`, if we create a BalanceLeader operator here, - // however, there isn't evidence for transform leader is much faster than scattering empty regions. - return storeCount >= maxReplica && storeCount > 1, nil -} - -func (c *pdClient) ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error { - if !c.needScatter(ctx) { - return nil - } - return c.client.ScatterRegion(ctx, regionInfo.Region.GetId()) -} - -func (c *pdClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) { - return c.client.GetOperator(ctx, regionID) -} - -func (c *pdClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) { - regions, err := c.client.ScanRegions(ctx, key, endKey, limit) - if err != nil { - return nil, errors.Trace(err) - } - regionInfos := make([]*RegionInfo, 0, len(regions)) - for _, region := range regions { - regionInfos = append(regionInfos, &RegionInfo{ - Region: region.Meta, - Leader: region.Leader, - }) - } - return regionInfos, nil -} - -func (c *pdClient) GetPlacementRule(ctx context.Context, groupID, ruleID string) (placement.Rule, error) { - var rule placement.Rule - addr := c.getPDAPIAddr() - if addr == "" { - return rule, errors.Annotate(berrors.ErrRestoreSplitFailed, "failed to add stores labels: no leader") - } - req, err := http.NewRequestWithContext(ctx, "GET", addr+path.Join("/pd/api/v1/config/rule", groupID, ruleID), nil) - if err != nil { - return rule, errors.Trace(err) - } - res, err := httputil.NewClient(c.tlsConf).Do(req) - if err != nil { - return rule, errors.Trace(err) - } - b, err := io.ReadAll(res.Body) - if err != nil { - return rule, errors.Trace(err) - } - res.Body.Close() - err = json.Unmarshal(b, &rule) - if err != nil { - return rule, errors.Trace(err) - } - return rule, nil -} - -func (c *pdClient) SetPlacementRule(ctx context.Context, rule placement.Rule) error { - addr := c.getPDAPIAddr() - if addr == "" { - return errors.Annotate(berrors.ErrPDLeaderNotFound, "failed to add stores labels") - } - m, _ := json.Marshal(rule) - req, err := http.NewRequestWithContext(ctx, "POST", addr+path.Join("/pd/api/v1/config/rule"), bytes.NewReader(m)) - if err != nil { - return errors.Trace(err) - } - res, err := httputil.NewClient(c.tlsConf).Do(req) - if err != nil { - return errors.Trace(err) - } - return errors.Trace(res.Body.Close()) -} - -func (c *pdClient) DeletePlacementRule(ctx context.Context, groupID, ruleID string) error { - addr := c.getPDAPIAddr() - if addr == "" { - return errors.Annotate(berrors.ErrPDLeaderNotFound, "failed to add stores labels") - } - req, err := http.NewRequestWithContext(ctx, "DELETE", addr+path.Join("/pd/api/v1/config/rule", groupID, ruleID), nil) - if err != nil { - return errors.Trace(err) - } - res, err := httputil.NewClient(c.tlsConf).Do(req) - if err != nil { - return errors.Trace(err) - } - return errors.Trace(res.Body.Close()) -} - -func (c *pdClient) SetStoresLabel( - ctx context.Context, stores []uint64, labelKey, labelValue string, -) error { - b := []byte(fmt.Sprintf(`{"%s": "%s"}`, labelKey, labelValue)) - addr := c.getPDAPIAddr() - if addr == "" { - return errors.Annotate(berrors.ErrPDLeaderNotFound, "failed to add stores labels") - } - httpCli := httputil.NewClient(c.tlsConf) - for _, id := range stores { - req, err := http.NewRequestWithContext( - ctx, "POST", - addr+path.Join("/pd/api/v1/store", strconv.FormatUint(id, 10), "label"), - bytes.NewReader(b), - ) - if err != nil { - return errors.Trace(err) - } - res, err := httpCli.Do(req) - if err != nil { - return errors.Trace(err) - } - err = res.Body.Close() - if err != nil { - return errors.Trace(err) - } - } - return nil -} - -func (c *pdClient) getPDAPIAddr() string { - addr := c.client.GetLeaderAddr() - if addr != "" && !strings.HasPrefix(addr, "http") { - addr = "http://" + addr - } - return strings.TrimRight(addr, "/") -} - -func checkRegionEpoch(new, old *RegionInfo) bool { - return new.Region.GetId() == old.Region.GetId() && - new.Region.GetRegionEpoch().GetVersion() == old.Region.GetRegionEpoch().GetVersion() && - new.Region.GetRegionEpoch().GetConfVer() == old.Region.GetRegionEpoch().GetConfVer() -} - // exponentialBackoffer trivially retry any errors it meets. // It's useful when the caller has handled the errors but // only want to a more semantic backoff implementation. diff --git a/br/pkg/restore/split_test.go b/br/pkg/restore/split_test.go index a7b02e9e4e29d..896e8779c6438 100644 --- a/br/pkg/restore/split_test.go +++ b/br/pkg/restore/split_test.go @@ -21,6 +21,7 @@ import ( "google.golang.org/grpc/status" "github.com/pingcap/tidb/br/pkg/restore" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/rtree" "github.com/pingcap/tidb/br/pkg/utils/utildb" "github.com/pingcap/tidb/util/codec" @@ -29,17 +30,17 @@ import ( type TestClient struct { mu sync.RWMutex stores map[uint64]*metapb.Store - regions map[uint64]*restore.RegionInfo + regions map[uint64]*split.RegionInfo regionsInfo *core.RegionsInfo // For now it's only used in ScanRegions nextRegionID uint64 - injectInScatter func(*restore.RegionInfo) error + injectInScatter func(*split.RegionInfo) error scattered map[uint64]bool } func NewTestClient( stores map[uint64]*metapb.Store, - regions map[uint64]*restore.RegionInfo, + regions map[uint64]*split.RegionInfo, nextRegionID uint64, ) *TestClient { regionsInfo := core.NewRegionsInfo() @@ -52,11 +53,11 @@ func NewTestClient( regionsInfo: regionsInfo, nextRegionID: nextRegionID, scattered: map[uint64]bool{}, - injectInScatter: func(*restore.RegionInfo) error { return nil }, + injectInScatter: func(*split.RegionInfo) error { return nil }, } } -func (c *TestClient) GetAllRegions() map[uint64]*restore.RegionInfo { +func (c *TestClient) GetAllRegions() map[uint64]*split.RegionInfo { c.mu.RLock() defer c.mu.RUnlock() return c.regions @@ -72,7 +73,7 @@ func (c *TestClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Stor return store, nil } -func (c *TestClient) GetRegion(ctx context.Context, key []byte) (*restore.RegionInfo, error) { +func (c *TestClient) GetRegion(ctx context.Context, key []byte) (*split.RegionInfo, error) { c.mu.RLock() defer c.mu.RUnlock() for _, region := range c.regions { @@ -84,7 +85,7 @@ func (c *TestClient) GetRegion(ctx context.Context, key []byte) (*restore.Region return nil, errors.Errorf("region not found: key=%s", string(key)) } -func (c *TestClient) GetRegionByID(ctx context.Context, regionID uint64) (*restore.RegionInfo, error) { +func (c *TestClient) GetRegionByID(ctx context.Context, regionID uint64) (*split.RegionInfo, error) { c.mu.RLock() defer c.mu.RUnlock() region, ok := c.regions[regionID] @@ -96,12 +97,12 @@ func (c *TestClient) GetRegionByID(ctx context.Context, regionID uint64) (*resto func (c *TestClient) SplitRegion( ctx context.Context, - regionInfo *restore.RegionInfo, + regionInfo *split.RegionInfo, key []byte, -) (*restore.RegionInfo, error) { +) (*split.RegionInfo, error) { c.mu.Lock() defer c.mu.Unlock() - var target *restore.RegionInfo + var target *split.RegionInfo splitKey := codec.EncodeBytes([]byte{}, key) for _, region := range c.regions { if bytes.Compare(splitKey, region.Region.StartKey) >= 0 && @@ -112,7 +113,7 @@ func (c *TestClient) SplitRegion( if target == nil { return nil, errors.Errorf("region not found: key=%s", string(key)) } - newRegion := &restore.RegionInfo{ + newRegion := &split.RegionInfo{ Region: &metapb.Region{ Peers: target.Region.Peers, Id: c.nextRegionID, @@ -128,14 +129,14 @@ func (c *TestClient) SplitRegion( } func (c *TestClient) BatchSplitRegionsWithOrigin( - ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte, -) (*restore.RegionInfo, []*restore.RegionInfo, error) { + ctx context.Context, regionInfo *split.RegionInfo, keys [][]byte, +) (*split.RegionInfo, []*split.RegionInfo, error) { c.mu.Lock() defer c.mu.Unlock() - newRegions := make([]*restore.RegionInfo, 0) - var region *restore.RegionInfo + newRegions := make([]*split.RegionInfo, 0) + var region *split.RegionInfo for _, key := range keys { - var target *restore.RegionInfo + var target *split.RegionInfo splitKey := codec.EncodeBytes([]byte{}, key) for _, region := range c.regions { if region.ContainsInterior(splitKey) { @@ -145,7 +146,7 @@ func (c *TestClient) BatchSplitRegionsWithOrigin( if target == nil { continue } - newRegion := &restore.RegionInfo{ + newRegion := &split.RegionInfo{ Region: &metapb.Region{ Peers: target.Region.Peers, Id: c.nextRegionID, @@ -164,13 +165,13 @@ func (c *TestClient) BatchSplitRegionsWithOrigin( } func (c *TestClient) BatchSplitRegions( - ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte, -) ([]*restore.RegionInfo, error) { + ctx context.Context, regionInfo *split.RegionInfo, keys [][]byte, +) ([]*split.RegionInfo, error) { _, newRegions, err := c.BatchSplitRegionsWithOrigin(ctx, regionInfo, keys) return newRegions, err } -func (c *TestClient) ScatterRegion(ctx context.Context, regionInfo *restore.RegionInfo) error { +func (c *TestClient) ScatterRegion(ctx context.Context, regionInfo *split.RegionInfo) error { return c.injectInScatter(regionInfo) } @@ -180,11 +181,11 @@ func (c *TestClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.Ge }, nil } -func (c *TestClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*restore.RegionInfo, error) { +func (c *TestClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*split.RegionInfo, error) { infos := c.regionsInfo.ScanRange(key, endKey, limit) - regions := make([]*restore.RegionInfo, 0, len(infos)) + regions := make([]*split.RegionInfo, 0, len(infos)) for _, info := range infos { - regions = append(regions, &restore.RegionInfo{ + regions = append(regions, &split.RegionInfo{ Region: info.GetMeta(), Leader: info.GetLeader(), }) @@ -256,12 +257,12 @@ func TestScatterFinishInTime(t *testing.T) { t.Fail() } - regionInfos := make([]*restore.RegionInfo, 0, len(regions)) + regionInfos := make([]*split.RegionInfo, 0, len(regions)) for _, info := range regions { regionInfos = append(regionInfos, info) } failed := map[uint64]int{} - client.injectInScatter = func(r *restore.RegionInfo) error { + client.injectInScatter = func(r *split.RegionInfo) error { failed[r.Region.Id]++ if failed[r.Region.Id] > 7 { return nil @@ -301,13 +302,13 @@ func TestSplitAndScatter(t *testing.T) { t.Log("get wrong result") t.Fail() } - regionInfos := make([]*restore.RegionInfo, 0, len(regions)) + regionInfos := make([]*split.RegionInfo, 0, len(regions)) for _, info := range regions { regionInfos = append(regionInfos, info) } scattered := map[uint64]bool{} const alwaysFailedRegionID = 1 - client.injectInScatter = func(regionInfo *restore.RegionInfo) error { + client.injectInScatter = func(regionInfo *split.RegionInfo) error { if _, ok := scattered[regionInfo.Region.Id]; !ok || regionInfo.Region.Id == alwaysFailedRegionID { scattered[regionInfo.Region.Id] = false return status.Errorf(codes.Unknown, "region %d is not fully replicated", regionInfo.Region.Id) @@ -334,7 +335,7 @@ func initTestClient() *TestClient { StoreId: 1, } keys := [6]string{"", "aay", "bba", "bbh", "cca", ""} - regions := make(map[uint64]*restore.RegionInfo) + regions := make(map[uint64]*split.RegionInfo) for i := uint64(1); i < 6; i++ { startKey := []byte(keys[i-1]) if len(startKey) != 0 { @@ -344,7 +345,7 @@ func initTestClient() *TestClient { if len(endKey) != 0 { endKey = codec.EncodeBytes([]byte{}, endKey) } - regions[i] = &restore.RegionInfo{ + regions[i] = &split.RegionInfo{ Region: &metapb.Region{ Id: i, Peers: peers, @@ -400,7 +401,7 @@ func initRewriteRules() *restore.RewriteRules { // expected regions after split: // [, aay), [aay, bba), [bba, bbf), [bbf, bbh), [bbh, bbj), // [bbj, cca), [cca, xxe), [xxe, xxz), [xxz, ) -func validateRegions(regions map[uint64]*restore.RegionInfo) bool { +func validateRegions(regions map[uint64]*split.RegionInfo) bool { keys := [...]string{"", "aay", "bba", "bbf", "bbh", "bbj", "cca", "xxe", "xxz", ""} if len(regions) != len(keys)-1 { return false @@ -427,7 +428,7 @@ FindRegion: } func (s *testRangeSuite) TestNeedSplit(c *C) { - regions := []*restore.RegionInfo{ + regions := []*split.RegionInfo{ { Region: &metapb.Region{ StartKey: codec.EncodeBytes([]byte{}, []byte("b")), diff --git a/br/pkg/restore/util.go b/br/pkg/restore/util.go index 812d87b09cec6..377a86ea68f23 100644 --- a/br/pkg/restore/util.go +++ b/br/pkg/restore/util.go @@ -15,16 +15,18 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/logutil" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/rtree" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" ) var ( @@ -346,7 +348,7 @@ func SplitRanges( rewriteRules *RewriteRules, updateCh glue.Progress, ) error { - splitter := NewRegionSplitter(NewSplitClient(client.GetPDClient(), client.GetTLSConfig())) + splitter := NewRegionSplitter(split.NewSplitClient(client.GetPDClient(), client.GetTLSConfig())) return splitter.Split(ctx, ranges, rewriteRules, func(keys [][]byte) { for range keys { diff --git a/br/pkg/restore/util_test.go b/br/pkg/restore/util_test.go index e8263c6b8462c..27dab0ac74588 100644 --- a/br/pkg/restore/util_test.go +++ b/br/pkg/restore/util_test.go @@ -10,7 +10,9 @@ import ( backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/tidb/br/pkg/restore" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" ) @@ -177,12 +179,12 @@ func (s *testRestoreUtilSuite) TestPaginateScanRegion(c *C) { Id: 1, } - makeRegions := func(num uint64) (map[uint64]*restore.RegionInfo, []*restore.RegionInfo) { - regionsMap := make(map[uint64]*restore.RegionInfo, num) - regions := make([]*restore.RegionInfo, 0, num) + makeRegions := func(num uint64) (map[uint64]*split.RegionInfo, []*split.RegionInfo) { + regionsMap := make(map[uint64]*split.RegionInfo, num) + regions := make([]*split.RegionInfo, 0, num) endKey := make([]byte, 8) for i := uint64(0); i < num-1; i++ { - ri := &restore.RegionInfo{ + ri := &split.RegionInfo{ Region: &metapb.Region{ Id: i + 1, Peers: peers, @@ -207,7 +209,7 @@ func (s *testRestoreUtilSuite) TestPaginateScanRegion(c *C) { } else { endKey = codec.EncodeBytes([]byte{}, endKey) } - ri := &restore.RegionInfo{ + ri := &split.RegionInfo{ Region: &metapb.Region{ Id: num, Peers: peers, @@ -222,53 +224,53 @@ func (s *testRestoreUtilSuite) TestPaginateScanRegion(c *C) { } ctx := context.Background() - regionMap := make(map[uint64]*restore.RegionInfo) - regions := []*restore.RegionInfo{} - batch, err := restore.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + regionMap := make(map[uint64]*split.RegionInfo) + regions := []*split.RegionInfo{} + batch, err := split.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) c.Assert(err, ErrorMatches, ".*scan region return empty result.*") regionMap, regions = makeRegions(1) - batch, err = restore.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + batch, err = split.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) c.Assert(err, IsNil) c.Assert(batch, DeepEquals, regions) regionMap, regions = makeRegions(2) - batch, err = restore.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + batch, err = split.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) c.Assert(err, IsNil) c.Assert(batch, DeepEquals, regions) regionMap, regions = makeRegions(3) - batch, err = restore.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + batch, err = split.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) c.Assert(err, IsNil) c.Assert(batch, DeepEquals, regions) regionMap, regions = makeRegions(8) - batch, err = restore.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + batch, err = split.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) c.Assert(err, IsNil) c.Assert(batch, DeepEquals, regions) regionMap, regions = makeRegions(8) - batch, err = restore.PaginateScanRegion( + batch, err = split.PaginateScanRegion( ctx, NewTestClient(stores, regionMap, 0), regions[1].Region.StartKey, []byte{}, 3) c.Assert(err, IsNil) c.Assert(batch, DeepEquals, regions[1:]) - batch, err = restore.PaginateScanRegion( + batch, err = split.PaginateScanRegion( ctx, NewTestClient(stores, regionMap, 0), []byte{}, regions[6].Region.EndKey, 3) c.Assert(err, IsNil) c.Assert(batch, DeepEquals, regions[:7]) - batch, err = restore.PaginateScanRegion( + batch, err = split.PaginateScanRegion( ctx, NewTestClient(stores, regionMap, 0), regions[1].Region.StartKey, regions[1].Region.EndKey, 3) c.Assert(err, IsNil) c.Assert(batch, DeepEquals, regions[1:2]) - _, err = restore.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{2}, []byte{1}, 3) + _, err = split.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{2}, []byte{1}, 3) c.Assert(err, ErrorMatches, ".*startKey >= endKey.*") // make the regionMap losing some region, this will cause scan region check fails delete(regionMap, uint64(3)) - _, err = restore.PaginateScanRegion( + _, err = split.PaginateScanRegion( ctx, NewTestClient(stores, regionMap, 0), regions[1].Region.EndKey, regions[5].Region.EndKey, 3) c.Assert(err, ErrorMatches, ".*region endKey not equal to next region startKey.*") From 03ac78ae4907eed2b1bf38187c91589862e6737a Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Thu, 30 Dec 2021 13:49:38 +0800 Subject: [PATCH 6/9] final resolve repo --- br/pkg/backup/client.go | 12 +- br/pkg/backup/schema.go | 8 +- br/pkg/conn/conn.go | 62 +---- br/pkg/conn/conn_test.go | 30 +-- br/pkg/conn/util/util.go | 63 +++++ br/pkg/lightning/backend/local/duplicate.go | 16 +- br/pkg/restore/client.go | 17 +- br/pkg/restore/import.go | 4 +- br/pkg/restore/ingester.go | 6 +- br/pkg/restore/log_client.go | 4 +- br/pkg/restore/pipeline_items.go | 9 +- br/pkg/restore/split/split_client.go | 4 +- br/pkg/task/common.go | 16 +- br/pkg/utils/{ => utilpool}/worker.go | 2 +- distsql/request/request.go | 240 ++++++++++++++++++++ distsql/request_builder.go | 239 +------------------ distsql/request_builder_test.go | 14 +- executor/builder.go | 13 +- executor/distsql.go | 18 +- executor/index_merge_reader.go | 10 +- planner/core/fragment.go | 10 +- planner/core/plan_to_pb.go | 6 +- 22 files changed, 426 insertions(+), 377 deletions(-) create mode 100644 br/pkg/conn/util/util.go rename br/pkg/utils/{ => utilpool}/worker.go (99%) create mode 100644 distsql/request/request.go diff --git a/br/pkg/backup/client.go b/br/pkg/backup/client.go index 422c1d980d596..07242490739b6 100644 --- a/br/pkg/backup/client.go +++ b/br/pkg/backup/client.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/status" "github.com/pingcap/tidb/br/pkg/conn" + util2 "github.com/pingcap/tidb/br/pkg/conn/util" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/metautil" @@ -40,7 +41,8 @@ import ( "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/br/pkg/utils/utildb" - "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/br/pkg/utils/utilpool" + "github.com/pingcap/tidb/distsql/request" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/meta/autoid" @@ -228,7 +230,7 @@ func appendRanges(tbl *model.TableInfo, tblID int64) ([]kv.KeyRange, error) { ranges = ranger.FullIntRange(false) } - kvRanges, err := distsql.TableHandleRangesToKVRanges(nil, []int64{tblID}, tbl.IsCommonHandle, ranges, nil) + kvRanges, err := request.TableHandleRangesToKVRanges(nil, []int64{tblID}, tbl.IsCommonHandle, ranges, nil) if err != nil { return nil, errors.Trace(err) } @@ -238,7 +240,7 @@ func appendRanges(tbl *model.TableInfo, tblID int64) ([]kv.KeyRange, error) { continue } ranges = ranger.FullRange() - idxRanges, err := distsql.IndexRangesToKVRanges(nil, tblID, index.ID, ranges, nil) + idxRanges, err := request.IndexRangesToKVRanges(nil, tblID, index.ID, ranges, nil) if err != nil { return nil, errors.Trace(err) } @@ -445,7 +447,7 @@ func (bc *Client) BackupRanges( } // we collect all files in a single goroutine to avoid thread safety issues. - workerPool := utils.NewWorkerPool(concurrency, "Ranges") + workerPool := utilpool.NewWorkerPool(concurrency, "Ranges") eg, ectx := errgroup.WithContext(ctx) for id, r := range ranges { id := id @@ -486,7 +488,7 @@ func (bc *Client) BackupRange( zap.Uint32("concurrency", req.Concurrency)) var allStores []*metapb.Store - allStores, err = conn.GetAllTiKVStoresWithRetry(ctx, bc.mgr.GetPDClient(), conn.SkipTiFlash) + allStores, err = conn.GetAllTiKVStoresWithRetry(ctx, bc.mgr.GetPDClient(), util2.SkipTiFlash) if err != nil { return errors.Trace(err) } diff --git a/br/pkg/backup/schema.go b/br/pkg/backup/schema.go index 4e653253cafcd..156ce306f83fb 100644 --- a/br/pkg/backup/schema.go +++ b/br/pkg/backup/schema.go @@ -12,17 +12,19 @@ import ( "github.com/pingcap/errors" backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "github.com/pingcap/tidb/br/pkg/checksum" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/metautil" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilpool" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/statistics/handle" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" ) const ( @@ -81,7 +83,7 @@ func (ss *Schemas) BackupSchemas( ctx = opentracing.ContextWithSpan(ctx, span1) } - workerPool := utils.NewWorkerPool(concurrency, "Schemas") + workerPool := utilpool.NewWorkerPool(concurrency, "Schemas") errg, ectx := errgroup.WithContext(ctx) startAll := time.Now() op := metautil.AppendSchema diff --git a/br/pkg/conn/conn.go b/br/pkg/conn/conn.go index 52b14742b2c8f..30d553460ddcc 100755 --- a/br/pkg/conn/conn.go +++ b/br/pkg/conn/conn.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" + "github.com/pingcap/tidb/br/pkg/conn/util" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/logutil" @@ -117,60 +118,9 @@ type Mgr struct { ownsStorage bool } -// StoreBehavior is the action to do in GetAllTiKVStores when a non-TiKV -// store (e.g. TiFlash store) is found. -type StoreBehavior uint8 - -const ( - // ErrorOnTiFlash causes GetAllTiKVStores to return error when the store is - // found to be a TiFlash node. - ErrorOnTiFlash StoreBehavior = 0 - // SkipTiFlash causes GetAllTiKVStores to skip the store when it is found to - // be a TiFlash node. - SkipTiFlash StoreBehavior = 1 - // TiFlashOnly caused GetAllTiKVStores to skip the store which is not a - // TiFlash node. - TiFlashOnly StoreBehavior = 2 -) - -// GetAllTiKVStores returns all TiKV stores registered to the PD client. The -// stores must not be a tombstone and must never contain a label `engine=tiflash`. -func GetAllTiKVStores( - ctx context.Context, - pdClient pd.Client, - storeBehavior StoreBehavior, -) ([]*metapb.Store, error) { - // get all live stores. - stores, err := pdClient.GetAllStores(ctx, pd.WithExcludeTombstone()) - if err != nil { - return nil, errors.Trace(err) - } - - // filter out all stores which are TiFlash. - j := 0 - for _, store := range stores { - isTiFlash := false - if version.IsTiFlash(store) { - if storeBehavior == SkipTiFlash { - continue - } else if storeBehavior == ErrorOnTiFlash { - return nil, errors.Annotatef(berrors.ErrPDInvalidResponse, - "cannot restore to a cluster with active TiFlash stores (store %d at %s)", store.Id, store.Address) - } - isTiFlash = true - } - if !isTiFlash && storeBehavior == TiFlashOnly { - continue - } - stores[j] = store - j++ - } - return stores[:j], nil -} - func GetAllTiKVStoresWithRetry(ctx context.Context, pdClient pd.Client, - storeBehavior StoreBehavior, + storeBehavior util.StoreBehavior, ) ([]*metapb.Store, error) { stores := make([]*metapb.Store, 0) var err error @@ -178,7 +128,7 @@ func GetAllTiKVStoresWithRetry(ctx context.Context, errRetry := utildb.WithRetry( ctx, func() error { - stores, err = GetAllTiKVStores(ctx, pdClient, storeBehavior) + stores, err = util.GetAllTiKVStores(ctx, pdClient, storeBehavior) failpoint.Inject("hint-GetAllTiKVStores-error", func(val failpoint.Value) { if val.(bool) { logutil.CL(ctx).Debug("failpoint hint-GetAllTiKVStores-error injected.") @@ -203,9 +153,9 @@ func GetAllTiKVStoresWithRetry(ctx context.Context, func checkStoresAlive(ctx context.Context, pdclient pd.Client, - storeBehavior StoreBehavior) error { + storeBehavior util.StoreBehavior) error { // Check live tikv. - stores, err := GetAllTiKVStores(ctx, pdclient, storeBehavior) + stores, err := util.GetAllTiKVStores(ctx, pdclient, storeBehavior) if err != nil { log.Error("fail to get store", zap.Error(err)) return errors.Trace(err) @@ -233,7 +183,7 @@ func NewMgr( tlsConf *tls.Config, securityOption pd.SecurityOption, keepalive keepalive.ClientParameters, - storeBehavior StoreBehavior, + storeBehavior util.StoreBehavior, checkRequirements bool, needDomain bool, ) (*Mgr, error) { diff --git a/br/pkg/conn/conn_test.go b/br/pkg/conn/conn_test.go index 2f77803fc3f78..dfe0d95fe0719 100644 --- a/br/pkg/conn/conn_test.go +++ b/br/pkg/conn/conn_test.go @@ -9,11 +9,13 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/tidb/br/pkg/pdutil" "github.com/stretchr/testify/require" pd "github.com/tikv/pd/client" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + "github.com/pingcap/tidb/br/pkg/conn/util" + "github.com/pingcap/tidb/br/pkg/pdutil" ) type fakePDClient struct { @@ -60,7 +62,7 @@ func TestGetAllTiKVStoresWithRetryCancel(t *testing.T) { stores: stores, } - _, err := GetAllTiKVStoresWithRetry(ctx, fpdc, SkipTiFlash) + _, err := GetAllTiKVStoresWithRetry(ctx, fpdc, util.SkipTiFlash) require.Error(t, err) require.Equal(t, codes.Canceled, status.Code(errors.Cause(err))) } @@ -100,7 +102,7 @@ func TestGetAllTiKVStoresWithUnknown(t *testing.T) { stores: stores, } - _, err := GetAllTiKVStoresWithRetry(ctx, fpdc, SkipTiFlash) + _, err := GetAllTiKVStoresWithRetry(ctx, fpdc, util.SkipTiFlash) require.Error(t, err) require.Equal(t, codes.Unknown, status.Code(errors.Cause(err))) } @@ -155,12 +157,12 @@ func TestCheckStoresAlive(t *testing.T) { stores: stores, } - kvStores, err := GetAllTiKVStoresWithRetry(ctx, fpdc, SkipTiFlash) + kvStores, err := GetAllTiKVStoresWithRetry(ctx, fpdc, util.SkipTiFlash) require.NoError(t, err) require.Len(t, kvStores, 2) require.Equal(t, stores[2:], kvStores) - err = checkStoresAlive(ctx, fpdc, SkipTiFlash) + err = checkStoresAlive(ctx, fpdc, util.SkipTiFlash) require.NoError(t, err) } @@ -169,7 +171,7 @@ func TestGetAllTiKVStores(t *testing.T) { testCases := []struct { stores []*metapb.Store - storeBehavior StoreBehavior + storeBehavior util.StoreBehavior expectedStores map[uint64]int expectedError string }{ @@ -177,14 +179,14 @@ func TestGetAllTiKVStores(t *testing.T) { stores: []*metapb.Store{ {Id: 1}, }, - storeBehavior: SkipTiFlash, + storeBehavior: util.SkipTiFlash, expectedStores: map[uint64]int{1: 1}, }, { stores: []*metapb.Store{ {Id: 1}, }, - storeBehavior: ErrorOnTiFlash, + storeBehavior: util.ErrorOnTiFlash, expectedStores: map[uint64]int{1: 1}, }, { @@ -192,7 +194,7 @@ func TestGetAllTiKVStores(t *testing.T) { {Id: 1}, {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, }, - storeBehavior: SkipTiFlash, + storeBehavior: util.SkipTiFlash, expectedStores: map[uint64]int{1: 1}, }, { @@ -200,7 +202,7 @@ func TestGetAllTiKVStores(t *testing.T) { {Id: 1}, {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, }, - storeBehavior: ErrorOnTiFlash, + storeBehavior: util.ErrorOnTiFlash, expectedError: "cannot restore to a cluster with active TiFlash stores.*", }, { @@ -212,7 +214,7 @@ func TestGetAllTiKVStores(t *testing.T) { {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, {Id: 6, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, }, - storeBehavior: SkipTiFlash, + storeBehavior: util.SkipTiFlash, expectedStores: map[uint64]int{1: 1, 3: 1, 4: 1, 6: 1}, }, { @@ -224,7 +226,7 @@ func TestGetAllTiKVStores(t *testing.T) { {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, {Id: 6, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, }, - storeBehavior: ErrorOnTiFlash, + storeBehavior: util.ErrorOnTiFlash, expectedError: "cannot restore to a cluster with active TiFlash stores.*", }, { @@ -236,14 +238,14 @@ func TestGetAllTiKVStores(t *testing.T) { {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, {Id: 6, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, }, - storeBehavior: TiFlashOnly, + storeBehavior: util.TiFlashOnly, expectedStores: map[uint64]int{2: 1, 5: 1}, }, } for _, testCase := range testCases { pdClient := fakePDClient{stores: testCase.stores} - stores, err := GetAllTiKVStores(context.Background(), pdClient, testCase.storeBehavior) + stores, err := util.GetAllTiKVStores(context.Background(), pdClient, testCase.storeBehavior) if len(testCase.expectedError) != 0 { require.Error(t, err) require.Regexp(t, testCase.expectedError, err.Error()) diff --git a/br/pkg/conn/util/util.go b/br/pkg/conn/util/util.go new file mode 100644 index 0000000000000..f569edd03ed2b --- /dev/null +++ b/br/pkg/conn/util/util.go @@ -0,0 +1,63 @@ +package util + +import ( + "context" + + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/metapb" + pd "github.com/tikv/pd/client" + + errors2 "github.com/pingcap/tidb/br/pkg/errors" + "github.com/pingcap/tidb/br/pkg/version" +) + +// GetAllTiKVStores returns all TiKV stores registered to the PD client. The +// stores must not be a tombstone and must never contain a label `engine=tiflash`. +func GetAllTiKVStores( + ctx context.Context, + pdClient pd.Client, + storeBehavior StoreBehavior, +) ([]*metapb.Store, error) { + // get all live stores. + stores, err := pdClient.GetAllStores(ctx, pd.WithExcludeTombstone()) + if err != nil { + return nil, errors.Trace(err) + } + + // filter out all stores which are TiFlash. + j := 0 + for _, store := range stores { + isTiFlash := false + if version.IsTiFlash(store) { + if storeBehavior == SkipTiFlash { + continue + } else if storeBehavior == ErrorOnTiFlash { + return nil, errors.Annotatef(errors2.ErrPDInvalidResponse, + "cannot restore to a cluster with active TiFlash stores (store %d at %s)", store.Id, store.Address) + } + isTiFlash = true + } + if !isTiFlash && storeBehavior == TiFlashOnly { + continue + } + stores[j] = store + j++ + } + return stores[:j], nil +} + +// StoreBehavior is the action to do in GetAllTiKVStores when a non-TiKV +// store (e.g. TiFlash store) is found. +type StoreBehavior uint8 + +const ( + // ErrorOnTiFlash causes GetAllTiKVStores to return error when the store is + // found to be a TiFlash node. + ErrorOnTiFlash StoreBehavior = 0 + // SkipTiFlash causes GetAllTiKVStores to skip the store when it is found to + // be a TiFlash node. + SkipTiFlash StoreBehavior = 1 + // TiFlashOnly caused GetAllTiKVStores to skip the store which is not a + // TiFlash node. + TiFlashOnly StoreBehavior = 2 +) diff --git a/br/pkg/lightning/backend/local/duplicate.go b/br/pkg/lightning/backend/local/duplicate.go index 212331ba8f6f4..fefc914fe523c 100644 --- a/br/pkg/lightning/backend/local/duplicate.go +++ b/br/pkg/lightning/backend/local/duplicate.go @@ -43,8 +43,8 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/restore/split" - "github.com/pingcap/tidb/br/pkg/utils" - "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/br/pkg/utils/utilpool" + "github.com/pingcap/tidb/distsql/request" tidbkv "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/table" @@ -73,7 +73,7 @@ type DuplicateManager struct { tls *common.TLS ts uint64 keyAdapter KeyAdapter - remoteWorkerPool *utils.WorkerPool + remoteWorkerPool *utilpool.WorkerPool opts *kv.SessionOptions } @@ -196,7 +196,7 @@ func NewDuplicateManager(local *local, ts uint64, opts *kv.SessionOptions) (*Dup ts: ts, connPool: common.NewGRPCConns(), // TODO: not sure what is the correct concurrency value. - remoteWorkerPool: utils.NewWorkerPool(uint(local.tcpConcurrency), "duplicates"), + remoteWorkerPool: utilpool.NewWorkerPool(uint(local.tcpConcurrency), "duplicates"), opts: opts, }, nil } @@ -474,7 +474,7 @@ func (manager *DuplicateManager) CollectDuplicateRowsFromLocalIndex( if tbl.Meta().IsCommonHandle { ranges = ranger.FullRange() } - keyRanges, err := distsql.TableHandleRangesToKVRanges(nil, tableIDs, tbl.Meta().IsCommonHandle, ranges, nil) + keyRanges, err := request.TableHandleRangesToKVRanges(nil, tableIDs, tbl.Meta().IsCommonHandle, ranges, nil) if err != nil { return false, errors.Trace(err) } @@ -542,7 +542,7 @@ func (manager *DuplicateManager) CollectDuplicateRowsFromLocalIndex( ranges := ranger.FullRange() var keysRanges []tidbkv.KeyRange for _, id := range tableIDs { - partitionKeysRanges, err := distsql.IndexRangesToKVRanges(nil, id, indexInfo.ID, ranges, nil) + partitionKeysRanges, err := request.IndexRangesToKVRanges(nil, id, indexInfo.ID, ranges, nil) if err != nil { return false, err } @@ -788,7 +788,7 @@ func buildTableRequests(tableID int64, isCommonHandle bool) ([]*DuplicateRequest if isCommonHandle { ranges = ranger.FullRange() } - keysRanges, err := distsql.TableHandleRangesToKVRanges(nil, []int64{tableID}, isCommonHandle, ranges, nil) + keysRanges, err := request.TableHandleRangesToKVRanges(nil, []int64{tableID}, isCommonHandle, ranges, nil) if err != nil { return nil, errors.Trace(err) } @@ -807,7 +807,7 @@ func buildTableRequests(tableID int64, isCommonHandle bool) ([]*DuplicateRequest func buildIndexRequests(tableID int64, indexInfo *model.IndexInfo) ([]*DuplicateRequest, error) { ranges := ranger.FullRange() - keysRanges, err := distsql.IndexRangesToKVRanges(nil, tableID, indexInfo.ID, ranges, nil) + keysRanges, err := request.IndexRangesToKVRanges(nil, tableID, indexInfo.ID, ranges, nil) if err != nil { return nil, errors.Trace(err) } diff --git a/br/pkg/restore/client.go b/br/pkg/restore/client.go index 8be21b4d68e1a..56c6390a8a45f 100644 --- a/br/pkg/restore/client.go +++ b/br/pkg/restore/client.go @@ -32,7 +32,7 @@ import ( "google.golang.org/grpc/keepalive" "github.com/pingcap/tidb/br/pkg/checksum" - "github.com/pingcap/tidb/br/pkg/conn" + "github.com/pingcap/tidb/br/pkg/conn/util" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/logutil" @@ -44,6 +44,7 @@ import ( "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/br/pkg/utils/utildb" + "github.com/pingcap/tidb/br/pkg/utils/utilpool" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" @@ -61,7 +62,7 @@ type Client struct { pdClient pd.Client toolClient split.SplitClient fileImporter FileImporter - workerPool *utils.WorkerPool + workerPool *utilpool.WorkerPool tlsConf *tls.Config keepaliveConf keepalive.ClientParameters @@ -277,7 +278,7 @@ func (rc *Client) GetFilesInRawRange(startKey []byte, endKey []byte, cf string) // SetConcurrency sets the concurrency of dbs tables files. func (rc *Client) SetConcurrency(c uint) { - rc.workerPool = utils.NewWorkerPool(c, "file") + rc.workerPool = utilpool.NewWorkerPool(c, "file") } // EnableOnline sets the mode of restore to online. @@ -518,7 +519,7 @@ func (rc *Client) createTablesWithDBPool(ctx context.Context, createOneTable func(ctx context.Context, db *DB, t *metautil.Table) error, tables []*metautil.Table, dbPool []*DB) error { eg, ectx := errgroup.WithContext(ctx) - workers := utils.NewWorkerPool(uint(len(dbPool)), "DDL workers") + workers := utilpool.NewWorkerPool(uint(len(dbPool)), "DDL workers") for _, t := range tables { table := t workers.ApplyWithIDInErrorGroup(eg, func(id uint64) error { @@ -551,7 +552,7 @@ func (rc *Client) ExecDDLs(ctx context.Context, ddlJobs []*model.Job) error { func (rc *Client) setSpeedLimit(ctx context.Context) error { if !rc.hasSpeedLimited && rc.rateLimit != 0 { - stores, err := conn.GetAllTiKVStores(ctx, rc.pdClient, conn.SkipTiFlash) + stores, err := util.GetAllTiKVStores(ctx, rc.pdClient, util.SkipTiFlash) if err != nil { return errors.Trace(err) } @@ -741,7 +742,7 @@ func (rc *Client) SwitchToNormalMode(ctx context.Context) error { } func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMode) error { - stores, err := conn.GetAllTiKVStores(ctx, rc.pdClient, conn.SkipTiFlash) + stores, err := util.GetAllTiKVStores(ctx, rc.pdClient, util.SkipTiFlash) if err != nil { return errors.Trace(err) } @@ -801,7 +802,7 @@ func (rc *Client) GoValidateChecksum( defer wg.Done() rc.updateMetaAndLoadStats(ctx, loadStatCh) }() - workers := utils.NewWorkerPool(defaultChecksumConcurrency, "RestoreChecksum") + workers := utilpool.NewWorkerPool(defaultChecksumConcurrency, "RestoreChecksum") go func() { eg, ectx := errgroup.WithContext(ctx) defer func() { @@ -1142,7 +1143,7 @@ func (rc *Client) PreCheckTableTiFlashReplica( ctx context.Context, tables []*metautil.Table, ) error { - tiFlashStores, err := conn.GetAllTiKVStores(ctx, rc.pdClient, conn.TiFlashOnly) + tiFlashStores, err := util.GetAllTiKVStores(ctx, rc.pdClient, util.TiFlashOnly) if err != nil { return errors.Trace(err) } diff --git a/br/pkg/restore/import.go b/br/pkg/restore/import.go index 954679fff8f28..9f41e27d69f66 100644 --- a/br/pkg/restore/import.go +++ b/br/pkg/restore/import.go @@ -19,7 +19,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" - "github.com/pingcap/tidb/br/pkg/conn" + "github.com/pingcap/tidb/br/pkg/conn/util" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/restore/split" @@ -230,7 +230,7 @@ func NewFileImporter( // CheckMultiIngestSupport checks whether all stores support multi-ingest func (importer *FileImporter) CheckMultiIngestSupport(ctx context.Context, pdClient pd.Client) error { - allStores, err := conn.GetAllTiKVStores(ctx, pdClient, conn.SkipTiFlash) + allStores, err := util.GetAllTiKVStores(ctx, pdClient, util.SkipTiFlash) if err != nil { return errors.Trace(err) } diff --git a/br/pkg/restore/ingester.go b/br/pkg/restore/ingester.go index bc127c05963e2..8dcf52b24e743 100644 --- a/br/pkg/restore/ingester.go +++ b/br/pkg/restore/ingester.go @@ -43,7 +43,7 @@ import ( "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/membuf" "github.com/pingcap/tidb/br/pkg/restore/split" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilpool" ) const ( @@ -91,7 +91,7 @@ type Ingester struct { conns gRPCConns splitCli split.SplitClient - WorkerPool *utils.WorkerPool + WorkerPool *utilpool.WorkerPool batchWriteKVPairs int regionSplitSize int64 @@ -101,7 +101,7 @@ type Ingester struct { func NewIngester( splitCli split.SplitClient, cfg concurrencyCfg, commitTS uint64, tlsConf *tls.Config, ) *Ingester { - workerPool := utils.NewWorkerPool(cfg.IngestConcurrency, "ingest worker") + workerPool := utilpool.NewWorkerPool(cfg.IngestConcurrency, "ingest worker") return &Ingester{ tlsConf: tlsConf, conns: gRPCConns{ diff --git a/br/pkg/restore/log_client.go b/br/pkg/restore/log_client.go index 0332d2817a6fa..b0fe7e4614259 100644 --- a/br/pkg/restore/log_client.go +++ b/br/pkg/restore/log_client.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb/br/pkg/kv" "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/storage" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilpool" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/parser/model" @@ -663,7 +663,7 @@ func (l *LogClient) restoreTables(ctx context.Context, dom *domain.Domain) error // a. encode row changed files to kvpairs and ingest into tikv // b. exec ddl log.Debug("start restore tables") - workerPool := utils.NewWorkerPool(l.concurrencyCfg.Concurrency, "table log restore") + workerPool := utilpool.NewWorkerPool(l.concurrencyCfg.Concurrency, "table log restore") eg, ectx := errgroup.WithContext(ctx) for tableID, puller := range l.eventPullers { pullerReplica := puller diff --git a/br/pkg/restore/pipeline_items.go b/br/pkg/restore/pipeline_items.go index 1bd7502f30642..bb441da36911a 100644 --- a/br/pkg/restore/pipeline_items.go +++ b/br/pkg/restore/pipeline_items.go @@ -9,14 +9,15 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/metautil" "github.com/pingcap/tidb/br/pkg/rtree" "github.com/pingcap/tidb/br/pkg/summary" - "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/utilpool" "github.com/pingcap/tidb/parser/model" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" ) const ( @@ -263,7 +264,7 @@ func (b *tikvSender) splitWorker(ctx context.Context, summary.CollectDuration("split region", elapsed) }() - pool := utils.NewWorkerPool(concurrency, "split") + pool := utilpool.NewWorkerPool(concurrency, "split") for { select { case <-ctx.Done(): diff --git a/br/pkg/restore/split/split_client.go b/br/pkg/restore/split/split_client.go index ec105d1f64f5e..7d7fce267adad 100644 --- a/br/pkg/restore/split/split_client.go +++ b/br/pkg/restore/split/split_client.go @@ -31,7 +31,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" - "github.com/pingcap/tidb/br/pkg/conn" + "github.com/pingcap/tidb/br/pkg/conn/util" errors2 "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/httputil" "github.com/pingcap/tidb/br/pkg/logutil" @@ -494,7 +494,7 @@ func (c *pdClient) BatchSplitRegions( } func (c *pdClient) getStoreCount(ctx context.Context) (int, error) { - stores, err := conn.GetAllTiKVStores(ctx, c.client, conn.SkipTiFlash) + stores, err := util.GetAllTiKVStores(ctx, c.client, util.SkipTiFlash) if err != nil { return 0, err } diff --git a/br/pkg/task/common.go b/br/pkg/task/common.go index 36de8583ea92e..5df8e1b2a6468 100644 --- a/br/pkg/task/common.go +++ b/br/pkg/task/common.go @@ -21,19 +21,21 @@ import ( "github.com/pingcap/kvproto/pkg/encryptionpb" "github.com/pingcap/log" filter "github.com/pingcap/tidb-tools/pkg/table-filter" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + pd "github.com/tikv/pd/client" + "go.etcd.io/etcd/pkg/transport" + "go.uber.org/zap" + "google.golang.org/grpc/keepalive" + "github.com/pingcap/tidb/br/pkg/conn" + "github.com/pingcap/tidb/br/pkg/conn/util" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/metautil" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/sessionctx/variable" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - pd "github.com/tikv/pd/client" - "go.etcd.io/etcd/pkg/transport" - "go.uber.org/zap" - "google.golang.org/grpc/keepalive" ) const ( @@ -523,7 +525,7 @@ func NewMgr(ctx context.Context, // Is it necessary to remove `StoreBehavior`? return conn.NewMgr( - ctx, g, pdAddress, tlsConf, securityOption, keepalive, conn.SkipTiFlash, + ctx, g, pdAddress, tlsConf, securityOption, keepalive, util.SkipTiFlash, checkRequirements, needDomain, ) } diff --git a/br/pkg/utils/worker.go b/br/pkg/utils/utilpool/worker.go similarity index 99% rename from br/pkg/utils/worker.go rename to br/pkg/utils/utilpool/worker.go index 773cfd41a64da..61ce617212f0b 100644 --- a/br/pkg/utils/worker.go +++ b/br/pkg/utils/utilpool/worker.go @@ -1,6 +1,6 @@ // Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. -package utils +package utilpool import ( "github.com/pingcap/log" diff --git a/distsql/request/request.go b/distsql/request/request.go new file mode 100644 index 0000000000000..68360e798f58c --- /dev/null +++ b/distsql/request/request.go @@ -0,0 +1,240 @@ +package request + +import ( + "sync/atomic" + + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/statistics" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/memory" + "github.com/pingcap/tidb/util/ranger" +) + +// TableHandleRangesToKVRanges convert table handle ranges to "KeyRanges" for multiple tables. +func TableHandleRangesToKVRanges(sc *stmtctx.StatementContext, tid []int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) { + if !isCommonHandle { + return tablesRangesToKVRanges(tid, ranges, fb), nil + } + return CommonHandleRangesToKVRanges(sc, tid, ranges) +} + +// TableRangesToKVRanges converts table ranges to "KeyRange". +// Note this function should not be exported, but currently +// br refers to it, so have to keep it. +func TableRangesToKVRanges(tid int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) []kv.KeyRange { + return tablesRangesToKVRanges([]int64{tid}, ranges, fb) +} + +// tablesRangesToKVRanges converts table ranges to "KeyRange". +func tablesRangesToKVRanges(tids []int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) []kv.KeyRange { + if fb == nil || fb.Hist == nil { + return tableRangesToKVRangesWithoutSplit(tids, ranges) + } + krs := make([]kv.KeyRange, 0, len(ranges)) + feedbackRanges := make([]*ranger.Range, 0, len(ranges)) + for _, ran := range ranges { + low := codec.EncodeInt(nil, ran.LowVal[0].GetInt64()) + high := codec.EncodeInt(nil, ran.HighVal[0].GetInt64()) + if ran.LowExclude { + low = kv.Key(low).PrefixNext() + } + // If this range is split by histogram, then the high val will equal to one bucket's upper bound, + // since we need to guarantee each range falls inside the exactly one bucket, `PrefixNext` will make the + // high value greater than upper bound, so we store the range here. + r := &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)}, + HighVal: []types.Datum{types.NewBytesDatum(high)}} + feedbackRanges = append(feedbackRanges, r) + + if !ran.HighExclude { + high = kv.Key(high).PrefixNext() + } + for _, tid := range tids { + startKey := tablecodec.EncodeRowKey(tid, low) + endKey := tablecodec.EncodeRowKey(tid, high) + krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + } + } + fb.StoreRanges(feedbackRanges) + return krs +} + +func tableRangesToKVRangesWithoutSplit(tids []int64, ranges []*ranger.Range) []kv.KeyRange { + krs := make([]kv.KeyRange, 0, len(ranges)*len(tids)) + for _, ran := range ranges { + low, high := encodeHandleKey(ran) + for _, tid := range tids { + startKey := tablecodec.EncodeRowKey(tid, low) + endKey := tablecodec.EncodeRowKey(tid, high) + krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + } + } + return krs +} + +func encodeHandleKey(ran *ranger.Range) ([]byte, []byte) { + low := codec.EncodeInt(nil, ran.LowVal[0].GetInt64()) + high := codec.EncodeInt(nil, ran.HighVal[0].GetInt64()) + if ran.LowExclude { + low = kv.Key(low).PrefixNext() + } + if !ran.HighExclude { + high = kv.Key(high).PrefixNext() + } + return low, high +} + +// IndexRangesToKVRangesWithInterruptSignal converts index ranges to "KeyRange". +// The process can be interrupted by set `interruptSignal` to true. +func IndexRangesToKVRangesWithInterruptSignal(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, memTracker *memory.Tracker, interruptSignal *atomic.Value) ([]kv.KeyRange, error) { + return indexRangesToKVRangesForTablesWithInterruptSignal(sc, []int64{tid}, idxID, ranges, fb, memTracker, interruptSignal) +} + +// IndexRangesToKVRangesForTables converts indexes ranges to "KeyRange". +func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) { + return indexRangesToKVRangesForTablesWithInterruptSignal(sc, tids, idxID, ranges, fb, nil, nil) +} + +// IndexRangesToKVRangesForTablesWithInterruptSignal converts indexes ranges to "KeyRange". +// The process can be interrupted by set `interruptSignal` to true. +func indexRangesToKVRangesForTablesWithInterruptSignal(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, memTracker *memory.Tracker, interruptSignal *atomic.Value) ([]kv.KeyRange, error) { + if fb == nil || fb.Hist == nil { + return indexRangesToKVWithoutSplit(sc, tids, idxID, ranges, memTracker, interruptSignal) + } + feedbackRanges := make([]*ranger.Range, 0, len(ranges)) + for _, ran := range ranges { + low, high, err := encodeIndexKey(sc, ran) + if err != nil { + return nil, err + } + feedbackRanges = append(feedbackRanges, &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)}, + HighVal: []types.Datum{types.NewBytesDatum(high)}, LowExclude: false, HighExclude: true}) + } + feedbackRanges, ok := fb.Hist.SplitRange(sc, feedbackRanges, true) + if !ok { + fb.Invalidate() + } + krs := make([]kv.KeyRange, 0, len(feedbackRanges)) + for _, ran := range feedbackRanges { + low, high := ran.LowVal[0].GetBytes(), ran.HighVal[0].GetBytes() + if ran.LowExclude { + low = kv.Key(low).PrefixNext() + } + ran.LowVal[0].SetBytes(low) + // If this range is split by histogram, then the high val will equal to one bucket's upper bound, + // since we need to guarantee each range falls inside the exactly one bucket, `PrefixNext` will make the + // high value greater than upper bound, so we store the high value here. + ran.HighVal[0].SetBytes(high) + if !ran.HighExclude { + high = kv.Key(high).PrefixNext() + } + for _, tid := range tids { + startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) + endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) + krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + } + } + fb.StoreRanges(feedbackRanges) + return krs, nil +} + +// CommonHandleRangesToKVRanges converts common handle ranges to "KeyRange". +func CommonHandleRangesToKVRanges(sc *stmtctx.StatementContext, tids []int64, ranges []*ranger.Range) ([]kv.KeyRange, error) { + rans := make([]*ranger.Range, 0, len(ranges)) + for _, ran := range ranges { + low, high, err := encodeIndexKey(sc, ran) + if err != nil { + return nil, err + } + rans = append(rans, &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)}, + HighVal: []types.Datum{types.NewBytesDatum(high)}, LowExclude: false, HighExclude: true}) + } + krs := make([]kv.KeyRange, 0, len(rans)) + for _, ran := range rans { + low, high := ran.LowVal[0].GetBytes(), ran.HighVal[0].GetBytes() + if ran.LowExclude { + low = kv.Key(low).PrefixNext() + } + ran.LowVal[0].SetBytes(low) + for _, tid := range tids { + startKey := tablecodec.EncodeRowKey(tid, low) + endKey := tablecodec.EncodeRowKey(tid, high) + krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + } + } + return krs, nil +} + +func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, memTracker *memory.Tracker, interruptSignal *atomic.Value) ([]kv.KeyRange, error) { + krs := make([]kv.KeyRange, 0, len(ranges)) + const CheckSignalStep = 8 + var estimatedMemUsage int64 + // encodeIndexKey and EncodeIndexSeekKey is time-consuming, thus we need to + // check the interrupt signal periodically. + for i, ran := range ranges { + low, high, err := encodeIndexKey(sc, ran) + if err != nil { + return nil, err + } + if i == 0 { + estimatedMemUsage += int64(cap(low) + cap(high)) + } + for _, tid := range tids { + startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) + endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) + if i == 0 { + estimatedMemUsage += int64(cap(startKey)) + int64(cap(endKey)) + } + krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + } + if i%CheckSignalStep == 0 { + if i == 0 && memTracker != nil { + estimatedMemUsage *= int64(len(ranges)) + memTracker.Consume(estimatedMemUsage) + } + if interruptSignal != nil && interruptSignal.Load().(bool) { + return nil, nil + } + } + } + return krs, nil +} + +func encodeIndexKey(sc *stmtctx.StatementContext, ran *ranger.Range) ([]byte, []byte, error) { + low, err := codec.EncodeKey(sc, nil, ran.LowVal...) + if err != nil { + return nil, nil, err + } + if ran.LowExclude { + low = kv.Key(low).PrefixNext() + } + high, err := codec.EncodeKey(sc, nil, ran.HighVal...) + if err != nil { + return nil, nil, err + } + + if !ran.HighExclude { + high = kv.Key(high).PrefixNext() + } + + var hasNull bool + for _, highVal := range ran.HighVal { + if highVal.IsNull() { + hasNull = true + break + } + } + + if hasNull { + // Append 0 to make unique-key range [null, null] to be a scan rather than point-get. + high = kv.Key(high).Next() + } + return low, high, nil +} + +// IndexRangesToKVRanges converts index ranges to "KeyRange". +func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) { + return IndexRangesToKVRangesWithInterruptSignal(sc, tid, idxID, ranges, fb, nil, nil) +} diff --git a/distsql/request_builder.go b/distsql/request_builder.go index db62df97dc1c1..d886eb0203b54 100644 --- a/distsql/request_builder.go +++ b/distsql/request_builder.go @@ -18,12 +18,14 @@ import ( "fmt" "math" "sort" - "sync/atomic" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/tipb/go-tipb" + "github.com/pingcap/tidb/ddl/placement" + "github.com/pingcap/tidb/distsql/request" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/mysql" @@ -35,7 +37,6 @@ import ( "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/ranger" - "github.com/pingcap/tipb/go-tipb" ) // RequestBuilder is used to build a "kv.Request". @@ -84,7 +85,7 @@ func (builder *RequestBuilder) SetMemTracker(tracker *memory.Tracker) *RequestBu // br refers it, so have to keep it. func (builder *RequestBuilder) SetTableRanges(tid int64, tableRanges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder { if builder.err == nil { - builder.Request.KeyRanges = TableRangesToKVRanges(tid, tableRanges, fb) + builder.Request.KeyRanges = request.TableRangesToKVRanges(tid, tableRanges, fb) } return builder } @@ -93,7 +94,7 @@ func (builder *RequestBuilder) SetTableRanges(tid int64, tableRanges []*ranger.R // "ranges" to "KeyRanges" firstly. func (builder *RequestBuilder) SetIndexRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range) *RequestBuilder { if builder.err == nil { - builder.Request.KeyRanges, builder.err = IndexRangesToKVRanges(sc, tid, idxID, ranges, nil) + builder.Request.KeyRanges, builder.err = request.IndexRangesToKVRanges(sc, tid, idxID, ranges, nil) } return builder } @@ -102,7 +103,7 @@ func (builder *RequestBuilder) SetIndexRanges(sc *stmtctx.StatementContext, tid, // "ranges" to "KeyRanges" firstly. func (builder *RequestBuilder) SetIndexRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range) *RequestBuilder { if builder.err == nil { - builder.Request.KeyRanges, builder.err = IndexRangesToKVRangesForTables(sc, tids, idxID, ranges, nil) + builder.Request.KeyRanges, builder.err = request.IndexRangesToKVRangesForTables(sc, tids, idxID, ranges, nil) } return builder } @@ -117,7 +118,7 @@ func (builder *RequestBuilder) SetHandleRanges(sc *stmtctx.StatementContext, tid // "ranges" to "KeyRanges" firstly for multiple tables. func (builder *RequestBuilder) SetHandleRangesForTables(sc *stmtctx.StatementContext, tid []int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) *RequestBuilder { if builder.err == nil { - builder.Request.KeyRanges, builder.err = TableHandleRangesToKVRanges(sc, tid, isCommonHandle, ranges, fb) + builder.Request.KeyRanges, builder.err = request.TableHandleRangesToKVRanges(sc, tid, isCommonHandle, ranges, fb) } return builder } @@ -344,79 +345,6 @@ func (builder *RequestBuilder) SetIsStaleness(is bool) *RequestBuilder { return builder } -// TableHandleRangesToKVRanges convert table handle ranges to "KeyRanges" for multiple tables. -func TableHandleRangesToKVRanges(sc *stmtctx.StatementContext, tid []int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) { - if !isCommonHandle { - return tablesRangesToKVRanges(tid, ranges, fb), nil - } - return CommonHandleRangesToKVRanges(sc, tid, ranges) -} - -// TableRangesToKVRanges converts table ranges to "KeyRange". -// Note this function should not be exported, but currently -// br refers to it, so have to keep it. -func TableRangesToKVRanges(tid int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) []kv.KeyRange { - return tablesRangesToKVRanges([]int64{tid}, ranges, fb) -} - -// tablesRangesToKVRanges converts table ranges to "KeyRange". -func tablesRangesToKVRanges(tids []int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) []kv.KeyRange { - if fb == nil || fb.Hist == nil { - return tableRangesToKVRangesWithoutSplit(tids, ranges) - } - krs := make([]kv.KeyRange, 0, len(ranges)) - feedbackRanges := make([]*ranger.Range, 0, len(ranges)) - for _, ran := range ranges { - low := codec.EncodeInt(nil, ran.LowVal[0].GetInt64()) - high := codec.EncodeInt(nil, ran.HighVal[0].GetInt64()) - if ran.LowExclude { - low = kv.Key(low).PrefixNext() - } - // If this range is split by histogram, then the high val will equal to one bucket's upper bound, - // since we need to guarantee each range falls inside the exactly one bucket, `PrefixNext` will make the - // high value greater than upper bound, so we store the range here. - r := &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)}, - HighVal: []types.Datum{types.NewBytesDatum(high)}} - feedbackRanges = append(feedbackRanges, r) - - if !ran.HighExclude { - high = kv.Key(high).PrefixNext() - } - for _, tid := range tids { - startKey := tablecodec.EncodeRowKey(tid, low) - endKey := tablecodec.EncodeRowKey(tid, high) - krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) - } - } - fb.StoreRanges(feedbackRanges) - return krs -} - -func tableRangesToKVRangesWithoutSplit(tids []int64, ranges []*ranger.Range) []kv.KeyRange { - krs := make([]kv.KeyRange, 0, len(ranges)*len(tids)) - for _, ran := range ranges { - low, high := encodeHandleKey(ran) - for _, tid := range tids { - startKey := tablecodec.EncodeRowKey(tid, low) - endKey := tablecodec.EncodeRowKey(tid, high) - krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) - } - } - return krs -} - -func encodeHandleKey(ran *ranger.Range) ([]byte, []byte) { - low := codec.EncodeInt(nil, ran.LowVal[0].GetInt64()) - high := codec.EncodeInt(nil, ran.HighVal[0].GetInt64()) - if ran.LowExclude { - low = kv.Key(low).PrefixNext() - } - if !ran.HighExclude { - high = kv.Key(high).PrefixNext() - } - return low, high -} - // SplitRangesAcrossInt64Boundary split the ranges into two groups: // 1. signedRanges is less or equal than MaxInt64 // 2. unsignedRanges is greater than MaxInt64 @@ -550,92 +478,6 @@ func PartitionHandlesToKVRanges(handles []kv.Handle) []kv.KeyRange { return krs } -// IndexRangesToKVRanges converts index ranges to "KeyRange". -func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) { - return IndexRangesToKVRangesWithInterruptSignal(sc, tid, idxID, ranges, fb, nil, nil) -} - -// IndexRangesToKVRangesWithInterruptSignal converts index ranges to "KeyRange". -// The process can be interrupted by set `interruptSignal` to true. -func IndexRangesToKVRangesWithInterruptSignal(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, memTracker *memory.Tracker, interruptSignal *atomic.Value) ([]kv.KeyRange, error) { - return indexRangesToKVRangesForTablesWithInterruptSignal(sc, []int64{tid}, idxID, ranges, fb, memTracker, interruptSignal) -} - -// IndexRangesToKVRangesForTables converts indexes ranges to "KeyRange". -func IndexRangesToKVRangesForTables(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) { - return indexRangesToKVRangesForTablesWithInterruptSignal(sc, tids, idxID, ranges, fb, nil, nil) -} - -// IndexRangesToKVRangesForTablesWithInterruptSignal converts indexes ranges to "KeyRange". -// The process can be interrupted by set `interruptSignal` to true. -func indexRangesToKVRangesForTablesWithInterruptSignal(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, fb *statistics.QueryFeedback, memTracker *memory.Tracker, interruptSignal *atomic.Value) ([]kv.KeyRange, error) { - if fb == nil || fb.Hist == nil { - return indexRangesToKVWithoutSplit(sc, tids, idxID, ranges, memTracker, interruptSignal) - } - feedbackRanges := make([]*ranger.Range, 0, len(ranges)) - for _, ran := range ranges { - low, high, err := encodeIndexKey(sc, ran) - if err != nil { - return nil, err - } - feedbackRanges = append(feedbackRanges, &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)}, - HighVal: []types.Datum{types.NewBytesDatum(high)}, LowExclude: false, HighExclude: true}) - } - feedbackRanges, ok := fb.Hist.SplitRange(sc, feedbackRanges, true) - if !ok { - fb.Invalidate() - } - krs := make([]kv.KeyRange, 0, len(feedbackRanges)) - for _, ran := range feedbackRanges { - low, high := ran.LowVal[0].GetBytes(), ran.HighVal[0].GetBytes() - if ran.LowExclude { - low = kv.Key(low).PrefixNext() - } - ran.LowVal[0].SetBytes(low) - // If this range is split by histogram, then the high val will equal to one bucket's upper bound, - // since we need to guarantee each range falls inside the exactly one bucket, `PrefixNext` will make the - // high value greater than upper bound, so we store the high value here. - ran.HighVal[0].SetBytes(high) - if !ran.HighExclude { - high = kv.Key(high).PrefixNext() - } - for _, tid := range tids { - startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) - endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) - krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) - } - } - fb.StoreRanges(feedbackRanges) - return krs, nil -} - -// CommonHandleRangesToKVRanges converts common handle ranges to "KeyRange". -func CommonHandleRangesToKVRanges(sc *stmtctx.StatementContext, tids []int64, ranges []*ranger.Range) ([]kv.KeyRange, error) { - rans := make([]*ranger.Range, 0, len(ranges)) - for _, ran := range ranges { - low, high, err := encodeIndexKey(sc, ran) - if err != nil { - return nil, err - } - rans = append(rans, &ranger.Range{LowVal: []types.Datum{types.NewBytesDatum(low)}, - HighVal: []types.Datum{types.NewBytesDatum(high)}, LowExclude: false, HighExclude: true}) - } - krs := make([]kv.KeyRange, 0, len(rans)) - for _, ran := range rans { - low, high := ran.LowVal[0].GetBytes(), ran.HighVal[0].GetBytes() - if ran.LowExclude { - low = kv.Key(low).PrefixNext() - } - ran.LowVal[0].SetBytes(low) - for _, tid := range tids { - startKey := tablecodec.EncodeRowKey(tid, low) - endKey := tablecodec.EncodeRowKey(tid, high) - krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) - } - } - return krs, nil -} - // VerifyTxnScope verify whether the txnScope and visited physical table break the leader rule's dcLocation. func VerifyTxnScope(txnScope string, physicalTableID int64, is infoschema.InfoSchema) bool { if txnScope == "" || txnScope == kv.GlobalTxnScope { @@ -654,70 +496,3 @@ func VerifyTxnScope(txnScope string, physicalTableID int64, is infoschema.InfoSc } return true } - -func indexRangesToKVWithoutSplit(sc *stmtctx.StatementContext, tids []int64, idxID int64, ranges []*ranger.Range, memTracker *memory.Tracker, interruptSignal *atomic.Value) ([]kv.KeyRange, error) { - krs := make([]kv.KeyRange, 0, len(ranges)) - const CheckSignalStep = 8 - var estimatedMemUsage int64 - // encodeIndexKey and EncodeIndexSeekKey is time-consuming, thus we need to - // check the interrupt signal periodically. - for i, ran := range ranges { - low, high, err := encodeIndexKey(sc, ran) - if err != nil { - return nil, err - } - if i == 0 { - estimatedMemUsage += int64(cap(low) + cap(high)) - } - for _, tid := range tids { - startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) - endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) - if i == 0 { - estimatedMemUsage += int64(cap(startKey)) + int64(cap(endKey)) - } - krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) - } - if i%CheckSignalStep == 0 { - if i == 0 && memTracker != nil { - estimatedMemUsage *= int64(len(ranges)) - memTracker.Consume(estimatedMemUsage) - } - if interruptSignal != nil && interruptSignal.Load().(bool) { - return nil, nil - } - } - } - return krs, nil -} - -func encodeIndexKey(sc *stmtctx.StatementContext, ran *ranger.Range) ([]byte, []byte, error) { - low, err := codec.EncodeKey(sc, nil, ran.LowVal...) - if err != nil { - return nil, nil, err - } - if ran.LowExclude { - low = kv.Key(low).PrefixNext() - } - high, err := codec.EncodeKey(sc, nil, ran.HighVal...) - if err != nil { - return nil, nil, err - } - - if !ran.HighExclude { - high = kv.Key(high).PrefixNext() - } - - var hasNull bool - for _, highVal := range ran.HighVal { - if highVal.IsNull() { - hasNull = true - break - } - } - - if hasNull { - // Append 0 to make unique-key range [null, null] to be a scan rather than point-get. - high = kv.Key(high).Next() - } - return low, high, nil -} diff --git a/distsql/request_builder_test.go b/distsql/request_builder_test.go index 3b9a7926e0fe1..dbd2ecf4a507b 100644 --- a/distsql/request_builder_test.go +++ b/distsql/request_builder_test.go @@ -17,6 +17,10 @@ package distsql import ( "testing" + "github.com/pingcap/tipb/go-tipb" + "github.com/stretchr/testify/require" + + "github.com/pingcap/tidb/distsql/request" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx/stmtctx" @@ -28,8 +32,6 @@ import ( "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/ranger" - "github.com/pingcap/tipb/go-tipb" - "github.com/stretchr/testify/require" ) type handleRange struct { @@ -100,7 +102,7 @@ func TestTableRangesToKVRanges(t *testing.T) { }, } - actual := TableRangesToKVRanges(13, ranges, nil) + actual := request.TableRangesToKVRanges(13, ranges, nil) expect := []kv.KeyRange{ { StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, @@ -181,7 +183,7 @@ func TestIndexRangesToKVRanges(t *testing.T) { }, } - actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 12, 15, ranges, nil) + actual, err := request.IndexRangesToKVRanges(new(stmtctx.StatementContext), 12, 15, ranges, nil) require.NoError(t, err) for i := range actual { require.Equal(t, expect[i], actual[i]) @@ -590,7 +592,7 @@ func TestTableRangesToKVRangesWithFbs(t *testing.T) { }, } fb := newTestFb() - actual := TableRangesToKVRanges(0, ranges, fb) + actual := request.TableRangesToKVRanges(0, ranges, fb) expect := []kv.KeyRange{ { StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, @@ -612,7 +614,7 @@ func TestIndexRangesToKVRangesWithFbs(t *testing.T) { }, } fb := newTestFb() - actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 0, 0, ranges, fb) + actual, err := request.IndexRangesToKVRanges(new(stmtctx.StatementContext), 0, 0, ranges, fb) require.NoError(t, err) expect := []kv.KeyRange{ { diff --git a/executor/builder.go b/executor/builder.go index 82be642d742bb..6aafe661a5fce 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -34,6 +34,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/distsql/request" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor/aggfuncs" "github.com/pingcap/tidb/expression" @@ -3800,7 +3801,7 @@ func (h kvRangeBuilderFromRangeAndPartition) buildKeyRangeSeparately(ranges []*r for _, p := range h.partitions { pid := p.GetPhysicalID() meta := p.Meta() - kvRange, err := distsql.TableHandleRangesToKVRanges(h.sctx.GetSessionVars().StmtCtx, []int64{pid}, meta != nil && meta.IsCommonHandle, ranges, nil) + kvRange, err := request.TableHandleRangesToKVRanges(h.sctx.GetSessionVars().StmtCtx, []int64{pid}, meta != nil && meta.IsCommonHandle, ranges, nil) if err != nil { return nil, nil, err } @@ -3815,7 +3816,7 @@ func (h kvRangeBuilderFromRangeAndPartition) buildKeyRange(_ int64, ranges []*ra for _, p := range h.partitions { pid := p.GetPhysicalID() meta := p.Meta() - kvRange, err := distsql.TableHandleRangesToKVRanges(h.sctx.GetSessionVars().StmtCtx, []int64{pid}, meta != nil && meta.IsCommonHandle, ranges, nil) + kvRange, err := request.TableHandleRangesToKVRanges(h.sctx.GetSessionVars().StmtCtx, []int64{pid}, meta != nil && meta.IsCommonHandle, ranges, nil) if err != nil { return nil, err } @@ -4068,9 +4069,9 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l var tmpKvRanges []kv.KeyRange var err error if indexID == -1 { - tmpKvRanges, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{tableID}, ranges) + tmpKvRanges, err = request.CommonHandleRangesToKVRanges(sc, []int64{tableID}, ranges) } else { - tmpKvRanges, err = distsql.IndexRangesToKVRangesWithInterruptSignal(sc, tableID, indexID, ranges, nil, memTracker, interruptSignal) + tmpKvRanges, err = request.IndexRangesToKVRangesWithInterruptSignal(sc, tableID, indexID, ranges, nil, memTracker, interruptSignal) } if err != nil { return nil, err @@ -4111,9 +4112,9 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l } // Index id is -1 means it's a common handle. if indexID == -1 { - return distsql.CommonHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tableID}, tmpDatumRanges) + return request.CommonHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tableID}, tmpDatumRanges) } - return distsql.IndexRangesToKVRangesWithInterruptSignal(ctx.GetSessionVars().StmtCtx, tableID, indexID, tmpDatumRanges, nil, memTracker, interruptSignal) + return request.IndexRangesToKVRangesWithInterruptSignal(ctx.GetSessionVars().StmtCtx, tableID, indexID, tmpDatumRanges, nil, memTracker, interruptSignal) } func (b *executorBuilder) buildWindow(v *plannercore.PhysicalWindow) Executor { diff --git a/executor/distsql.go b/executor/distsql.go index 5a39794a8e76b..31dd4f6cbb30c 100644 --- a/executor/distsql.go +++ b/executor/distsql.go @@ -28,7 +28,11 @@ import ( "github.com/cznic/mathutil" "github.com/pingcap/errors" + "github.com/pingcap/tipb/go-tipb" + "go.uber.org/zap" + "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/distsql/request" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/charset" @@ -50,8 +54,6 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/ranger" - "github.com/pingcap/tipb/go-tipb" - "go.uber.org/zap" ) var ( @@ -224,9 +226,9 @@ func (e *IndexReaderExecutor) Next(ctx context.Context, req *chunk.Chunk) error func (e *IndexReaderExecutor) buildKeyRanges(sc *stmtctx.StatementContext, ranges []*ranger.Range, physicalID int64) ([]kv.KeyRange, error) { if e.index.ID == -1 { - return distsql.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, ranges) + return request.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, ranges) } - return distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, ranges, e.feedback) + return request.IndexRangesToKVRanges(sc, physicalID, e.index.ID, ranges, e.feedback) } // Open implements the Executor Open interface. @@ -434,9 +436,9 @@ func (e *IndexLookUpExecutor) buildTableKeyRanges() (err error) { } var kvRange []kv.KeyRange if e.index.ID == -1 { - kvRange, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, ranges) + kvRange, err = request.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, ranges) } else { - kvRange, err = distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, ranges, e.feedback) + kvRange, err = request.IndexRangesToKVRanges(sc, physicalID, e.index.ID, ranges, e.feedback) } if err != nil { return err @@ -446,9 +448,9 @@ func (e *IndexLookUpExecutor) buildTableKeyRanges() (err error) { } else { physicalID := getPhysicalTableID(e.table) if e.index.ID == -1 { - e.kvRanges, err = distsql.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, e.ranges) + e.kvRanges, err = request.CommonHandleRangesToKVRanges(sc, []int64{physicalID}, e.ranges) } else { - e.kvRanges, err = distsql.IndexRangesToKVRanges(sc, physicalID, e.index.ID, e.ranges, e.feedback) + e.kvRanges, err = request.IndexRangesToKVRanges(sc, physicalID, e.index.ID, e.ranges, e.feedback) } } return err diff --git a/executor/index_merge_reader.go b/executor/index_merge_reader.go index 60828bd514ac4..af601a381c6a9 100644 --- a/executor/index_merge_reader.go +++ b/executor/index_merge_reader.go @@ -27,7 +27,11 @@ import ( "github.com/cznic/mathutil" "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tipb/go-tipb" + "go.uber.org/zap" + "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/distsql/request" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/terror" @@ -41,8 +45,6 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/ranger" - "github.com/pingcap/tipb/go-tipb" - "go.uber.org/zap" ) var ( @@ -141,7 +143,7 @@ func (e *IndexMergeReaderExecutor) buildKeyRangesForTable(tbl table.Table) (rang _, ok := plan[0].(*plannercore.PhysicalIndexScan) if !ok { if tbl.Meta().IsCommonHandle { - keyRanges, err := distsql.CommonHandleRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, []int64{getPhysicalTableID(tbl)}, e.ranges[i]) + keyRanges, err := request.CommonHandleRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, []int64{getPhysicalTableID(tbl)}, e.ranges[i]) if err != nil { return nil, err } @@ -151,7 +153,7 @@ func (e *IndexMergeReaderExecutor) buildKeyRangesForTable(tbl table.Table) (rang } continue } - keyRange, err := distsql.IndexRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, getPhysicalTableID(tbl), e.indexes[i].ID, e.ranges[i], e.feedbacks[i]) + keyRange, err := request.IndexRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, getPhysicalTableID(tbl), e.indexes[i].ID, e.ranges[i], e.feedbacks[i]) if err != nil { return nil, err } diff --git a/planner/core/fragment.go b/planner/core/fragment.go index c8eae1c63d148..c47c19c22f060 100644 --- a/planner/core/fragment.go +++ b/planner/core/fragment.go @@ -19,7 +19,11 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tipb/go-tipb" + "go.uber.org/zap" + "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/distsql/request" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" @@ -28,8 +32,6 @@ import ( "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/logutil" - "github.com/pingcap/tipb/go-tipb" - "go.uber.org/zap" ) // Fragment is cut from the whole pushed-down plan by network communication. @@ -326,7 +328,7 @@ func (e *mppTaskGenerator) constructMPPTasksImpl(ctx context.Context, ts *Physic for _, p := range partitions { pid := p.GetPhysicalID() meta := p.Meta() - kvRanges, err := distsql.TableHandleRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, []int64{pid}, meta != nil && ts.Table.IsCommonHandle, splitedRanges, nil) + kvRanges, err := request.TableHandleRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, []int64{pid}, meta != nil && ts.Table.IsCommonHandle, splitedRanges, nil) if err != nil { return nil, errors.Trace(err) } @@ -339,7 +341,7 @@ func (e *mppTaskGenerator) constructMPPTasksImpl(ctx context.Context, ts *Physic return ret, nil } - kvRanges, err := distsql.TableHandleRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, []int64{ts.Table.ID}, ts.Table.IsCommonHandle, splitedRanges, nil) + kvRanges, err := request.TableHandleRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, []int64{ts.Table.ID}, ts.Table.IsCommonHandle, splitedRanges, nil) if err != nil { return nil, errors.Trace(err) } diff --git a/planner/core/plan_to_pb.go b/planner/core/plan_to_pb.go index e2425fa2056ee..87233b8ddeec2 100644 --- a/planner/core/plan_to_pb.go +++ b/planner/core/plan_to_pb.go @@ -16,7 +16,10 @@ package core import ( "github.com/pingcap/errors" + "github.com/pingcap/tipb/go-tipb" + "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/distsql/request" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" "github.com/pingcap/tidb/kv" @@ -28,7 +31,6 @@ import ( "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/ranger" - "github.com/pingcap/tipb/go-tipb" ) // ToPB implements PhysicalPlan ToPB interface. @@ -186,7 +188,7 @@ func (p *PhysicalTableScan) ToPB(ctx sessionctx.Context, storeType kv.StoreType) if storeType == kv.TiFlash && p.IsGlobalRead { tsExec.NextReadEngine = tipb.EngineType_TiFlash splitedRanges, _ := distsql.SplitRangesAcrossInt64Boundary(p.Ranges, false, false, p.Table.IsCommonHandle) - ranges, err := distsql.TableHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tsExec.TableId}, p.Table.IsCommonHandle, splitedRanges, nil) + ranges, err := request.TableHandleRangesToKVRanges(ctx.GetSessionVars().StmtCtx, []int64{tsExec.TableId}, p.Table.IsCommonHandle, splitedRanges, nil) if err != nil { return nil, err } From 1168dc766102904bb5ff6d4720339bf6ca5251bd Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Thu, 30 Dec 2021 13:51:06 +0800 Subject: [PATCH 7/9] Revert "rfc add" This reverts commit 9505955d60813a85c86cb9e6e25444e426b7797d. --- hackathon_rfc.md | 39 --------------------------------------- 1 file changed, 39 deletions(-) delete mode 100644 hackathon_rfc.md diff --git a/hackathon_rfc.md b/hackathon_rfc.md deleted file mode 100644 index f2f681979e7e4..0000000000000 --- a/hackathon_rfc.md +++ /dev/null @@ -1,39 +0,0 @@ -+ 作者:胡海峰(huhaifeng@pingcap.com)/李淳竹(lichunzhu@pingcap.com)/曹闯(2546768090@qq.com) -+ 项目进展:正在写 [demo](git@github.com:hackathon2021index/tidb.git) - -# 项目介绍 - 用 lightning 的方式来实现 索引相关ddl : 生成 sst 文件然后 ingest 到 tikv. - -# 背景&动机 - 在表数据量巨大的情况下,索引相关 ddl 是很慢的。而 TiDB 虽然支持 `在线 DDL`,但是还是会耗时很久。之前在 平安 POC ,跟 OB pk,ob 16分钟,我们要60分钟。 - - 我们 lightning 导入数据还是很快的,而导入数据本身包含 导入索引。所以,尝试将 lighting 导入索引功能来实现 tidb 的 ddl 。 - - -# 项目设计 -## 架构设计 - -原来的 `index ddl` 基本流程: -+ 修改表 meta 数据 -+ 修改索引数据 -+ finish - -现在,原来的没有变化,只需要把 `修改索引数据` 这里 修改为 `lightning` 来完成就可以了。 -`修改索引数据` 其实也有如下步骤: -+ 将索引列数据,主键数据 取出来 -+ 将 索引列数据 和 主键数据,构造为 kv 保存到本地。 -+ ddl 完成的时候,将 sst 文件 ingest 到 tikv - -该功能,主要涉及到 `tidb/ddl` / `table/tables` 相关组件 - -## 测试 - -- 功能验证 - - admin check - - 跟 ddl 速度对比 - - 是否正常走索引 -- 完备性验证 - - 正常加索引 - - 读时 加索引 - - 写时 加索引 - - 读写时 加索引 \ No newline at end of file From 435797ded444ac84f856bde031b92d323c6f98d8 Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Thu, 30 Dec 2021 13:58:21 +0800 Subject: [PATCH 8/9] revert more code --- ddl/index.go | 14 +- ddl/sst/common.go | 105 --------------- ddl/sst/glue/lightning_glue.go | 4 - ddl/sst/index.go | 234 --------------------------------- table/tables/index.go | 9 +- 5 files changed, 4 insertions(+), 362 deletions(-) delete mode 100644 ddl/sst/common.go delete mode 100644 ddl/sst/glue/lightning_glue.go delete mode 100644 ddl/sst/index.go diff --git a/ddl/index.go b/ddl/index.go index 7513b986ea8cf..9ef764993259d 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -20,8 +20,7 @@ import ( "sync/atomic" "time" - "github.com/pingcap/tidb/ddl/sst" - util2 "github.com/pingcap/tidb/table/tables/util" + tableutil "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/errors" "github.com/pingcap/failpoint" @@ -513,8 +512,6 @@ func (w *worker) onCreateIndex(d *ddlCtx, t *meta.Meta, job *model.Job, isPK boo originalState := indexInfo.State switch indexInfo.State { case model.StateNone: - // TODO: optimize index-ddl - sst.PrepareIndexOp(w.ctx, sst.DDLInfo{job.SchemaName, tblInfo, job.RealStartTS}) // none -> delete only indexInfo.State = model.StateDeleteOnly updateHiddenColumns(tblInfo, indexInfo, model.StatePublic) @@ -605,11 +602,6 @@ func (w *worker) onCreateIndex(d *ddlCtx, t *meta.Meta, job *model.Job, isPK boo } // Finish this job. job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) - // TODO: optimize index ddl. - err = sst.FinishIndexOp(w.ctx, job.StartTS) - if err != nil { - logutil.BgLogger().Error("FinishIndexOp err" + err.Error()) - } default: err = ErrInvalidDDLState.GenWithStackByArgs("index", tblInfo.State) } @@ -1088,7 +1080,7 @@ func (w *baseIndexWorker) getIndexRecord(idxInfo *model.IndexInfo, handle kv.Han idxVal[j] = idxColumnVal continue } - idxColumnVal, err = util2.GetColDefaultValue(w.sessCtx, col, w.defaultVals) + idxColumnVal, err = tableutil.GetColDefaultValue(w.sessCtx, col, w.defaultVals) if err != nil { return nil, errors.Trace(err) } @@ -1217,7 +1209,7 @@ func (w *addIndexWorker) checkHandleExists(key kv.Key, value []byte, handle kv.H if hasBeenBackFilled { return nil } - colInfos := util2.BuildRowcodecColInfoForIndexColumns(idxInfo, tblInfo) + colInfos := tableutil.BuildRowcodecColInfoForIndexColumns(idxInfo, tblInfo) values, err := tablecodec.DecodeIndexKV(key, value, idxColLen, tablecodec.HandleNotNeeded, colInfos) if err != nil { return err diff --git a/ddl/sst/common.go b/ddl/sst/common.go deleted file mode 100644 index 3b7390387ad8a..0000000000000 --- a/ddl/sst/common.go +++ /dev/null @@ -1,105 +0,0 @@ -package sst - -import ( - "context" - "database/sql" - "fmt" - "github.com/google/uuid" - "github.com/pingcap/tidb/br/pkg/lightning/backend" - "github.com/pingcap/tidb/br/pkg/lightning/backend/local" - "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" - "github.com/pingcap/tidb/br/pkg/lightning/config" - "github.com/pingcap/tidb/br/pkg/lightning/glue" - "github.com/pingcap/tidb/br/pkg/lightning/log" - "github.com/pingcap/tidb/parser" - "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/util/logutil" - "go.uber.org/zap" - "io/ioutil" - "sync/atomic" - "time" -) - -var ( - limit = int64(1024) - tblId int64 = time.Now().Unix() -) - -func genNextTblId() int64 { - return atomic.AddInt64(&tblId, 1) -} - -func init() { - var rLimit local.Rlim_t - rLimit, err := local.GetSystemRLimit() - if err != nil { - logutil.BgLogger().Warn(fmt.Sprintf("GetSystemRLimit err:%s;use default 1024.", err.Error())) - } else { - limit = int64(rLimit) - } -} - -type glue_ struct{} - -func (_ glue_) OwnsSQLExecutor() bool { - return false -} -func (_ glue_) GetSQLExecutor() glue.SQLExecutor { - return nil -} -func (_ glue_) GetDB() (*sql.DB, error) { - return nil, nil -} -func (_ glue_) GetParser() *parser.Parser { - return nil -} -func (_ glue_) GetTables(context.Context, string) ([]*model.TableInfo, error) { - return nil, nil -} -func (_ glue_) GetSession(context.Context) (checkpoints.Session, error) { - return nil, nil -} -func (_ glue_) OpenCheckpointsDB(context.Context, *config.Config) (checkpoints.DB, error) { - return nil, nil -} - -// Record is used to report some information (key, value) to host TiDB, including progress, stage currently -func (_ glue_) Record(string, uint64) { - -} - -func makeLogger(tag string, engineUUID uuid.UUID) log.Logger { - obj := logutil.BgLogger().With( - zap.String("engineTag", tag), - zap.Stringer("engineUUID", engineUUID), - ) - return log.Logger{obj} -} - -func generateLightningConfig(info ClusterInfo) *config.Config { - cfg := config.Config{} - cfg.DefaultVarsForImporterAndLocalBackend() - name, err := ioutil.TempDir("/tmp/", "lightning") - if err != nil { - logutil.BgLogger().Warn(fmt.Sprintf("TempDir err:%s.", err.Error())) - name = "/tmp/lightning" - } - // cfg.TikvImporter.RangeConcurrency = 32 - cfg.Checkpoint.Enable = false - cfg.TikvImporter.SortedKVDir = name - cfg.TikvImporter.DuplicateResolution = config.DupeResAlgNone - cfg.TiDB.PdAddr = info.PdAddr - cfg.TiDB.Host = "127.0.0.1" - cfg.TiDB.StatusPort = int(info.Status) - return &cfg -} - -func createLocalBackend(ctx context.Context, info ClusterInfo) (backend.Backend, error) { - cfg := generateLightningConfig(info) - tls, err := cfg.ToTLS() - if err != nil { - return backend.Backend{}, err - } - var g glue_ - return local.NewLocalBackend(ctx, tls, cfg, &g, int(limit), nil) -} diff --git a/ddl/sst/glue/lightning_glue.go b/ddl/sst/glue/lightning_glue.go deleted file mode 100644 index 7dc475ad7cfbb..0000000000000 --- a/ddl/sst/glue/lightning_glue.go +++ /dev/null @@ -1,4 +0,0 @@ -package glue - -type engineGlue interface { -} diff --git a/ddl/sst/index.go b/ddl/sst/index.go deleted file mode 100644 index 77b2fa37743ac..0000000000000 --- a/ddl/sst/index.go +++ /dev/null @@ -1,234 +0,0 @@ -package sst - -import ( - "context" - "encoding/binary" - "flag" - "fmt" - "sync" - "sync/atomic" - - "github.com/pingcap/errors" - "github.com/twmb/murmur3" - - "github.com/pingcap/tidb/br/pkg/lightning/backend" - "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" - "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" - "github.com/pingcap/tidb/br/pkg/lightning/config" - tidbcfg "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/util/logutil" -) - -func LogDebug(format string, a ...interface{}) { - fmt.Printf("debug] %s", fmt.Sprintf(format, a...)) -} - -// pdaddr; tidb-host/status -type ClusterInfo struct { - PdAddr string - // TidbHost string - 127.0.0.1 - Port uint - Status uint -} - -type DDLInfo struct { - Schema string - Table *model.TableInfo - StartTs uint64 -} - -const ( - indexEngineID = -1 // same to restore.table_restore.go indexEngineID -) - -type engineInfo struct { - *backend.OpenedEngine - writer *backend.LocalEngineWriter - cfg *backend.EngineConfig - ref int32 -} - -func (ec *engineCache) put(startTs uint64, cfg *backend.EngineConfig, en *backend.OpenedEngine) { - ec.mtx.Lock() - ec.cache[startTs] = &engineInfo{ - en, - nil, - cfg, - 0, - } - ec.mtx.Unlock() - LogDebug("put %d", startTs) -} - -var ( - ErrNotFound = errors.New("not object in this cache") - ErrWasInUse = errors.New("this object was in used") - ec = engineCache{cache: map[uint64]*engineInfo{}} - cluster ClusterInfo - IndexDDLLightning = flag.Bool("ddl-mode", true, "index ddl use sst mode") -) - -func (ec *engineCache) getEngineInfo(startTs uint64) (*engineInfo, error) { - LogDebug("getEngineInfo by %d", startTs) - ec.mtx.RUnlock() - ei := ec.cache[startTs] - // `ref` or run by atomic ? - // if ei.ref { - // ei = nil - // } else { - // ei.ref = true - // } - ec.mtx.Unlock() - if false == atomic.CompareAndSwapInt32(&ei.ref, 0, 1) { - return nil, ErrWasInUse - } - return ei, nil -} - -func (ec *engineCache) releaseRef(startTs uint64) { - LogDebug("releaseRef by %d", startTs) - ec.mtx.RUnlock() - ei := ec.cache[startTs] - ec.mtx.Unlock() - atomic.CompareAndSwapInt32(&ei.ref, 1, 0) -} - -func (ec *engineCache) getWriter(startTs uint64) (*backend.LocalEngineWriter, error) { - LogDebug("getWriter by %d", startTs) - ei, err := ec.getEngineInfo(startTs) - if err != nil { - return nil, err - } - if ei.writer != nil { - return ei.writer, nil - } - ei.writer, err = ei.OpenedEngine.LocalWriter(context.TODO(), &backend.LocalWriterConfig{}) - if err != nil { - return nil, err - } - return ei.writer, nil -} - -type engineCache struct { - cache map[uint64]*engineInfo - mtx sync.RWMutex -} - -func init() { - cfg := tidbcfg.GetGlobalConfig() - cluster.PdAddr = cfg.AdvertiseAddress - cluster.Port = cfg.Port - cluster.Status = cfg.Status.StatusPort - LogDebug("InitOnce %+v", cluster) -} - -// TODO: 1. checkpoint?? -// TODO: 2. EngineID can use startTs for only. -func PrepareIndexOp(ctx context.Context, ddl DDLInfo) error { - LogDebug("PrepareIndexOp %+v", ddl) - info := cluster - be, err := createLocalBackend(ctx, info) - if err != nil { - return fmt.Errorf("PrepareIndexOp.createLocalBackend err:%w", err) - } - cpt := checkpoints.TidbTableInfo{ - genNextTblId(), - ddl.Schema, - ddl.Table.Name.String(), - ddl.Table, - } - var cfg backend.EngineConfig - cfg.TableInfo = &cpt - // - var b [8]byte - binary.BigEndian.PutUint64(b[:], ddl.StartTs) - h := murmur3.New32() - h.Write(b[:]) - en, err := be.OpenEngine(ctx, &cfg, ddl.Table.Name.String(), int32(h.Sum32())) - if err != nil { - return fmt.Errorf("PrepareIndexOp.OpenEngine err:%w", err) - } - ec.put(ddl.StartTs, &cfg, en) - return nil -} - -func IndexOperator(ctx context.Context, startTs uint64, kvp kv.KvPairs) error { - if kvp.Size() <= 0 { - return nil - } - lw, err := ec.getWriter(startTs) - if err != nil { - return fmt.Errorf("IndexOperator.getWriter err:%w", err) - } - defer ec.releaseRef(startTs) - err = lw.WriteRows(ctx, nil, &kvp) - if err != nil { - return fmt.Errorf("IndexOperator.WriteRows err:%w", err) - } - return nil -} - -// stop this routine by close(kvs) or some context error. -func RunIndexOpRoutine(ctx context.Context, engine *backend.OpenedEngine, kvs <-chan kv.KvPairs) error { - logutil.BgLogger().Info("createIndex-routine on dbname.tbl") - - running := true - for running { - select { - case <-ctx.Done(): - fmt.Errorf("RunIndexOpRoutine was exit by Context.Done") - case kvp, close := <-kvs: - if close { - running = false - break - } - err := process(ctx, engine, kvp) - if err != nil { - return fmt.Errorf("process err:%s.clean data.", err.Error()) - } - } - } - logutil.BgLogger().Info("createIndex-routine on dbname.tbl exit...") - return nil -} - -func FinishIndexOp(ctx context.Context, startTs uint64) error { - LogDebug("FinishIndexOp %d", startTs) - ei, err := ec.getEngineInfo(startTs) - if err != nil { - return err - } - defer ec.releaseRef(startTs) - indexEngine := ei.OpenedEngine - cfg := ei.cfg - // - closeEngine, err1 := indexEngine.Close(ctx, cfg) - if err1 != nil { - return fmt.Errorf("engine.Close err:%w", err1) - } - // use default value first; - err = closeEngine.Import(ctx, int64(config.SplitRegionSize)) - if err != nil { - return fmt.Errorf("engine.Import err:%w", err) - } - err = closeEngine.Cleanup(ctx) - if err != nil { - return fmt.Errorf("engine.Cleanup err:%w", err) - } - return nil -} - -func process(ctx context.Context, indexEngine *backend.OpenedEngine, kvp kv.KvPairs) error { - indexWriter, err := indexEngine.LocalWriter(ctx, &backend.LocalWriterConfig{}) - if err != nil { - return fmt.Errorf("LocalWriter err:%s", err.Error()) - } - // columnNames 可以不需要,因为我们肯定是 非 sorted 的数据. - err = indexWriter.WriteRows(ctx, nil, &kvp) - if err != nil { - indexWriter.Close(ctx) - return fmt.Errorf("WriteRows err:%s", err.Error()) - } - return nil -} diff --git a/table/tables/index.go b/table/tables/index.go index e52518423e219..07c68b2f8bd55 100644 --- a/table/tables/index.go +++ b/table/tables/index.go @@ -23,8 +23,6 @@ import ( "github.com/opentracing/opentracing-go" "github.com/pingcap/errors" - lkv "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" - "github.com/pingcap/tidb/ddl/sst" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" @@ -186,12 +184,7 @@ func (c *index) Create(sctx sessionctx.Context, txn kv.Transaction, indexedValue if err != nil { return nil, err } - // TODO: optimize index ddl - if *sst.IndexDDLLightning { - var kvp lkv.KvPairs - err = sst.IndexOperator(ctx, txn.StartTS(), kvp) - return nil, err - } + if !distinct || skipCheck || opt.Untouched { err = txn.GetMemBuffer().Set(key, idxVal) return nil, err From 6a912e709e07cba5318a13eebd52d4e9ebdb18db Mon Sep 17 00:00:00 2001 From: Chunzhu Li Date: Thu, 30 Dec 2021 14:06:59 +0800 Subject: [PATCH 9/9] refind package names --- br/pkg/backup/client.go | 4 ++-- ddl/ddl_api.go | 8 ++++---- planner/core/physical_plans.go | 4 ++-- planner/core/planbuilder.go | 8 ++++---- server/http_handler.go | 4 ++-- table/tables/tables.go | 22 +++++++++++----------- 6 files changed, 25 insertions(+), 25 deletions(-) diff --git a/br/pkg/backup/client.go b/br/pkg/backup/client.go index 07242490739b6..d8cf30b5a63c6 100644 --- a/br/pkg/backup/client.go +++ b/br/pkg/backup/client.go @@ -31,7 +31,7 @@ import ( "google.golang.org/grpc/status" "github.com/pingcap/tidb/br/pkg/conn" - util2 "github.com/pingcap/tidb/br/pkg/conn/util" + connutil "github.com/pingcap/tidb/br/pkg/conn/util" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/metautil" @@ -488,7 +488,7 @@ func (bc *Client) BackupRange( zap.Uint32("concurrency", req.Concurrency)) var allStores []*metapb.Store - allStores, err = conn.GetAllTiKVStoresWithRetry(ctx, bc.mgr.GetPDClient(), util2.SkipTiFlash) + allStores, err = conn.GetAllTiKVStoresWithRetry(ctx, bc.mgr.GetPDClient(), connutil.SkipTiFlash) if err != nil { return errors.Trace(err) } diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 3cb5d61de3789..3e7ca5743c0e4 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -51,7 +51,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" - util2 "github.com/pingcap/tidb/table/tables/util" + tableutil "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util" @@ -1637,7 +1637,7 @@ func buildTableInfo( if tbInfo.IsCommonHandle { // Ensure tblInfo's each non-unique secondary-index's len + primary-key's len <= MaxIndexLength for clustered index table. var pkLen, idxLen int - pkLen, err = indexColumnsLen(tbInfo.Columns, util2.FindPrimaryIndex(tbInfo).Columns) + pkLen, err = indexColumnsLen(tbInfo.Columns, tableutil.FindPrimaryIndex(tbInfo).Columns) if err != nil { return } @@ -4282,7 +4282,7 @@ func checkColumnWithIndexConstraint(tbInfo *model.TableInfo, originalCol, newCol break } - pkIndex := util2.FindPrimaryIndex(tbInfo) + pkIndex := tableutil.FindPrimaryIndex(tbInfo) var clusteredPkLen int if tbInfo.IsCommonHandle { var err error @@ -5464,7 +5464,7 @@ func (d *ddl) CreateIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.Inde if !unique && tblInfo.IsCommonHandle { // Ensure new created non-unique secondary-index's len + primary-key's len <= MaxIndexLength in clustered index table. var pkLen, idxLen int - pkLen, err = indexColumnsLen(tblInfo.Columns, util2.FindPrimaryIndex(tblInfo).Columns) + pkLen, err = indexColumnsLen(tblInfo.Columns, tableutil.FindPrimaryIndex(tblInfo).Columns) if err != nil { return err } diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go index 35f5fde84b0a6..322c84c9c1bdb 100644 --- a/planner/core/physical_plans.go +++ b/planner/core/physical_plans.go @@ -33,7 +33,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" - util2 "github.com/pingcap/tidb/table/tables/util" + tablesutil "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/stringutil" @@ -532,7 +532,7 @@ func (ts *PhysicalTableScan) IsPartition() (bool, int64) { func (ts *PhysicalTableScan) ResolveCorrelatedColumns() ([]*ranger.Range, error) { access := ts.AccessCondition if ts.Table.IsCommonHandle { - pkIdx := util2.FindPrimaryIndex(ts.Table) + pkIdx := tablesutil.FindPrimaryIndex(ts.Table) idxCols, idxColLens := expression.IndexInfo2PrefixCols(ts.Columns, ts.Schema().Columns, pkIdx) for _, cond := range access { newCond, err := expression.SubstituteCorCol2Constant(cond) diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 439e388a5d2a8..43b2a6f6ba3e5 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -48,7 +48,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" - util3 "github.com/pingcap/tidb/table/tables/util" + tablesutil "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/table/temptable" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" @@ -1524,7 +1524,7 @@ func tryGetCommonHandleCols(t table.Table, allColSchema *expression.Schema) ([]* if !tblInfo.IsCommonHandle { return nil, nil, false } - pk := util3.FindPrimaryIndex(tblInfo) + pk := tablesutil.FindPrimaryIndex(tblInfo) commonHandleCols, _ := expression.IndexInfo2Cols(tblInfo.Columns, allColSchema.Columns, pk) commonHandelColInfos := tables.TryGetCommonPkColumns(t) return commonHandelColInfos, commonHandleCols, true @@ -1738,7 +1738,7 @@ func BuildHandleColsForAnalyze(ctx sessionctx.Context, tblInfo *model.TableInfo, Index: index, }} case tblInfo.IsCommonHandle: - pkIdx := util3.FindPrimaryIndex(tblInfo) + pkIdx := tablesutil.FindPrimaryIndex(tblInfo) pkColLen := len(pkIdx.Columns) columns := make([]*expression.Column, pkColLen) for i := 0; i < pkColLen; i++ { @@ -3715,7 +3715,7 @@ func buildHandleColumnInfos(tblInfo *model.TableInfo) []*model.ColumnInfo { return []*model.ColumnInfo{col} } case tblInfo.IsCommonHandle: - pkIdx := util3.FindPrimaryIndex(tblInfo) + pkIdx := tablesutil.FindPrimaryIndex(tblInfo) pkCols := make([]*model.ColumnInfo, 0, len(pkIdx.Columns)) cols := tblInfo.Columns for _, idxCol := range pkIdx.Columns { diff --git a/server/http_handler.go b/server/http_handler.go index b940a9c0f8712..2836245c81d08 100644 --- a/server/http_handler.go +++ b/server/http_handler.go @@ -59,7 +59,7 @@ import ( "github.com/pingcap/tidb/store/helper" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" - util2 "github.com/pingcap/tidb/table/tables/util" + tablesutil "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" @@ -176,7 +176,7 @@ func (t *tikvHandlerTool) getHandle(tb table.PhysicalTable, params map[string]st handle = kv.IntHandle(intHandle) } else { tblInfo := tb.Meta() - pkIdx := util2.FindPrimaryIndex(tblInfo) + pkIdx := tablesutil.FindPrimaryIndex(tblInfo) if pkIdx == nil || !tblInfo.IsCommonHandle { return nil, errors.BadRequestf("Clustered common handle not found.") } diff --git a/table/tables/tables.go b/table/tables/tables.go index 0497485b70342..65d8d6608b636 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -43,8 +43,8 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" - context2 "github.com/pingcap/tidb/table/tables/context" - util2 "github.com/pingcap/tidb/table/tables/util" + tablecontext "github.com/pingcap/tidb/table/tables/context" + tablesutil "github.com/pingcap/tidb/table/tables/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" @@ -515,7 +515,7 @@ func adjustRowValuesBuf(writeBufs *variable.WriteStmtBufs, rowLen int) { // ClearAddRecordCtx remove `CommonAddRecordCtx` from session context func ClearAddRecordCtx(ctx sessionctx.Context) { - ctx.ClearValue(context2.AddRecordCtxKey) + ctx.ClearValue(tablecontext.AddRecordCtxKey) } // TryGetCommonPkColumnIds get the IDs of primary key column if the table has common handle. @@ -523,7 +523,7 @@ func TryGetCommonPkColumnIds(tbl *model.TableInfo) []int64 { if !tbl.IsCommonHandle { return nil } - pkIdx := util2.FindPrimaryIndex(tbl) + pkIdx := tablesutil.FindPrimaryIndex(tbl) pkColIds := make([]int64, 0, len(pkIdx.Columns)) for _, idxCol := range pkIdx.Columns { pkColIds = append(pkColIds, tbl.Columns[idxCol.Offset].ID) @@ -551,7 +551,7 @@ func TryGetCommonPkColumns(tbl table.Table) []*table.Column { if !tbl.Meta().IsCommonHandle { return nil } - pkIdx := util2.FindPrimaryIndex(tbl.Meta()) + pkIdx := tablesutil.FindPrimaryIndex(tbl.Meta()) cols := tbl.Cols() pkCols := make([]*table.Column, 0, len(pkIdx.Columns)) for _, idxCol := range pkIdx.Columns { @@ -637,7 +637,7 @@ func (t *TableCommon) AddRecord(sctx sessionctx.Context, r []types.Datum, opts . recordID = kv.IntHandle(r[tblInfo.GetPkColInfo().Offset].GetInt64()) hasRecordID = true } else if tblInfo.IsCommonHandle { - pkIdx := util2.FindPrimaryIndex(tblInfo) + pkIdx := tablesutil.FindPrimaryIndex(tblInfo) pkDts := make([]types.Datum, 0, len(pkIdx.Columns)) for _, idxCol := range pkIdx.Columns { pkDts = append(pkDts, r[idxCol.Offset]) @@ -676,7 +676,7 @@ func (t *TableCommon) AddRecord(sctx sessionctx.Context, r []types.Datum, opts . var colIDs, binlogColIDs []int64 var row, binlogRow []types.Datum - if recordCtx, ok := sctx.Value(context2.AddRecordCtxKey).(*context2.CommonAddRecordCtx); ok { + if recordCtx, ok := sctx.Value(tablecontext.AddRecordCtxKey).(*tablecontext.CommonAddRecordCtx); ok { colIDs = recordCtx.ColIDs[:0] row = recordCtx.Row[:0] } else { @@ -888,7 +888,7 @@ func RowWithCols(t table.Table, ctx sessionctx.Context, h kv.Handle, cols []*tab if err != nil { return nil, err } - v, _, err := util2.DecodeRawRowData(ctx, t.Meta(), h, cols, value) + v, _, err := tablesutil.DecodeRawRowData(ctx, t.Meta(), h, cols, value) if err != nil { return nil, err } @@ -1168,7 +1168,7 @@ func IterRecords(t table.Table, ctx sessionctx.Context, cols []*table.Column, data[col.Offset] = rowMap[col.ID] continue } - data[col.Offset], err = util2.GetColDefaultValue(ctx, col, defaultVals) + data[col.Offset], err = tablesutil.GetColDefaultValue(ctx, col, defaultVals) if err != nil { return err } @@ -1325,7 +1325,7 @@ func CanSkip(info *model.TableInfo, col *table.Column, value *types.Datum) bool return true } if col.IsCommonHandleColumn(info) { - pkIdx := util2.FindPrimaryIndex(info) + pkIdx := tablesutil.FindPrimaryIndex(info) for _, idxCol := range pkIdx.Columns { if info.Columns[idxCol.Offset].ID != col.ID { continue @@ -1591,7 +1591,7 @@ func TryGetHandleRestoredDataWrapper(t table.Table, row []types.Datum, rowMap ma return nil } rsData := make([]types.Datum, 0, 4) - pkIdx := util2.FindPrimaryIndex(t.Meta()) + pkIdx := tablesutil.FindPrimaryIndex(t.Meta()) for _, pkIdxCol := range pkIdx.Columns { pkCol := t.Meta().Columns[pkIdxCol.Offset] if !types.NeedRestoredData(&pkCol.FieldType) {