diff --git a/br/pkg/lightning/backend/local/local.go b/br/pkg/lightning/backend/local/local.go
index 5f2fc289a60d3..03a21be2a9311 100644
--- a/br/pkg/lightning/backend/local/local.go
+++ b/br/pkg/lightning/backend/local/local.go
@@ -1672,7 +1672,7 @@ func (local *local) ImportEngine(ctx context.Context, engineUUID uuid.UUID, regi
 		needSplit = true
 	})
 	for i := 0; i < maxRetryTimes; i++ {
-		err = local.SplitAndScatterRegionInBatches(ctx, unfinishedRanges, lf.tableInfo, needSplit, regionSplitSize, maxBatchSplitRanges)
+		err = local.SplitAndScatterRegionInBatches(ctx, unfinishedRanges, needSplit, maxBatchSplitRanges)
 		if err == nil || common.IsContextCanceledError(err) {
 			break
 		}
diff --git a/br/pkg/lightning/backend/local/localhelper.go b/br/pkg/lightning/backend/local/localhelper.go
index cd8a27b9bda48..5c423f8c26cb6 100644
--- a/br/pkg/lightning/backend/local/localhelper.go
+++ b/br/pkg/lightning/backend/local/localhelper.go
@@ -31,7 +31,6 @@ import (
 	sst "github.com/pingcap/kvproto/pkg/import_sstpb"
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/kvproto/pkg/pdpb"
-	"github.com/pingcap/tidb/br/pkg/lightning/checkpoints"
 	"github.com/pingcap/tidb/br/pkg/lightning/common"
 	"github.com/pingcap/tidb/br/pkg/lightning/log"
 	"github.com/pingcap/tidb/br/pkg/logutil"
@@ -67,9 +66,7 @@ var (
 func (local *local) SplitAndScatterRegionInBatches(
 	ctx context.Context,
 	ranges []Range,
-	tableInfo *checkpoints.TidbTableInfo,
 	needSplit bool,
-	regionSplitSize int64,
 	batchCnt int,
 ) error {
 	for i := 0; i < len(ranges); i += batchCnt {
@@ -77,7 +74,7 @@ func (local *local) SplitAndScatterRegionInBatches(
 		if len(batch) > batchCnt {
 			batch = batch[:batchCnt]
 		}
-		if err := local.SplitAndScatterRegionByRanges(ctx, batch, tableInfo, needSplit, regionSplitSize); err != nil {
+		if err := local.SplitAndScatterRegionByRanges(ctx, batch, needSplit); err != nil {
 			return errors.Trace(err)
 		}
 	}
@@ -91,18 +88,13 @@ func (local *local) SplitAndScatterRegionInBatches(
 func (local *local) SplitAndScatterRegionByRanges(
 	ctx context.Context,
 	ranges []Range,
-	tableInfo *checkpoints.TidbTableInfo,
 	needSplit bool,
-	regionSplitSize int64,
 ) error {
 	if len(ranges) == 0 {
 		return nil
 	}
 
-	db, err := local.g.GetDB()
-	if err != nil {
-		return errors.Trace(err)
-	}
+	var err error
 
 	minKey := codec.EncodeBytes([]byte{}, ranges[0].start)
 	maxKey := codec.EncodeBytes([]byte{}, ranges[len(ranges)-1].end)
@@ -110,7 +102,6 @@ func (local *local) SplitAndScatterRegionByRanges(
 	scatterRegions := make([]*split.RegionInfo, 0)
 	var retryKeys [][]byte
 	waitTime := splitRegionBaseBackOffTime
-	skippedKeys := 0
 	for i := 0; i < splitRetryTimes; i++ {
 		log.FromContext(ctx).Info("split and scatter region",
 			logutil.Key("minKey", minKey),
@@ -172,16 +163,6 @@ func (local *local) SplitAndScatterRegionByRanges(
 			return nil
 		}
 
-		var tableRegionStats map[uint64]int64
-		if tableInfo != nil {
-			tableRegionStats, err = fetchTableRegionSizeStats(ctx, db, tableInfo.ID)
-			if err != nil {
-				log.FromContext(ctx).Warn("fetch table region size statistics failed",
-					zap.String("table", tableInfo.Name), zap.Error(err))
-				tableRegionStats, err = make(map[uint64]int64), nil
-			}
-		}
-
 		regionMap := make(map[uint64]*split.RegionInfo)
 		for _, region := range regions {
 			regionMap[region.Region.GetId()] = region
@@ -291,15 +272,6 @@ func (local *local) SplitAndScatterRegionByRanges(
 		}
 	sendLoop:
 		for regionID, keys := range splitKeyMap {
-			// if region not in tableRegionStats, that means this region is newly split, so
-			// we can skip split it again.
-			regionSize, ok := tableRegionStats[regionID]
-			if !ok {
-				log.FromContext(ctx).Warn("region stats not found", zap.Uint64("region", regionID))
-			}
-			if len(keys) == 1 && regionSize < regionSplitSize {
-				skippedKeys++
-			}
 			select {
 			case ch <- &splitInfo{region: regionMap[regionID], keys: keys}:
 			case <-ctx.Done():
@@ -335,11 +307,9 @@ func (local *local) SplitAndScatterRegionByRanges(
 	scatterCount, err := local.waitForScatterRegions(ctx, scatterRegions)
 	if scatterCount == len(scatterRegions) {
 		log.FromContext(ctx).Info("waiting for scattering regions done",
-			zap.Int("skipped_keys", skippedKeys),
 			zap.Int("regions", len(scatterRegions)), zap.Duration("take", time.Since(startTime)))
 	} else {
 		log.FromContext(ctx).Info("waiting for scattering regions timeout",
-			zap.Int("skipped_keys", skippedKeys),
 			zap.Int("scatterCount", scatterCount),
 			zap.Int("regions", len(scatterRegions)),
 			zap.Duration("take", time.Since(startTime)),
diff --git a/br/pkg/lightning/backend/local/localhelper_test.go b/br/pkg/lightning/backend/local/localhelper_test.go
index dccda6716d3d0..b233437685950 100644
--- a/br/pkg/lightning/backend/local/localhelper_test.go
+++ b/br/pkg/lightning/backend/local/localhelper_test.go
@@ -487,7 +487,7 @@ func doTestBatchSplitRegionByRanges(ctx context.Context, t *testing.T, hook clie
 			split.ScanRegionAttemptTimes = backup
 		}()
 	}
-	err = local.SplitAndScatterRegionByRanges(ctx, ranges, nil, true, 1000)
+	err = local.SplitAndScatterRegionByRanges(ctx, ranges, true)
 	if len(errPat) == 0 {
 		require.NoError(t, err)
 	} else {
@@ -650,7 +650,7 @@ func TestSplitAndScatterRegionInBatches(t *testing.T) {
 		})
 	}
 
-	err := local.SplitAndScatterRegionInBatches(ctx, ranges, nil, true, 1000, 4)
+	err := local.SplitAndScatterRegionInBatches(ctx, ranges, true, 4)
 	require.NoError(t, err)
 
 	rangeStart := codec.EncodeBytes([]byte{}, []byte("a"))
@@ -747,7 +747,7 @@ func doTestBatchSplitByRangesWithClusteredIndex(t *testing.T, hook clientHook) {
 		start = e
 	}
 
-	err := local.SplitAndScatterRegionByRanges(ctx, ranges, nil, true, 1000)
+	err := local.SplitAndScatterRegionByRanges(ctx, ranges, true)
 	require.NoError(t, err)
 
 	startKey := codec.EncodeBytes([]byte{}, rangeKeys[0])
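
Note: the sketch below is not part of the patch. It is a minimal, self-contained Go illustration of the batching pattern that SplitAndScatterRegionInBatches keeps after dropping tableInfo and regionSplitSize; the Range type and the splitAndScatter helper here are hypothetical stand-ins for the lightning types and for SplitAndScatterRegionByRanges(ctx, batch, needSplit), not the real implementations.

package main

import (
	"context"
	"fmt"
)

// Range is a hypothetical stand-in for lightning's key range (start/end keys).
type Range struct {
	start, end []byte
}

// splitAndScatter stands in for SplitAndScatterRegionByRanges with the
// simplified signature used after this change: (ctx, batch, needSplit).
func splitAndScatter(ctx context.Context, batch []Range, needSplit bool) error {
	fmt.Printf("split and scatter %d ranges (needSplit=%v)\n", len(batch), needSplit)
	return nil
}

// splitInBatches mirrors the loop in SplitAndScatterRegionInBatches: the
// ranges are chunked into slices of at most batchCnt and each chunk is handed
// to the per-batch worker, which no longer needs tableInfo or regionSplitSize.
func splitInBatches(ctx context.Context, ranges []Range, needSplit bool, batchCnt int) error {
	for i := 0; i < len(ranges); i += batchCnt {
		batch := ranges[i:]
		if len(batch) > batchCnt {
			batch = batch[:batchCnt]
		}
		if err := splitAndScatter(ctx, batch, needSplit); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// Ten dummy ranges processed in batches of four: 4 + 4 + 2.
	ranges := make([]Range, 10)
	_ = splitInBatches(context.Background(), ranges, true, 4)
}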