From b1138311741e1a4058ce94c1072d355679ad9359 Mon Sep 17 00:00:00 2001
From: you06
Date: Wed, 28 Jun 2023 15:54:50 +0800
Subject: [PATCH] reload region cache when store is resolved from invalid status (#843) (#846)

* reload region cache when store is resolved from invalid

Signed-off-by: you06

* reload region once

Signed-off-by: you06

* build in go1.18

Signed-off-by: you06

* build in go1.18

Signed-off-by: you06

* handle region reload in resolve goroutine

Signed-off-by: you06

* retest

Signed-off-by: you06

* fix data race (#736)

Signed-off-by: Smityz

Co-authored-by: disksing
Signed-off-by: you06

* build with go 1.18

Signed-off-by: you06

* fix integration test (#673)

Signed-off-by: disksing
Signed-off-by: you06

* Update internal/locate/region_cache.go

Co-authored-by: crazycs
Signed-off-by: you06

* address comment

Signed-off-by: you06

* address comment

Signed-off-by: you06

---------

Signed-off-by: you06
Signed-off-by: Smityz
Signed-off-by: disksing
Co-authored-by: Smilencer
Co-authored-by: disksing
Co-authored-by: crazycs
---
 error/error.go                          |  2 +-
 integration_tests/store_test.go         |  3 +
 internal/locate/region_cache.go         | 97 +++++++++++++++++++++----
 internal/locate/region_cache_test.go    | 14 ++--
 internal/locate/region_request.go       | 12 ++-
 internal/locate/region_request3_test.go | 36 +++++----
 internal/locate/region_request_test.go  |  4 +-
 7 files changed, 127 insertions(+), 41 deletions(-)

diff --git a/error/error.go b/error/error.go
index 82a8c3d8d..ff6754da2 100644
--- a/error/error.go
+++ b/error/error.go
@@ -244,7 +244,7 @@ type ErrAssertionFailed struct {
 	*kvrpcpb.AssertionFailed
 }
 
-// ErrLockOnlyIfExistsNoReturnValue is used when the flag `LockOnlyIfExists` of `LockCtx` is set, but `ReturnValues`` is not.
+// ErrLockOnlyIfExistsNoReturnValue is used when the flag `LockOnlyIfExists` of `LockCtx` is set, but `ReturnValues` is not.
 type ErrLockOnlyIfExistsNoReturnValue struct {
 	StartTS     uint64
 	ForUpdateTs uint64
diff --git a/integration_tests/store_test.go b/integration_tests/store_test.go
index 44b737a90..8aa7b1aa3 100644
--- a/integration_tests/store_test.go
+++ b/integration_tests/store_test.go
@@ -118,6 +118,9 @@ type checkRequestClient struct {
 
 func (c *checkRequestClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
 	resp, err := c.Client.SendRequest(ctx, addr, req, timeout)
+	if err != nil {
+		return resp, err
+	}
 	if c.priority != req.Priority {
 		if resp.Resp != nil {
 			if getResp, ok := resp.Resp.(*kvrpcpb.GetResponse); ok {
diff --git a/internal/locate/region_cache.go b/internal/locate/region_cache.go
index 76a71cdfb..8482c81d0 100644
--- a/internal/locate/region_cache.go
+++ b/internal/locate/region_cache.go
@@ -126,6 +126,7 @@ type Region struct {
 	syncFlag      int32         // region need be sync in next turn
 	lastAccess    int64         // last region access time, see checkRegionCacheTTL
 	invalidReason InvalidReason // the reason why the region is invalidated
+	asyncReload   int32         // the region need to be reloaded in async mode
 }
 
 // AccessIndex represent the index for accessIndex array
@@ -363,6 +364,8 @@ func (r *Region) isValid() bool {
 	return r != nil && !r.checkNeedReload() && r.checkRegionCacheTTL(time.Now().Unix())
 }
 
+type livenessFunc func(s *Store, bo *retry.Backoffer) livenessState
+
 // RegionCache caches Regions loaded from PD.
 // All public methods of this struct should be thread-safe, unless explicitly pointed out or the method is for testing
 // purposes only.
@@ -395,7 +398,12 @@ type RegionCache struct {
 	testingKnobs struct {
 		// Replace the requestLiveness function for test purpose. Note that in unit tests, if this is not set,
 		// requestLiveness always returns unreachable.
-		mockRequestLiveness func(s *Store, bo *retry.Backoffer) livenessState
+		mockRequestLiveness atomic.Value
+	}
+
+	regionsNeedReload struct {
+		sync.Mutex
+		regions []uint64
 	}
 }
 
@@ -447,8 +455,13 @@ func (c *RegionCache) Close() {
 // asyncCheckAndResolveLoop with
 func (c *RegionCache) asyncCheckAndResolveLoop(interval time.Duration) {
 	ticker := time.NewTicker(interval)
-	defer ticker.Stop()
+	reloadRegionTicker := time.NewTicker(10 * time.Second)
+	defer func() {
+		ticker.Stop()
+		reloadRegionTicker.Stop()
+	}()
 	var needCheckStores []*Store
+	reloadNextLoop := make(map[uint64]struct{})
 	for {
 		needCheckStores = needCheckStores[:0]
 		select {
@@ -466,6 +479,22 @@ func (c *RegionCache) asyncCheckAndResolveLoop(interval time.Duration) {
 				// there's a deleted store in the stores map which guaranteed by reReslve().
 				return state != unresolved && state != tombstone && state != deleted
 			})
+
+		case <-reloadRegionTicker.C:
+			for regionID := range reloadNextLoop {
+				c.reloadRegion(regionID)
+				delete(reloadNextLoop, regionID)
+			}
+			c.regionsNeedReload.Lock()
+			for _, regionID := range c.regionsNeedReload.regions {
+				// will reload in next tick, wait a while for two reasons:
+				// 1. there may an unavailable duration while recreating the connection.
+				// 2. the store may just be started, and wait safe ts synced to avoid the
+				//    possible dataIsNotReady error.
+				reloadNextLoop[regionID] = struct{}{}
+			}
+			c.regionsNeedReload.regions = c.regionsNeedReload.regions[:0]
+			c.regionsNeedReload.Unlock()
 		}
 	}
 }
@@ -967,7 +996,7 @@ func (c *RegionCache) findRegionByKey(bo *retry.Backoffer, key []byte, isEndKey
 		logutil.Eventf(bo.GetCtx(), "load region %d from pd, due to cache-miss", lr.GetID())
 		r = lr
 		c.mu.Lock()
-		c.insertRegionToCache(r)
+		c.insertRegionToCache(r, true)
 		c.mu.Unlock()
 	} else if r.checkNeedReloadAndMarkUpdated() {
 		// load region when it be marked as need reload.
@@ -980,7 +1009,7 @@ func (c *RegionCache) findRegionByKey(bo *retry.Backoffer, key []byte, isEndKey
 		logutil.Eventf(bo.GetCtx(), "load region %d from pd, due to need-reload", lr.GetID())
 		r = lr
 		c.mu.Lock()
-		c.insertRegionToCache(r)
+		c.insertRegionToCache(r, true)
 		c.mu.Unlock()
 	}
 }
@@ -1113,7 +1142,7 @@ func (c *RegionCache) LocateRegionByID(bo *retry.Backoffer, regionID uint64) (*K
 		} else {
 			r = lr
 			c.mu.Lock()
-			c.insertRegionToCache(r)
+			c.insertRegionToCache(r, true)
 			c.mu.Unlock()
 		}
 	}
@@ -1132,7 +1161,7 @@ func (c *RegionCache) LocateRegionByID(bo *retry.Backoffer, regionID uint64) (*K
 	}
 
 	c.mu.Lock()
-	c.insertRegionToCache(r)
+	c.insertRegionToCache(r, true)
 	c.mu.Unlock()
 	return &KeyLocation{
 		Region:   r.VerID(),
@@ -1142,6 +1171,36 @@ func (c *RegionCache) LocateRegionByID(bo *retry.Backoffer, regionID uint64) (*K
 	}, nil
 }
 
+func (c *RegionCache) scheduleReloadRegion(region *Region) {
+	if region == nil || !atomic.CompareAndSwapInt32(&region.asyncReload, 0, 1) {
+		// async reload triggered by other thread.
+		return
+	}
+	regionID := region.GetID()
+	if regionID > 0 {
+		c.regionsNeedReload.Lock()
+		c.regionsNeedReload.regions = append(c.regionsNeedReload.regions, regionID)
+		c.regionsNeedReload.Unlock()
+	}
+}
+
+func (c *RegionCache) reloadRegion(regionID uint64) {
+	bo := retry.NewNoopBackoff(c.ctx)
+	lr, err := c.loadRegionByID(bo, regionID)
+	if err != nil {
+		// ignore error and use old region info.
+		logutil.Logger(bo.GetCtx()).Error("load region failure",
+			zap.Uint64("regionID", regionID), zap.Error(err))
+		if oldRegion := c.getRegionByIDFromCache(regionID); oldRegion != nil {
+			atomic.StoreInt32(&oldRegion.asyncReload, 0)
+		}
+		return
+	}
+	c.mu.Lock()
+	c.insertRegionToCache(lr, false)
+	c.mu.Unlock()
+}
+
 // GroupKeysByRegion separates keys into groups by their belonging Regions.
 // Specially it also returns the first key's region which may be used as the
 // 'PrimaryLockKey' and should be committed ahead of others.
@@ -1226,7 +1285,7 @@ func (c *RegionCache) BatchLoadRegionsWithKeyRange(bo *retry.Backoffer, startKey
 	// TODO(youjiali1995): scanRegions always fetch regions from PD and these regions don't contain buckets information
 	// for less traffic, so newly inserted regions in region cache don't have buckets information. We should improve it.
 	for _, region := range regions {
-		c.insertRegionToCache(region)
+		c.insertRegionToCache(region, true)
 	}
 
 	return
@@ -1300,7 +1359,7 @@ func (c *RegionCache) removeVersionFromCache(oldVer RegionVerID, regionID uint64
 
 // insertRegionToCache tries to insert the Region to cache.
 // It should be protected by c.mu.Lock().
-func (c *RegionCache) insertRegionToCache(cachedRegion *Region) {
+func (c *RegionCache) insertRegionToCache(cachedRegion *Region, invalidateOldRegion bool) {
 	oldRegion := c.mu.sorted.ReplaceOrInsert(cachedRegion)
 	if oldRegion != nil {
 		store := cachedRegion.getStore()
@@ -1315,8 +1374,11 @@ func (c *RegionCache) insertRegionToCache(cachedRegion *Region) {
 		if InvalidReason(atomic.LoadInt32((*int32)(&oldRegion.invalidReason))) == NoLeader {
 			store.workTiKVIdx = (oldRegionStore.workTiKVIdx + 1) % AccessIndex(store.accessStoreNum(tiKVOnly))
 		}
-		// Invalidate the old region in case it's not invalidated and some requests try with the stale region information.
-		oldRegion.invalidate(Other)
+		// If the region info is async reloaded, the old region is still valid.
+		if invalidateOldRegion {
+			// Invalidate the old region in case it's not invalidated and some requests try with the stale region information.
+			oldRegion.invalidate(Other)
+		}
 		// Don't refresh TiFlash work idx for region. Otherwise, it will always goto a invalid store which
 		// is under transferring regions.
 		store.workTiFlashIdx.Store(oldRegionStore.workTiFlashIdx.Load())
@@ -1804,7 +1866,7 @@ func (c *RegionCache) OnRegionEpochNotMatch(bo *retry.Backoffer, ctx *RPCContext
 
 	c.mu.Lock()
 	for _, region := range newRegions {
-		c.insertRegionToCache(region)
+		c.insertRegionToCache(region, true)
 	}
 	c.mu.Unlock()
 
@@ -1918,7 +1980,7 @@ func (c *RegionCache) UpdateBucketsIfNeeded(regionID RegionVerID, latestBucketsV
 			return
 		}
 		c.mu.Lock()
-		c.insertRegionToCache(new)
+		c.insertRegionToCache(new, true)
 		c.mu.Unlock()
 	}()
 }
@@ -2371,9 +2433,8 @@ func (s *Store) reResolve(c *RegionCache) (bool, error) {
 }
 
 func (s *Store) getResolveState() resolveState {
-	var state resolveState
 	if s == nil {
-		return state
+		return unresolved
 	}
 	return resolveState(atomic.LoadUint64(&s.state))
 }
@@ -2544,8 +2605,12 @@ func (s *Store) requestLiveness(bo *retry.Backoffer, c *RegionCache) (l liveness
 			return unknown
 		}
 	}
-	if c != nil && c.testingKnobs.mockRequestLiveness != nil {
-		return c.testingKnobs.mockRequestLiveness(s, bo)
+
+	if c != nil {
+		lf := c.testingKnobs.mockRequestLiveness.Load()
+		if lf != nil {
+			return (*lf.(*livenessFunc))(s, bo)
+		}
 	}
 	if storeLivenessTimeout == 0 {
diff --git a/internal/locate/region_cache_test.go b/internal/locate/region_cache_test.go
index 423b8ca5b..caefb2ae5 100644
--- a/internal/locate/region_cache_test.go
+++ b/internal/locate/region_cache_test.go
@@ -964,7 +964,7 @@ func (s *testRegionCacheSuite) TestRegionEpochAheadOfTiKV() {
 	region := createSampleRegion([]byte("k1"), []byte("k2"))
 	region.meta.Id = 1
 	region.meta.RegionEpoch = &metapb.RegionEpoch{Version: 10, ConfVer: 10}
-	cache.insertRegionToCache(region)
+	cache.insertRegionToCache(region, true)
 
 	r1 := metapb.Region{Id: 1, RegionEpoch: &metapb.RegionEpoch{Version: 9, ConfVer: 10}}
 	r2 := metapb.Region{Id: 1, RegionEpoch: &metapb.RegionEpoch{Version: 10, ConfVer: 9}}
@@ -1255,7 +1255,7 @@ func (s *testRegionCacheSuite) TestPeersLenChange() {
 	filterUnavailablePeers(cpRegion)
 	region, err := newRegion(s.bo, s.cache, cpRegion)
 	s.Nil(err)
-	s.cache.insertRegionToCache(region)
+	s.cache.insertRegionToCache(region, true)
 
 	// OnSendFail should not panic
 	s.cache.OnSendFail(retry.NewNoopBackoff(context.Background()), ctx, false, errors.New("send fail"))
@@ -1428,12 +1428,12 @@ func (s *testRegionCacheSuite) TestBuckets() {
 	fakeRegion.setStore(cachedRegion.getStore().clone())
 	// no buckets
 	fakeRegion.getStore().buckets = nil
-	s.cache.insertRegionToCache(fakeRegion)
+	s.cache.insertRegionToCache(fakeRegion, true)
 	cachedRegion = s.getRegion([]byte("a"))
 	s.Equal(defaultBuckets, cachedRegion.getStore().buckets)
 	// stale buckets
 	fakeRegion.getStore().buckets = &metapb.Buckets{Version: defaultBuckets.Version - 1}
-	s.cache.insertRegionToCache(fakeRegion)
+	s.cache.insertRegionToCache(fakeRegion, true)
 	cachedRegion = s.getRegion([]byte("a"))
 	s.Equal(defaultBuckets, cachedRegion.getStore().buckets)
 	// new buckets
@@ -1443,7 +1443,7 @@ func (s *testRegionCacheSuite) TestBuckets() {
 		Keys:    buckets.Keys,
 	}
 	fakeRegion.getStore().buckets = newBuckets
-	s.cache.insertRegionToCache(fakeRegion)
+	s.cache.insertRegionToCache(fakeRegion, true)
 	cachedRegion = s.getRegion([]byte("a"))
 	s.Equal(newBuckets, cachedRegion.getStore().buckets)
 
@@ -1576,7 +1576,7 @@ func (s *testRegionCacheSuite) TestRemoveIntersectingRegions() {
 	region, err := s.cache.loadRegion(s.bo, []byte("c"), false)
 	s.Nil(err)
 	s.Equal(region.GetID(), regions[0])
-	s.cache.insertRegionToCache(region)
+	s.cache.insertRegionToCache(region, true)
 	loc, err = s.cache.LocateKey(s.bo, []byte{'c'})
 	s.Nil(err)
 	s.Equal(loc.Region.GetID(), regions[0])
@@ -1587,7 +1587,7 @@ func (s *testRegionCacheSuite) TestRemoveIntersectingRegions() {
 	region, err = s.cache.loadRegion(s.bo, []byte("e"), false)
 	s.Nil(err)
 	s.Equal(region.GetID(), regions[0])
-	s.cache.insertRegionToCache(region)
+	s.cache.insertRegionToCache(region, true)
 	loc, err = s.cache.LocateKey(s.bo, []byte{'e'})
 	s.Nil(err)
 	s.Equal(loc.Region.GetID(), regions[0])
diff --git a/internal/locate/region_request.go b/internal/locate/region_request.go
index 2479b296c..7664bd4f8 100644
--- a/internal/locate/region_request.go
+++ b/internal/locate/region_request.go
@@ -551,13 +551,23 @@ func (state *accessFollower) next(bo *retry.Backoffer, selector *replicaSelector
 		state.lastIdx++
 	}
 
+	reloadRegion := false
 	for i := 0; i < len(selector.replicas) && !state.option.leaderOnly; i++ {
 		idx := AccessIndex((int(state.lastIdx) + i) % len(selector.replicas))
-		if state.isCandidate(idx, selector.replicas[idx]) {
+		selectReplica := selector.replicas[idx]
+		if state.isCandidate(idx, selectReplica) {
 			state.lastIdx = idx
 			selector.targetIdx = idx
 			break
 		}
+		if selectReplica.isEpochStale() &&
+			selectReplica.store.getResolveState() == resolved &&
+			selectReplica.store.getLivenessState() == reachable {
+			reloadRegion = true
+		}
+	}
+	if reloadRegion {
+		selector.regionCache.scheduleReloadRegion(selector.region)
 	}
 	// If there is no candidate, fallback to the leader.
 	if selector.targetIdx < 0 {
diff --git a/internal/locate/region_request3_test.go b/internal/locate/region_request3_test.go
index 34cd7e856..752e30dac 100644
--- a/internal/locate/region_request3_test.go
+++ b/internal/locate/region_request3_test.go
@@ -164,12 +164,13 @@ func (s *testRegionRequestToThreeStoresSuite) TestForwarding() {
 		return innerClient.SendRequest(ctx, addr, req, timeout)
 	}}
 	var storeState = uint32(unreachable)
-	s.regionRequestSender.regionCache.testingKnobs.mockRequestLiveness = func(s *Store, bo *retry.Backoffer) livenessState {
+	tf := func(s *Store, bo *retry.Backoffer) livenessState {
 		if s.addr == leaderAddr {
 			return livenessState(atomic.LoadUint32(&storeState))
 		}
 		return reachable
 	}
+	s.regionRequestSender.regionCache.testingKnobs.mockRequestLiveness.Store((*livenessFunc)(&tf))
 
 	loc, err := s.regionRequestSender.regionCache.LocateKey(bo, []byte("k"))
 	s.Nil(err)
@@ -298,21 +299,21 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector() {
 	region = &Region{
 		meta: region.GetMeta(),
 	}
-	region.lastAccess = time.Now().Unix()
+	atomic.StoreInt64(&region.lastAccess, time.Now().Unix())
 	region.meta.Peers = append(region.meta.Peers, peer)
 	atomic.StorePointer(&region.store, unsafe.Pointer(regionStore))
 
 	cache := NewRegionCache(s.cache.pdClient)
 	defer cache.Close()
 	cache.mu.Lock()
-	cache.insertRegionToCache(region)
+	cache.insertRegionToCache(region, true)
 	cache.mu.Unlock()
 
 	// Verify creating the replicaSelector.
 	replicaSelector, err := newReplicaSelector(cache, regionLoc.Region, req)
 	s.NotNil(replicaSelector)
 	s.Nil(err)
-	s.Equal(replicaSelector.region, region)
+	s.Equal(replicaSelector.region, region, true)
 	// Should only contain TiKV stores.
 	s.Equal(len(replicaSelector.replicas), regionStore.accessStoreNum(tiKVOnly))
 	s.Equal(len(replicaSelector.replicas), len(regionStore.stores)-1)
@@ -373,13 +374,14 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector() {
 	s.False(replicaSelector.region.isValid())
 
 	// Test switching to tryFollower if leader is unreachable
-	region.lastAccess = time.Now().Unix()
+	atomic.StoreInt64(&region.lastAccess, time.Now().Unix())
 	replicaSelector, err = newReplicaSelector(cache, regionLoc.Region, req)
 	s.Nil(err)
 	s.NotNil(replicaSelector)
-	cache.testingKnobs.mockRequestLiveness = func(s *Store, bo *retry.Backoffer) livenessState {
+	tf := func(s *Store, bo *retry.Backoffer) livenessState {
 		return unreachable
 	}
+	cache.testingKnobs.mockRequestLiveness.Store((*livenessFunc)(&tf))
 	s.IsType(&accessKnownLeader{}, replicaSelector.state)
 	_, err = replicaSelector.next(s.bo)
 	s.Nil(err)
@@ -415,9 +417,11 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector() {
 	// Do not try to use proxy if livenessState is unknown instead of unreachable.
 	refreshEpochs(regionStore)
 	cache.enableForwarding = true
-	cache.testingKnobs.mockRequestLiveness = func(s *Store, bo *retry.Backoffer) livenessState {
+	tf = func(s *Store, bo *retry.Backoffer) livenessState {
 		return unknown
 	}
+	cache.testingKnobs.mockRequestLiveness.Store(
+		(*livenessFunc)(&tf))
 	replicaSelector, err = newReplicaSelector(cache, regionLoc.Region, req)
 	s.Nil(err)
 	s.NotNil(replicaSelector)
@@ -439,9 +443,10 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector() {
 	replicaSelector, err = newReplicaSelector(cache, regionLoc.Region, req)
 	s.Nil(err)
 	s.NotNil(replicaSelector)
-	cache.testingKnobs.mockRequestLiveness = func(s *Store, bo *retry.Backoffer) livenessState {
+	tf = func(s *Store, bo *retry.Backoffer) livenessState {
 		return unreachable
 	}
+	cache.testingKnobs.mockRequestLiveness.Store((*livenessFunc)(&tf))
 	s.Eventually(func() bool {
 		return regionStore.stores[regionStore.workTiKVIdx].getLivenessState() == unreachable
 	}, 3*time.Second, 200*time.Millisecond)
@@ -558,7 +563,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector() {
 	assertRPCCtxEqual(rpcCtx, replicaSelector.replicas[regionStore.workTiKVIdx], nil)
 
 	// Test accessFollower state filtering label-not-match stores.
-	region.lastAccess = time.Now().Unix()
+	atomic.StoreInt64(&region.lastAccess, time.Now().Unix())
 	refreshEpochs(regionStore)
 	labels := []*metapb.StoreLabel{
 		{
@@ -580,7 +585,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector() {
 	}
 
 	// Test accessFollower state with leaderOnly option
-	region.lastAccess = time.Now().Unix()
+	atomic.StoreInt64(&region.lastAccess, time.Now().Unix())
 	refreshEpochs(regionStore)
 	for i := 0; i < 5; i++ {
 		replicaSelector, err = newReplicaSelector(cache, regionLoc.Region, req, WithLeaderOnly())
@@ -593,7 +598,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector() {
 	}
 
 	// Test accessFollower state with kv.ReplicaReadMixed request type.
-	region.lastAccess = time.Now().Unix()
+	atomic.StoreInt64(&region.lastAccess, time.Now().Unix())
 	refreshEpochs(regionStore)
 	req.ReplicaReadType = kv.ReplicaReadMixed
 	replicaSelector, err = newReplicaSelector(cache, regionLoc.Region, req)
 	s.NotNil(replicaSelector)
 	s.Nil(err)
 
 	// Invalidate the region if the leader is not in the region.
-	region.lastAccess = time.Now().Unix()
+	atomic.StoreInt64(&region.lastAccess, time.Now().Unix())
 	replicaSelector.updateLeader(&metapb.Peer{Id: s.cluster.AllocID(), StoreId: s.cluster.AllocID()})
 	s.False(region.isValid())
 	// Don't try next replica if the region is invalidated.
@@ -695,9 +700,11 @@ func (s *testRegionRequestToThreeStoresSuite) TestSendReqWithReplicaSelector() {
 	s.cluster.ChangeLeader(s.regionID, s.peerIDs[0])
 
 	// The leader store is alive but can't provide service.
-	s.regionRequestSender.regionCache.testingKnobs.mockRequestLiveness = func(s *Store, bo *retry.Backoffer) livenessState {
+
+	tf := func(s *Store, bo *retry.Backoffer) livenessState {
 		return reachable
 	}
+	s.regionRequestSender.regionCache.testingKnobs.mockRequestLiveness.Store((*livenessFunc)(&tf))
 	s.Eventually(func() bool {
 		stores := s.regionRequestSender.replicaSelector.regionStore.stores
 		return stores[0].getLivenessState() == reachable &&
@@ -823,9 +830,10 @@ func (s *testRegionRequestToThreeStoresSuite) TestSendReqWithReplicaSelector() {
 	}
 
 	// Runs out of all replicas and then returns a send error.
-	s.regionRequestSender.regionCache.testingKnobs.mockRequestLiveness = func(s *Store, bo *retry.Backoffer) livenessState {
+	tf = func(s *Store, bo *retry.Backoffer) livenessState {
 		return unreachable
 	}
+	s.regionRequestSender.regionCache.testingKnobs.mockRequestLiveness.Store((*livenessFunc)(&tf))
 	reloadRegion()
 	for _, store := range s.storeIDs {
 		s.cluster.StopStore(store)
diff --git a/internal/locate/region_request_test.go b/internal/locate/region_request_test.go
index 8692acc70..8023ba554 100644
--- a/internal/locate/region_request_test.go
+++ b/internal/locate/region_request_test.go
@@ -574,7 +574,7 @@ func (s *testRegionRequestToSingleStoreSuite) TestGetRegionByIDFromCache() {
 	v2 := region.Region.confVer + 1
 	r2 := metapb.Region{Id: region.Region.id, RegionEpoch: &metapb.RegionEpoch{Version: region.Region.ver, ConfVer: v2}, StartKey: []byte{1}}
 	st := &Store{storeID: s.store}
-	s.cache.insertRegionToCache(&Region{meta: &r2, store: unsafe.Pointer(st), lastAccess: time.Now().Unix()})
+	s.cache.insertRegionToCache(&Region{meta: &r2, store: unsafe.Pointer(st), lastAccess: time.Now().Unix()}, true)
 	region, err = s.cache.LocateRegionByID(s.bo, s.region)
 	s.Nil(err)
 	s.NotNil(region)
@@ -584,7 +584,7 @@ func (s *testRegionRequestToSingleStoreSuite) TestGetRegionByIDFromCache() {
 	v3 := region.Region.confVer + 1
 	r3 := metapb.Region{Id: region.Region.id, RegionEpoch: &metapb.RegionEpoch{Version: v3, ConfVer: region.Region.confVer}, StartKey: []byte{2}}
 	st = &Store{storeID: s.store}
-	s.cache.insertRegionToCache(&Region{meta: &r3, store: unsafe.Pointer(st), lastAccess: time.Now().Unix()})
+	s.cache.insertRegionToCache(&Region{meta: &r3, store: unsafe.Pointer(st), lastAccess: time.Now().Unix()}, true)
 	region, err = s.cache.LocateRegionByID(s.bo, s.region)
 	s.Nil(err)
 	s.NotNil(region)
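
The core mechanism this patch adds is a deferred, deduplicated region-reload queue: accessFollower.next calls scheduleReloadRegion when it sees a stale-epoch replica on a resolved, reachable store, the region ID is appended to regionsNeedReload under its mutex, and asyncCheckAndResolveLoop drains that slice on a 10-second ticker, postponing the actual reload by one more tick. Below is a minimal, self-contained Go sketch of that queue-then-drain pattern; the reloader type, its method names, and the plain reload func(uint64) callback are illustrative assumptions, not client-go's actual API.

package main

import (
	"fmt"
	"sync"
	"time"
)

// reloader queues IDs and reloads them two ticks later, mirroring the
// patch's "wait a while" comment: a freshly restarted store needs time to
// re-establish connections and sync its safe ts.
type reloader struct {
	mu      sync.Mutex
	pending []uint64 // IDs scheduled since the last tick
}

func (r *reloader) schedule(id uint64) {
	r.mu.Lock()
	r.pending = append(r.pending, id)
	r.mu.Unlock()
}

func (r *reloader) loop(interval time.Duration, reload func(uint64), stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	next := make(map[uint64]struct{}) // reload these on the next tick, not immediately
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			// First drain what was deferred on the previous tick...
			for id := range next {
				reload(id)
				delete(next, id)
			}
			// ...then move newly scheduled IDs into the deferred set.
			r.mu.Lock()
			for _, id := range r.pending {
				next[id] = struct{}{}
			}
			r.pending = r.pending[:0]
			r.mu.Unlock()
		}
	}
}

func main() {
	var r reloader
	stop := make(chan struct{})
	go r.loop(100*time.Millisecond, func(id uint64) { fmt.Println("reload region", id) }, stop)
	r.schedule(42)
	time.Sleep(350 * time.Millisecond) // enough for two ticks: defer, then reload
	close(stop)
}

In the patch itself the queueing is additionally guarded by the region's asyncReload flag (a CompareAndSwapInt32), so a region is scheduled at most once until the reload completes; the sketch leaves that out for brevity.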
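
A second, smaller change is how tests inject a fake liveness check: the mockRequestLiveness knob moves from a plain function field to an atomic.Value holding a *livenessFunc, so a test can swap the hook while request goroutines read it concurrently (the data-race fix folded in from #736). The sketch below shows the same store-a-typed-function-pointer pattern with a deliberately simplified signature; the string-based livenessFunc and the setHook/liveness names are assumptions for illustration, not the real API, whose hook takes a *Store and a *retry.Backoffer.

package main

import (
	"fmt"
	"sync/atomic"
)

// livenessFunc is the hook type; storing a *livenessFunc keeps the concrete
// type held by the atomic.Value consistent across every Store call.
type livenessFunc func(addr string) string

var hook atomic.Value // holds *livenessFunc, nil until a test installs one

func setHook(f livenessFunc) { hook.Store(&f) }

func liveness(addr string) string {
	if v := hook.Load(); v != nil {
		return (*v.(*livenessFunc))(addr) // assert back to *livenessFunc and call it
	}
	return "reachable" // default behavior when no hook is installed
}

func main() {
	fmt.Println(liveness("store-1")) // reachable
	setHook(func(addr string) string { return "unreachable" })
	fmt.Println(liveness("store-1")) // unreachable
}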