Skip to content

Commit

Permalink
*: check if GetStore returns nil (tikv#4347) (tikv#4378)
Browse files Browse the repository at this point in the history
* This is an automated cherry-pick of tikv#4347

Signed-off-by: ti-chi-bot <ti-community-prow-bot@tidb.io>

* fix conflicts

Signed-off-by: disksing <i@disksing.com>
  • Loading branch information
ti-chi-bot authored Nov 30, 2021
1 parent b2c3de6 commit 3a23716
Show file tree
Hide file tree
Showing 3 changed files with 15 additions and 2 deletions.
9 changes: 9 additions & 0 deletions server/schedule/region_scatterer.go
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,9 @@ func (r *RegionScatterer) scatterRegion(region *core.RegionInfo, group string) *
// Group peers by the engine of their stores
for _, peer := range region.GetPeers() {
store := r.cluster.GetStore(peer.GetStoreId())
if store == nil {
return nil
}
if ordinaryFilter.Target(r.cluster, store) {
ordinaryPeers[peer.GetId()] = peer
} else {
Expand Down Expand Up @@ -415,6 +418,9 @@ func (r *RegionScatterer) selectAvailableLeaderStores(group string, peers map[ui
leaderCandidateStores := make([]uint64, 0)
for storeID := range peers {
store := r.cluster.GetStore(storeID)
if store == nil {
return 0
}
engine := store.GetLabelValue(filter.EngineKey)
if len(engine) < 1 {
leaderCandidateStores = append(leaderCandidateStores, storeID)
Expand All @@ -439,6 +445,9 @@ func (r *RegionScatterer) Put(peers map[uint64]*metapb.Peer, leaderStoreID uint6
for _, peer := range peers {
storeID := peer.GetStoreId()
store := r.cluster.GetStore(storeID)
if store == nil {
continue
}
if ordinaryFilter.Target(r.cluster, store) {
r.ordinaryEngine.selectedPeer.Put(storeID, group)
scatterDistributionCounter.WithLabelValues(
Expand Down
6 changes: 5 additions & 1 deletion server/schedulers/shuffle_region.go
Original file line number Diff line number Diff line change
Expand Up @@ -159,11 +159,15 @@ func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster opt.Cluster) (*core.
}

func (s *shuffleRegionScheduler) scheduleAddPeer(cluster opt.Cluster, region *core.RegionInfo, oldPeer *metapb.Peer) *metapb.Peer {
store := cluster.GetStore(oldPeer.GetStoreId())
if store == nil {
return nil
}
var scoreGuard filter.Filter
if cluster.IsPlacementRulesEnabled() {
scoreGuard = filter.NewRuleFitFilter(s.GetName(), cluster, region, oldPeer.GetStoreId())
} else {
scoreGuard = filter.NewDistinctScoreFilter(s.GetName(), cluster.GetLocationLabels(), cluster.GetRegionStores(region), cluster.GetStore(oldPeer.GetStoreId()))
scoreGuard = filter.NewDistinctScoreFilter(s.GetName(), cluster.GetLocationLabels(), cluster.GetRegionStores(region), store)
}
excludedFilter := filter.NewExcludedFilter(s.GetName(), nil, region.GetStoreIds())

Expand Down
2 changes: 1 addition & 1 deletion server/statistics/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,7 @@ func (s *StoresStats) GetStoresKeysReadStat() map[uint64]float64 {

// storeIsUnhealthy reports whether the store identified by storeID should be
// treated as unhealthy for statistics purposes. A store is unhealthy when it
// cannot be found in the cluster (GetStore returns nil — e.g. it was removed),
// when it is tombstoned, or when its health check fails.
//
// NOTE(review): the nil check must come first; GetStore may return nil for an
// unknown store ID and dereferencing it would panic (tikv/pd#4347).
func (s *StoresStats) storeIsUnhealthy(cluster core.StoreSetInformer, storeID uint64) bool {
	store := cluster.GetStore(storeID)
	return store == nil || store.IsTombstone() || store.IsUnhealth()
}

// FilterUnhealthyStore filter unhealthy store
Expand Down

0 comments on commit 3a23716

Please sign in to comment.