schedule: fix panic caused by removing tombstone (#2015) (#2038)
Signed-off-by: Ryan Leung <rleungx@gmail.com>
rleungx authored and disksing committed Dec 19, 2019
1 parent 175f710 commit d9c97ed
Showing 2 changed files with 51 additions and 0 deletions.
1 change: 1 addition & 0 deletions server/schedule/operator_controller.go
@@ -773,5 +773,6 @@ func (oc *OperatorController) GetAllStoresLimit() map[uint64]float64 {
 func (oc *OperatorController) RemoveStoreLimit(storeID uint64) {
 	oc.Lock()
 	defer oc.Unlock()
+	oc.cluster.AttachOverloadStatus(storeID, nil)
 	delete(oc.storesLimit, storeID)
 }
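
The fix itself is the single added line: the store's overload status is detached before its limit entry is deleted, both inside the same critical section. A minimal, self-contained sketch of that pattern (illustrative names only, not PD's actual types or API):

package main

import (
	"fmt"
	"sync"
)

// storeLimiter stands in for a per-store rate limiter kept by the controller.
type storeLimiter struct{ rate float64 }

// controller mimics the locking pattern: per-store limiters plus per-store
// status probes that other goroutines may consult.
type controller struct {
	sync.Mutex
	storesLimit map[uint64]*storeLimiter
	overload    map[uint64]func() bool
}

// attachOverloadStatus installs (or, with nil, clears) a store's status probe.
// Callers are expected to hold the lock when there is concurrent access.
func (c *controller) attachOverloadStatus(storeID uint64, f func() bool) {
	if f == nil {
		delete(c.overload, storeID)
		return
	}
	c.overload[storeID] = f
}

// removeStoreLimit clears the status probe and the limiter in one critical
// section, so no reader can observe a probe that refers to a removed limiter.
func (c *controller) removeStoreLimit(storeID uint64) {
	c.Lock()
	defer c.Unlock()
	c.attachOverloadStatus(storeID, nil) // analogous to the line added in the diff
	delete(c.storesLimit, storeID)
}

func main() {
	c := &controller{
		storesLimit: map[uint64]*storeLimiter{4: {rate: 1000}},
		overload:    map[uint64]func() bool{},
	}
	lim := c.storesLimit[4]
	c.attachOverloadStatus(4, func() bool { return lim.rate <= 0 })
	c.removeStoreLimit(4) // e.g. store 4 became tombstone and was removed
	fmt.Println(len(c.storesLimit), len(c.overload)) // prints: 0 0
}

The regression test below exercises roughly this interleaving: one goroutine calls RemoveStoreLimit(4) while the main goroutine keeps checking regions and adding and removing the resulting operators.
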
50 changes: 50 additions & 0 deletions server/schedule/operator_controller_test.go
@@ -15,6 +15,7 @@ package schedule

 import (
 	"container/heap"
+	"fmt"
 	"sync"
 	"time"

@@ -26,6 +27,7 @@ import (
 	"github.com/pingcap/pd/pkg/mock/mockhbstream"
 	"github.com/pingcap/pd/pkg/mock/mockoption"
 	"github.com/pingcap/pd/server/core"
+	"github.com/pingcap/pd/server/namespace"
 )

 var _ = Suite(&testOperatorControllerSuite{})
@@ -203,3 +205,51 @@ func (t *testOperatorControllerSuite) TestPollDispatchRegion(c *C) {
 	c.Assert(r, IsNil)
 	c.Assert(next, IsFalse)
 }
+
+func (t *testOperatorControllerSuite) TestRemoveTombstone(c *C) {
+	var mu sync.RWMutex
+	cfg := mockoption.NewScheduleOptions()
+	cfg.StoreBalanceRate = 1000
+	cfg.LocationLabels = []string{"zone", "rack"}
+	tc := mockcluster.NewCluster(cfg)
+	rc := NewReplicaChecker(tc, namespace.DefaultClassifier)
+	oc := NewOperatorController(tc, mockhbstream.NewHeartbeatStream())
+
+	tc.AddLabelsStore(1, 100, map[string]string{"zone": "zone1", "rack": "rack1"})
+	tc.AddLabelsStore(2, 100, map[string]string{"zone": "zone1", "rack": "rack1"})
+	tc.AddLabelsStore(3, 100, map[string]string{"zone": "zone2", "rack": "rack1"})
+	tc.AddLabelsStore(4, 10, map[string]string{"zone": "zone3", "rack": "rack1"})
+	peers := []*metapb.Peer{
+		{Id: 4, StoreId: 1},
+		{Id: 5, StoreId: 2},
+		{Id: 6, StoreId: 3},
+	}
+	regions := make([]*core.RegionInfo, 100)
+	for i := 2; i < 20; i++ {
+		r := core.NewRegionInfo(&metapb.Region{
+			Id:       uint64(i),
+			StartKey: []byte(fmt.Sprintf("%20d", i)),
+			EndKey:   []byte(fmt.Sprintf("%20d", i+1)),
+			Peers:    peers}, peers[0], core.SetApproximateSize(50*(1<<20)))
+		regions[i] = r
+		tc.PutRegion(r)
+	}
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		time.Sleep(100 * time.Millisecond)
+		mu.Lock()
+		defer mu.Unlock()
+		oc.RemoveStoreLimit(4)
+	}()
+	for i := 2; i < 20; i++ {
+		time.Sleep(10 * time.Millisecond)
+		mu.Lock()
+		op := rc.Check(regions[i])
+		mu.Unlock()
+		oc.AddOperator(op)
+		oc.RemoveOperator(op)
+	}
+	wg.Wait()
+}
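
Assuming the package wires its suites through gocheck's TestingT as the existing tests do, the new case can be run on its own with something like go test ./server/schedule -check.f TestRemoveTombstone (the -check.f filter belongs to gocheck, not to go test itself).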
