diff --git a/pkg/schedule/checker/merge_checker_test.go b/pkg/schedule/checker/merge_checker_test.go index 2ce871f65de..28dd203d502 100644 --- a/pkg/schedule/checker/merge_checker_test.go +++ b/pkg/schedule/checker/merge_checker_test.go @@ -32,6 +32,7 @@ import ( "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/versioninfo" "go.uber.org/goleak" @@ -251,7 +252,7 @@ func (suite *mergeCheckerTestSuite) TestMatchPeers() { // partial store overlap not including leader ops := suite.mc.Check(suite.regions[2]) suite.NotNil(ops) - testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ + operatorutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 1}, operator.PromoteLearner{ToStore: 1}, operator.RemovePeer{FromStore: 2}, @@ -265,7 +266,7 @@ func (suite *mergeCheckerTestSuite) TestMatchPeers() { IsPassive: false, }, }) - testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ + operatorutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ FromRegion: suite.regions[2].GetMeta(), ToRegion: suite.regions[1].GetMeta(), @@ -285,7 +286,7 @@ func (suite *mergeCheckerTestSuite) TestMatchPeers() { suite.regions[2] = newRegion suite.cluster.PutRegion(suite.regions[2]) ops = suite.mc.Check(suite.regions[2]) - testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ + operatorutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 4}, operator.PromoteLearner{ToStore: 4}, operator.RemovePeer{FromStore: 6}, @@ -295,7 +296,7 @@ func (suite *mergeCheckerTestSuite) TestMatchPeers() { IsPassive: false, }, }) - testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ + operatorutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ FromRegion: suite.regions[2].GetMeta(), ToRegion: suite.regions[1].GetMeta(), @@ -311,14 +312,14 @@ func (suite *mergeCheckerTestSuite) TestMatchPeers() { })) suite.cluster.PutRegion(suite.regions[2]) ops = suite.mc.Check(suite.regions[2]) - testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ + operatorutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.MergeRegion{ FromRegion: suite.regions[2].GetMeta(), ToRegion: suite.regions[1].GetMeta(), IsPassive: false, }, }) - testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ + operatorutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ FromRegion: suite.regions[2].GetMeta(), ToRegion: suite.regions[1].GetMeta(), @@ -334,7 +335,7 @@ func (suite *mergeCheckerTestSuite) TestMatchPeers() { }), core.WithLeader(&metapb.Peer{Id: 109, StoreId: 2})) suite.cluster.PutRegion(suite.regions[2]) ops = suite.mc.Check(suite.regions[2]) - testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ + operatorutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 1}, operator.PromoteLearner{ToStore: 1}, operator.RemovePeer{FromStore: 3}, @@ -351,7 +352,7 @@ func (suite *mergeCheckerTestSuite) TestMatchPeers() { IsPassive: false, }, }) - testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ + operatorutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ FromRegion: suite.regions[2].GetMeta(), ToRegion: suite.regions[1].GetMeta(), @@ -370,7 +371,7 @@ func (suite *mergeCheckerTestSuite) 
TestMatchPeers() { ) suite.cluster.PutRegion(suite.regions[1]) ops = suite.mc.Check(suite.regions[2]) - testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ + operatorutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 1}, operator.PromoteLearner{ToStore: 1}, operator.RemovePeer{FromStore: 3}, @@ -390,7 +391,7 @@ func (suite *mergeCheckerTestSuite) TestMatchPeers() { IsPassive: false, }, }) - testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ + operatorutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ FromRegion: suite.regions[2].GetMeta(), ToRegion: suite.regions[1].GetMeta(), @@ -417,7 +418,7 @@ func (suite *mergeCheckerTestSuite) TestMatchPeers() { ) suite.cluster.PutRegion(suite.regions[1]) ops = suite.mc.Check(suite.regions[2]) - testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ + operatorutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 1}, operator.PromoteLearner{ToStore: 1}, operator.RemovePeer{FromStore: 3}, @@ -431,7 +432,7 @@ func (suite *mergeCheckerTestSuite) TestMatchPeers() { IsPassive: false, }, }) - testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ + operatorutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ FromRegion: suite.regions[2].GetMeta(), ToRegion: suite.regions[1].GetMeta(), diff --git a/pkg/schedule/checker/replica_checker_test.go b/pkg/schedule/checker/replica_checker_test.go index 454937bd3dd..8975b8ec578 100644 --- a/pkg/schedule/checker/replica_checker_test.go +++ b/pkg/schedule/checker/replica_checker_test.go @@ -29,7 +29,7 @@ import ( "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/operator" - "github.com/tikv/pd/pkg/utils/testutil" + "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/versioninfo" ) @@ -219,7 +219,7 @@ func (suite *replicaCheckerTestSuite) TestBasic() { // Region has 2 peers, we need to add a new peer. region := tc.GetRegion(1) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 4) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 4) // Disable make up replica feature. tc.SetEnableMakeUpReplica(false) @@ -229,17 +229,17 @@ func (suite *replicaCheckerTestSuite) TestBasic() { // Test healthFilter. // If store 4 is down, we add to store 3. tc.SetStoreDown(4) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3) tc.SetStoreUp(4) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 4) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 4) // Test snapshotCountFilter. // If snapshotCount > MaxSnapshotCount, we add to store 3. tc.UpdateSnapshotCount(4, 3) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3) // If snapshotCount < MaxSnapshotCount, we can add peer again. tc.UpdateSnapshotCount(4, 1) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 4) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 4) // Add peer in store 4, and we have enough replicas. 
peer4, _ := tc.AllocPeer(4) @@ -249,7 +249,7 @@ func (suite *replicaCheckerTestSuite) TestBasic() { // Add peer in store 3, and we have redundant replicas. peer3, _ := tc.AllocPeer(3) region = region.Clone(core.WithAddPeer(peer3)) - testutil.CheckRemovePeer(suite.Require(), rc.Check(region), 1) + operatorutil.CheckRemovePeer(suite.Require(), rc.Check(region), 1) // Disable remove extra replica feature. tc.SetEnableRemoveExtraReplica(false) @@ -266,13 +266,13 @@ func (suite *replicaCheckerTestSuite) TestBasic() { } region = region.Clone(core.WithDownPeers(append(region.GetDownPeers(), downPeer))) - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2, 1) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2, 1) region = region.Clone(core.WithDownPeers(nil)) suite.Nil(rc.Check(region)) // Peer in store 3 is offline, transfer peer to store 1. tc.SetStoreOffline(3) - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3, 1) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3, 1) } func (suite *replicaCheckerTestSuite) TestLostStore() { @@ -311,36 +311,36 @@ func (suite *replicaCheckerTestSuite) TestOffline() { region := tc.GetRegion(1) // Store 2 has different zone and smallest region score. - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2) peer2, _ := tc.AllocPeer(2) region = region.Clone(core.WithAddPeer(peer2)) // Store 3 has different zone and smallest region score. - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3) peer3, _ := tc.AllocPeer(3) region = region.Clone(core.WithAddPeer(peer3)) // Store 4 has the same zone with store 3 and larger region score. peer4, _ := tc.AllocPeer(4) region = region.Clone(core.WithAddPeer(peer4)) - testutil.CheckRemovePeer(suite.Require(), rc.Check(region), 4) + operatorutil.CheckRemovePeer(suite.Require(), rc.Check(region), 4) // Test offline // the number of region peers more than the maxReplicas // remove the peer tc.SetStoreOffline(3) - testutil.CheckRemovePeer(suite.Require(), rc.Check(region), 3) + operatorutil.CheckRemovePeer(suite.Require(), rc.Check(region), 3) region = region.Clone(core.WithRemoveStorePeer(4)) // the number of region peers equals the maxReplicas // Transfer peer to store 4. - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3, 4) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3, 4) // Store 5 has a same label score with store 4, but the region score smaller than store 4, we will choose store 5. tc.AddLabelsStore(5, 3, map[string]string{"zone": "z4", "rack": "r1", "host": "h1"}) - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3, 5) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3, 5) // Store 5 has too many snapshots, choose store 4 tc.UpdateSnapshotCount(5, 100) - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3, 4) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3, 4) tc.UpdatePendingPeerCount(4, 100) suite.Nil(rc.Check(region)) } @@ -360,49 +360,49 @@ func (suite *replicaCheckerTestSuite) TestDistinctScore() { // We need 3 replicas. 
tc.AddLeaderRegion(1, 1) region := tc.GetRegion(1) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2) peer2, _ := tc.AllocPeer(2) region = region.Clone(core.WithAddPeer(peer2)) // Store 1,2,3 have the same zone, rack, and host. tc.AddLabelsStore(3, 5, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 3) // Store 4 has smaller region score. tc.AddLabelsStore(4, 4, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 4) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 4) // Store 5 has a different host. tc.AddLabelsStore(5, 5, map[string]string{"zone": "z1", "rack": "r1", "host": "h2"}) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 5) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 5) // Store 6 has a different rack. tc.AddLabelsStore(6, 6, map[string]string{"zone": "z1", "rack": "r2", "host": "h1"}) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 6) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 6) // Store 7 has a different zone. tc.AddLabelsStore(7, 7, map[string]string{"zone": "z2", "rack": "r1", "host": "h1"}) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 7) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 7) // Test stateFilter. tc.SetStoreOffline(7) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 6) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 6) tc.SetStoreUp(7) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 7) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 7) // Add peer to store 7. peer7, _ := tc.AllocPeer(7) region = region.Clone(core.WithAddPeer(peer7)) // Replace peer in store 1 with store 6 because it has a different rack. - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 1, 6) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 1, 6) // Disable locationReplacement feature. tc.SetEnableLocationReplacement(false) suite.Nil(rc.Check(region)) tc.SetEnableLocationReplacement(true) peer6, _ := tc.AllocPeer(6) region = region.Clone(core.WithAddPeer(peer6)) - testutil.CheckRemovePeer(suite.Require(), rc.Check(region), 1) + operatorutil.CheckRemovePeer(suite.Require(), rc.Check(region), 1) region = region.Clone(core.WithRemoveStorePeer(1), core.WithLeader(region.GetStorePeer(2))) suite.Nil(rc.Check(region)) @@ -416,10 +416,10 @@ func (suite *replicaCheckerTestSuite) TestDistinctScore() { // Store 2 and 6 have the same distinct score, but store 2 has larger region score. // So replace peer in store 2 with store 10. 
tc.AddLabelsStore(10, 1, map[string]string{"zone": "z3", "rack": "r1", "host": "h1"}) - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2, 10) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2, 10) peer10, _ := tc.AllocPeer(10) region = region.Clone(core.WithAddPeer(peer10)) - testutil.CheckRemovePeer(suite.Require(), rc.Check(region), 2) + operatorutil.CheckRemovePeer(suite.Require(), rc.Check(region), 2) region = region.Clone(core.WithRemoveStorePeer(2)) suite.Nil(rc.Check(region)) } @@ -443,11 +443,11 @@ func (suite *replicaCheckerTestSuite) TestDistinctScore2() { tc.AddLeaderRegion(1, 1, 2, 4) region := tc.GetRegion(1) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 6) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 6) peer6, _ := tc.AllocPeer(6) region = region.Clone(core.WithAddPeer(peer6)) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 5) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 5) peer5, _ := tc.AllocPeer(5) region = region.Clone(core.WithAddPeer(peer5)) @@ -475,7 +475,7 @@ func (suite *replicaCheckerTestSuite) TestStorageThreshold() { // Move peer to better location. tc.UpdateStorageRatio(4, 0, 1) - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 1, 4) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 1, 4) // If store4 is almost full, do not add peer on it. tc.UpdateStorageRatio(4, 0.9, 0.1) suite.Nil(rc.Check(region)) @@ -484,10 +484,10 @@ func (suite *replicaCheckerTestSuite) TestStorageThreshold() { region = tc.GetRegion(2) // Add peer on store4. tc.UpdateStorageRatio(4, 0, 1) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 4) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 4) // If store4 is almost full, do not add peer on it. tc.UpdateStorageRatio(4, 0.8, 0) - testutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2) + operatorutil.CheckAddPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2) } func (suite *replicaCheckerTestSuite) TestOpts() { @@ -513,9 +513,9 @@ func (suite *replicaCheckerTestSuite) TestOpts() { })) tc.SetStoreOffline(2) // RemoveDownReplica has higher priority than replaceOfflineReplica. 
- testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 1, 4) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 1, 4) tc.SetEnableRemoveDownReplica(false) - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2, 4) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpReplica, 2, 4) tc.SetEnableReplaceOfflineReplica(false) suite.Nil(rc.Check(region)) } @@ -542,10 +542,10 @@ func (suite *replicaCheckerTestSuite) TestFixDownPeer() { region = region.Clone(core.WithDownPeers([]*pdpb.PeerStats{ {Peer: region.GetStorePeer(4), DownSeconds: 6000}, })) - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpRegion, 4, 5) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpRegion, 4, 5) tc.SetStoreDown(5) - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpRegion, 4, 2) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpRegion, 4, 2) tc.SetIsolationLevel("zone") suite.Nil(rc.Check(region)) @@ -570,10 +570,10 @@ func (suite *replicaCheckerTestSuite) TestFixOfflinePeer() { suite.Nil(rc.Check(region)) tc.SetStoreOffline(4) - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpRegion, 4, 5) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpRegion, 4, 5) tc.SetStoreOffline(5) - testutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpRegion, 4, 2) + operatorutil.CheckTransferPeer(suite.Require(), rc.Check(region), operator.OpRegion, 4, 2) tc.SetIsolationLevel("zone") suite.Nil(rc.Check(region)) diff --git a/pkg/schedule/checker/rule_checker_test.go b/pkg/schedule/checker/rule_checker_test.go index a8de4cc5496..0593a2224d1 100644 --- a/pkg/schedule/checker/rule_checker_test.go +++ b/pkg/schedule/checker/rule_checker_test.go @@ -32,7 +32,7 @@ import ( "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/utils/testutil" + "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/versioninfo" ) @@ -807,10 +807,10 @@ func (suite *ruleCheckerTestSuite) TestFixDownPeer() { region = region.Clone(core.WithDownPeers([]*pdpb.PeerStats{ {Peer: region.GetStorePeer(4), DownSeconds: 6000}, })) - testutil.CheckTransferPeer(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 5) + operatorutil.CheckTransferPeer(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 5) suite.cluster.SetStoreDown(5) - testutil.CheckTransferPeer(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 2) + operatorutil.CheckTransferPeer(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 2) rule.IsolationLevel = "zone" suite.ruleManager.SetRule(rule) @@ -1022,10 +1022,10 @@ func (suite *ruleCheckerTestSuite) TestFixOfflinePeer() { suite.Nil(suite.rc.Check(region)) suite.cluster.SetStoreOffline(4) - testutil.CheckTransferPeer(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 5) + operatorutil.CheckTransferPeer(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 5) suite.cluster.SetStoreOffline(5) - testutil.CheckTransferPeer(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 2) + operatorutil.CheckTransferPeer(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 2) rule.IsolationLevel = "zone" suite.ruleManager.SetRule(rule) diff --git 
a/pkg/schedule/schedulers/balance_test.go b/pkg/schedule/schedulers/balance_test.go index 084698d5d19..e8c9e084114 100644 --- a/pkg/schedule/schedulers/balance_test.go +++ b/pkg/schedule/schedulers/balance_test.go @@ -33,7 +33,7 @@ import ( "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" "github.com/tikv/pd/pkg/storage" - "github.com/tikv/pd/pkg/utils/testutil" + "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/versioninfo" ) @@ -420,16 +420,16 @@ func (suite *balanceLeaderSchedulerTestSuite) TestBalanceFilter() { suite.tc.AddLeaderStore(4, 16) suite.tc.AddLeaderRegion(1, 4, 1, 2, 3) - testutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 4, 1) + operatorutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 4, 1) // Test stateFilter. // if store 4 is offline, we should consider it // because it still provides services suite.tc.SetStoreOffline(4) - testutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 4, 1) + operatorutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 4, 1) // If store 1 is down, it will be filtered, // store 2 becomes the store with least leaders. suite.tc.SetStoreDown(1) - testutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 4, 2) + operatorutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 4, 2) plans := suite.dryRun() suite.NotEmpty(plans) suite.Equal(0, plans[0].GetStep()) @@ -440,7 +440,7 @@ func (suite *balanceLeaderSchedulerTestSuite) TestBalanceFilter() { // If store 2 is busy, it will be filtered, // store 3 becomes the store with least leaders. suite.tc.SetStoreBusy(2, true) - testutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 4, 3) + operatorutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 4, 3) // Test disconnectFilter. // If store 3 is disconnected, no operator can be created. 
@@ -462,9 +462,9 @@ func (suite *balanceLeaderSchedulerTestSuite) TestLeaderWeight() { suite.tc.UpdateStoreLeaderWeight(3, 1) suite.tc.UpdateStoreLeaderWeight(4, 2) suite.tc.AddLeaderRegion(1, 1, 2, 3, 4) - testutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 1, 4) + operatorutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 1, 4) suite.tc.UpdateLeaderCount(4, 30) - testutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 1, 3) + operatorutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 1, 3) } func (suite *balanceLeaderSchedulerTestSuite) TestBalancePolicy() { @@ -478,9 +478,9 @@ func (suite *balanceLeaderSchedulerTestSuite) TestBalancePolicy() { suite.tc.AddLeaderRegion(1, 2, 1, 3, 4) suite.tc.AddLeaderRegion(2, 1, 2, 3, 4) suite.tc.SetLeaderSchedulePolicy("count") - testutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 2, 3) + operatorutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 2, 3) suite.tc.SetLeaderSchedulePolicy("size") - testutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 1, 4) + operatorutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 1, 4) } func (suite *balanceLeaderSchedulerTestSuite) TestBalanceSelector() { @@ -496,7 +496,7 @@ func (suite *balanceLeaderSchedulerTestSuite) TestBalanceSelector() { suite.tc.AddLeaderRegion(2, 3, 1, 2) // store4 has max leader score, store1 has min leader score. // The scheduler try to move a leader out of 16 first. - testutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 4, 2) + operatorutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 4, 2) // Stores: 1 2 3 4 // Leaders: 1 14 15 16 @@ -505,7 +505,7 @@ func (suite *balanceLeaderSchedulerTestSuite) TestBalanceSelector() { suite.tc.UpdateLeaderCount(2, 14) suite.tc.UpdateLeaderCount(3, 15) // Cannot move leader out of store4, move a leader into store1. - testutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 3, 1) + operatorutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 3, 1) // Stores: 1 2 3 4 // Leaders: 1 2 15 16 @@ -515,7 +515,7 @@ func (suite *balanceLeaderSchedulerTestSuite) TestBalanceSelector() { suite.tc.AddLeaderRegion(1, 3, 2, 4) suite.tc.AddLeaderRegion(2, 1, 2, 3) // No leader in store16, no follower in store1. Now source and target are store3 and store2. 
- testutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 3, 2) + operatorutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 3, 2) // Stores: 1 2 3 4 // Leaders: 9 10 10 11 @@ -538,7 +538,7 @@ func (suite *balanceLeaderSchedulerTestSuite) TestBalanceSelector() { suite.tc.AddLeaderStore(2, 13) suite.tc.AddLeaderStore(3, 0) suite.tc.AddLeaderStore(4, 16) - testutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 4, 3) + operatorutil.CheckTransferLeader(suite.Require(), suite.schedule()[0], operator.OpKind(0), 4, 3) } type balanceLeaderRangeSchedulerTestSuite struct { @@ -759,7 +759,7 @@ func checkBalanceRegionSchedule1(re *require.Assertions, enablePlacementRules bo tc.AddLeaderRegion(1, 4) ops, _ := sb.Schedule(tc, false) op := ops[0] - testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpKind(0), 4, 1) + operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpKind(0), 4, 1) // Test stateFilter. tc.SetStoreOffline(1) @@ -769,7 +769,7 @@ func checkBalanceRegionSchedule1(re *require.Assertions, enablePlacementRules bo // store 2 becomes the store with least regions. ops, _ = sb.Schedule(tc, false) op = ops[0] - testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpKind(0), 4, 2) + operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpKind(0), 4, 2) tc.SetStoreUp(1) // test region replicate not match tc.SetMaxReplicasWithLabel(enablePlacementRules, 3) @@ -819,37 +819,37 @@ func checkReplica3(re *require.Assertions, enablePlacementRules bool) { tc.AddLabelsStore(4, 2, map[string]string{"zone": "z1", "rack": "r2", "host": "h1"}) ops, _ = sb.Schedule(tc, false) op := ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 2, 4) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 2, 4) // Store 5 has smaller region score than store 1. tc.AddLabelsStore(5, 2, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) ops, _ = sb.Schedule(tc, false) op = ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 5) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 5) // Store 6 has smaller region score than store 5. tc.AddLabelsStore(6, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) ops, _ = sb.Schedule(tc, false) op = ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 6) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 6) // Store 7 has smaller region score with store 6. tc.AddLabelsStore(7, 0, map[string]string{"zone": "z1", "rack": "r1", "host": "h2"}) ops, _ = sb.Schedule(tc, false) op = ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 7) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 7) // If store 7 is not available, will choose store 6. tc.SetStoreDown(7) ops, _ = sb.Schedule(tc, false) op = ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 6) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 6) // Store 8 has smaller region score than store 7, but the distinct score decrease. 
tc.AddLabelsStore(8, 1, map[string]string{"zone": "z1", "rack": "r2", "host": "h3"}) ops, _ = sb.Schedule(tc, false) op = ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 6) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 6) // Take down 4,5,6,7 tc.SetStoreDown(4) @@ -891,19 +891,19 @@ func checkReplica5(re *require.Assertions, enablePlacementRules bool) { tc.AddLabelsStore(6, 1, map[string]string{"zone": "z5", "rack": "r2", "host": "h1"}) ops, _ := sb.Schedule(tc, false) op := ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 5, 6) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 5, 6) // Store 7 has larger region score and same distinct score with store 6. tc.AddLabelsStore(7, 5, map[string]string{"zone": "z6", "rack": "r1", "host": "h1"}) ops, _ = sb.Schedule(tc, false) op = ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 5, 6) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 5, 6) // Store 1 has smaller region score and higher distinct score. tc.AddLeaderRegion(1, 2, 3, 4, 5, 6) ops, _ = sb.Schedule(tc, false) op = ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 5, 1) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 5, 1) // Store 6 has smaller region score and higher distinct score. tc.AddLabelsStore(11, 29, map[string]string{"zone": "z1", "rack": "r2", "host": "h1"}) @@ -912,7 +912,7 @@ func checkReplica5(re *require.Assertions, enablePlacementRules bool) { tc.AddLeaderRegion(1, 2, 3, 11, 12, 13) ops, _ = sb.Schedule(tc, false) op = ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 11, 6) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 11, 6) } // TestBalanceRegionSchedule2 for corner case 1: @@ -1001,7 +1001,7 @@ func checkBalanceRegionSchedule2(re *require.Assertions, enablePlacementRules bo // if the space of store 5 is normal, we can balance region to store 5 ops1, _ = sb.Schedule(tc, false) op = ops1[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 5) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 5) // the used size of store 5 reach (highSpace, lowSpace) origin := tc.GetStore(5) @@ -1019,7 +1019,7 @@ func checkBalanceRegionSchedule2(re *require.Assertions, enablePlacementRules bo // Then it will try store 4. 
ops1, _ = sb.Schedule(tc, false) op = ops1[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 4) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 4) } func TestBalanceRegionStoreWeight(t *testing.T) { @@ -1049,12 +1049,12 @@ func checkBalanceRegionStoreWeight(re *require.Assertions, enablePlacementRules tc.AddLeaderRegion(1, 1) ops, _ := sb.Schedule(tc, false) op := ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 4) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 4) tc.UpdateRegionCount(4, 30) ops, _ = sb.Schedule(tc, false) op = ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 3) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 3) } func TestBalanceRegionOpInfluence(t *testing.T) { @@ -1090,7 +1090,7 @@ func checkBalanceRegionOpInfluence(re *require.Assertions, enablePlacementRules } ops, _ := sb.Schedule(tc, false) op := ops[0] - testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpKind(0), 2, 1) + operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpKind(0), 2, 1) } func TestBalanceRegionReplacePendingRegion(t *testing.T) { @@ -1127,7 +1127,7 @@ func checkReplacePendingRegion(re *require.Assertions, enablePlacementRules bool re.Equal(uint64(3), op.RegionID()) ops, _ = sb.Schedule(tc, false) op = ops[0] - testutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 4) + operatorutil.CheckTransferPeer(re, op, operator.OpKind(0), 1, 4) } func TestBalanceRegionShouldNotBalance(t *testing.T) { diff --git a/pkg/schedule/schedulers/evict_leader_test.go b/pkg/schedule/schedulers/evict_leader_test.go index 37262e81593..8cc1d7a96d1 100644 --- a/pkg/schedule/schedulers/evict_leader_test.go +++ b/pkg/schedule/schedulers/evict_leader_test.go @@ -25,7 +25,7 @@ import ( "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/storage" - "github.com/tikv/pd/pkg/utils/testutil" + "github.com/tikv/pd/pkg/utils/operatorutil" ) func TestEvictLeader(t *testing.T) { @@ -46,7 +46,7 @@ func TestEvictLeader(t *testing.T) { re.NoError(err) re.True(sl.IsScheduleAllowed(tc)) ops, _ := sl.Schedule(tc, false) - testutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2, 3}) + operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2, 3}) re.False(ops[0].Step(0).(operator.TransferLeader).IsFinish(tc.MockRegionInfo(1, 1, []uint64{2, 3}, []uint64{}, &metapb.RegionEpoch{ConfVer: 0, Version: 0}))) re.True(ops[0].Step(0).(operator.TransferLeader).IsFinish(tc.MockRegionInfo(1, 2, []uint64{1, 3}, []uint64{}, &metapb.RegionEpoch{ConfVer: 0, Version: 0}))) } @@ -74,11 +74,11 @@ func TestEvictLeaderWithUnhealthyPeer(t *testing.T) { // only pending tc.PutRegion(region.Clone(withPendingPeer)) ops, _ := sl.Schedule(tc, false) - testutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{3}) + operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{3}) // only down tc.PutRegion(region.Clone(withDownPeer)) ops, _ = sl.Schedule(tc, false) - testutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2}) + operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2}) // pending + down tc.PutRegion(region.Clone(withPendingPeer, withDownPeer)) ops, _ = sl.Schedule(tc, false) diff --git a/pkg/schedule/schedulers/evict_slow_store_test.go b/pkg/schedule/schedulers/evict_slow_store_test.go index 845b3b1b93c..b0989246f3f 
100644 --- a/pkg/schedule/schedulers/evict_slow_store_test.go +++ b/pkg/schedule/schedulers/evict_slow_store_test.go @@ -27,7 +27,7 @@ import ( "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/storage" - "github.com/tikv/pd/pkg/utils/testutil" + "github.com/tikv/pd/pkg/utils/operatorutil" ) type evictSlowStoreTestSuite struct { @@ -76,7 +76,7 @@ func (suite *evictSlowStoreTestSuite) TestEvictSlowStore() { suite.True(suite.es.IsScheduleAllowed(suite.tc)) // Add evict leader scheduler to store 1 ops, _ := suite.es.Schedule(suite.tc, false) - testutil.CheckMultiTargetTransferLeader(suite.Require(), ops[0], operator.OpLeader, 1, []uint64{2}) + operatorutil.CheckMultiTargetTransferLeader(suite.Require(), ops[0], operator.OpLeader, 1, []uint64{2}) suite.Equal(EvictSlowStoreType, ops[0].Desc()) // Cannot balance leaders to store 1 ops, _ = suite.bs.Schedule(suite.tc, false) @@ -89,7 +89,7 @@ func (suite *evictSlowStoreTestSuite) TestEvictSlowStore() { ops, _ = suite.es.Schedule(suite.tc, false) suite.Empty(ops) ops, _ = suite.bs.Schedule(suite.tc, false) - testutil.CheckTransferLeader(suite.Require(), ops[0], operator.OpLeader, 2, 1) + operatorutil.CheckTransferLeader(suite.Require(), ops[0], operator.OpLeader, 2, 1) // no slow store need to evict. ops, _ = suite.es.Schedule(suite.tc, false) diff --git a/pkg/schedule/schedulers/evict_slow_trend_test.go b/pkg/schedule/schedulers/evict_slow_trend_test.go index 3227d285014..0c96170e0c9 100644 --- a/pkg/schedule/schedulers/evict_slow_trend_test.go +++ b/pkg/schedule/schedulers/evict_slow_trend_test.go @@ -27,7 +27,7 @@ import ( "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/storage" - "github.com/tikv/pd/pkg/utils/testutil" + "github.com/tikv/pd/pkg/utils/operatorutil" ) type evictSlowTrendTestSuite struct { @@ -111,7 +111,7 @@ func (suite *evictSlowTrendTestSuite) TestEvictSlowTrend() { suite.tc.PutStore(newStoreInfo) } ops, _ = suite.es.Schedule(suite.tc, false) - testutil.CheckMultiTargetTransferLeader(suite.Require(), ops[0], operator.OpLeader, 1, []uint64{2, 3}) + operatorutil.CheckMultiTargetTransferLeader(suite.Require(), ops[0], operator.OpLeader, 1, []uint64{2, 3}) suite.Equal(EvictSlowTrendType, ops[0].Desc()) suite.Equal(es2.conf.candidate(), uint64(0)) suite.Equal(es2.conf.evictedStore(), uint64(1)) @@ -134,7 +134,7 @@ func (suite *evictSlowTrendTestSuite) TestEvictSlowTrend() { suite.Empty(ops) suite.Zero(es2.conf.evictedStore()) ops, _ = suite.bs.Schedule(suite.tc, false) - testutil.CheckTransferLeader(suite.Require(), ops[0], operator.OpLeader, 3, 1) + operatorutil.CheckTransferLeader(suite.Require(), ops[0], operator.OpLeader, 3, 1) // no slow store need to evict. 
ops, _ = suite.es.Schedule(suite.tc, false) diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index 4f024979394..11d86af508b 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -31,7 +31,7 @@ import ( "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/storage/endpoint" - "github.com/tikv/pd/pkg/utils/testutil" + "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/pkg/versioninfo" ) @@ -260,15 +260,15 @@ func checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlace switch op.Len() { case 1: // balance by leader selected - testutil.CheckTransferLeaderFrom(re, op, operator.OpHotRegion, 1) + operatorutil.CheckTransferLeaderFrom(re, op, operator.OpHotRegion, 1) case 4: // balance by peer selected if op.RegionID() == 2 { // peer in store 1 of the region 2 can transfer to store 5 or store 6 because of the label - testutil.CheckTransferPeerWithLeaderTransferFrom(re, op, operator.OpHotRegion, 1) + operatorutil.CheckTransferPeerWithLeaderTransferFrom(re, op, operator.OpHotRegion, 1) } else { // peer in store 1 of the region 1,3 can only transfer to store 6 - testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 6) + operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 6) } default: re.FailNow("wrong op: " + op.String()) @@ -288,10 +288,10 @@ func checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlace re.Equal(4, op.Len()) if op.RegionID() == 2 { // peer in store 1 of the region 2 can transfer to store 5 or store 6 because of the label - testutil.CheckTransferPeerWithLeaderTransferFrom(re, op, operator.OpHotRegion, 1) + operatorutil.CheckTransferPeerWithLeaderTransferFrom(re, op, operator.OpHotRegion, 1) } else { // peer in store 1 of the region 1,3 can only transfer to store 6 - testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 6) + operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 6) } } @@ -354,16 +354,16 @@ func checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlace switch op.RegionID() { case 1, 2: if op.Len() == 3 { - testutil.CheckTransferPeer(re, op, operator.OpHotRegion, 3, 6) + operatorutil.CheckTransferPeer(re, op, operator.OpHotRegion, 3, 6) } else if op.Len() == 4 { - testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 6) + operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 6) } else { re.FailNow("wrong operator: " + op.String()) } case 3: - testutil.CheckTransferPeer(re, op, operator.OpHotRegion, 1, 5) + operatorutil.CheckTransferPeer(re, op, operator.OpHotRegion, 1, 5) case 5: - testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 3, 6) + operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 3, 6) default: re.FailNow("wrong operator: " + op.String()) } @@ -478,10 +478,10 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { switch op.Len() { case 1: // balance by leader selected - testutil.CheckTransferLeaderFrom(re, op, operator.OpHotRegion, 1) + operatorutil.CheckTransferLeaderFrom(re, op, operator.OpHotRegion, 1) case 2: // balance by peer selected - testutil.CheckTransferLearner(re, op, operator.OpHotRegion, 8, 10) + operatorutil.CheckTransferLearner(re, op, operator.OpHotRegion, 8, 10) 
default: re.FailNow("wrong op: " + op.String()) } @@ -492,7 +492,7 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { clearPendingInfluence(hb) ops, _ := hb.Schedule(tc, false) op := ops[0] - testutil.CheckTransferLeaderFrom(re, op, operator.OpHotRegion, 1) + operatorutil.CheckTransferLeaderFrom(re, op, operator.OpHotRegion, 1) } // | store_id | write_bytes_rate | // |----------|------------------| @@ -569,15 +569,15 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { switch op.Len() { case 1: // balance by leader selected - testutil.CheckTransferLeaderFrom(re, op, operator.OpHotRegion, 1) + operatorutil.CheckTransferLeaderFrom(re, op, operator.OpHotRegion, 1) case 4: // balance by peer selected if op.RegionID() == 2 { // peer in store 1 of the region 2 can transfer to store 5 or store 6 because of the label - testutil.CheckTransferPeerWithLeaderTransferFrom(re, op, operator.OpHotRegion, 1) + operatorutil.CheckTransferPeerWithLeaderTransferFrom(re, op, operator.OpHotRegion, 1) } else { // peer in store 1 of the region 1,3 can only transfer to store 6 - testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 6) + operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 6) } default: re.FailNow("wrong op: " + op.String()) @@ -622,7 +622,7 @@ func TestHotWriteRegionScheduleWithQuery(t *testing.T) { clearPendingInfluence(hb.(*hotScheduler)) ops, _ := hb.Schedule(tc, false) op := ops[0] - testutil.CheckTransferLeader(re, op, operator.OpHotRegion, 1, 3) + operatorutil.CheckTransferLeader(re, op, operator.OpHotRegion, 1, 3) } } @@ -665,21 +665,21 @@ func TestHotWriteRegionScheduleWithKeyRate(t *testing.T) { ops, _ := hb.Schedule(tc, false) op := ops[0] // byteDecRatio <= 0.95 && keyDecRatio <= 0.95 - testutil.CheckTransferPeer(re, op, operator.OpHotRegion, 1, 4) + operatorutil.CheckTransferPeer(re, op, operator.OpHotRegion, 1, 4) // store byte rate (min, max): (10, 10.5) | 9.5 | 9.5 | (9, 9.5) | 8.9 // store key rate (min, max): (10, 10.5) | 9.5 | 9.8 | (9, 9.5) | 9.2 ops, _ = hb.Schedule(tc, false) op = ops[0] // byteDecRatio <= 0.99 && keyDecRatio <= 0.95 - testutil.CheckTransferPeer(re, op, operator.OpHotRegion, 3, 5) + operatorutil.CheckTransferPeer(re, op, operator.OpHotRegion, 3, 5) // store byte rate (min, max): (10, 10.5) | 9.5 | (9.45, 9.5) | (9, 9.5) | (8.9, 8.95) // store key rate (min, max): (10, 10.5) | 9.5 | (9.7, 9.8) | (9, 9.5) | (9.2, 9.3) // byteDecRatio <= 0.95 // op = hb.Schedule(tc, false)[0] // FIXME: cover this case - // testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 5) + // operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 5) // store byte rate (min, max): (9.5, 10.5) | 9.5 | (9.45, 9.5) | (9, 9.5) | (8.9, 9.45) // store key rate (min, max): (9.2, 10.2) | 9.5 | (9.7, 9.8) | (9, 9.5) | (9.2, 9.8) } @@ -824,7 +824,7 @@ func TestHotWriteRegionScheduleWithLeader(t *testing.T) { clearPendingInfluence(hb.(*hotScheduler)) ops, _ := hb.Schedule(tc, false) op := ops[0] - testutil.CheckTransferLeaderFrom(re, op, operator.OpHotRegion, 2) + operatorutil.CheckTransferLeaderFrom(re, op, operator.OpHotRegion, 2) ops, _ = hb.Schedule(tc, false) re.Empty(ops) } @@ -906,10 +906,10 @@ func checkHotWriteRegionScheduleWithPendingInfluence(re *require.Assertions, dim switch op.Len() { case 1: // balance by leader selected - testutil.CheckTransferLeaderFrom(re, op, operator.OpHotRegion, 1) + operatorutil.CheckTransferLeaderFrom(re, op, 
operator.OpHotRegion, 1) case 4: // balance by peer selected - testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 4) + operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 4) cnt++ if cnt == 3 { re.True(op.Cancel()) @@ -1000,7 +1000,7 @@ func TestHotWriteRegionScheduleWithRuleEnabled(t *testing.T) { ops, _ := hb.Schedule(tc, false) op := ops[0] // The targetID should always be 1 as leader is only allowed to be placed in store1 or store2 by placement rule - testutil.CheckTransferLeader(re, op, operator.OpHotRegion, 2, 1) + operatorutil.CheckTransferLeader(re, op, operator.OpHotRegion, 2, 1) ops, _ = hb.Schedule(tc, false) re.Empty(ops) } @@ -1070,7 +1070,7 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { // move leader from store 1 to store 5 // it is better than transfer leader from store 1 to store 3 - testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 5) + operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 5) re.Contains(hb.regionPendings, uint64(1)) re.True(typeutil.Float64Equal(512.0*units.KiB, hb.regionPendings[1].origin.Loads[statistics.RegionReadBytes])) clearPendingInfluence(hb) @@ -1109,7 +1109,7 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { // We will move leader peer of region 1 from 1 to 5 ops, _ = hb.Schedule(tc, false) op = ops[0] - testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion|operator.OpLeader, 1, 5) + operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion|operator.OpLeader, 1, 5) re.Contains(hb.regionPendings, uint64(1)) re.True(typeutil.Float64Equal(512.0*units.KiB, hb.regionPendings[1].origin.Loads[statistics.RegionReadBytes])) clearPendingInfluence(hb) @@ -1157,7 +1157,7 @@ func TestHotReadRegionScheduleWithQuery(t *testing.T) { clearPendingInfluence(hb.(*hotScheduler)) ops, _ := hb.Schedule(tc, false) op := ops[0] - testutil.CheckTransferLeader(re, op, operator.OpHotRegion, 1, 3) + operatorutil.CheckTransferLeader(re, op, operator.OpHotRegion, 1, 3) } } @@ -1199,21 +1199,21 @@ func TestHotReadRegionScheduleWithKeyRate(t *testing.T) { ops, _ := hb.Schedule(tc, false) op := ops[0] // byteDecRatio <= 0.95 && keyDecRatio <= 0.95 - testutil.CheckTransferLeader(re, op, operator.OpHotRegion, 1, 4) + operatorutil.CheckTransferLeader(re, op, operator.OpHotRegion, 1, 4) // store byte rate (min, max): (10, 10.5) | 9.5 | 9.5 | (9, 9.5) | 8.9 // store key rate (min, max): (10, 10.5) | 9.5 | 9.8 | (9, 9.5) | 9.2 ops, _ = hb.Schedule(tc, false) op = ops[0] // byteDecRatio <= 0.99 && keyDecRatio <= 0.95 - testutil.CheckTransferLeader(re, op, operator.OpHotRegion, 3, 5) + operatorutil.CheckTransferLeader(re, op, operator.OpHotRegion, 3, 5) // store byte rate (min, max): (10, 10.5) | 9.5 | (9.45, 9.5) | (9, 9.5) | (8.9, 8.95) // store key rate (min, max): (10, 10.5) | 9.5 | (9.7, 9.8) | (9, 9.5) | (9.2, 9.3) // byteDecRatio <= 0.95 // FIXME: cover this case // op = hb.Schedule(tc, false)[0] - // testutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 5) + // operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 5) // store byte rate (min, max): (9.5, 10.5) | 9.5 | (9.45, 9.5) | (9, 9.5) | (8.9, 9.45) // store key rate (min, max): (9.2, 10.2) | 9.5 | (9.7, 9.8) | (9, 9.5) | (9.2, 9.8) } @@ -1291,7 +1291,7 @@ func checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim ops, _ := hb.Schedule(tc, false) op1 := ops[0] - 
testutil.CheckTransferPeer(re, op1, operator.OpHotRegion, 1, 4) + operatorutil.CheckTransferPeer(re, op1, operator.OpHotRegion, 1, 4) // After move-peer, store byte/key rate (min, max): (6.6, 7.1) | 6.1 | 6 | (5, 5.5) pendingAmpFactor = old @@ -1301,7 +1301,7 @@ func checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim ops, _ = hb.Schedule(tc, false) op2 := ops[0] - testutil.CheckTransferPeer(re, op2, operator.OpHotRegion, 1, 4) + operatorutil.CheckTransferPeer(re, op2, operator.OpHotRegion, 1, 4) // After move-peer, store byte/key rate (min, max): (6.1, 7.1) | 6.1 | 6 | (5, 6) ops, _ = hb.Schedule(tc, false) @@ -1314,18 +1314,18 @@ func checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim ops, _ := hb.Schedule(tc, false) op1 := ops[0] - testutil.CheckTransferPeer(re, op1, operator.OpHotRegion, 1, 4) + operatorutil.CheckTransferPeer(re, op1, operator.OpHotRegion, 1, 4) // After move-peer, store byte/key rate (min, max): (6.6, 7.1) | 6.1 | 6 | (5, 5.5) ops, _ = hb.Schedule(tc, false) op2 := ops[0] - testutil.CheckTransferPeer(re, op2, operator.OpHotRegion, 1, 4) + operatorutil.CheckTransferPeer(re, op2, operator.OpHotRegion, 1, 4) // After move-peer, store byte/key rate (min, max): (6.1, 7.1) | 6.1 | 6 | (5, 6) re.True(op2.Cancel()) ops, _ = hb.Schedule(tc, false) op2 = ops[0] - testutil.CheckTransferPeer(re, op2, operator.OpHotRegion, 1, 4) + operatorutil.CheckTransferPeer(re, op2, operator.OpHotRegion, 1, 4) // After move-peer, store byte/key rate (min, max): (6.1, 7.1) | 6.1 | (6, 6.5) | (5, 5.5) re.True(op1.Cancel()) @@ -1333,7 +1333,7 @@ func checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim ops, _ = hb.Schedule(tc, false) op3 := ops[0] - testutil.CheckTransferPeer(re, op3, operator.OpHotRegion, 1, 4) + operatorutil.CheckTransferPeer(re, op3, operator.OpHotRegion, 1, 4) // store byte/key rate (min, max): (6.1, 7.1) | 6.1 | 6 | (5, 6) ops, _ = hb.Schedule(tc, false) @@ -1374,7 +1374,7 @@ func TestHotReadWithEvictLeaderScheduler(t *testing.T) { ops, _ := hb.Schedule(tc, false) re.Len(ops, 1) clearPendingInfluence(hb.(*hotScheduler)) - testutil.CheckTransferPeerWithLeaderTransfer(re, ops[0], operator.OpHotRegion|operator.OpLeader, 1, 4) + operatorutil.CheckTransferPeerWithLeaderTransfer(re, ops[0], operator.OpHotRegion|operator.OpLeader, 1, 4) // two dim are both enough uniform among three stores tc.SetStoreEvictLeader(4, true) ops, _ = hb.Schedule(tc, false) @@ -1934,7 +1934,7 @@ func checkHotReadPeerSchedule(re *require.Assertions, enablePlacementRules bool) tc.AddRegionWithPeerReadInfo(1, 3, 1, uint64(0.9*units.KiB*float64(10)), uint64(0.9*units.KiB*float64(10)), 10, []uint64{1, 2}, 3) ops, _ := hb.Schedule(tc, false) op := ops[0] - testutil.CheckTransferPeer(re, op, operator.OpHotRegion, 1, 4) + operatorutil.CheckTransferPeer(re, op, operator.OpHotRegion, 1, 4) } func TestHotScheduleWithPriority(t *testing.T) { @@ -1977,12 +1977,12 @@ func TestHotScheduleWithPriority(t *testing.T) { hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority} ops, _ := hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 1, 5) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 1, 5) clearPendingInfluence(hb.(*hotScheduler)) hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.KeyPriority, statistics.BytePriority} ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], 
operator.OpHotRegion, 4, 5) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 4, 5) clearPendingInfluence(hb.(*hotScheduler)) // assert read priority schedule @@ -1999,12 +1999,12 @@ func TestHotScheduleWithPriority(t *testing.T) { hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority} ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2) clearPendingInfluence(hb.(*hotScheduler)) hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.KeyPriority, statistics.BytePriority} ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 3) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 3) hb, err = schedule.CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority} @@ -2024,7 +2024,7 @@ func TestHotScheduleWithPriority(t *testing.T) { hb.(*hotScheduler).conf.StrictPickingStore = false ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 1, 5) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 1, 5) clearPendingInfluence(hb.(*hotScheduler)) tc.UpdateStorageWrittenStats(1, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval) @@ -2039,7 +2039,7 @@ func TestHotScheduleWithPriority(t *testing.T) { hb.(*hotScheduler).conf.StrictPickingStore = false ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 4, 5) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 4, 5) clearPendingInfluence(hb.(*hotScheduler)) } @@ -2080,7 +2080,7 @@ func TestHotScheduleWithStddev(t *testing.T) { stddevThreshold = -1.0 ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) clearPendingInfluence(hb.(*hotScheduler)) // skip -1 case (uniform cluster) @@ -2099,7 +2099,7 @@ func TestHotScheduleWithStddev(t *testing.T) { stddevThreshold = -1.0 ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) clearPendingInfluence(hb.(*hotScheduler)) } @@ -2139,11 +2139,11 @@ func TestHotWriteLeaderScheduleWithPriority(t *testing.T) { hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority} ops, _ := hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2) hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.BytePriority, statistics.KeyPriority} ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 3) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 3) } func TestCompatibility(t *testing.T) { diff --git a/pkg/schedule/schedulers/hot_region_v2_test.go b/pkg/schedule/schedulers/hot_region_v2_test.go index 681de0a139b..1449b3a5946 100644 --- a/pkg/schedule/schedulers/hot_region_v2_test.go +++ 
b/pkg/schedule/schedulers/hot_region_v2_test.go @@ -23,7 +23,7 @@ import ( "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/storage" - "github.com/tikv/pd/pkg/utils/testutil" + "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/versioninfo" ) @@ -72,8 +72,8 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { ops, _ = hb.Schedule(tc, false) /* The revert region is currently disabled for the -1 case. re.Len(ops, 2) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) - testutil.CheckTransferPeer(re, ops[1], operator.OpHotRegion, 5, 2) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) + operatorutil.CheckTransferPeer(re, ops[1], operator.OpHotRegion, 5, 2) */ re.Empty(ops) re.True(hb.searchRevertRegions[writePeer]) @@ -84,7 +84,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { }) ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) re.False(hb.searchRevertRegions[writePeer]) clearPendingInfluence(hb) } @@ -123,7 +123,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) { // One operator can be generated when RankFormulaVersion == "v1". ops, _ := hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) re.False(hb.searchRevertRegions[writePeer]) clearPendingInfluence(hb) @@ -136,8 +136,8 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) { // Two operators can be generated when RankFormulaVersion == "v2". ops, _ = hb.Schedule(tc, false) re.Len(ops, 2) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) - testutil.CheckTransferPeer(re, ops[1], operator.OpHotRegion, 5, 2) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) + operatorutil.CheckTransferPeer(re, ops[1], operator.OpHotRegion, 5, 2) re.True(hb.searchRevertRegions[writePeer]) clearPendingInfluence(hb) } @@ -176,7 +176,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) { // One operator can be generated when RankFormulaVersion == "v1". ops, _ := hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) re.False(hb.searchRevertRegions[writePeer]) clearPendingInfluence(hb) @@ -189,7 +189,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) { // There is still the solution with one operator after that. 
ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) re.True(hb.searchRevertRegions[writePeer]) clearPendingInfluence(hb) // Two operators can be generated when there is a better solution @@ -198,8 +198,8 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) { }) ops, _ = hb.Schedule(tc, false) re.Len(ops, 2) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) - testutil.CheckTransferPeer(re, ops[1], operator.OpHotRegion, 5, 2) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) + operatorutil.CheckTransferPeer(re, ops[1], operator.OpHotRegion, 5, 2) re.True(hb.searchRevertRegions[writePeer]) clearPendingInfluence(hb) } @@ -249,8 +249,8 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { ops, _ = hb.Schedule(tc, false) /* The revert region is currently disabled for the -1 case. re.Len(ops, 2) - testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 2, 5) - testutil.CheckTransferLeader(re, ops[1], operator.OpHotRegion, 5, 2) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 2, 5) + operatorutil.CheckTransferLeader(re, ops[1], operator.OpHotRegion, 5, 2) */ re.Empty(ops) re.True(hb.searchRevertRegions[readLeader]) @@ -261,7 +261,7 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { }) ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 2, 5) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 2, 5) re.False(hb.searchRevertRegions[readLeader]) clearPendingInfluence(hb) } @@ -296,7 +296,7 @@ func TestSkipUniformStore(t *testing.T) { stddevThreshold = 0.0 ops, _ := hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2) clearPendingInfluence(hb.(*hotScheduler)) // when there is uniform store filter, not schedule stddevThreshold = 0.1 @@ -316,13 +316,13 @@ func TestSkipUniformStore(t *testing.T) { stddevThreshold = 0.0 ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2) clearPendingInfluence(hb.(*hotScheduler)) // when there is uniform store filter, schedule the second dim, which is no uniform stddevThreshold = 0.1 ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 3, 2) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 3, 2) clearPendingInfluence(hb.(*hotScheduler)) // Case3: the second dim is enough uniform, we should schedule the first dim, although its rank is higher than the second dim @@ -337,12 +337,12 @@ func TestSkipUniformStore(t *testing.T) { stddevThreshold = 0.0 ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 3, 2) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 3, 2) clearPendingInfluence(hb.(*hotScheduler)) // when there is uniform store filter, schedule the first dim, which is no uniform stddevThreshold = 0.1 ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 3, 2) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 3, 2) 
clearPendingInfluence(hb.(*hotScheduler)) } diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index b26de523be8..4b17751c3a5 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -31,7 +31,7 @@ import ( "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/storage" - "github.com/tikv/pd/pkg/utils/testutil" + "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/versioninfo" ) @@ -96,12 +96,12 @@ func TestRejectLeader(t *testing.T) { sl, err := schedule.CreateScheduler(LabelType, oc, storage.NewStorageWithMemoryBackend(), schedule.ConfigSliceDecoder(LabelType, []string{"", ""})) re.NoError(err) ops, _ := sl.Schedule(tc, false) - testutil.CheckTransferLeaderFrom(re, ops[0], operator.OpLeader, 1) + operatorutil.CheckTransferLeaderFrom(re, ops[0], operator.OpLeader, 1) // If store3 is disconnected, transfer leader to store 2. tc.SetStoreDisconnect(3) ops, _ = sl.Schedule(tc, false) - testutil.CheckTransferLeader(re, ops[0], operator.OpLeader, 1, 2) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpLeader, 1, 2) // As store3 is disconnected, store1 rejects leader. Balancer will not create // any operators. @@ -128,7 +128,7 @@ func TestRejectLeader(t *testing.T) { origin, overlaps, rangeChanged := tc.SetRegion(region) tc.UpdateSubTree(region, origin, overlaps, rangeChanged) ops, _ = sl.Schedule(tc, false) - testutil.CheckTransferLeader(re, ops[0], operator.OpLeader, 1, 2) + operatorutil.CheckTransferLeader(re, ops[0], operator.OpLeader, 1, 2) } func TestRemoveRejectLeader(t *testing.T) { @@ -300,11 +300,11 @@ func TestShuffleRegionRole(t *testing.T) { conf.Roles = []string{"follower"} ops, _ := sl.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpKind(0), 2, 4) // transfer follower + operatorutil.CheckTransferPeer(re, ops[0], operator.OpKind(0), 2, 4) // transfer follower conf.Roles = []string{"learner"} ops, _ = sl.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferLearner(re, ops[0], operator.OpRegion, 3, 4) + operatorutil.CheckTransferLearner(re, ops[0], operator.OpRegion, 3, 4) } func TestSpecialUseHotRegion(t *testing.T) { @@ -335,7 +335,7 @@ func TestSpecialUseHotRegion(t *testing.T) { // balance region without label ops, _ := bs.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpKind(0), 1, 4) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpKind(0), 1, 4) // cannot balance to store 4 and 5 with label tc.AddLabelsStore(4, 0, map[string]string{"specialUse": "hotRegion"}) @@ -356,7 +356,7 @@ func TestSpecialUseHotRegion(t *testing.T) { tc.AddLeaderRegionWithWriteInfo(5, 3, 512*units.KiB*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{1, 2}) ops, _ = hs.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 1, 4) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 1, 4) } func TestSpecialUseReserved(t *testing.T) { @@ -384,7 +384,7 @@ func TestSpecialUseReserved(t *testing.T) { // balance region without label ops, _ := bs.Schedule(tc, false) re.Len(ops, 1) - testutil.CheckTransferPeer(re, ops[0], operator.OpKind(0), 1, 4) + operatorutil.CheckTransferPeer(re, ops[0], operator.OpKind(0), 1, 4) // cannot balance to store 4 with label tc.AddLabelsStore(4, 0, map[string]string{"specialUse": "reserved"}) diff --git 
a/pkg/schedule/schedulers/transfer_witness_leader_test.go b/pkg/schedule/schedulers/transfer_witness_leader_test.go index 54384abb04b..7d2dfeb3224 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader_test.go +++ b/pkg/schedule/schedulers/transfer_witness_leader_test.go @@ -24,7 +24,7 @@ import ( "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/storage" - "github.com/tikv/pd/pkg/utils/testutil" + "github.com/tikv/pd/pkg/utils/operatorutil" ) func TestTransferWitnessLeader(t *testing.T) { @@ -44,7 +44,7 @@ func TestTransferWitnessLeader(t *testing.T) { RecvRegionInfo(sl) <- tc.GetRegion(1) re.True(sl.IsScheduleAllowed(tc)) ops, _ := sl.Schedule(tc, false) - testutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2, 3}) + operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2, 3}) re.False(ops[0].Step(0).(operator.TransferLeader).IsFinish(tc.MockRegionInfo(1, 1, []uint64{2, 3}, []uint64{}, &metapb.RegionEpoch{ConfVer: 0, Version: 0}))) re.True(ops[0].Step(0).(operator.TransferLeader).IsFinish(tc.MockRegionInfo(1, 2, []uint64{1, 3}, []uint64{}, &metapb.RegionEpoch{ConfVer: 0, Version: 0}))) } @@ -74,14 +74,14 @@ func TestTransferWitnessLeaderWithUnhealthyPeer(t *testing.T) { tc.PutRegion(region.Clone(withPendingPeer)) RecvRegionInfo(sl) <- tc.GetRegion(1) ops, _ := sl.Schedule(tc, false) - testutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{3}) + operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{3}) ops, _ = sl.Schedule(tc, false) re.Nil(ops) // only down tc.PutRegion(region.Clone(withDownPeer)) RecvRegionInfo(sl) <- tc.GetRegion(1) ops, _ = sl.Schedule(tc, false) - testutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2}) + operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2}) // pending + down tc.PutRegion(region.Clone(withPendingPeer, withDownPeer)) ops, _ = sl.Schedule(tc, false) diff --git a/pkg/utils/testutil/operator_check.go b/pkg/utils/operatorutil/operator_check.go similarity index 99% rename from pkg/utils/testutil/operator_check.go rename to pkg/utils/operatorutil/operator_check.go index ee7aa2a9852..f6517be29d7 100644 --- a/pkg/utils/testutil/operator_check.go +++ b/pkg/utils/operatorutil/operator_check.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package testutil +package operatorutil import ( "github.com/stretchr/testify/require" diff --git a/server/cluster/coordinator_test.go b/server/cluster/coordinator_test.go index 5a1b85b286a..b7652447678 100644 --- a/server/cluster/coordinator_test.go +++ b/server/cluster/coordinator_test.go @@ -39,6 +39,7 @@ import ( "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/storage" + "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/server/config" @@ -202,10 +203,10 @@ func TestDispatch(t *testing.T) { // Wait for schedule and turn off balance. 
waitOperator(re, co, 1) - testutil.CheckTransferPeer(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1) + operatorutil.CheckTransferPeer(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1) re.NoError(co.removeScheduler(schedulers.BalanceRegionName)) waitOperator(re, co, 2) - testutil.CheckTransferLeader(re, co.opController.GetOperator(2), operator.OpKind(0), 4, 2) + operatorutil.CheckTransferLeader(re, co.opController.GetOperator(2), operator.OpKind(0), 4, 2) re.NoError(co.removeScheduler(schedulers.BalanceLeaderName)) stream := mockhbstream.NewHeartbeatStream() @@ -359,7 +360,7 @@ func TestCheckRegion(t *testing.T) { re.NoError(tc.addRegionStore(1, 1)) re.NoError(tc.addLeaderRegion(1, 2, 3)) checkRegionAndOperator(re, tc, co, 1, 1) - testutil.CheckAddPeer(re, co.opController.GetOperator(1), operator.OpReplica, 1) + operatorutil.CheckAddPeer(re, co.opController.GetOperator(1), operator.OpReplica, 1) checkRegionAndOperator(re, tc, co, 1, 0) r := tc.GetRegion(1) @@ -582,7 +583,7 @@ func TestPeerState(t *testing.T) { // Wait for schedule. waitOperator(re, co, 1) - testutil.CheckTransferPeer(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1) + operatorutil.CheckTransferPeer(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1) region := tc.GetRegion(1).Clone()
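For callers, the rename is mechanical: the helper signatures are unchanged, and only the import path and package qualifier move from testutil to operatorutil. A hedged sketch of a post-rename call site follows; the import paths are the ones touched by this patch, while the package and function names around them are hypothetical scaffolding with the test setup omitted.

// Illustrative only: the updated import and qualifier after the rename.
package checksketch

import (
	"github.com/stretchr/testify/require"

	"github.com/tikv/pd/pkg/schedule/operator"
	// Previously "github.com/tikv/pd/pkg/utils/testutil"; the operator check
	// helpers now live under pkg/utils/operatorutil.
	"github.com/tikv/pd/pkg/utils/operatorutil"
)

// verifySchedule shows the updated qualifier; re and ops would come from a
// testify Assertions value and a scheduler's Schedule call, as in the tests above.
func verifySchedule(re *require.Assertions, ops []*operator.Operator) {
	re.Len(ops, 1)
	operatorutil.CheckTransferLeader(re, ops[0], operator.OpLeader, 1, 2)
}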