From 6542593fbd62ae8a9db7b2a8d31c75a2163a8dce Mon Sep 17 00:00:00 2001
From: xufei
Date: Thu, 23 Sep 2021 13:22:46 +0800
Subject: [PATCH 1/5] cherry pick #28201 to release-5.0

Signed-off-by: ti-srebot
---
 store/copr/batch_coprocessor.go | 89 ++++++++++++++++++++++----------
 1 file changed, 60 insertions(+), 29 deletions(-)

diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go
index 43c3ac90703b0..c3cd658136e42 100644
--- a/store/copr/batch_coprocessor.go
+++ b/store/copr/batch_coprocessor.go
@@ -102,11 +102,17 @@ func (rs *batchCopResponse) RespTime() time.Duration {
 // 2. for the remaining regions:
 // if there is only 1 available store, then put the region to the related store
 // otherwise, use a greedy algorithm to put it into the store with highest weight
+<<<<<<< HEAD
 func balanceBatchCopTask(ctx context.Context, kvStore *tikv.KVStore, originalTasks []*batchCopTask, mppStoreLastFailTime map[string]time.Time, ttl time.Duration) []*batchCopTask {
 	if len(originalTasks) <= 1 {
+=======
+func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks []*batchCopTask, mppStoreLastFailTime map[string]time.Time, ttl time.Duration) []*batchCopTask {
+	isMPP := mppStoreLastFailTime != nil
+	// for mpp, we still need to detect the store availability
+	if len(originalTasks) <= 1 && !isMPP {
+>>>>>>> 624f7cab3... copr: Fix bug that mpp node availability detect does not work in some corner cases (#28201)
 		return originalTasks
 	}
-	isMPP := mppStoreLastFailTime != nil
 	cache := kvStore.GetRegionCache()
 	storeTaskMap := make(map[uint64]*batchCopTask)
 	// storeCandidateRegionMap stores all the possible store->region map. Its content is
@@ -223,16 +229,28 @@ func balanceBatchCopTask(ctx context.Context, kvStore *tikv.KVStore, originalTas
 			}
 		}
 	}
 
-	if totalRemainingRegionNum == 0 {
-		return originalTasks
-	}
-	avgStorePerRegion := float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
-	findNextStore := func(candidateStores []uint64) uint64 {
-		store := uint64(math.MaxUint64)
-		weightedRegionNum := math.MaxFloat64
-		if candidateStores != nil {
-			for _, storeID := range candidateStores {
+	if totalRemainingRegionNum > 0 {
+		avgStorePerRegion := float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
+		findNextStore := func(candidateStores []uint64) uint64 {
+			store := uint64(math.MaxUint64)
+			weightedRegionNum := math.MaxFloat64
+			if candidateStores != nil {
+				for _, storeID := range candidateStores {
+					if _, validStore := storeCandidateRegionMap[storeID]; !validStore {
+						continue
+					}
+					num := float64(len(storeCandidateRegionMap[storeID]))/avgStorePerRegion + float64(len(storeTaskMap[storeID].regionInfos))
+					if num < weightedRegionNum {
+						store = storeID
+						weightedRegionNum = num
+					}
+				}
+				if store != uint64(math.MaxUint64) {
+					return store
+				}
+			}
+			for storeID := range storeTaskMap {
 				if _, validStore := storeCandidateRegionMap[storeID]; !validStore {
 					continue
 				}
@@ -242,19 +260,21 @@ func balanceBatchCopTask(ctx context.Context, kvStore *tikv.KVStore, originalTas
 					weightedRegionNum = num
 				}
 			}
-		if store != uint64(math.MaxUint64) {
-			return store
-		}
+			return store
 	}
-	for storeID := range storeTaskMap {
-		if _, validStore := storeCandidateRegionMap[storeID]; !validStore {
-			continue
+
+		store := findNextStore(nil)
+		for totalRemainingRegionNum > 0 {
+			if store == uint64(math.MaxUint64) {
+				break
 		}
-		num := float64(len(storeCandidateRegionMap[storeID]))/avgStorePerRegion + float64(len(storeTaskMap[storeID].regionInfos))
-		if num < weightedRegionNum {
-			store = storeID
-			weightedRegionNum = num
+			var key string
+			var ri RegionInfo
+			for key, ri = range storeCandidateRegionMap[store] {
+				// get the first region
+				break
 		}
+<<<<<<< HEAD
 	}
 	return store
 }
@@ -278,21 +298,32 @@ func balanceBatchCopTask(ctx context.Context, kvStore *tikv.KVStore, originalTas
 				totalRegionCandidateNum--
 				if len(storeCandidateRegionMap[id]) == 0 {
 					delete(storeCandidateRegionMap, id)
+=======
+			storeTaskMap[store].regionInfos = append(storeTaskMap[store].regionInfos, ri)
+			totalRemainingRegionNum--
+			for _, id := range ri.AllStores {
+				if _, ok := storeCandidateRegionMap[id]; ok {
+					delete(storeCandidateRegionMap[id], key)
+					totalRegionCandidateNum--
+					if len(storeCandidateRegionMap[id]) == 0 {
+						delete(storeCandidateRegionMap, id)
+					}
+>>>>>>> 624f7cab3... copr: Fix bug that mpp node availability detect does not work in some corner cases (#28201)
 				}
 			}
+			if totalRemainingRegionNum > 0 {
+				avgStorePerRegion = float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
+				// it is not optimal because we only check the stores that affected by this region, in fact in order
+				// to find out the store with the lowest weightedRegionNum, all stores should be checked, but I think
+				// check only the affected stores is more simple and will get a good enough result
+				store = findNextStore(ri.AllStores)
+			}
 		}
 		if totalRemainingRegionNum > 0 {
-			avgStorePerRegion = float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
-			// it is not optimal because we only check the stores that affected by this region, in fact in order
-			// to find out the store with the lowest weightedRegionNum, all stores should be checked, but I think
-			// check only the affected stores is more simple and will get a good enough result
-			store = findNextStore(ri.AllStores)
+			logutil.BgLogger().Warn("Some regions are not used when trying to balance batch cop task, give up balancing")
+			return originalTasks
 		}
 	}
-	if totalRemainingRegionNum > 0 {
-		logutil.BgLogger().Warn("Some regions are not used when trying to balance batch cop task, give up balancing")
-		return originalTasks
-	}
 
 	var ret []*batchCopTask
 	for _, task := range storeTaskMap {

From 723572bff7c9f263b538a92b2d66699403432ee1 Mon Sep 17 00:00:00 2001
From: xufei
Date: Thu, 23 Sep 2021 14:40:09 +0800
Subject: [PATCH 2/5] save work

---
 store/copr/batch_coprocessor.go | 73 ++++++++++++---------------------
 1 file changed, 26 insertions(+), 47 deletions(-)

diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go
index c3cd658136e42..74a67fc16e448 100644
--- a/store/copr/batch_coprocessor.go
+++ b/store/copr/batch_coprocessor.go
@@ -102,15 +102,8 @@ func (rs *batchCopResponse) RespTime() time.Duration {
 // 2. for the remaining regions:
 // if there is only 1 available store, then put the region to the related store
 // otherwise, use a greedy algorithm to put it into the store with highest weight
-<<<<<<< HEAD
 func balanceBatchCopTask(ctx context.Context, kvStore *tikv.KVStore, originalTasks []*batchCopTask, mppStoreLastFailTime map[string]time.Time, ttl time.Duration) []*batchCopTask {
 	if len(originalTasks) <= 1 {
-=======
-func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks []*batchCopTask, mppStoreLastFailTime map[string]time.Time, ttl time.Duration) []*batchCopTask {
-	isMPP := mppStoreLastFailTime != nil
-	// for mpp, we still need to detect the store availability
-	if len(originalTasks) <= 1 && !isMPP {
->>>>>>> 624f7cab3... copr: Fix bug that mpp node availability detect does not work in some corner cases (#28201)
 		return originalTasks
 	}
 	cache := kvStore.GetRegionCache()
@@ -274,56 +267,42 @@ func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks []
 			// get the first region
 			break
 		}
-<<<<<<< HEAD
 	}
 	return store
 }
 
 	store := findNextStore(nil)
-	for totalRemainingRegionNum > 0 {
-		if store == uint64(math.MaxUint64) {
-			break
-		}
-		var key string
-		var ri tikv.RegionInfo
-		for key, ri = range storeCandidateRegionMap[store] {
-			// get the first region
-			break
-		}
-		storeTaskMap[store].regionInfos = append(storeTaskMap[store].regionInfos, ri)
-		totalRemainingRegionNum--
-		for _, id := range ri.AllStores {
-			if _, ok := storeCandidateRegionMap[id]; ok {
-				delete(storeCandidateRegionMap[id], key)
-				totalRegionCandidateNum--
-				if len(storeCandidateRegionMap[id]) == 0 {
-					delete(storeCandidateRegionMap, id)
-=======
-			storeTaskMap[store].regionInfos = append(storeTaskMap[store].regionInfos, ri)
-			totalRemainingRegionNum--
-			for _, id := range ri.AllStores {
-				if _, ok := storeCandidateRegionMap[id]; ok {
-					delete(storeCandidateRegionMap[id], key)
-					totalRegionCandidateNum--
-					if len(storeCandidateRegionMap[id]) == 0 {
-						delete(storeCandidateRegionMap, id)
-					}
->>>>>>> 624f7cab3... copr: Fix bug that mpp node availability detect does not work in some corner cases (#28201)
-			}
-		}
-		if totalRemainingRegionNum > 0 {
-			avgStorePerRegion = float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
-			// it is not optimal because we only check the stores that affected by this region, in fact in order
-			// to find out the store with the lowest weightedRegionNum, all stores should be checked, but I think
-			// check only the affected stores is more simple and will get a good enough result
-			store = findNextStore(ri.AllStores)
+	if store == uint64(math.MaxUint64) {
+		break
+	}
+	var key string
+	var ri tikv.RegionInfo
+	for key, ri = range storeCandidateRegionMap[store] {
+		// get the first region
+		break
+	}
+	storeTaskMap[store].regionInfos = append(storeTaskMap[store].regionInfos, ri)
+	totalRemainingRegionNum--
+	for _, id := range ri.AllStores {
+		if _, ok := storeCandidateRegionMap[id]; ok {
+			delete(storeCandidateRegionMap[id], key)
+			totalRegionCandidateNum--
+			if len(storeCandidateRegionMap[id]) == 0 {
+				delete(storeCandidateRegionMap, id)
 			}
 		}
 		if totalRemainingRegionNum > 0 {
+			avgStorePerRegion = float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
+			// it is not optimal because we only check the stores that affected by this region, in fact in order
+			// to find out the store with the lowest weightedRegionNum, all stores should be checked, but I think
+			// check only the affected stores is more simple and will get a good enough result
+			store = findNextStore(ri.AllStores)
 		}
 	}
+	if totalRemainingRegionNum > 0 {
+		logutil.BgLogger().Warn("Some regions are not used when trying to balance batch cop task, give up balancing")
+		return originalTasks
+	}
 
 	var ret []*batchCopTask
 	for _, task := range storeTaskMap {

From 9ed6e17084cbd89a47d8692983c9bb2224c92478 Mon Sep 17 00:00:00 2001
From: xufei
Date: Thu, 23 Sep 2021 14:42:17 +0800
Subject: [PATCH 3/5] Revert "save work"

This reverts commit 723572bff7c9f263b538a92b2d66699403432ee1.

---
 store/copr/batch_coprocessor.go | 73 +++++++++++++++++++++------------
 1 file changed, 47 insertions(+), 26 deletions(-)

diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go
index 74a67fc16e448..c3cd658136e42 100644
--- a/store/copr/batch_coprocessor.go
+++ b/store/copr/batch_coprocessor.go
@@ -102,8 +102,15 @@ func (rs *batchCopResponse) RespTime() time.Duration {
 // 2. for the remaining regions:
 // if there is only 1 available store, then put the region to the related store
 // otherwise, use a greedy algorithm to put it into the store with highest weight
+<<<<<<< HEAD
 func balanceBatchCopTask(ctx context.Context, kvStore *tikv.KVStore, originalTasks []*batchCopTask, mppStoreLastFailTime map[string]time.Time, ttl time.Duration) []*batchCopTask {
 	if len(originalTasks) <= 1 {
+=======
+func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks []*batchCopTask, mppStoreLastFailTime map[string]time.Time, ttl time.Duration) []*batchCopTask {
+	isMPP := mppStoreLastFailTime != nil
+	// for mpp, we still need to detect the store availability
+	if len(originalTasks) <= 1 && !isMPP {
+>>>>>>> 624f7cab3... copr: Fix bug that mpp node availability detect does not work in some corner cases (#28201)
 		return originalTasks
 	}
 	cache := kvStore.GetRegionCache()
@@ -267,42 +274,56 @@ func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks []
 			// get the first region
 			break
 		}
+<<<<<<< HEAD
 	}
 	return store
 }
 
 	store := findNextStore(nil)
-	if store == uint64(math.MaxUint64) {
-		break
-	}
-	var key string
-	var ri tikv.RegionInfo
-	for key, ri = range storeCandidateRegionMap[store] {
-		// get the first region
-		break
-	}
-	storeTaskMap[store].regionInfos = append(storeTaskMap[store].regionInfos, ri)
-	totalRemainingRegionNum--
-	for _, id := range ri.AllStores {
-		if _, ok := storeCandidateRegionMap[id]; ok {
-			delete(storeCandidateRegionMap[id], key)
-			totalRegionCandidateNum--
-			if len(storeCandidateRegionMap[id]) == 0 {
-				delete(storeCandidateRegionMap, id)
+	for totalRemainingRegionNum > 0 {
+		if store == uint64(math.MaxUint64) {
+			break
+		}
+		var key string
+		var ri tikv.RegionInfo
+		for key, ri = range storeCandidateRegionMap[store] {
+			// get the first region
+			break
+		}
+		storeTaskMap[store].regionInfos = append(storeTaskMap[store].regionInfos, ri)
+		totalRemainingRegionNum--
+		for _, id := range ri.AllStores {
+			if _, ok := storeCandidateRegionMap[id]; ok {
+				delete(storeCandidateRegionMap[id], key)
+				totalRegionCandidateNum--
+				if len(storeCandidateRegionMap[id]) == 0 {
+					delete(storeCandidateRegionMap, id)
+=======
+			storeTaskMap[store].regionInfos = append(storeTaskMap[store].regionInfos, ri)
+			totalRemainingRegionNum--
+			for _, id := range ri.AllStores {
+				if _, ok := storeCandidateRegionMap[id]; ok {
+					delete(storeCandidateRegionMap[id], key)
+					totalRegionCandidateNum--
+					if len(storeCandidateRegionMap[id]) == 0 {
+						delete(storeCandidateRegionMap, id)
+					}
+>>>>>>> 624f7cab3... copr: Fix bug that mpp node availability detect does not work in some corner cases (#28201)
+			}
+		}
+		if totalRemainingRegionNum > 0 {
+			avgStorePerRegion = float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
+			// it is not optimal because we only check the stores that affected by this region, in fact in order
+			// to find out the store with the lowest weightedRegionNum, all stores should be checked, but I think
+			// check only the affected stores is more simple and will get a good enough result
+			store = findNextStore(ri.AllStores)
 		}
 	}
 	if totalRemainingRegionNum > 0 {
-		avgStorePerRegion = float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
-		// it is not optimal because we only check the stores that affected by this region, in fact in order
-		// to find out the store with the lowest weightedRegionNum, all stores should be checked, but I think
-		// check only the affected stores is more simple and will get a good enough result
-		store = findNextStore(ri.AllStores)
+		logutil.BgLogger().Warn("Some regions are not used when trying to balance batch cop task, give up balancing")
+		return originalTasks
 	}
 }
-	if totalRemainingRegionNum > 0 {
-		logutil.BgLogger().Warn("Some regions are not used when trying to balance batch cop task, give up balancing")
-		return originalTasks
-	}
 
 	var ret []*batchCopTask
 	for _, task := range storeTaskMap {

From 68150297b285f9259587c45296864b4a88b5e962 Mon Sep 17 00:00:00 2001
From: xufei
Date: Thu, 23 Sep 2021 14:42:28 +0800
Subject: [PATCH 4/5] Revert "cherry pick #28201 to release-5.0"

This reverts commit 6542593fbd62ae8a9db7b2a8d31c75a2163a8dce.

---
 store/copr/batch_coprocessor.go | 89 +++++++++++----------------------
 1 file changed, 29 insertions(+), 60 deletions(-)

diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go
index c3cd658136e42..43c3ac90703b0 100644
--- a/store/copr/batch_coprocessor.go
+++ b/store/copr/batch_coprocessor.go
@@ -102,17 +102,11 @@ func (rs *batchCopResponse) RespTime() time.Duration {
 // 2. for the remaining regions:
 // if there is only 1 available store, then put the region to the related store
 // otherwise, use a greedy algorithm to put it into the store with highest weight
-<<<<<<< HEAD
 func balanceBatchCopTask(ctx context.Context, kvStore *tikv.KVStore, originalTasks []*batchCopTask, mppStoreLastFailTime map[string]time.Time, ttl time.Duration) []*batchCopTask {
 	if len(originalTasks) <= 1 {
-=======
-func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks []*batchCopTask, mppStoreLastFailTime map[string]time.Time, ttl time.Duration) []*batchCopTask {
-	isMPP := mppStoreLastFailTime != nil
-	// for mpp, we still need to detect the store availability
-	if len(originalTasks) <= 1 && !isMPP {
->>>>>>> 624f7cab3... copr: Fix bug that mpp node availability detect does not work in some corner cases (#28201)
 		return originalTasks
 	}
+	isMPP := mppStoreLastFailTime != nil
 	cache := kvStore.GetRegionCache()
 	storeTaskMap := make(map[uint64]*batchCopTask)
 	// storeCandidateRegionMap stores all the possible store->region map. Its content is
@@ -229,28 +223,16 @@ func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks []
 			}
 		}
 	}
 
+	if totalRemainingRegionNum == 0 {
+		return originalTasks
+	}
-	if totalRemainingRegionNum > 0 {
-		avgStorePerRegion := float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
-		findNextStore := func(candidateStores []uint64) uint64 {
-			store := uint64(math.MaxUint64)
-			weightedRegionNum := math.MaxFloat64
-			if candidateStores != nil {
-				for _, storeID := range candidateStores {
-					if _, validStore := storeCandidateRegionMap[storeID]; !validStore {
-						continue
-					}
-					num := float64(len(storeCandidateRegionMap[storeID]))/avgStorePerRegion + float64(len(storeTaskMap[storeID].regionInfos))
-					if num < weightedRegionNum {
-						store = storeID
-						weightedRegionNum = num
-					}
-				}
-				if store != uint64(math.MaxUint64) {
-					return store
-				}
-			}
-			for storeID := range storeTaskMap {
+	avgStorePerRegion := float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
+	findNextStore := func(candidateStores []uint64) uint64 {
+		store := uint64(math.MaxUint64)
+		weightedRegionNum := math.MaxFloat64
+		if candidateStores != nil {
+			for _, storeID := range candidateStores {
 				if _, validStore := storeCandidateRegionMap[storeID]; !validStore {
 					continue
 				}
@@ -260,21 +242,19 @@ func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks []
 					weightedRegionNum = num
 				}
 			}
-			return store
+		if store != uint64(math.MaxUint64) {
+			return store
+		}
 	}
-
-		store := findNextStore(nil)
-		for totalRemainingRegionNum > 0 {
-			if store == uint64(math.MaxUint64) {
-				break
+	for storeID := range storeTaskMap {
+		if _, validStore := storeCandidateRegionMap[storeID]; !validStore {
+			continue
 		}
-			var key string
-			var ri RegionInfo
-			for key, ri = range storeCandidateRegionMap[store] {
-				// get the first region
-				break
+		num := float64(len(storeCandidateRegionMap[storeID]))/avgStorePerRegion + float64(len(storeTaskMap[storeID].regionInfos))
+		if num < weightedRegionNum {
+			store = storeID
+			weightedRegionNum = num
 		}
-<<<<<<< HEAD
 	}
 	return store
 }
@@ -298,32 +278,21 @@ func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks []
 				totalRegionCandidateNum--
 				if len(storeCandidateRegionMap[id]) == 0 {
 					delete(storeCandidateRegionMap, id)
-=======
-			storeTaskMap[store].regionInfos = append(storeTaskMap[store].regionInfos, ri)
-			totalRemainingRegionNum--
-			for _, id := range ri.AllStores {
-				if _, ok := storeCandidateRegionMap[id]; ok {
-					delete(storeCandidateRegionMap[id], key)
-					totalRegionCandidateNum--
-					if len(storeCandidateRegionMap[id]) == 0 {
-						delete(storeCandidateRegionMap, id)
-					}
->>>>>>> 624f7cab3... copr: Fix bug that mpp node availability detect does not work in some corner cases (#28201)
 				}
 			}
-			if totalRemainingRegionNum > 0 {
-				avgStorePerRegion = float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
-				// it is not optimal because we only check the stores that affected by this region, in fact in order
-				// to find out the store with the lowest weightedRegionNum, all stores should be checked, but I think
-				// check only the affected stores is more simple and will get a good enough result
-				store = findNextStore(ri.AllStores)
-			}
 		}
 		if totalRemainingRegionNum > 0 {
-			logutil.BgLogger().Warn("Some regions are not used when trying to balance batch cop task, give up balancing")
-			return originalTasks
+			avgStorePerRegion = float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
+			// it is not optimal because we only check the stores that affected by this region, in fact in order
+			// to find out the store with the lowest weightedRegionNum, all stores should be checked, but I think
+			// check only the affected stores is more simple and will get a good enough result
+			store = findNextStore(ri.AllStores)
 		}
 	}
+	if totalRemainingRegionNum > 0 {
+		logutil.BgLogger().Warn("Some regions are not used when trying to balance batch cop task, give up balancing")
+		return originalTasks
+	}
 
 	var ret []*batchCopTask
 	for _, task := range storeTaskMap {

From c2b15400419169080a0d75882bca5a8feeb08d8e Mon Sep 17 00:00:00 2001
From: xufei
Date: Thu, 23 Sep 2021 14:46:48 +0800
Subject: [PATCH 5/5] cherry pick #28201

---
 store/copr/batch_coprocessor.go | 106 ++++++++++++++++----------------
 1 file changed, 53 insertions(+), 53 deletions(-)

diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go
index 43c3ac90703b0..eb01ebbbbdaa6 100644
--- a/store/copr/batch_coprocessor.go
+++ b/store/copr/batch_coprocessor.go
@@ -103,10 +103,11 @@ func (rs *batchCopResponse) RespTime() time.Duration {
 // if there is only 1 available store, then put the region to the related store
 // otherwise, use a greedy algorithm to put it into the store with highest weight
 func balanceBatchCopTask(ctx context.Context, kvStore *tikv.KVStore, originalTasks []*batchCopTask, mppStoreLastFailTime map[string]time.Time, ttl time.Duration) []*batchCopTask {
-	if len(originalTasks) <= 1 {
+	isMPP := mppStoreLastFailTime != nil
+	// for MPP, we still need to detect the store availability
+	if len(originalTasks) <= 1 && !isMPP {
 		return originalTasks
 	}
-	isMPP := mppStoreLastFailTime != nil
 	cache := kvStore.GetRegionCache()
 	storeTaskMap := make(map[uint64]*batchCopTask)
 	// storeCandidateRegionMap stores all the possible store->region map. Its content is
@@ -223,16 +224,28 @@ func balanceBatchCopTask(ctx context.Context, kvStore *tikv.KVStore, originalTas
 			}
 		}
 	}
 
-	if totalRemainingRegionNum == 0 {
-		return originalTasks
-	}
-	avgStorePerRegion := float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
-	findNextStore := func(candidateStores []uint64) uint64 {
-		store := uint64(math.MaxUint64)
-		weightedRegionNum := math.MaxFloat64
-		if candidateStores != nil {
-			for _, storeID := range candidateStores {
+	if totalRemainingRegionNum > 0 {
+		avgStorePerRegion := float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
+		findNextStore := func(candidateStores []uint64) uint64 {
+			store := uint64(math.MaxUint64)
+			weightedRegionNum := math.MaxFloat64
+			if candidateStores != nil {
+				for _, storeID := range candidateStores {
+					if _, validStore := storeCandidateRegionMap[storeID]; !validStore {
+						continue
+					}
+					num := float64(len(storeCandidateRegionMap[storeID]))/avgStorePerRegion + float64(len(storeTaskMap[storeID].regionInfos))
+					if num < weightedRegionNum {
+						store = storeID
+						weightedRegionNum = num
+					}
+				}
+				if store != uint64(math.MaxUint64) {
+					return store
+				}
+			}
+			for storeID := range storeTaskMap {
 				if _, validStore := storeCandidateRegionMap[storeID]; !validStore {
 					continue
 				}
@@ -242,57 +255,44 @@ func balanceBatchCopTask(ctx context.Context, kvStore *tikv.KVStore, originalTas
 					weightedRegionNum = num
 				}
 			}
-		if store != uint64(math.MaxUint64) {
-			return store
-		}
+			return store
 	}
-	for storeID := range storeTaskMap {
-		if _, validStore := storeCandidateRegionMap[storeID]; !validStore {
-			continue
+
+		store := findNextStore(nil)
+		for totalRemainingRegionNum > 0 {
+			if store == uint64(math.MaxUint64) {
+				break
 		}
-		num := float64(len(storeCandidateRegionMap[storeID]))/avgStorePerRegion + float64(len(storeTaskMap[storeID].regionInfos))
-		if num < weightedRegionNum {
-			store = storeID
-			weightedRegionNum = num
+			var key string
+			var ri tikv.RegionInfo
+			for key, ri = range storeCandidateRegionMap[store] {
+				// get the first region
+				break
 		}
-	}
-	return store
-	}
-
-	store := findNextStore(nil)
-	for totalRemainingRegionNum > 0 {
-		if store == uint64(math.MaxUint64) {
-			break
-		}
-		var key string
-		var ri tikv.RegionInfo
-		for key, ri = range storeCandidateRegionMap[store] {
-			// get the first region
-			break
-		}
-		storeTaskMap[store].regionInfos = append(storeTaskMap[store].regionInfos, ri)
-		totalRemainingRegionNum--
-		for _, id := range ri.AllStores {
-			if _, ok := storeCandidateRegionMap[id]; ok {
-				delete(storeCandidateRegionMap[id], key)
-				totalRegionCandidateNum--
-				if len(storeCandidateRegionMap[id]) == 0 {
-					delete(storeCandidateRegionMap, id)
+			storeTaskMap[store].regionInfos = append(storeTaskMap[store].regionInfos, ri)
+			totalRemainingRegionNum--
+			for _, id := range ri.AllStores {
+				if _, ok := storeCandidateRegionMap[id]; ok {
+					delete(storeCandidateRegionMap[id], key)
+					totalRegionCandidateNum--
+					if len(storeCandidateRegionMap[id]) == 0 {
+						delete(storeCandidateRegionMap, id)
+					}
 				}
 			}
+			if totalRemainingRegionNum > 0 {
+				avgStorePerRegion = float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
+				// it is not optimal because we only check the stores affected by this region; in fact, in order
+				// to find the store with the lowest weightedRegionNum, all stores should be checked, but
+				// checking only the affected stores is simpler and gives a good enough result
+				store = findNextStore(ri.AllStores)
+			}
 		}
 		if totalRemainingRegionNum > 0 {
-			avgStorePerRegion = float64(totalRegionCandidateNum) / float64(totalRemainingRegionNum)
-			// it is not optimal because we only check the stores that affected by this region, in fact in order
-			// to find out the store with the lowest weightedRegionNum, all stores should be checked, but I think
-			// check only the affected stores is more simple and will get a good enough result
-			store = findNextStore(ri.AllStores)
+			logutil.BgLogger().Warn("Some regions are not used when trying to balance batch cop task, give up balancing")
+			return originalTasks
 		}
 	}
-	if totalRemainingRegionNum > 0 {
-		logutil.BgLogger().Warn("Some regions are not used when trying to balance batch cop task, give up balancing")
-		return originalTasks
-	}
 
 	var ret []*batchCopTask
 	for _, task := range storeTaskMap {
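
The functional core of this series is the guard change that PATCH 5 finally lands: balanceBatchCopTask used to return immediately when given at most one task, which also skipped the MPP store-availability detection performed later in the function; the fix only takes that shortcut when the caller is not in MPP mode, signalled by a non-nil mppStoreLastFailTime map. A minimal, self-contained Go sketch of just that guard, using hypothetical simplified types rather than the real store/copr ones:

package main

import (
	"fmt"
	"time"
)

// batchCopTask is a stand-in for the real task type; only the shape matters here.
type batchCopTask struct {
	storeAddr string
}

// balanceSketch mimics only the entry check of balanceBatchCopTask.
// A non-nil mppStoreLastFailTime marks an MPP caller, as in the patch.
func balanceSketch(tasks []*batchCopTask, mppStoreLastFailTime map[string]time.Time) []*batchCopTask {
	isMPP := mppStoreLastFailTime != nil
	// For MPP the body must not be skipped: store availability detection
	// lives there and has to run even when there is nothing to balance.
	if len(tasks) <= 1 && !isMPP {
		return tasks
	}
	// ... availability detection and the balancing pass would run here ...
	return tasks
}

func main() {
	one := []*batchCopTask{{storeAddr: "tiflash-0"}}
	fmt.Println(len(balanceSketch(one, nil)))                    // batch cop path: early return
	fmt.Println(len(balanceSketch(one, map[string]time.Time{}))) // MPP path: full body runs
}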
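
The balancing pass that the later hunks re-indent under "if totalRemainingRegionNum > 0" is a greedy assignment: for each store, weightedRegionNum is its number of remaining candidate regions divided by the average candidates per region, plus the number of regions already assigned to it, and the store with the lowest weightedRegionNum receives the next region. The sketch below reproduces that loop over simplified map-based bookkeeping; greedyBalance, candidates, and assigned are illustrative stand-ins for storeCandidateRegionMap and storeTaskMap, not the real API:

package main

import (
	"fmt"
	"math"
)

// regionInfo is a simplified stand-in: a region key plus the IDs of all
// stores that could serve the region.
type regionInfo struct {
	key       string
	allStores []uint64
}

// greedyBalance distributes the remaining candidate regions across stores.
// candidates maps store ID -> (region key -> region); assigned must hold an
// entry (possibly nil) for every store, mirroring storeTaskMap in the patch.
// It returns false when some regions could not be placed, the case where the
// patch logs a warning and gives up balancing.
func greedyBalance(candidates map[uint64]map[string]regionInfo, assigned map[uint64][]regionInfo, remaining, totalCandidates int) bool {
	avg := float64(totalCandidates) / float64(remaining)
	findNextStore := func(affected []uint64) uint64 {
		best, bestNum := uint64(math.MaxUint64), math.MaxFloat64
		scan := func(id uint64) {
			if cand, ok := candidates[id]; ok {
				// Weight: pending candidates normalized by the average,
				// plus the work already assigned to this store.
				num := float64(len(cand))/avg + float64(len(assigned[id]))
				if num < bestNum {
					best, bestNum = id, num
				}
			}
		}
		for _, id := range affected { // cheap pass over affected stores first
			scan(id)
		}
		if best == uint64(math.MaxUint64) {
			for id := range assigned { // fall back to scanning every store
				scan(id)
			}
		}
		return best
	}

	store := findNextStore(nil)
	for remaining > 0 {
		if store == uint64(math.MaxUint64) {
			return false
		}
		var key string
		var ri regionInfo
		for key, ri = range candidates[store] { // take the first candidate region
			break
		}
		assigned[store] = append(assigned[store], ri)
		remaining--
		// The placed region stops being a candidate everywhere.
		for _, id := range ri.allStores {
			if cand, ok := candidates[id]; ok {
				delete(cand, key)
				totalCandidates--
				if len(cand) == 0 {
					delete(candidates, id)
				}
			}
		}
		if remaining > 0 {
			avg = float64(totalCandidates) / float64(remaining)
			// As the patch's own comment concedes, rescanning only the
			// stores touched by this region is not optimal, but it is
			// simpler and good enough in practice.
			store = findNextStore(ri.allStores)
		}
	}
	return true
}

func main() {
	candidates := map[uint64]map[string]regionInfo{
		1: {"r1": {key: "r1", allStores: []uint64{1, 2}}},
		2: {"r1": {key: "r1", allStores: []uint64{1, 2}}, "r2": {key: "r2", allStores: []uint64{2}}},
	}
	assigned := map[uint64][]regionInfo{1: nil, 2: nil}
	fmt.Println(greedyBalance(candidates, assigned, 2, 3)) // true: both regions placed
	fmt.Println(len(assigned[1]), len(assigned[2]))        // 1 1
}

One deliberate simplification: the real code keys candidate maps by the region cache key and carries tikv.RegionInfo values from the client-go layer, while the weighting and the delete-from-all-stores bookkeeping are carried over unchanged.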