From da95f0e3856399105859b252036dd18bab167663 Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Tue, 30 Apr 2019 14:37:25 +0800
Subject: [PATCH] tiny clean up code (#1526)

Signed-off-by: Ryan Leung
---
 server/config.go                    |  6 +++---
 server/coordinator.go               |  9 +++++----
 server/leader.go                    | 12 ++++++------
 server/schedule/hot_cache.go        |  2 +-
 server/schedule/merge_checker.go    |  2 +-
 server/schedule/operator.go         |  4 ++--
 server/schedule/opts.go             |  2 +-
 server/schedule/replica_checker.go  |  2 +-
 server/schedule/scheduler.go        |  2 +-
 server/schedulers/balance_leader.go |  2 +-
 server/schedulers/balance_region.go |  9 +++++----
 server/util.go                      |  1 +
 12 files changed, 28 insertions(+), 25 deletions(-)

diff --git a/server/config.go b/server/config.go
index a4858a6d197..e7d9879bddd 100644
--- a/server/config.go
+++ b/server/config.go
@@ -61,7 +61,7 @@ type Config struct {
     // LeaderLease time, if leader doesn't update its TTL
     // in etcd after lease time, etcd will expire the leader key
     // and other servers can campaign the leader again.
-    // Etcd onlys support seoncds TTL, so here is second too.
+    // Etcd only supports seconds TTL, so here is second too.
     LeaderLease int64 `toml:"lease" json:"lease"`
 
     // Log related config.
@@ -495,7 +495,7 @@ type ScheduleConfig struct {
     // removing down replicas.
     DisableRemoveDownReplica bool `toml:"disable-remove-down-replica" json:"disable-remove-down-replica,string"`
     // DisableReplaceOfflineReplica is the option to prevent replica checker from
-    // repalcing offline replicas.
+    // replacing offline replicas.
     DisableReplaceOfflineReplica bool `toml:"disable-replace-offline-replica" json:"disable-replace-offline-replica,string"`
     // DisableMakeUpReplica is the option to prevent replica checker from making up
     // replicas when replica count is less than expected.
@@ -510,7 +510,7 @@ type ScheduleConfig struct {
     // from moving replica to the target namespace.
     DisableNamespaceRelocation bool `toml:"disable-namespace-relocation" json:"disable-namespace-relocation,string"`
 
-    // Schedulers support for loding customized schedulers
+    // Schedulers support for loading customized schedulers
     Schedulers SchedulerConfigs `toml:"schedulers,omitempty" json:"schedulers-v2"` // json v2 is for the sake of compatible upgrade
 }
 
diff --git a/server/coordinator.go b/server/coordinator.go
index b8b75cbaa33..df2463eecd6 100644
--- a/server/coordinator.go
+++ b/server/coordinator.go
@@ -91,7 +91,7 @@ func (c *coordinator) patrolRegions() {
     timer := time.NewTimer(c.cluster.GetPatrolRegionInterval())
     defer timer.Stop()
 
-    log.Info("coordinator: start patrol regions")
+    log.Info("coordinator starts patrol regions")
     start := time.Now()
     var key []byte
     for {
@@ -180,19 +180,20 @@ func (c *coordinator) checkRegion(region *core.RegionInfo) bool {
 func (c *coordinator) run() {
     ticker := time.NewTicker(runSchedulerCheckInterval)
     defer ticker.Stop()
-    log.Info("coordinator: Start collect cluster information")
+    log.Info("coordinator starts to collect cluster information")
     for {
         if c.shouldRun() {
-            log.Info("coordinator: Cluster information is prepared")
+            log.Info("coordinator has finished cluster information preparation")
             break
         }
         select {
         case <-ticker.C:
         case <-c.ctx.Done():
+            log.Info("coordinator stops running")
             return
         }
     }
-    log.Info("coordinator: Run scheduler")
+    log.Info("coordinator starts to run schedulers")
 
     k := 0
     scheduleCfg := c.cluster.opt.load().clone()
diff --git a/server/leader.go b/server/leader.go
index 27cf6912add..fcc03d401eb 100644
--- a/server/leader.go
+++ b/server/leader.go
@@ -140,7 +140,7 @@ func (s *Server) etcdLeaderLoop() {
         }
         leaderPriority, err := s.GetMemberLeaderPriority(etcdLeader)
         if err != nil {
-            log.Error("failed to load leader priority", zap.Error(err))
+            log.Error("failed to load etcd leader priority", zap.Error(err))
             break
         }
         if myPriority > leaderPriority {
@@ -201,7 +201,7 @@ func (s *Server) memberInfo() (member *pdpb.Member, marshalStr string) {
 }
 
 func (s *Server) campaignLeader() error {
-    log.Debug("begin to campaign leader", zap.String("campaign-leader-name", s.Name()))
+    log.Info("start to campaign leader", zap.String("campaign-leader-name", s.Name()))
 
     lessor := clientv3.NewLease(s.client)
     defer func() {
@@ -232,7 +232,7 @@ func (s *Server) campaignLeader() error {
         return errors.WithStack(err)
     }
     if !resp.Succeeded {
-        return errors.New("campaign leader failed, other server may campaign ok")
+        return errors.New("failed to campaign leader, other server may campaign ok")
     }
 
     // Make the leader keepalived.
@@ -243,7 +243,7 @@ func (s *Server) campaignLeader() error {
     if err != nil {
         return errors.WithStack(err)
     }
-    log.Debug("campaign leader ok", zap.String("campaign-leader-name", s.Name()))
+    log.Info("campaign leader ok", zap.String("campaign-leader-name", s.Name()))
 
     err = s.reloadConfigFromKV()
     if err != nil {
@@ -267,9 +267,8 @@ func (s *Server) campaignLeader() error {
     s.enableLeader()
     defer s.disableLeader()
 
-    log.Info("load cluster version", zap.Stringer("cluster-version", s.scheduleOpt.loadClusterVersion()))
-    log.Info("PD cluster leader is ready to serve", zap.String("leader-name", s.Name()))
     CheckPDVersion(s.scheduleOpt)
+    log.Info("PD cluster leader is ready to serve", zap.String("leader-name", s.Name()))
 
     tsTicker := time.NewTicker(updateTimestampStep)
     defer tsTicker.Stop()
@@ -283,6 +282,7 @@ func (s *Server) campaignLeader() error {
             }
         case <-tsTicker.C:
             if err = s.updateTimestamp(); err != nil {
+                log.Info("failed to update timestamp")
                 return err
             }
             etcdLeader := s.GetEtcdLeader()
diff --git a/server/schedule/hot_cache.go b/server/schedule/hot_cache.go
index 722264ca852..0714457c3ee 100644
--- a/server/schedule/hot_cache.go
+++ b/server/schedule/hot_cache.go
@@ -147,7 +147,7 @@ func (w *HotSpotCache) isNeedUpdateStatCache(region *core.RegionInfo, flowBytes
         newItem.Stats.Add(float64(flowBytes))
         return true, newItem
     }
-    // smaller than hotReionThreshold
+    // smaller than hotRegionThreshold
     if oldItem == nil {
         return false, newItem
     }
diff --git a/server/schedule/merge_checker.go b/server/schedule/merge_checker.go
index ffa6abeaf4f..f5f92730044 100644
--- a/server/schedule/merge_checker.go
+++ b/server/schedule/merge_checker.go
@@ -119,7 +119,7 @@ func (m *MergeChecker) Check(region *core.RegionInfo) []*Operator {
 }
 
 func (m *MergeChecker) checkTarget(region, adjacent, target *core.RegionInfo) *core.RegionInfo {
-    // if is not hot region and under same namesapce
+    // if is not hot region and under same namespace
     if adjacent != nil && !m.cluster.IsRegionHot(adjacent.GetID()) &&
         m.classifier.AllowMerge(region, adjacent) && len(adjacent.GetDownPeers()) == 0 &&
         len(adjacent.GetPendingPeers()) == 0 && len(adjacent.GetLearners()) == 0 {
diff --git a/server/schedule/operator.go b/server/schedule/operator.go
index 821e7958bcb..3b1ed809662 100644
--- a/server/schedule/operator.go
+++ b/server/schedule/operator.go
@@ -180,7 +180,7 @@ type MergeRegion struct {
     // so to keep them from other scheduler,
     // both of them should add MerRegion operatorStep.
     // But actually, tikv just need the region want to be merged to get the merge request,
-    // thus use a IsPssive mark to indicate that
+    // thus use a IsPassive mark to indicate that
     // this region doesn't need to send merge request to tikv.
     IsPassive bool
 }
@@ -299,7 +299,7 @@ func (o *Operator) RegionID() uint64 {
     return o.regionID
 }
 
-// RegionEpoch returns the region's epoch that is attched to the operator.
+// RegionEpoch returns the region's epoch that is attached to the operator.
 func (o *Operator) RegionEpoch() *metapb.RegionEpoch {
     return o.regionEpoch
 }
diff --git a/server/schedule/opts.go b/server/schedule/opts.go
index 5492ef3f20d..6365e93c152 100644
--- a/server/schedule/opts.go
+++ b/server/schedule/opts.go
@@ -68,7 +68,7 @@ type NamespaceOptions interface {
 }
 
 const (
-    // RejectLeader is the label property type that sugguests a store should not
+    // RejectLeader is the label property type that suggests a store should not
     // have any region leaders.
     RejectLeader = "reject-leader"
 )
diff --git a/server/schedule/replica_checker.go b/server/schedule/replica_checker.go
index e85d8b611d6..a1f9986dd96 100644
--- a/server/schedule/replica_checker.go
+++ b/server/schedule/replica_checker.go
@@ -27,7 +27,7 @@ import (
 // Including the following:
 // Replica number management.
 // Unhealth replica management, mainly used for disaster recovery of TiKV.
-// Location management, mainly used for corss data center deployment.
+// Location management, mainly used for cross data center deployment.
 type ReplicaChecker struct {
     cluster    Cluster
     classifier namespace.Classifier
diff --git a/server/schedule/scheduler.go b/server/schedule/scheduler.go
index 370a153b892..8a893eb8890 100644
--- a/server/schedule/scheduler.go
+++ b/server/schedule/scheduler.go
@@ -68,7 +68,7 @@ type Scheduler interface {
     IsScheduleAllowed(cluster Cluster) bool
 }
 
-// CreateSchedulerFunc is for creating scheudler.
+// CreateSchedulerFunc is for creating scheduler.
 type CreateSchedulerFunc func(opController *OperatorController, args []string) (Scheduler, error)
 
 var schedulerMap = make(map[string]CreateSchedulerFunc)
diff --git a/server/schedulers/balance_leader.go b/server/schedulers/balance_leader.go
index 45a1898d8dc..32f7abaf786 100644
--- a/server/schedulers/balance_leader.go
+++ b/server/schedulers/balance_leader.go
@@ -176,7 +176,7 @@ func (l *balanceLeaderScheduler) createOperator(region *core.RegionInfo, source,
     sourceID := source.GetID()
     targetID := target.GetID()
     if !shouldBalance(cluster, source, target, region, core.LeaderKind, opInfluence) {
-        log.Debug("skip balance region",
+        log.Debug("skip balance leader",
             zap.String("scheduler", l.GetName()), zap.Uint64("region-id", regionID), zap.Uint64("source-store", sourceID), zap.Uint64("target-store", targetID),
             zap.Int64("source-size", source.GetLeaderSize()), zap.Float64("source-score", source.LeaderScore(0)),
             zap.Int64("source-influence", opInfluence.GetStoreInfluence(sourceID).ResourceSize(core.LeaderKind)),
diff --git a/server/schedulers/balance_region.go b/server/schedulers/balance_region.go
index 4950fcb33bf..72e8807b469 100644
--- a/server/schedulers/balance_region.go
+++ b/server/schedulers/balance_region.go
@@ -179,15 +179,16 @@ func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *
         schedulerCounter.WithLabelValues(s.GetName(), "no_peer").Inc()
         return nil
     }
-    sourceLabel := strconv.FormatUint(sourceID, 10)
-    targetLabel := strconv.FormatUint(targetID, 10)
-    balanceRegionCounter.WithLabelValues("move_peer", source.GetAddress()+"-out", sourceLabel).Inc()
-    balanceRegionCounter.WithLabelValues("move_peer", target.GetAddress()+"-in", targetLabel).Inc()
+
     op, err := schedule.CreateMovePeerOperator("balance-region", cluster, region, schedule.OpBalance, oldPeer.GetStoreId(), newPeer.GetStoreId(), newPeer.GetId())
     if err != nil {
         schedulerCounter.WithLabelValues(s.GetName(), "create_operator_fail").Inc()
         return nil
     }
+    sourceLabel := strconv.FormatUint(sourceID, 10)
+    targetLabel := strconv.FormatUint(targetID, 10)
+    balanceRegionCounter.WithLabelValues("move_peer", source.GetAddress()+"-out", sourceLabel).Inc()
+    balanceRegionCounter.WithLabelValues("move_peer", target.GetAddress()+"-in", targetLabel).Inc()
     return op
 }
 
diff --git a/server/util.go b/server/util.go
index 534eee73235..6bf444f1699 100644
--- a/server/util.go
+++ b/server/util.go
@@ -78,6 +78,7 @@ func CheckPDVersion(opt *scheduleOption) {
         pdVersion = *MustParseVersion(PDReleaseVersion)
     }
     clusterVersion := opt.loadClusterVersion()
log.Info("load cluster version", zap.Stringer("cluster-version", clusterVersion)) if pdVersion.LessThan(clusterVersion) { log.Warn( "PD version less than cluster version, please upgrade PD",