*: tiny clean up code #1526

Merged
merged 1 commit on Apr 30, 2019
6 changes: 3 additions & 3 deletions server/config.go
@@ -61,7 +61,7 @@ type Config struct {
// LeaderLease time, if leader doesn't update its TTL
// in etcd after lease time, etcd will expire the leader key
// and other servers can campaign the leader again.
// Etcd onlys support seoncds TTL, so here is second too.
// Etcd only supports seconds TTL, so here is second too.
LeaderLease int64 `toml:"lease" json:"lease"`

// Log related config.
@@ -495,7 +495,7 @@ type ScheduleConfig struct {
// removing down replicas.
DisableRemoveDownReplica bool `toml:"disable-remove-down-replica" json:"disable-remove-down-replica,string"`
// DisableReplaceOfflineReplica is the option to prevent replica checker from
// repalcing offline replicas.
// replacing offline replicas.
DisableReplaceOfflineReplica bool `toml:"disable-replace-offline-replica" json:"disable-replace-offline-replica,string"`
// DisableMakeUpReplica is the option to prevent replica checker from making up
// replicas when replica count is less than expected.
@@ -510,7 +510,7 @@ type ScheduleConfig struct {
// from moving replica to the target namespace.
DisableNamespaceRelocation bool `toml:"disable-namespace-relocation" json:"disable-namespace-relocation,string"`

// Schedulers support for loding customized schedulers
// Schedulers support for loading customized schedulers
Schedulers SchedulerConfigs `toml:"schedulers,omitempty" json:"schedulers-v2"` // json v2 is for the sake of compatible upgrade
}

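For context on the LeaderLease comment in the config.go hunk above: etcd lease TTLs only have second granularity, and a leader key bound to a lease disappears once the leader stops renewing it, which lets other members campaign again. Below is a minimal sketch of that mechanism against etcd clientv3, not PD's actual code; the endpoints, key, value, 3-second TTL, and the clientv3 import path (which varies across etcd client versions) are assumptions.

```go
// Minimal sketch, not PD's code: how a seconds-granularity LeaderLease value
// maps onto an etcd lease. Endpoints, key, value and the TTL are illustrative.
package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 3 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	var leaderLease int64 = 3 // seconds; etcd lease TTLs only have second granularity
	lessor := clientv3.NewLease(cli)
	grant, err := lessor.Grant(context.Background(), leaderLease)
	if err != nil {
		log.Fatal(err)
	}

	// A key bound to this lease disappears once the lease expires, which is
	// what allows the other members to campaign for leadership again.
	if _, err := cli.Put(context.Background(), "/pd/leader", "member-info",
		clientv3.WithLease(grant.ID)); err != nil {
		log.Fatal(err)
	}
}
```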
9 changes: 5 additions & 4 deletions server/coordinator.go
@@ -91,7 +91,7 @@ func (c *coordinator) patrolRegions() {
timer := time.NewTimer(c.cluster.GetPatrolRegionInterval())
defer timer.Stop()

log.Info("coordinator: start patrol regions")
log.Info("coordinator starts patrol regions")
start := time.Now()
var key []byte
for {
@@ -180,19 +180,20 @@ func (c *coordinator) checkRegion(region *core.RegionInfo) bool {
func (c *coordinator) run() {
ticker := time.NewTicker(runSchedulerCheckInterval)
defer ticker.Stop()
log.Info("coordinator: Start collect cluster information")
log.Info("coordinator starts to collect cluster information")
for {
if c.shouldRun() {
log.Info("coordinator: Cluster information is prepared")
log.Info("coordinator has finished cluster information preparation")
break
}
select {
case <-ticker.C:
case <-c.ctx.Done():
log.Info("coordinator stops running")
return
}
}
log.Info("coordinator: Run scheduler")
log.Info("coordinator starts to run schedulers")

k := 0
scheduleCfg := c.cluster.opt.load().clone()
12 changes: 6 additions & 6 deletions server/leader.go
@@ -140,7 +140,7 @@ func (s *Server) etcdLeaderLoop() {
}
leaderPriority, err := s.GetMemberLeaderPriority(etcdLeader)
if err != nil {
log.Error("failed to load leader priority", zap.Error(err))
log.Error("failed to load etcd leader priority", zap.Error(err))
break
}
if myPriority > leaderPriority {
@@ -201,7 +201,7 @@ func (s *Server) memberInfo() (member *pdpb.Member, marshalStr string) {
}

func (s *Server) campaignLeader() error {
log.Debug("begin to campaign leader", zap.String("campaign-leader-name", s.Name()))
log.Info("start to campaign leader", zap.String("campaign-leader-name", s.Name()))

lessor := clientv3.NewLease(s.client)
defer func() {
Expand Down Expand Up @@ -232,7 +232,7 @@ func (s *Server) campaignLeader() error {
return errors.WithStack(err)
}
if !resp.Succeeded {
return errors.New("campaign leader failed, other server may campaign ok")
return errors.New("failed to campaign leader, other server may campaign ok")
}

// Make the leader keepalived.
@@ -243,7 +243,7 @@ func (s *Server) campaignLeader() error {
if err != nil {
return errors.WithStack(err)
}
log.Debug("campaign leader ok", zap.String("campaign-leader-name", s.Name()))
log.Info("campaign leader ok", zap.String("campaign-leader-name", s.Name()))

err = s.reloadConfigFromKV()
if err != nil {
@@ -267,9 +267,8 @@ func (s *Server) campaignLeader() error {
s.enableLeader()
defer s.disableLeader()

log.Info("load cluster version", zap.Stringer("cluster-version", s.scheduleOpt.loadClusterVersion()))
log.Info("PD cluster leader is ready to serve", zap.String("leader-name", s.Name()))
CheckPDVersion(s.scheduleOpt)
log.Info("PD cluster leader is ready to serve", zap.String("leader-name", s.Name()))

tsTicker := time.NewTicker(updateTimestampStep)
defer tsTicker.Stop()
@@ -283,6 +282,7 @@ func (s *Server) campaignLeader() error {
}
case <-tsTicker.C:
if err = s.updateTimestamp(); err != nil {
log.Info("failed to update timestamp")
return err
}
etcdLeader := s.GetEtcdLeader()
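The campaignLeader hunks above follow the usual etcd election pattern: a transaction that writes the leader key only if it does not already exist, bound to a lease, followed by a keepalive loop. A minimal sketch of that pattern is below; the function signature, key and value names are assumptions for illustration, and this is not the PR's actual implementation.

```go
// Sketch of the campaign pattern referenced above: create the leader key only
// if it does not already exist, bind it to a lease, then keep the lease alive.
// All identifiers here are illustrative, not PD's real ones.
package election

import (
	"context"
	"errors"

	"go.etcd.io/etcd/clientv3"
)

func campaign(ctx context.Context, cli *clientv3.Client, lessor clientv3.Lease,
	grant *clientv3.LeaseGrantResponse, leaderKey, memberValue string) error {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision(leaderKey), "=", 0)).
		Then(clientv3.OpPut(leaderKey, memberValue, clientv3.WithLease(grant.ID))).
		Commit()
	if err != nil {
		return err
	}
	if !resp.Succeeded {
		// Same situation as the error in the diff: another member campaigned first.
		return errors.New("failed to campaign leader, other server may campaign ok")
	}

	// Keep the lease alive; if renewal stops, the leader key expires after the
	// lease TTL and the other members can campaign again.
	ch, err := lessor.KeepAlive(ctx, grant.ID)
	if err != nil {
		return err
	}
	go func() {
		for range ch {
			// Drain keepalive responses until the channel closes.
		}
	}()
	return nil
}
```

The keepalive is what ties this loop back to the LeaderLease TTL in config.go: once keepalives stop, the leader key expires after at most that many seconds.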
2 changes: 1 addition & 1 deletion server/schedule/hot_cache.go
@@ -147,7 +147,7 @@ func (w *HotSpotCache) isNeedUpdateStatCache(region *core.RegionInfo, flowBytes
newItem.Stats.Add(float64(flowBytes))
return true, newItem
}
// smaller than hotReionThreshold
// smaller than hotRegionThreshold
if oldItem == nil {
return false, newItem
}
2 changes: 1 addition & 1 deletion server/schedule/merge_checker.go
@@ -119,7 +119,7 @@ func (m *MergeChecker) Check(region *core.RegionInfo) []*Operator {
}

func (m *MergeChecker) checkTarget(region, adjacent, target *core.RegionInfo) *core.RegionInfo {
// if is not hot region and under same namesapce
// if is not hot region and under same namespace
if adjacent != nil && !m.cluster.IsRegionHot(adjacent.GetID()) &&
m.classifier.AllowMerge(region, adjacent) &&
len(adjacent.GetDownPeers()) == 0 && len(adjacent.GetPendingPeers()) == 0 && len(adjacent.GetLearners()) == 0 {
4 changes: 2 additions & 2 deletions server/schedule/operator.go
@@ -180,7 +180,7 @@ type MergeRegion struct {
// so to keep them from other scheduler,
// both of them should add MerRegion operatorStep.
// But actually, tikv just need the region want to be merged to get the merge request,
// thus use a IsPssive mark to indicate that
// thus use a IsPassive mark to indicate that
// this region doesn't need to send merge request to tikv.
IsPassive bool
}
@@ -299,7 +299,7 @@ func (o *Operator) RegionID() uint64 {
return o.regionID
}

// RegionEpoch returns the region's epoch that is attched to the operator.
// RegionEpoch returns the region's epoch that is attached to the operator.
func (o *Operator) RegionEpoch() *metapb.RegionEpoch {
return o.regionEpoch
}
2 changes: 1 addition & 1 deletion server/schedule/opts.go
@@ -68,7 +68,7 @@ type NamespaceOptions interface {
}

const (
// RejectLeader is the label property type that sugguests a store should not
// RejectLeader is the label property type that suggests a store should not
// have any region leaders.
RejectLeader = "reject-leader"
)
2 changes: 1 addition & 1 deletion server/schedule/replica_checker.go
@@ -27,7 +27,7 @@ import (
// Including the following:
// Replica number management.
// Unhealth replica management, mainly used for disaster recovery of TiKV.
// Location management, mainly used for corss data center deployment.
// Location management, mainly used for cross data center deployment.
type ReplicaChecker struct {
cluster Cluster
classifier namespace.Classifier
2 changes: 1 addition & 1 deletion server/schedule/scheduler.go
@@ -68,7 +68,7 @@ type Scheduler interface {
IsScheduleAllowed(cluster Cluster) bool
}

// CreateSchedulerFunc is for creating scheudler.
// CreateSchedulerFunc is for creating scheduler.
type CreateSchedulerFunc func(opController *OperatorController, args []string) (Scheduler, error)

var schedulerMap = make(map[string]CreateSchedulerFunc)
2 changes: 1 addition & 1 deletion server/schedulers/balance_leader.go
@@ -176,7 +176,7 @@ func (l *balanceLeaderScheduler) createOperator(region *core.RegionInfo, source,
sourceID := source.GetID()
targetID := target.GetID()
if !shouldBalance(cluster, source, target, region, core.LeaderKind, opInfluence) {
log.Debug("skip balance region",
log.Debug("skip balance leader",
zap.String("scheduler", l.GetName()), zap.Uint64("region-id", regionID), zap.Uint64("source-store", sourceID), zap.Uint64("target-store", targetID),
zap.Int64("source-size", source.GetLeaderSize()), zap.Float64("source-score", source.LeaderScore(0)),
zap.Int64("source-influence", opInfluence.GetStoreInfluence(sourceID).ResourceSize(core.LeaderKind)),
9 changes: 5 additions & 4 deletions server/schedulers/balance_region.go
@@ -179,15 +179,16 @@ func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *
schedulerCounter.WithLabelValues(s.GetName(), "no_peer").Inc()
return nil
}
sourceLabel := strconv.FormatUint(sourceID, 10)
targetLabel := strconv.FormatUint(targetID, 10)
balanceRegionCounter.WithLabelValues("move_peer", source.GetAddress()+"-out", sourceLabel).Inc()
balanceRegionCounter.WithLabelValues("move_peer", target.GetAddress()+"-in", targetLabel).Inc()

op, err := schedule.CreateMovePeerOperator("balance-region", cluster, region, schedule.OpBalance, oldPeer.GetStoreId(), newPeer.GetStoreId(), newPeer.GetId())
if err != nil {
schedulerCounter.WithLabelValues(s.GetName(), "create_operator_fail").Inc()
return nil
}
sourceLabel := strconv.FormatUint(sourceID, 10)
targetLabel := strconv.FormatUint(targetID, 10)
balanceRegionCounter.WithLabelValues("move_peer", source.GetAddress()+"-out", sourceLabel).Inc()
balanceRegionCounter.WithLabelValues("move_peer", target.GetAddress()+"-in", targetLabel).Inc()
return op
}

1 change: 1 addition & 0 deletions server/util.go
@@ -78,6 +78,7 @@ func CheckPDVersion(opt *scheduleOption) {
pdVersion = *MustParseVersion(PDReleaseVersion)
}
clusterVersion := opt.loadClusterVersion()
log.Info("load cluster version", zap.Stringer("cluster-version", clusterVersion))
if pdVersion.LessThan(clusterVersion) {
log.Warn(
"PD version less than cluster version, please upgrade PD",
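The util.go hunk above moves the cluster-version log next to the comparison between the PD binary version and the stored cluster version. As a rough illustration of that comparison, here is a hedged sketch using the coreos go-semver package; the package choice and the literal version strings are assumptions, and PD's own version helpers may differ.

```go
// Illustrative version check only; the version strings are made up.
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-semver/semver"
)

func main() {
	pdVersion, err := semver.NewVersion("2.1.0")
	if err != nil {
		log.Fatal(err)
	}
	clusterVersion, err := semver.NewVersion("3.0.0")
	if err != nil {
		log.Fatal(err)
	}
	// Mirrors the warning condition in CheckPDVersion above.
	if pdVersion.LessThan(*clusterVersion) {
		fmt.Printf("PD version %s less than cluster version %s, please upgrade PD\n",
			pdVersion, clusterVersion)
	}
}
```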