auto scaling noise reduction support
* add ReadyToScaleOut and ReadyToScaleIn statuses
* record the ReadyToScaleOut / ReadyToScaleIn timestamp
* ensure auto-scaling only happens after the cluster status has remained ReadyToScaleOut or ReadyToScaleIn for a user-configured time
vincent178 committed Apr 27, 2020
1 parent b67c9af commit de74eb8
Showing 5 changed files with 51 additions and 1 deletion.
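Taken together, the changes below implement a debounce: each reconciliation records which direction the cluster is ready to scale in, resets a timestamp whenever that direction changes, and only allows the scaling action once the state has held for a threshold. A minimal, self-contained sketch of the pattern (illustrative names only, not the operator's API):

	package main

	import (
		"fmt"
		"time"
	)

	// debouncer mirrors the commit's idea: a phase plus the time it was entered.
	type debouncer struct {
		phase string    // "Normal", "ReadyToScaleOut" or "ReadyToScaleIn"
		since time.Time // when the current phase was entered
	}

	// observe records the desired scaling direction; a direction change resets the clock.
	func (d *debouncer) observe(recommended, current int32, now time.Time) {
		next := "ReadyToScaleIn"
		if recommended > current {
			next = "ReadyToScaleOut"
		}
		if d.phase != next {
			d.phase = next
			d.since = now
		}
	}

	// shouldScale reports whether the current phase has held for at least threshold.
	func (d *debouncer) shouldScale(now time.Time, threshold time.Duration) bool {
		return d.phase != "Normal" && now.Sub(d.since) >= threshold
	}

	func main() {
		d := &debouncer{phase: "Normal"}
		t0 := time.Now()
		d.observe(5, 3, t0)                         // metrics recommend scaling out
		fmt.Println(d.shouldScale(t0, time.Minute)) // false: phase just entered
		d.observe(5, 3, t0.Add(90*time.Second))     // still scale-out, clock not reset
		fmt.Println(d.shouldScale(t0.Add(90*time.Second), time.Minute)) // true: held for 90s
	}

A short-lived metrics spike flips the phase and resets the clock, so it never accumulates enough hold time to trigger a scaling action; that is the noise reduction.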
1 change: 1 addition & 0 deletions pkg/apis/pingcap/v1alpha1/tidbclusterautoscaler_types.go
@@ -175,6 +175,7 @@ type TikvAutoScalerStatus struct {
 // +k8s:openapi-gen=true
 // BasicAutoScalerStatus describes the basic auto-scaling status
 type BasicAutoScalerStatus struct {
+	Phase MemberPhase `json:"phase,omitempty"` // current auto-scaling phase, one of the MemberPhase values
 	// MetricsStatusList describes the metrics status in the last auto-scaling reconciliation
 	// +optional
 	MetricsStatusList []MetricsStatus `json:"metrics,omitempty"`
4 changes: 4 additions & 0 deletions pkg/apis/pingcap/v1alpha1/types.go
@@ -58,6 +58,10 @@ const (
 	NormalPhase MemberPhase = "Normal"
 	// UpgradePhase represents the upgrade state of TiDB cluster.
 	UpgradePhase MemberPhase = "Upgrade"
+	// ReadyToScaleOut represents the ready-to-scale-out state of TiDB cluster.
+	ReadyToScaleOut MemberPhase = "ReadyToScaleOut"
+	// ReadyToScaleIn represents the ready-to-scale-in state of TiDB cluster.
+	ReadyToScaleIn MemberPhase = "ReadyToScaleIn"
 )
 
 // ConfigUpdateStrategy represents the strategy to update configuration
24 changes: 23 additions & 1 deletion pkg/autoscaler/autoscaler/tidb_autoscaler.go
@@ -47,6 +47,7 @@ func (am *autoScalerManager) syncTiDB(tc *v1alpha1.TidbCluster, tac *v1alpha1.Ti
 	}
 	targetReplicas = limitTargetReplicas(targetReplicas, tac, v1alpha1.TiDBMemberType)
 	if targetReplicas == tc.Spec.TiDB.Replicas {
+		tac.Status.TiDB.Phase = v1alpha1.NormalPhase
 		return nil
 	}
 	return syncTiDBAfterCalculated(tc, tac, currentReplicas, targetReplicas, sts)
@@ -59,8 +60,28 @@ func syncTiDBAfterCalculated(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbCluster
 	intervalSeconds := tac.Spec.TiDB.ScaleInIntervalSeconds
 	if recommendedReplicas > currentReplicas {
 		intervalSeconds = tac.Spec.TiDB.ScaleOutIntervalSeconds
+		if tac.Status.TiDB.Phase != v1alpha1.ReadyToScaleOut {
+			tac.Status.TiDB.Phase = v1alpha1.ReadyToScaleOut
+			// The phase can change from Normal to ReadyToScaleOut or from
+			// ReadyToScaleIn to ReadyToScaleOut; reset the timestamp in both cases.
+			tac.Annotations[label.AnnTiDBReadyToScaleTimestamp] = fmt.Sprintf("%d", time.Now().Unix())
+		}
+	} else {
+		if tac.Status.TiDB.Phase != v1alpha1.ReadyToScaleIn {
+			tac.Status.TiDB.Phase = v1alpha1.ReadyToScaleIn
+			// The phase can change from Normal to ReadyToScaleIn or from
+			// ReadyToScaleOut to ReadyToScaleIn; reset the timestamp in both cases.
+			tac.Annotations[label.AnnTiDBReadyToScaleTimestamp] = fmt.Sprintf("%d", time.Now().Unix())
+		}
 	}
+	ableToScale, err := checkStsReadyAutoScalingTimestamp(tac, 123) // TODO: 123s is a hardcoded placeholder; this should be the user-configured time
+	if err != nil {
+		return err
+	}
+	if !ableToScale {
+		return nil
+	}
-	ableToScale, err := checkStsAutoScalingInterval(tac, *intervalSeconds, v1alpha1.TiDBMemberType)
+	ableToScale, err = checkStsAutoScalingInterval(tac, *intervalSeconds, v1alpha1.TiDBMemberType)
 	if err != nil {
 		return err
 	}
@@ -72,6 +93,7 @@ func syncTiDBAfterCalculated(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbCluster
 
 // Currently we didn't record the auto-scaling-out slot for tidb, because it is pointless for now.
 func updateTcTiDBIfScale(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, recommendedReplicas int32) error {
+	tac.Status.TiDB.Phase = v1alpha1.NormalPhase
 	tac.Annotations[label.AnnTiDBLastAutoScalingTimestamp] = fmt.Sprintf("%d", time.Now().Unix())
 	tc.Spec.TiDB.Replicas = recommendedReplicas
 	tac.Status.TiDB.RecommendedReplicas = &recommendedReplicas
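The resulting control flow in syncTiDBAfterCalculated applies two gates, in order, before any replica change. A self-contained sketch of that ordering (hypothetical helper; it assumes the semantics of checkStsReadyAutoScalingTimestamp and checkStsAutoScalingInterval as shown in this commit):

	package main

	import (
		"fmt"
		"time"
	)

	// canScaleNow condenses the gating order above: readyFor is how long the
	// cluster has held its current ReadyToScale phase, sinceLast how long ago
	// the previous scaling action happened.
	func canScaleNow(readyFor, sinceLast, readyThreshold, interval time.Duration) bool {
		if readyFor < readyThreshold { // gate 1: the ready phase must have held long enough
			return false
		}
		return sinceLast >= interval // gate 2: respect the scale-in/scale-out interval
	}

	func main() {
		// Ready for 150s against a 123s threshold, but the last scaling action
		// was only 60s ago against a 300s interval: gate 2 still blocks it.
		fmt.Println(canScaleNow(150*time.Second, 60*time.Second, 123*time.Second, 300*time.Second))
	}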
19 changes: 19 additions & 0 deletions pkg/autoscaler/autoscaler/util.go
@@ -57,6 +57,25 @@ func checkStsAutoScalingPrerequisites(set *appsv1.StatefulSet) bool {
 	return true
 }
 
+func checkStsReadyAutoScalingTimestamp(tac *v1alpha1.TidbClusterAutoScaler, threshold int32) (bool, error) {
+	if tac.Annotations == nil {
+		tac.Annotations = map[string]string{}
+	}
+	readyAutoScalingTimestamp, existed := tac.Annotations[label.AnnTiDBReadyToScaleTimestamp]
+	if !existed { // first observation: seed the timestamp and report not ready yet
+		tac.Annotations[label.AnnTiDBReadyToScaleTimestamp] = fmt.Sprintf("%d", time.Now().Unix())
+		return false, nil
+	}
+	t, err := strconv.ParseInt(readyAutoScalingTimestamp, 10, 64)
+	if err != nil {
+		return false, err
+	}
+	if threshold > int32(time.Now().Sub(time.Unix(t, 0)).Seconds()) { // not held long enough yet
+		return false, nil
+	}
+	return true, nil
+}
+
 // checkStsAutoScalingInterval would check whether there is enough interval duration between every two auto-scaling operations
 func checkStsAutoScalingInterval(tac *v1alpha1.TidbClusterAutoScaler, intervalSeconds int32, memberType v1alpha1.MemberType) (bool, error) {
 	if tac.Annotations == nil {
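A hypothetical test, not part of this commit, illustrating how the new helper behaves (same package, assuming the repo's import paths):

	package autoscaler

	import (
		"fmt"
		"testing"
		"time"

		"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
		"github.com/pingcap/tidb-operator/pkg/label"
	)

	func TestCheckStsReadyAutoScalingTimestamp(t *testing.T) {
		tac := &v1alpha1.TidbClusterAutoScaler{}

		// First call: no timestamp yet, so the helper seeds the annotation
		// and reports that scaling is not yet allowed.
		ok, err := checkStsReadyAutoScalingTimestamp(tac, 100)
		if err != nil || ok {
			t.Fatalf("expected (false, nil), got (%v, %v)", ok, err)
		}
		if _, seeded := tac.Annotations[label.AnnTiDBReadyToScaleTimestamp]; !seeded {
			t.Fatal("expected the ready-to-scale timestamp to be seeded")
		}

		// Backdate the timestamp by 200s; a 100s threshold is now satisfied.
		tac.Annotations[label.AnnTiDBReadyToScaleTimestamp] =
			fmt.Sprintf("%d", time.Now().Add(-200*time.Second).Unix())
		ok, err = checkStsReadyAutoScalingTimestamp(tac, 100)
		if err != nil || !ok {
			t.Fatalf("expected (true, nil), got (%v, %v)", ok, err)
		}
	}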
4 changes: 4 additions & 0 deletions pkg/label/label.go
@@ -104,6 +104,10 @@ const (
 	// AnnTiKVLastAutoScalingTimestamp is the annotation key recording the last tikv auto-scaling timestamp
 	AnnTiKVLastAutoScalingTimestamp = "tikv.tidb.pingcap.com/last-autoscaling-timestamp"
 
+	// AnnTiDBReadyToScaleTimestamp is the annotation key recording when tidb last entered a ready-to-scale phase
+	AnnTiDBReadyToScaleTimestamp = "tidb.tidb.pingcap.com/ready-to-scale-timestamp"
+	// AnnTiKVReadyToScaleTimestamp is the annotation key recording when tikv last entered a ready-to-scale phase
+	AnnTiKVReadyToScaleTimestamp = "tikv.tidb.pingcap.com/ready-to-scale-timestamp"
 	// AnnTiDBConsecutiveScaleOutCount describes the least consecutive count to scale-out for tidb
 	AnnTiDBConsecutiveScaleOutCount = "tidb.tidb.pingcap.com/consecutive-scale-out-count"
 	// AnnTiDBConsecutiveScaleInCount describes the least consecutive count to scale-in for tidb
