Support disabling scaling for a specified direction #93

Merged
merged 1 commit on Aug 29, 2024
@@ -197,6 +197,10 @@ type IntelligentHorizontalPodAutoscalerBehavior struct {

// ScalingBehavior defines the scaling behavior for one direction.
type ScalingBehavior struct {
// Disabled indicates whether scaling in this direction is disabled.
// +optional
Disabled bool `json:"disabled,omitempty"`

// GrayStrategy is the configuration of the strategy for gray change of replicas.
// If not set, gray change will be disabled.
// +optional
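For context, a minimal sketch (not part of this PR) of how a caller might use the new field. It assumes `ScaleUp` and `ScaleDown` are value-typed `ScalingBehavior` fields of `IntelligentHorizontalPodAutoscalerBehavior`, as the controller's direct access to `ihpa.Spec.Behavior.ScaleUp.Disabled` later in this diff suggests; disabling scale-down while leaving scale-up enabled would look roughly like:

```go
// Sketch only: field layout inferred from this diff, not a verbatim API excerpt.
behavior := autoscalingv1alpha1.IntelligentHorizontalPodAutoscalerBehavior{
	// Scale-up stays enabled (Disabled defaults to false and is omitempty).
	ScaleUp: autoscalingv1alpha1.ScalingBehavior{},
	// Scale-down is switched off for this autoscaler.
	ScaleDown: autoscalingv1alpha1.ScalingBehavior{Disabled: true},
}
```

With this pair of flags, getAllowedScalingDirection further down in this diff would map the behavior to ScalingDirectionUp on the generated ReplicaProfile.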
17 changes: 17 additions & 0 deletions apis/autoscaling/v1alpha1/replicaprofile_types.go
@@ -59,13 +59,30 @@ type ReplicaProfileSpec struct {
// +optional
Paused bool `json:"paused,omitempty"`

// AllowedScalingDirection is the allowed scaling direction.
// Note that it only cares about online replicas.
// It defaults to Both.
// +optional
// +kubebuilder:validation:Enum=Both;Neither;Up;Down
// +kubebuilder:default=Both
AllowedScalingDirection ScalingDirection `json:"allowedScalingDirection"`

// Behavior configures the behavior of ReplicaProfile.
// If not set, default behavior will be set.
// +optional
// +kubebuilder:default={podSorter:{type:"WorkloadDefault"},podTrafficController:{type:"ReadinessGate"}}
Behavior ReplicaProfileBehavior `json:"behavior"`
}

type ScalingDirection string

const (
ScalingDirectionBoth ScalingDirection = "Both"
ScalingDirectionNeither ScalingDirection = "Neither"
ScalingDirectionUp ScalingDirection = "Up"
ScalingDirectionDown ScalingDirection = "Down"
)

// ReplicaProfileBehavior defines the behavior of ReplicaProfile.
type ReplicaProfileBehavior struct {
// PodSorter is used to decide the priority of pods when scaling.
@@ -121,6 +121,10 @@ spec:
description: ScaleDown is the behavior configuration for scaling
down.
properties:
disabled:
description: Disabled indicates whether scaling in this direction is disabled.
type: boolean
grayStrategy:
description: GrayStrategy is the configuration of the strategy
for gray change of replicas. If not set, gray change will
@@ -171,6 +175,10 @@ spec:
description: ScaleUp is the behavior configuration for scaling
up.
properties:
disabled:
description: Disabled indicates whether scaling in this direction is disabled.
type: boolean
grayStrategy:
description: GrayStrategy is the configuration of the strategy
for gray change of replicas. If not set, gray change will
10 changes: 10 additions & 0 deletions config/crd/bases/autoscaling.kapacitystack.io_replicaprofiles.yaml
@@ -39,6 +39,16 @@ spec:
spec:
description: ReplicaProfileSpec defines the desired state of ReplicaProfile.
properties:
allowedScalingDirection:
default: Both
description: AllowedScalingDirection is the allowed scaling direction.
Note that it only cares about online replicas. It defaults to Both.
enum:
- Both
- Neither
- Up
- Down
type: string
behavior:
default:
podSorter:
@@ -245,6 +245,7 @@ func (r *IntelligentHorizontalPodAutoscalerReconciler) Reconcile(ctx context.Con
rp.Spec.CutoffReplicas = replicaData.CutoffReplicas
rp.Spec.StandbyReplicas = replicaData.StandbyReplicas
rp.Spec.Paused = ihpa.Spec.Paused
rp.Spec.AllowedScalingDirection = getAllowedScalingDirection(ihpa)
if ihpa.Spec.Behavior.ReplicaProfile != nil {
rp.Spec.Behavior = *ihpa.Spec.Behavior.ReplicaProfile
} else {
@@ -317,16 +318,31 @@ func newReplicaProfile(ihpa *autoscalingv1alpha1.IntelligentHorizontalPodAutosca
},
},
Spec: autoscalingv1alpha1.ReplicaProfileSpec{
ScaleTargetRef: ihpa.Spec.ScaleTargetRef,
OnlineReplicas: replicaData.OnlineReplicas,
CutoffReplicas: replicaData.CutoffReplicas,
StandbyReplicas: replicaData.StandbyReplicas,
Paused: ihpa.Spec.Paused,
Behavior: behavior,
ScaleTargetRef: ihpa.Spec.ScaleTargetRef,
OnlineReplicas: replicaData.OnlineReplicas,
CutoffReplicas: replicaData.CutoffReplicas,
StandbyReplicas: replicaData.StandbyReplicas,
Paused: ihpa.Spec.Paused,
AllowedScalingDirection: getAllowedScalingDirection(ihpa),
Behavior: behavior,
},
}
}

func getAllowedScalingDirection(ihpa *autoscalingv1alpha1.IntelligentHorizontalPodAutoscaler) autoscalingv1alpha1.ScalingDirection {
up, down := !ihpa.Spec.Behavior.ScaleUp.Disabled, !ihpa.Spec.Behavior.ScaleDown.Disabled
if up && down {
return autoscalingv1alpha1.ScalingDirectionBoth
}
if up {
return autoscalingv1alpha1.ScalingDirectionUp
}
if down {
return autoscalingv1alpha1.ScalingDirectionDown
}
return autoscalingv1alpha1.ScalingDirectionNeither
}

func defaultReplicaProfileBehavior() autoscalingv1alpha1.ReplicaProfileBehavior {
return autoscalingv1alpha1.ReplicaProfileBehavior{
PodSorter: autoscalingv1alpha1.PodSorter{
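To make the flag-to-direction mapping explicit, here is a hypothetical table-driven test (not part of this PR) for getAllowedScalingDirection. It assumes the test sits in the same package as the controller above, with the `testing` package and the `autoscalingv1alpha1` alias imported as in the diff:

```go
func TestGetAllowedScalingDirection(t *testing.T) {
	cases := []struct {
		upDisabled, downDisabled bool
		want                     autoscalingv1alpha1.ScalingDirection
	}{
		{false, false, autoscalingv1alpha1.ScalingDirectionBoth},  // both directions allowed
		{false, true, autoscalingv1alpha1.ScalingDirectionUp},     // scale-down disabled
		{true, false, autoscalingv1alpha1.ScalingDirectionDown},   // scale-up disabled
		{true, true, autoscalingv1alpha1.ScalingDirectionNeither}, // fully frozen
	}
	for _, c := range cases {
		ihpa := &autoscalingv1alpha1.IntelligentHorizontalPodAutoscaler{}
		ihpa.Spec.Behavior.ScaleUp.Disabled = c.upDisabled
		ihpa.Spec.Behavior.ScaleDown.Disabled = c.downDisabled
		if got := getAllowedScalingDirection(ihpa); got != c.want {
			t.Errorf("up disabled=%v, down disabled=%v: got %v, want %v", c.upDisabled, c.downDisabled, got, c.want)
		}
	}
}
```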
36 changes: 33 additions & 3 deletions controllers/autoscaling/replicaprofile_controller.go
@@ -159,7 +159,7 @@ func (r *ReplicaProfileReconciler) Reconcile(ctx context.Context, req ctrl.Reque
rp.Status.CutoffReplicas = int32(len(currentRunningPods[autoscalingv1alpha1.PodStateCutoff]))
rp.Status.StandbyReplicas = int32(len(currentRunningPods[autoscalingv1alpha1.PodStateStandby]))

ensured := rp.Status.OnlineReplicas == rp.Spec.OnlineReplicas &&
ensured := isOnlineReplicasEnsured(rp) &&
rp.Status.CutoffReplicas == rp.Spec.CutoffReplicas &&
rp.Status.StandbyReplicas == rp.Spec.StandbyReplicas
if ensured {
@@ -218,7 +218,11 @@ func (r *ReplicaProfileReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, err
}

sm := pod.NewStateManager(rp, podSorter, currentRunningPods)
sm := pod.NewStateManager(map[autoscalingv1alpha1.PodState]int32{
autoscalingv1alpha1.PodStateOnline: getDesiredOnlineReplicas(rp),
autoscalingv1alpha1.PodStateCutoff: rp.Spec.CutoffReplicas,
autoscalingv1alpha1.PodStateStandby: rp.Spec.StandbyReplicas,
}, currentRunningPods, podSorter)
change, err := sm.CalculateStateChange(ctx)
if err != nil {
l.Error(err, "failed to calculate state change")
@@ -309,7 +313,7 @@ func (r *ReplicaProfileReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}

// Scale replicas if needed
desiredReplicas := rp.Spec.OnlineReplicas + rp.Spec.CutoffReplicas + rp.Spec.StandbyReplicas
desiredReplicas := getDesiredOnlineReplicas(rp) + rp.Spec.CutoffReplicas + rp.Spec.StandbyReplicas
if desiredReplicas != scale.Spec.Replicas {
l.Info("rescale target workload", "oldReplicas", scale.Spec.Replicas, "newReplicas", desiredReplicas)
r.Eventf(rp, corev1.EventTypeNormal, "UpdateScale", "rescale target workload from %d to %d replicas", scale.Spec.Replicas, desiredReplicas)
@@ -511,3 +515,29 @@ func (r *ReplicaProfileReconciler) setPodState(ctx context.Context, p *corev1.Po
func setReplicaProfileCondition(rp *autoscalingv1alpha1.ReplicaProfile, conditionType autoscalingv1alpha1.ReplicaProfileConditionType, status metav1.ConditionStatus, reason, message string) {
rp.Status.Conditions = util.SetConditionInList(rp.Status.Conditions, string(conditionType), status, rp.Generation, reason, message)
}

func isOnlineReplicasEnsured(rp *autoscalingv1alpha1.ReplicaProfile) bool {
switch rp.Spec.AllowedScalingDirection {
case autoscalingv1alpha1.ScalingDirectionUp:
return rp.Status.OnlineReplicas >= rp.Spec.OnlineReplicas
case autoscalingv1alpha1.ScalingDirectionDown:
return rp.Status.OnlineReplicas <= rp.Spec.OnlineReplicas
case autoscalingv1alpha1.ScalingDirectionNeither:
return true
default:
return rp.Status.OnlineReplicas == rp.Spec.OnlineReplicas
}
}

func getDesiredOnlineReplicas(rp *autoscalingv1alpha1.ReplicaProfile) int32 {
switch rp.Spec.AllowedScalingDirection {
case autoscalingv1alpha1.ScalingDirectionUp:
return util.MaxInt32(rp.Spec.OnlineReplicas, rp.Status.OnlineReplicas)
case autoscalingv1alpha1.ScalingDirectionDown:
return util.MinInt32(rp.Spec.OnlineReplicas, rp.Status.OnlineReplicas)
case autoscalingv1alpha1.ScalingDirectionNeither:
return rp.Status.OnlineReplicas
default:
return rp.Spec.OnlineReplicas
}
}
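
A worked example (not from the PR) of the clamping these two helpers implement: with AllowedScalingDirection set to Down, the desired online count becomes min(spec, status), so a spec that asks for more online replicas than are currently running is ignored until scaling up is allowed again. A rough sketch, assuming the replica fields are plain int32 values as the comparisons above suggest, and with `fmt` imported:

```go
rp := &autoscalingv1alpha1.ReplicaProfile{
	Spec: autoscalingv1alpha1.ReplicaProfileSpec{
		OnlineReplicas:          10, // the autoscaler asks for 10 online pods
		AllowedScalingDirection: autoscalingv1alpha1.ScalingDirectionDown,
	},
}
rp.Status.OnlineReplicas = 8 // but only 8 are online right now

// Only scaling down is allowed, so the desired count is clamped to min(10, 8) = 8
// and the reconciler leaves the workload at its current size.
fmt.Println(getDesiredOnlineReplicas(rp)) // 8
// The profile still counts as "ensured": for Down, status <= spec is enough.
fmt.Println(isOnlineReplicasEnsured(rp)) // true
```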
17 changes: 4 additions & 13 deletions pkg/pod/state.go
@@ -106,30 +106,21 @@ func newStateInfo() *stateInfo {

// StateManager provides a method to calculate pod state change.
type StateManager struct {
rp *autoscalingv1alpha1.ReplicaProfile
sorter sorter.Interface
statesInfo map[autoscalingv1alpha1.PodState]*stateInfo
podNameMap map[string]*corev1.Pod
sorter sorter.Interface
}

// NewStateManager builds a state manager to calculate pod state change based on the given desired replicas and current running pods.
func NewStateManager(rp *autoscalingv1alpha1.ReplicaProfile, sorter sorter.Interface, currentRunningPods map[autoscalingv1alpha1.PodState][]*corev1.Pod) *StateManager {
func NewStateManager(desiredReplicas map[autoscalingv1alpha1.PodState]int32, currentRunningPods map[autoscalingv1alpha1.PodState][]*corev1.Pod, sorter sorter.Interface) *StateManager {
sm := &StateManager{
rp: rp,
sorter: sorter,
statesInfo: make(map[autoscalingv1alpha1.PodState]*stateInfo, len(defaultStatesOrdered)),
podNameMap: make(map[string]*corev1.Pod),
sorter: sorter,
}
for _, state := range defaultStatesOrdered {
info := newStateInfo()
switch state {
case autoscalingv1alpha1.PodStateOnline:
info.DesiredReplicas = int(rp.Spec.OnlineReplicas)
case autoscalingv1alpha1.PodStateCutoff:
info.DesiredReplicas = int(rp.Spec.CutoffReplicas)
case autoscalingv1alpha1.PodStateStandby:
info.DesiredReplicas = int(rp.Spec.StandbyReplicas)
}
info.DesiredReplicas = int(desiredReplicas[state])
for _, pod := range currentRunningPods[state] {
info.CurrentPodNames.Insert(pod.Name)
sm.podNameMap[pod.Name] = pod
6 changes: 5 additions & 1 deletion pkg/pod/state_test.go
@@ -250,7 +250,11 @@ func TestCalculateStateChange(t *testing.T) {

for _, testCase := range testCases {
statefulSet := &workload.StatefulSet{}
stateManager := NewStateManager(testCase.rp, statefulSet, currentPodMap)
stateManager := NewStateManager(map[autoscalingv1alpha1.PodState]int32{
autoscalingv1alpha1.PodStateOnline: testCase.rp.Spec.OnlineReplicas,
autoscalingv1alpha1.PodStateCutoff: testCase.rp.Spec.CutoffReplicas,
autoscalingv1alpha1.PodStateStandby: testCase.rp.Spec.StandbyReplicas,
}, currentPodMap, statefulSet)

stateChange, err := stateManager.CalculateStateChange(context.Background())
assert.Nil(t, err)