HPA Observer Controller #260

Merged: 1 commit, Apr 13, 2022
6 changes: 3 additions & 3 deletions cmd/craned/app/manager.go
@@ -212,7 +212,7 @@ func initializationControllers(ctx context.Context, mgr ctrl.Manager, opts *opti
Client: mgr.GetClient(),
}
if err := podOOMRecorder.SetupWithManager(mgr); err != nil {
klog.Exit(err, "unable to create controller", "controller", "PodOOMRecorder")
klog.Exit(err, "Unable to create controller", "PodOOMRecorder")
}
go func() {
if err := podOOMRecorder.Run(ctx.Done()); err != nil {
@@ -242,13 +242,13 @@ func initializationControllers(ctx context.Context, mgr ctrl.Manager, opts *opti
klog.Exit(err, "unable to create controller", "controller", "SubstituteController")
}

if err := (&ehpa.HPAReplicasController{
if err := (&ehpa.HPAObserverController{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
RestMapper: mgr.GetRESTMapper(),
Recorder: mgr.GetEventRecorderFor("hpareplicas-controller"),
}).SetupWithManager(mgr); err != nil {
klog.Exit(err, "unable to create controller", "controller", "HPAReplicasController")
klog.Exit(err, "unable to create controller", "controller", "HPAObserverController")
}

if err := (&evpa.EffectiveVPAController{
2 changes: 1 addition & 1 deletion pkg/controller/ehpa/effective_hpa_controller.go
@@ -184,7 +184,7 @@ func setCondition(status *autoscalingapi.EffectiveHorizontalPodAutoscalerStatus,
func RecordMetrics(ehpa *autoscalingapi.EffectiveHorizontalPodAutoscaler) {
if ehpa.Status.ExpectReplicas != nil {
labels := map[string]string{
"identity": klog.KObj(ehpa).String(),
"resourceName": klog.KObj(ehpa).String(),
"strategy": string(ehpa.Spec.ScaleStrategy),
}
metrics.EHPAReplicas.With(labels).Set(float64(*ehpa.Status.ExpectReplicas))
64 changes: 64 additions & 0 deletions pkg/controller/ehpa/hpa_event_handler.go
@@ -0,0 +1,64 @@
package ehpa

import (
"fmt"
"strings"

autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"

"github.com/gocrane/crane/pkg/metrics"
)

type hpaEventHandler struct {
enqueueHandler handler.EnqueueRequestForObject
}

func (h *hpaEventHandler) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) {
hpa := evt.Object.(*autoscalingv2.HorizontalPodAutoscaler)
if hpa.DeletionTimestamp != nil {
h.Delete(event.DeleteEvent{Object: evt.Object}, q)
return
}

h.enqueueHandler.Create(evt, q)
}

func (h *hpaEventHandler) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
h.enqueueHandler.Delete(evt, q)
}

func (h *hpaEventHandler) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
newHpa := evt.ObjectNew.(*autoscalingv2.HorizontalPodAutoscaler)
oldHpa := evt.ObjectOld.(*autoscalingv2.HorizontalPodAutoscaler)
if oldHpa.Status.DesiredReplicas != newHpa.Status.DesiredReplicas {
for _, cond := range newHpa.Status.Conditions {
if cond.Reason == "SucceededRescale" || cond.Reason == "SucceededOverloadRescale" {
scaleType := "hpa"
if strings.HasPrefix("ehpa-", newHpa.Name) {
scaleType = "ehpa"
}

direction := "Down"
if newHpa.Status.DesiredReplicas > oldHpa.Status.DesiredReplicas {
direction = "Up"
}

labels := map[string]string{
"resourceName": fmt.Sprintf("%s/%s", newHpa.Namespace, newHpa.Name),
"type": scaleType,
"direction": direction,
}
metrics.HPAScaleCount.With(labels).Add(1)

break
}
}
}
h.enqueueHandler.Update(evt, q)
}

func (h *hpaEventHandler) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) {
}
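
The Update handler only counts a rescale when DesiredReplicas changed and the HPA reports a SucceededRescale or SucceededOverloadRescale condition, labelling the sample with the resource name, the hpa/ehpa type, and the scale direction. Below is a minimal unit-test sketch of that path; the HPA namespace/name (default/web), the test name, and the use of prometheus testutil are illustrative assumptions, not part of this PR:

```go
package ehpa

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus/testutil"
	autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"

	"github.com/gocrane/crane/pkg/metrics"
)

// Sketch: exercise the scale-up counting path of hpaEventHandler.Update.
func TestHpaEventHandlerUpdateCountsScaleUp(t *testing.T) {
	oldHpa := &autoscalingv2.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"},
		Status:     autoscalingv2.HorizontalPodAutoscalerStatus{DesiredReplicas: 2},
	}
	newHpa := oldHpa.DeepCopy()
	newHpa.Status.DesiredReplicas = 4
	newHpa.Status.Conditions = []autoscalingv2.HorizontalPodAutoscalerCondition{
		{Reason: "SucceededRescale"},
	}

	h := &hpaEventHandler{enqueueHandler: handler.EnqueueRequestForObject{}}
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	h.Update(event.UpdateEvent{ObjectOld: oldHpa, ObjectNew: newHpa}, q)

	// The handler should have incremented the metric once for an upward scale.
	got := testutil.ToFloat64(metrics.HPAScaleCount.With(map[string]string{
		"resourceName": "default/web",
		"type":         "hpa",
		"direction":    "Up",
	}))
	if got != 1 {
		t.Fatalf("expected scale count 1, got %v", got)
	}
}
```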
@@ -10,19 +10,22 @@ import (
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/source"

"github.com/gocrane/crane/pkg/metrics"
)

// HPAReplicasController is responsible for monitor and export replicas for hpa
type HPAReplicasController struct {
// HPAObserverController is responsible for observing and recording metrics for HPA
type HPAObserverController struct {
client.Client
Scheme *runtime.Scheme
RestMapper meta.RESTMapper
Recorder record.EventRecorder
}

func (c *HPAReplicasController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
func (c *HPAObserverController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
klog.V(8).Infof("Got hpa %s", req.NamespacedName)

hpa := &autoscalingv2.HorizontalPodAutoscaler{}
@@ -31,15 +34,28 @@ func (c *HPAReplicasController) Reconcile(ctx context.Context, req ctrl.Request)
}

labels := map[string]string{
"identity": klog.KObj(hpa).String(),
"resourceName": klog.KObj(hpa).String(),
}
metrics.HPAReplicas.With(labels).Set(float64(hpa.Status.DesiredReplicas))

return ctrl.Result{}, nil
}

func (c *HPAReplicasController) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&autoscalingv2.HorizontalPodAutoscaler{}).
Complete(c)
func (c *HPAObserverController) SetupWithManager(mgr ctrl.Manager) error {
// Create a new controller
controller, err := controller.New("hpa-observer-controller", mgr, controller.Options{
Reconciler: c})
if err != nil {
return err
}

// Watch for changes to HPA
err = controller.Watch(&source.Kind{Type: &autoscalingv2.HorizontalPodAutoscaler{}}, &hpaEventHandler{
enqueueHandler: handler.EnqueueRequestForObject{},
})
if err != nil {
return err
}

return nil
}
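
The Watch call above only accepts &hpaEventHandler{} because the handler satisfies controller-runtime's handler.EventHandler interface (Create, Update, Delete, Generic). A compile-time assertion, shown here as a sketch and not included in the PR, makes that contract explicit:

```go
package ehpa

import "sigs.k8s.io/controller-runtime/pkg/handler"

// Compile-time check (sketch, not part of this PR): hpaEventHandler must
// implement handler.EventHandler for controller.Watch to accept it.
var _ handler.EventHandler = &hpaEventHandler{}
```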
2 changes: 1 addition & 1 deletion pkg/controller/evpa/effective_vpa_controller.go
@@ -124,7 +124,7 @@ func (c *EffectiveVPAController) SetupWithManager(mgr ctrl.Manager) error {

func recordMetric(evpa *autoscalingapi.EffectiveVerticalPodAutoscaler, status *autoscalingapi.EffectiveVerticalPodAutoscalerStatus, podTemplate *v1.PodTemplateSpec) {
labels := map[string]string{
"target": fmt.Sprintf("%s/%s", evpa.Namespace, evpa.Spec.TargetRef.Name),
"resourceName": fmt.Sprintf("%s/%s", evpa.Namespace, evpa.Spec.TargetRef.Name),
}

for _, container := range status.Recommendation.ContainerRecommendations {
21 changes: 15 additions & 6 deletions pkg/metrics/autoscaling.go
@@ -13,7 +13,7 @@ var (
Name: "hpa_replicas",
Help: "Replicas for HPA",
},
[]string{"identity"},
[]string{"resourceName"},
)
EHPAReplicas = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
@@ -22,7 +22,16 @@
Name: "effective_hpa_replicas",
Help: "Replicas for Effective HPA",
},
[]string{"identity", "strategy"},
[]string{"resourceName", "strategy"},
)
HPAScaleCount = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "crane",
Subsystem: "autoscaling",
Name: "hpa_scale_count",
Help: "Scale count for HPA",
},
[]string{"resourceName", "type", "direction"},
)
OOMCount = prometheus.NewCounterVec(
prometheus.CounterOpts{
@@ -44,7 +53,7 @@
Help: "The cpu scale up for Effective VPA",
},
[]string{
"target",
"resourceName",
},
)
EVPACpuScaleDownMilliCores = prometheus.NewGaugeVec(
@@ -55,7 +64,7 @@
Help: "The cpu scale down for Effective VPA",
},
[]string{
"target",
"resourceName",
},
)
EVPAMemoryScaleUpMB = prometheus.NewGaugeVec(
@@ -66,7 +75,7 @@
Help: "The memory scale up for Effective VPA",
},
[]string{
"target",
"resourceName",
},
)
EVPAMemoryScaleDownMB = prometheus.NewGaugeVec(
@@ -77,7 +86,7 @@
Help: "The memory scale down for Effective VPA",
},
[]string{
"target",
"resourceName",
},
)
)
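
These collectors only appear on the manager's /metrics endpoint once they are registered; the registration code is outside this diff. A typical controller-runtime pattern would look like the sketch below, where the init function and the choice of the controller-runtime registry are assumptions for illustration, and only the collectors whose names are visible in this diff are listed:

```go
package metrics

import (
	ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
)

func init() {
	// Sketch (not part of this PR): expose the autoscaling collectors on the
	// manager's /metrics endpoint via the controller-runtime registry.
	ctrlmetrics.Registry.MustRegister(
		HPAReplicas,
		EHPAReplicas,
		HPAScaleCount,
		OOMCount,
	)
}
```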