From a5b9fb6d31f485d80190d7ac1980f78d3258faf3 Mon Sep 17 00:00:00 2001
From: Kun Woo Yoo
Date: Wed, 18 Sep 2024 20:07:11 +0900
Subject: [PATCH] add test for CPU/Memory trigger activation feature

Signed-off-by: Kun Woo Yoo
---
 pkg/scalers/cpu_memory_scaler.go      | 12 ++--
 pkg/scalers/cpu_memory_scaler_test.go | 81 +++++++++++++++++++++++++--
 tests/scalers/cpu/cpu_test.go         | 16 ++++--
 3 files changed, 93 insertions(+), 16 deletions(-)

diff --git a/pkg/scalers/cpu_memory_scaler.go b/pkg/scalers/cpu_memory_scaler.go
index 28738d03876..1f99c633c23 100644
--- a/pkg/scalers/cpu_memory_scaler.go
+++ b/pkg/scalers/cpu_memory_scaler.go
@@ -21,7 +21,7 @@ type cpuMemoryScaler struct {
 	metadata     *cpuMemoryMetadata
 	resourceName v1.ResourceName
 	logger       logr.Logger
-	client       client.Client
+	kubeClient   client.Client
 }
 
 type cpuMemoryMetadata struct {
@@ -37,7 +37,7 @@ type cpuMemoryMetadata struct {
 }
 
 // NewCPUMemoryScaler creates a new cpuMemoryScaler
-func NewCPUMemoryScaler(resourceName v1.ResourceName, config *scalersconfig.ScalerConfig, client client.Client) (Scaler, error) {
+func NewCPUMemoryScaler(resourceName v1.ResourceName, config *scalersconfig.ScalerConfig, kubeClient client.Client) (Scaler, error) {
 	logger := InitializeLogger(config, "cpu_memory_scaler")
 
 	meta, parseErr := parseResourceMetadata(config, logger)
@@ -49,7 +49,7 @@ func NewCPUMemoryScaler(resourceName v1.ResourceName, config *scalersconfig.Scal
 		metadata:     meta,
 		resourceName: resourceName,
 		logger:       logger,
-		client:       client,
+		kubeClient:   kubeClient,
 	}, nil
 }
 
@@ -121,7 +121,7 @@ func (s *cpuMemoryScaler) Close(context.Context) error {
 func (s *cpuMemoryScaler) getHPA(ctx context.Context) (*v2.HorizontalPodAutoscaler, error) {
 	if s.metadata.ScalableObjectType == "ScaledObject" {
 		scaledObject := &kedav1alpha1.ScaledObject{}
-		err := s.client.Get(ctx, types.NamespacedName{
+		err := s.kubeClient.Get(ctx, types.NamespacedName{
 			Name:      s.metadata.ScalableObjectName,
 			Namespace: s.metadata.ScalableObjectNamespace,
 		}, scaledObject)
@@ -131,7 +131,7 @@ func (s *cpuMemoryScaler) getHPA(ctx context.Context) (*v2.HorizontalPodAutoscal
 		}
 
 		hpa := &v2.HorizontalPodAutoscaler{}
-		err = s.client.Get(ctx, types.NamespacedName{
+		err = s.kubeClient.Get(ctx, types.NamespacedName{
 			Name:      scaledObject.Status.HpaName,
 			Namespace: s.metadata.ScalableObjectNamespace,
 		}, hpa)
@@ -143,7 +143,7 @@ func (s *cpuMemoryScaler) getHPA(ctx context.Context) (*v2.HorizontalPodAutoscal
 		return hpa, nil
 	} else if s.metadata.ScalableObjectType == "ScaledJob" {
 		scaledJob := &kedav1alpha1.ScaledJob{}
-		err := s.client.Get(ctx, types.NamespacedName{
+		err := s.kubeClient.Get(ctx, types.NamespacedName{
 			Name:      s.metadata.ScalableObjectName,
 			Namespace: s.metadata.ScalableObjectNamespace,
 		}, scaledJob)
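Reviewer note: the activation check these renames feed into is not visible in this hunk. Judging from getHPA above and the unit tests below (utilization 50 vs. activationValue 40 => active, 30 vs. 40 => inactive), it presumably reduces to comparing the HPA's current average utilization against the trigger's activation threshold. The sketch below illustrates that reading only; isActive and the ActivationValue field are hypothetical names, not code from this patch.

// Hypothetical sketch, not part of this patch: assumes cpuMemoryMetadata
// gained a float64 ActivationValue parsed from the "activationValue"
// trigger metadata.
func (s *cpuMemoryScaler) isActive(ctx context.Context) (bool, error) {
	hpa, err := s.getHPA(ctx)
	if err != nil {
		return false, err
	}
	for _, metric := range hpa.Status.CurrentMetrics {
		if metric.Type != v2.ResourceMetricSourceType || metric.Resource == nil ||
			metric.Resource.Name != s.resourceName {
			continue
		}
		if current := metric.Resource.Current.AverageUtilization; current != nil {
			// Active once observed utilization exceeds the threshold,
			// matching the 50>40 and 30<40 expectations in the tests.
			return float64(*current) > s.metadata.ActivationValue, nil
		}
	}
	return false, nil
}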
"Utilization", @@ -37,6 +41,7 @@ var testCPUMemoryMetadata = []parseCPUMemoryMetadataTestData{ {v2.UtilizationMetricType, map[string]string{"value": "50"}, false}, {"", map[string]string{"type": "AverageValue", "value": "50"}, false}, {v2.AverageValueMetricType, map[string]string{"value": "50"}, false}, + {"", map[string]string{"type": "AverageValue", "value": "50", "activationValue": "40"}, false}, {"", map[string]string{"type": "Value", "value": "50"}, true}, {v2.ValueMetricType, map[string]string{"value": "50"}, true}, {"", map[string]string{"type": "AverageValue"}, true}, @@ -64,7 +69,8 @@ func TestGetMetricSpecForScaling(t *testing.T) { config := &scalersconfig.ScalerConfig{ TriggerMetadata: validCPUMemoryMetadata, } - scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config) + kubeClient := fake.NewFakeClient() + scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient) metricSpec := scaler.GetMetricSpecForScaling(context.Background()) assert.Equal(t, metricSpec[0].Type, v2.ResourceMetricSourceType) @@ -76,7 +82,7 @@ func TestGetMetricSpecForScaling(t *testing.T) { TriggerMetadata: map[string]string{"value": "50"}, MetricType: v2.UtilizationMetricType, } - scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config) + scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient) metricSpec = scaler.GetMetricSpecForScaling(context.Background()) assert.Equal(t, metricSpec[0].Type, v2.ResourceMetricSourceType) @@ -89,7 +95,8 @@ func TestGetContainerMetricSpecForScaling(t *testing.T) { config := &scalersconfig.ScalerConfig{ TriggerMetadata: validContainerCPUMemoryMetadata, } - scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config) + kubeClient := fake.NewFakeClient() + scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient) metricSpec := scaler.GetMetricSpecForScaling(context.Background()) assert.Equal(t, metricSpec[0].Type, v2.ContainerResourceMetricSourceType) @@ -102,7 +109,7 @@ func TestGetContainerMetricSpecForScaling(t *testing.T) { TriggerMetadata: map[string]string{"value": "50", "containerName": "bar"}, MetricType: v2.UtilizationMetricType, } - scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config) + scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient) metricSpec = scaler.GetMetricSpecForScaling(context.Background()) assert.Equal(t, metricSpec[0].Type, v2.ContainerResourceMetricSourceType) @@ -110,3 +117,65 @@ func TestGetContainerMetricSpecForScaling(t *testing.T) { assert.Equal(t, metricSpec[0].ContainerResource.Target.Type, v2.UtilizationMetricType) assert.Equal(t, metricSpec[0].ContainerResource.Container, "bar") } + +func createScaledObject() *kedav1alpha1.ScaledObject { + return &kedav1alpha1.ScaledObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-name", + Namespace: "test-namespace", + }, + Status: kedav1alpha1.ScaledObjectStatus{ + HpaName: "keda-hpa-test-name", + }, + } +} + +func createHPAWithAverageUtilization(averageUtilization int32) *v2.HorizontalPodAutoscaler { + return &v2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "keda-hpa-test-name", + Namespace: "test-namespace", + }, + Status: v2.HorizontalPodAutoscalerStatus{ + CurrentMetrics: []v2.MetricStatus{ + { + Type: v2.ResourceMetricSourceType, + Resource: &v2.ResourceMetricStatus{ + Name: v1.ResourceCPU, + Current: v2.MetricValueStatus{ + AverageUtilization: &averageUtilization, + }, + }, + }, + }, + }, + } +} + +func TestGetMetricsAndActivity_IsActive(t *testing.T) { + config := &scalersconfig.ScalerConfig{ + TriggerMetadata: validCPUMemoryMetadata, + 
diff --git a/tests/scalers/cpu/cpu_test.go b/tests/scalers/cpu/cpu_test.go
index f24922dc61d..1c4564d1ad4 100644
--- a/tests/scalers/cpu/cpu_test.go
+++ b/tests/scalers/cpu/cpu_test.go
@@ -11,8 +11,6 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
-
-	. "github.com/kedacore/keda/v2/tests/helper"
 )
 
 // Load environment variables from .env file
@@ -135,7 +133,8 @@ spec:
       - type: cpu
         metadata:
           type: Utilization
-          value: "50"
+          value: "10"
+          activationValue: "5"
       - type: kubernetes-workload
         metadata:
           podSelector: 'pod={{.WorkloadDeploymentName}}'
@@ -245,9 +244,18 @@ func scaleToZero(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicas, 60, 1),
 		"Replica count should be %v", maxReplicas)
 
-	// scale external trigger in (expect replicas back to 0 -- external trigger not active)
+	// activate cpu trigger
+	KubectlReplaceWithTemplate(t, data, "triggerJobTemplate", triggerJob)
+
+	// replica count should not change from maxReplicas
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, maxReplicas, 60)
+
+	// scale external trigger in (expect replicas to stay at maxReplicas -- external trigger not active)
 	KubernetesScaleDeployment(t, kc, workloadDeploymentName, int64(minReplicas), testNamespace)
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, maxReplicas, 60)
 
+	// remove trigger job to deactivate cpu trigger
+	KubectlDeleteWithTemplate(t, data, "triggerJobTemplate", triggerJob)
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicas, 60, 1),
 		"Replica count should be %v", minReplicas)
 }
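The e2e flow above replaces and later deletes a triggerJob template that is defined elsewhere in cpu_test.go and is not part of this diff. For readability only, here is a hypothetical stand-in for what such a CPU-burning Job template could look like, in the file's usual Go-const-with-embedded-YAML style (the real template may differ):

// Hypothetical stand-in; the actual triggerJob lives outside this diff.
const exampleTriggerJob = `apiVersion: batch/v1
kind: Job
metadata:
  name: trigger-job
  namespace: {{.TestNamespace}}
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: cpu-burn
          image: busybox
          # Busy-loop until the Job is deleted, driving utilization
          # well past the trigger's activationValue of "5".
          command: ["sh", "-c", "while :; do :; done"]
`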