From eace528368794a3347079a47869358d021770bf0 Mon Sep 17 00:00:00 2001
From: root
Date: Tue, 19 Sep 2023 13:06:21 +0800
Subject: [PATCH] add tests for sidecar containers
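
Add an e2e test that verifies metrics are reported for a pod whose init
container is restartable (a sidecar). The test is only registered when
the kube-apiserver runs with the SidecarContainers feature gate, which
the new kind config enables for the 1.28 job. KIND_CONFIG becomes
overridable in test-e2e.sh, and the resource-consumer durations are
raised from 60s to 600s so the consumer pods keep running for the
whole suite.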
---
 Makefile                                      |   2 +-
 test/e2e_test.go                              | 145 +++++++++++++++---
 test/kind-config-with-sidecar-containers.yaml |  17 ++
 test/test-e2e.sh                              |   2 +-
 4 files changed, 142 insertions(+), 24 deletions(-)
 create mode 100644 test/kind-config-with-sidecar-containers.yaml

diff --git a/Makefile b/Makefile
index 7fc3862bf..f215d826c 100644
--- a/Makefile
+++ b/Makefile
@@ -183,7 +183,7 @@ test-e2e-all: test-e2e-1.28 test-e2e-1.27 test-e2e-1.26
 
 .PHONY: test-e2e-1.28
 test-e2e-1.28:
-	NODE_IMAGE=kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 ./test/test-e2e.sh
+	NODE_IMAGE=kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 KIND_CONFIG="${PWD}/test/kind-config-with-sidecar-containers.yaml" ./test/test-e2e.sh
 
 .PHONY: test-e2e-1.27
 test-e2e-1.27:
diff --git a/test/e2e_test.go b/test/e2e_test.go
index c6279e9f7..9b71c2c4c 100644
--- a/test/e2e_test.go
+++ b/test/e2e_test.go
@@ -47,17 +47,21 @@ import (
 )
 
 const (
-	localPort               = 10250
-	cpuConsumerPodName      = "cpu-consumer"
-	memoryConsumerPodName   = "memory-consumer"
-	initContainerPodName    = "cmwithinitcontainer-consumer"
-	sideCarContainerPodName = "sidecarpod-consumer"
-	labelSelector           = "metrics-server-skip!=true"
-	skipLabel               = "metrics-server-skip==true"
-	labelKey                = "metrics-server-skip"
+	localPort                    = 10250
+	cpuConsumerPodName           = "cpu-consumer"
+	memoryConsumerPodName        = "memory-consumer"
+	initContainerPodName         = "cmwithinitcontainer-consumer"
+	sideCarContainerPodName      = "sidecarpod-consumer"
+	initSidecarContainersPodName = "initsidecarpod-consumer"
+	labelSelector                = "metrics-server-skip!=true"
+	skipLabel                    = "metrics-server-skip==true"
+	labelKey                     = "metrics-server-skip"
 )
 
-var client *clientset.Clientset
+var (
+	client                 *clientset.Clientset
+	testSideCarsContainers bool
+)
 
 func TestMetricsServer(t *testing.T) {
 	RegisterFailHandler(Fail)
@@ -85,6 +89,13 @@ var _ = BeforeSuite(func() {
 	if err != nil {
 		panic(err)
 	}
+	if testSideCarsContainers {
+		deletePod(client, initSidecarContainersPodName)
+		err = consumeWithInitSideCarContainer(client, initSidecarContainersPodName, labelKey)
+		if err != nil {
+			panic(err)
+		}
+	}
 })
 
 var _ = AfterSuite(func() {
@@ -92,6 +103,7 @@ var _ = AfterSuite(func() {
 	deletePod(client, memoryConsumerPodName)
 	deletePod(client, initContainerPodName)
 	deletePod(client, sideCarContainerPodName)
+	deletePod(client, initSidecarContainersPodName)
 })
 
 var _ = Describe("MetricsServer", func() {
@@ -108,6 +120,8 @@ var _ = Describe("MetricsServer", func() {
 		panic(err)
 	}
 
+	testSideCarsContainers = hasSidecarFeatureEnabled(client)
+
 	It("exposes metrics from at least one pod in cluster", func() {
 		podMetrics, err := mclient.MetricsV1beta1().PodMetricses(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
 		Expect(err).NotTo(HaveOccurred(), "Failed to list pod metrics")
@@ -194,6 +208,29 @@ var _ = Describe("MetricsServer", func() {
 		Expect(usage.Cpu().MilliValue()).NotTo(Equal(0), "CPU of Container %q should not be equal zero", ms.Containers[1].Name)
 		Expect(usage.Memory().Value()/1024/1024).NotTo(Equal(0), "Memory of Container %q should not be equal zero", ms.Containers[1].Name)
 	})
+
+	if testSideCarsContainers {
+		It("returns metric for pod with init sidecar container", func() {
+			Expect(err).NotTo(HaveOccurred(), "Failed to create %q pod", initSidecarContainersPodName)
+			deadline := time.Now().Add(60 * time.Second)
+			var ms *v1beta1.PodMetrics
+			for {
+				ms, err = mclient.MetricsV1beta1().PodMetricses(metav1.NamespaceDefault).Get(context.TODO(), initSidecarContainersPodName, metav1.GetOptions{})
+				if err == nil || time.Now().After(deadline) {
+					break
+				}
+				time.Sleep(5 * time.Second)
+			}
+			Expect(err).NotTo(HaveOccurred(), "Failed to get %q pod", initSidecarContainersPodName)
+			Expect(ms.Containers).To(HaveLen(2), "Unexpected number of containers")
+			usage := ms.Containers[0].Usage
+			Expect(usage.Cpu().MilliValue()).NotTo(Equal(0), "CPU should not be equal zero")
+			Expect(usage.Memory().Value()/1024/1024).NotTo(Equal(0), "Memory should not be equal zero")
+			usage = ms.Containers[1].Usage
+			Expect(usage.Cpu().MilliValue()).NotTo(Equal(0), "CPU should not be equal zero")
+			Expect(usage.Memory().Value()/1024/1024).NotTo(Equal(0), "Memory should not be equal zero")
+		})
+	}
 	It("passes readyz probe", func() {
 		msPods := mustGetMetricsServerPods(client)
 		for _, pod := range msPods {
@@ -418,15 +455,8 @@ func watchPodReadyStatus(client clientset.Interface, podNamespace string, podName string, resourceVersion string) error {
 		if !ok {
 			return fmt.Errorf("Watch pod failed")
 		}
-		var containerReady = false
 		if pod.Name == podName {
-			for _, containerStatus := range pod.Status.ContainerStatuses {
-				if !containerStatus.Ready {
-					break
-				}
-				containerReady = true
-			}
-			if containerReady {
+			if checkPodContainersReady(pod) {
 				return nil
 			}
 		}
@@ -442,7 +472,7 @@ func consumeCPU(client clientset.Interface, podName, nodeSelector string) error {
 				{
 					Name:    podName,
 					Command: []string{"./consume-cpu/consume-cpu"},
-					Args:    []string{"--duration-sec=60", "--millicores=50"},
+					Args:    []string{"--duration-sec=600", "--millicores=50"},
 					Image:   "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
 					Resources: corev1.ResourceRequirements{
 						Requests: map[corev1.ResourceName]resource.Quantity{
@@ -470,7 +500,7 @@ func consumeMemory(client clientset.Interface, podName, nodeSelector string) error {
 				{
 					Name:    podName,
 					Command: []string{"stress"},
-					Args:    []string{"-m", "1", "--vm-bytes", "50M", "--vm-hang", "0", "-t", "60"},
+					Args:    []string{"-m", "1", "--vm-bytes", "50M", "--vm-hang", "0", "-t", "600"},
 					Image:   "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
 					Resources: corev1.ResourceRequirements{
 						Requests: map[corev1.ResourceName]resource.Quantity{
@@ -497,7 +527,7 @@ func consumeWithInitContainer(client clientset.Interface, podName, nodeSelector string) error {
 				{
 					Name:    podName,
 					Command: []string{"./consume-cpu/consume-cpu"},
-					Args:    []string{"--duration-sec=60", "--millicores=50"},
+					Args:    []string{"--duration-sec=600", "--millicores=50"},
 					Image:   "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
 					Resources: corev1.ResourceRequirements{
 						Requests: map[corev1.ResourceName]resource.Quantity{
@@ -534,7 +564,7 @@ func consumeWithSideCarContainer(client clientset.Interface, podName, nodeSelector string) error {
 				{
 					Name:    podName,
 					Command: []string{"./consume-cpu/consume-cpu"},
-					Args:    []string{"--duration-sec=60", "--millicores=50"},
+					Args:    []string{"--duration-sec=600", "--millicores=50"},
 					Image:   "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
 					Resources: corev1.ResourceRequirements{
 						Requests: map[corev1.ResourceName]resource.Quantity{
@@ -546,7 +576,7 @@ func consumeWithSideCarContainer(client clientset.Interface, podName, nodeSelector string) error {
 				{
 					Name:    "sidecar-container",
 					Command: []string{"./consume-cpu/consume-cpu"},
-					Args:    []string{"--duration-sec=60", "--millicores=50"},
+					Args:    []string{"--duration-sec=600", "--millicores=50"},
"registry.k8s.io/e2e-test-images/resource-consumer:1.9", Resources: corev1.ResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ @@ -567,6 +597,51 @@ func consumeWithSideCarContainer(client clientset.Interface, podName, nodeSelect return watchPodReadyStatus(client, metav1.NamespaceDefault, podName, currentPod.ResourceVersion) } +func consumeWithInitSideCarContainer(client clientset.Interface, podName, nodeSelector string) error { + startPolicy := corev1.ContainerRestartPolicyAlways + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: podName}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: podName, + Command: []string{"./consume-cpu/consume-cpu"}, + Args: []string{"--duration-sec=600", "--millicores=50"}, + Image: "registry.k8s.io/e2e-test-images/resource-consumer:1.9", + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: mustQuantity("100m"), + corev1.ResourceMemory: mustQuantity("100Mi"), + }, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "init-container", + Command: []string{"./consume-cpu/consume-cpu"}, + Args: []string{"--duration-sec=600", "--millicores=50"}, + Image: "registry.k8s.io/e2e-test-images/resource-consumer:1.9", + RestartPolicy: &startPolicy, + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: mustQuantity("100m"), + corev1.ResourceMemory: mustQuantity("100Mi"), + }, + }, + }, + }, + Affinity: affinity(nodeSelector), + }, + } + + currentPod, err := client.CoreV1().Pods(metav1.NamespaceDefault).Create(context.TODO(), pod, metav1.CreateOptions{}) + if err != nil { + return err + } + return watchPodReadyStatus(client, metav1.NamespaceDefault, podName, currentPod.ResourceVersion) +} + func deletePod(client clientset.Interface, podName string) { var gracePeriodSeconds int64 = 0 _ = client.CoreV1().Pods(metav1.NamespaceDefault).Delete(context.TODO(), podName, metav1.DeleteOptions{ @@ -599,3 +674,29 @@ func affinity(key string) *corev1.Affinity { }, } } + +func checkPodContainersReady(pod *corev1.Pod) bool { + for _, containerStatus := range pod.Status.InitContainerStatuses { + if !containerStatus.Ready { + return false + } + } + for _, containerStatus := range pod.Status.ContainerStatuses { + if !containerStatus.Ready { + return false + } + } + return true +} + +func hasSidecarFeatureEnabled(client clientset.Interface) bool { + if apiServerPod, err := client.CoreV1().Pods("kube-system").Get(context.TODO(), "kube-apiserver-e2e-control-plane", metav1.GetOptions{}); err == nil { + cmds := apiServerPod.Spec.Containers[0].Command + for index := range cmds { + if strings.Contains(cmds[index], "--feature-gates") && strings.Contains(cmds[index], "SidecarContainers=true") { + return true + } + } + } + return false +} diff --git a/test/kind-config-with-sidecar-containers.yaml b/test/kind-config-with-sidecar-containers.yaml new file mode 100644 index 000000000..13d8d08fa --- /dev/null +++ b/test/kind-config-with-sidecar-containers.yaml @@ -0,0 +1,17 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +featureGates: + "SidecarContainers": true +nodes: + - role: control-plane + kubeadmConfigPatches: + - | + kind: ClusterConfiguration + apiServer: + extraArgs: + "enable-aggregator-routing": "true" + - role: worker + - role: worker + labels: + metrics-server-skip: true + diff --git a/test/test-e2e.sh b/test/test-e2e.sh index 18b4ad234..612209e04 100755 --- a/test/test-e2e.sh +++ 
+++ b/test/test-e2e.sh
@@ -4,6 +4,7 @@
 set -e
 
 : ${NODE_IMAGE:?Need to set NODE_IMAGE to test}
 : ${SKAFFOLD_PROFILE:="test"}
+: ${KIND_CONFIG:="$PWD/test/kind-config.yaml"}
 
 KIND_VERSION=0.20.0
@@ -76,7 +77,6 @@ setup_kubectl() {
 }
 
 create_cluster() {
-  KIND_CONFIG="$PWD/test/kind-config.yaml"
   if ! (${KIND} create cluster --name=e2e --image=${NODE_IMAGE} --config=${KIND_CONFIG}) ; then
     echo "Could not create KinD cluster"
    exit 1