add tests for sidecar containers
yangjunmyfm192085 committed Dec 4, 2023
1 parent 4bb8fa5 commit eace528
Showing 4 changed files with 142 additions and 24 deletions.
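For context: a sidecar container (KEP-753, alpha in Kubernetes 1.28 behind the SidecarContainers feature gate) is an init container whose restartPolicy is set to Always, so it keeps running alongside the pod's regular containers instead of exiting before they start. The tests added here verify that metrics-server reports usage for such containers. Below is a minimal sketch of the pod shape being exercised, using the same k8s.io/api types as the test file; the function and pod names are illustrative and not part of this commit:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// sidecarPod builds a pod whose init container is promoted to a sidecar.
// Setting RestartPolicy to Always is the single field that distinguishes a
// sidecar from an ordinary init container (requires k8s.io/api >= v0.28 and
// a cluster with the SidecarContainers feature gate enabled).
func sidecarPod() *corev1.Pod {
	always := corev1.ContainerRestartPolicyAlways
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "sidecar-example"},
		Spec: corev1.PodSpec{
			InitContainers: []corev1.Container{{
				Name:          "sidecar",
				Image:         "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
				RestartPolicy: &always, // keeps running for the pod's whole lifetime
			}},
			Containers: []corev1.Container{{
				Name:  "app",
				Image: "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
			}},
		},
	}
}

func main() {
	fmt.Println(sidecarPod().Spec.InitContainers[0].Name)
}

The e2e helper added in this commit, consumeWithInitSideCarContainer, builds this same shape with resource-consumer workloads in both containers.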
2 changes: 1 addition & 1 deletion Makefile
@@ -183,7 +183,7 @@ test-e2e-all: test-e2e-1.28 test-e2e-1.27 test-e2e-1.26

 .PHONY: test-e2e-1.28
 test-e2e-1.28:
-	NODE_IMAGE=kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 ./test/test-e2e.sh
+	NODE_IMAGE=kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 KIND_CONFIG="${PWD}/test/kind-config-with-sidecar-containers.yaml" ./test/test-e2e.sh
 
 .PHONY: test-e2e-1.27
 test-e2e-1.27:
145 changes: 123 additions & 22 deletions test/e2e_test.go
@@ -47,17 +47,21 @@ import (
 )
 
 const (
-	localPort               = 10250
-	cpuConsumerPodName      = "cpu-consumer"
-	memoryConsumerPodName   = "memory-consumer"
-	initContainerPodName    = "cmwithinitcontainer-consumer"
-	sideCarContainerPodName = "sidecarpod-consumer"
-	labelSelector           = "metrics-server-skip!=true"
-	skipLabel               = "metrics-server-skip==true"
-	labelKey                = "metrics-server-skip"
+	localPort                    = 10250
+	cpuConsumerPodName           = "cpu-consumer"
+	memoryConsumerPodName        = "memory-consumer"
+	initContainerPodName         = "cmwithinitcontainer-consumer"
+	sideCarContainerPodName      = "sidecarpod-consumer"
+	initSidecarContainersPodName = "initsidecarpod-consumer"
+	labelSelector                = "metrics-server-skip!=true"
+	skipLabel                    = "metrics-server-skip==true"
+	labelKey                     = "metrics-server-skip"
 )
 
-var client *clientset.Clientset
+var (
+	client                 *clientset.Clientset
+	testSideCarsContainers bool
+)
 
 func TestMetricsServer(t *testing.T) {
 	RegisterFailHandler(Fail)
@@ -85,13 +89,21 @@ var _ = BeforeSuite(func() {
 	if err != nil {
 		panic(err)
 	}
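+	// When the cluster supports sidecar containers, also create a consumer pod whose init container runs as a sidecar.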
+	if testSideCarsContainers {
+		deletePod(client, initSidecarContainersPodName)
+		err = consumeWithInitSideCarContainer(client, initSidecarContainersPodName, labelKey)
+		if err != nil {
+			panic(err)
+		}
+	}
 })
 
 var _ = AfterSuite(func() {
 	deletePod(client, cpuConsumerPodName)
 	deletePod(client, memoryConsumerPodName)
 	deletePod(client, initContainerPodName)
 	deletePod(client, sideCarContainerPodName)
+	deletePod(client, initSidecarContainersPodName)
 })
 
 var _ = Describe("MetricsServer", func() {
@@ -108,6 +120,8 @@ var _ = Describe("MetricsServer", func() {
 		panic(err)
 	}

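+	// Record once, while the spec tree is built, whether the kube-apiserver enables the SidecarContainers feature gate.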
+	testSideCarsContainers = hasSidecarFeatureEnabled(client)
+
It("exposes metrics from at least one pod in cluster", func() {
podMetrics, err := mclient.MetricsV1beta1().PodMetricses(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to list pod metrics")
@@ -194,6 +208,29 @@ var _ = Describe("MetricsServer", func() {
 		Expect(usage.Cpu().MilliValue()).NotTo(Equal(0), "CPU of Container %q should not be equal zero", ms.Containers[1].Name)
 		Expect(usage.Memory().Value()/1024/1024).NotTo(Equal(0), "Memory of Container %q should not be equal zero", ms.Containers[1].Name)
 	})

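+	// These specs are registered only when hasSidecarFeatureEnabled reported true while the spec tree was built.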
+	if testSideCarsContainers {
+		It("returns metric for pod with init sidecar container", func() {
+			Expect(err).NotTo(HaveOccurred(), "Failed to create %q pod", initSidecarContainersPodName)
+			deadline := time.Now().Add(60 * time.Second)
+			var ms *v1beta1.PodMetrics
+			for {
+				ms, err = mclient.MetricsV1beta1().PodMetricses(metav1.NamespaceDefault).Get(context.TODO(), initSidecarContainersPodName, metav1.GetOptions{})
+				if err == nil || time.Now().After(deadline) {
+					break
+				}
+				time.Sleep(5 * time.Second)
+			}
+			Expect(err).NotTo(HaveOccurred(), "Failed to get %q pod", initSidecarContainersPodName)
+			Expect(ms.Containers).To(HaveLen(2), "Unexpected number of containers")
+			usage := ms.Containers[0].Usage
+			Expect(usage.Cpu().MilliValue()).NotTo(Equal(0), "CPU should not be equal zero")
+			Expect(usage.Memory().Value()/1024/1024).NotTo(Equal(0), "Memory should not be equal zero")
+			usage = ms.Containers[1].Usage
+			Expect(usage.Cpu().MilliValue()).NotTo(Equal(0), "CPU should not be equal zero")
+			Expect(usage.Memory().Value()/1024/1024).NotTo(Equal(0), "Memory should not be equal zero")
+		})
+	}
It("passes readyz probe", func() {
msPods := mustGetMetricsServerPods(client)
for _, pod := range msPods {
@@ -418,15 +455,8 @@ func watchPodReadyStatus(client clientset.Interface, podNamespace string, podName
 			if !ok {
 				return fmt.Errorf("Watch pod failed")
 			}
-			var containerReady = false
 			if pod.Name == podName {
-				for _, containerStatus := range pod.Status.ContainerStatuses {
-					if !containerStatus.Ready {
-						break
-					}
-					containerReady = true
-				}
-				if containerReady {
+				if checkPodContainersReady(pod) {
 					return nil
 				}
 			}
@@ -442,7 +472,7 @@ func consumeCPU(client clientset.Interface, podName, nodeSelector string) error
 			{
 				Name:    podName,
 				Command: []string{"./consume-cpu/consume-cpu"},
-				Args:    []string{"--duration-sec=60", "--millicores=50"},
+				Args:    []string{"--duration-sec=600", "--millicores=50"},
 				Image:   "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
 				Resources: corev1.ResourceRequirements{
 					Requests: map[corev1.ResourceName]resource.Quantity{
@@ -470,7 +500,7 @@ func consumeMemory(client clientset.Interface, podName, nodeSelector string) error
 			{
 				Name:    podName,
 				Command: []string{"stress"},
-				Args:    []string{"-m", "1", "--vm-bytes", "50M", "--vm-hang", "0", "-t", "60"},
+				Args:    []string{"-m", "1", "--vm-bytes", "50M", "--vm-hang", "0", "-t", "600"},
 				Image:   "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
 				Resources: corev1.ResourceRequirements{
 					Requests: map[corev1.ResourceName]resource.Quantity{
@@ -497,7 +527,7 @@ func consumeWithInitContainer(client clientset.Interface, podName, nodeSelector string) error
 			{
 				Name:    podName,
 				Command: []string{"./consume-cpu/consume-cpu"},
-				Args:    []string{"--duration-sec=60", "--millicores=50"},
+				Args:    []string{"--duration-sec=600", "--millicores=50"},
 				Image:   "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
 				Resources: corev1.ResourceRequirements{
 					Requests: map[corev1.ResourceName]resource.Quantity{
@@ -534,7 +564,7 @@ func consumeWithSideCarContainer(client clientset.Interface, podName, nodeSelector string) error
 			{
 				Name:    podName,
 				Command: []string{"./consume-cpu/consume-cpu"},
-				Args:    []string{"--duration-sec=60", "--millicores=50"},
+				Args:    []string{"--duration-sec=600", "--millicores=50"},
 				Image:   "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
 				Resources: corev1.ResourceRequirements{
 					Requests: map[corev1.ResourceName]resource.Quantity{
@@ -546,7 +576,7 @@
 			{
 				Name:    "sidecar-container",
 				Command: []string{"./consume-cpu/consume-cpu"},
-				Args:    []string{"--duration-sec=60", "--millicores=50"},
+				Args:    []string{"--duration-sec=600", "--millicores=50"},
 				Image:   "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
 				Resources: corev1.ResourceRequirements{
 					Requests: map[corev1.ResourceName]resource.Quantity{
@@ -567,6 +597,51 @@
 	return watchPodReadyStatus(client, metav1.NamespaceDefault, podName, currentPod.ResourceVersion)
 }
 

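+// consumeWithInitSideCarContainer creates a pod whose init container sets RestartPolicy: Always (the sidecar pattern) and waits for the pod to report ready.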
+func consumeWithInitSideCarContainer(client clientset.Interface, podName, nodeSelector string) error {
+	startPolicy := corev1.ContainerRestartPolicyAlways
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{Name: podName},
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{
+				{
+					Name:    podName,
+					Command: []string{"./consume-cpu/consume-cpu"},
+					Args:    []string{"--duration-sec=600", "--millicores=50"},
+					Image:   "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
+					Resources: corev1.ResourceRequirements{
+						Requests: map[corev1.ResourceName]resource.Quantity{
+							corev1.ResourceCPU:    mustQuantity("100m"),
+							corev1.ResourceMemory: mustQuantity("100Mi"),
+						},
+					},
+				},
+			},
+			InitContainers: []corev1.Container{
+				{
+					Name:          "init-container",
+					Command:       []string{"./consume-cpu/consume-cpu"},
+					Args:          []string{"--duration-sec=600", "--millicores=50"},
+					Image:         "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
+					RestartPolicy: &startPolicy,
+					Resources: corev1.ResourceRequirements{
+						Requests: map[corev1.ResourceName]resource.Quantity{
+							corev1.ResourceCPU:    mustQuantity("100m"),
+							corev1.ResourceMemory: mustQuantity("100Mi"),
+						},
+					},
+				},
+			},
+			Affinity: affinity(nodeSelector),
+		},
+	}
+
+	currentPod, err := client.CoreV1().Pods(metav1.NamespaceDefault).Create(context.TODO(), pod, metav1.CreateOptions{})
+	if err != nil {
+		return err
+	}
+	return watchPodReadyStatus(client, metav1.NamespaceDefault, podName, currentPod.ResourceVersion)
+}

 func deletePod(client clientset.Interface, podName string) {
 	var gracePeriodSeconds int64 = 0
 	_ = client.CoreV1().Pods(metav1.NamespaceDefault).Delete(context.TODO(), podName, metav1.DeleteOptions{
@@ -599,3 +674,29 @@ func affinity(key string) *corev1.Affinity {
 		},
 	}
 }

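+// checkPodContainersReady reports whether every init container and every regular container of the pod is Ready.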
+func checkPodContainersReady(pod *corev1.Pod) bool {
+	for _, containerStatus := range pod.Status.InitContainerStatuses {
+		if !containerStatus.Ready {
+			return false
+		}
+	}
+	for _, containerStatus := range pod.Status.ContainerStatuses {
+		if !containerStatus.Ready {
+			return false
+		}
+	}
+	return true
+}

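+// hasSidecarFeatureEnabled inspects the kube-apiserver pod's command line for a --feature-gates flag containing SidecarContainers=true.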
+func hasSidecarFeatureEnabled(client clientset.Interface) bool {
+	if apiServerPod, err := client.CoreV1().Pods("kube-system").Get(context.TODO(), "kube-apiserver-e2e-control-plane", metav1.GetOptions{}); err == nil {
+		cmds := apiServerPod.Spec.Containers[0].Command
+		for index := range cmds {
+			if strings.Contains(cmds[index], "--feature-gates") && strings.Contains(cmds[index], "SidecarContainers=true") {
+				return true
+			}
+		}
+	}
+	return false
+}
17 changes: 17 additions & 0 deletions test/kind-config-with-sidecar-containers.yaml
@@ -0,0 +1,17 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+featureGates:
+  "SidecarContainers": true
+nodes:
+- role: control-plane
+  kubeadmConfigPatches:
+  - |
+    kind: ClusterConfiguration
+    apiServer:
+      extraArgs:
+        "enable-aggregator-routing": "true"
+- role: worker
+- role: worker
+  labels:
+    metrics-server-skip: true

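This kind config enables the SidecarContainers feature gate cluster-wide and labels the second worker with metrics-server-skip, the same key that the suite's labelSelector and skipLabel constants match against.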
2 changes: 1 addition & 1 deletion test/test-e2e.sh
@@ -4,6 +4,7 @@ set -e

 : ${NODE_IMAGE:?Need to set NODE_IMAGE to test}
 : ${SKAFFOLD_PROFILE:="test"}
+: ${KIND_CONFIG:="$PWD/test/kind-config.yaml"}


 KIND_VERSION=0.20.0
@@ -76,7 +77,6 @@ setup_kubectl() {
 }
 
 create_cluster() {
-	KIND_CONFIG="$PWD/test/kind-config.yaml"
 	if ! (${KIND} create cluster --name=e2e --image=${NODE_IMAGE} --config=${KIND_CONFIG}) ; then
 		echo "Could not create KinD cluster"
 		exit 1
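With KIND_CONFIG now parameterized (defaulting to $PWD/test/kind-config.yaml), the test-e2e-1.28 Makefile target can point the script at the sidecar-enabled config, for example via make test-e2e-1.28, while the other version targets keep the default cluster config.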
