diff --git a/charts/crds/kdoctor.io_apphttphealthies.yaml b/charts/crds/kdoctor.io_apphttphealthies.yaml
index a3da6714..10b3d591 100644
--- a/charts/crds/kdoctor.io_apphttphealthies.yaml
+++ b/charts/crds/kdoctor.io_apphttphealthies.yaml
@@ -59,10 +59,6 @@ spec:
           spec:
             properties:
               agentSpec:
-                default:
-                  hostNetwork: false
-                  kind: DaemonSet
-                  terminationGracePeriodMinutes: 60
                 properties:
                   affinity:
                     description: Affinity is a group of affinity scheduling rules.
diff --git a/charts/crds/kdoctor.io_netdnses.yaml b/charts/crds/kdoctor.io_netdnses.yaml
index 425e1660..59fb1284 100644
--- a/charts/crds/kdoctor.io_netdnses.yaml
+++ b/charts/crds/kdoctor.io_netdnses.yaml
@@ -57,10 +57,6 @@ spec:
           spec:
             properties:
               agentSpec:
-                default:
-                  hostNetwork: false
-                  kind: DaemonSet
-                  terminationGracePeriodMinutes: 60
                 properties:
                   affinity:
                     description: Affinity is a group of affinity scheduling rules.
diff --git a/charts/crds/kdoctor.io_netreaches.yaml b/charts/crds/kdoctor.io_netreaches.yaml
index 613a4092..69f17904 100644
--- a/charts/crds/kdoctor.io_netreaches.yaml
+++ b/charts/crds/kdoctor.io_netreaches.yaml
@@ -59,10 +59,6 @@ spec:
           spec:
             properties:
               agentSpec:
-                default:
-                  hostNetwork: false
-                  kind: DaemonSet
-                  terminationGracePeriodMinutes: 60
                 properties:
                   affinity:
                     description: Affinity is a group of affinity scheduling rules.
diff --git a/charts/templates/_helpers.tpl b/charts/templates/_helpers.tpl
index f557186e..6f04849e 100644
--- a/charts/templates/_helpers.tpl
+++ b/charts/templates/_helpers.tpl
@@ -40,7 +40,7 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}
 kdoctorAgent Selector labels
 */}}
 {{- define "project.kdoctorAgent.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "project.name" . }}
+app.kubernetes.io/name: {{ .Values.kdoctorAgent.name | trunc 63 | trimSuffix "-" }}
 app.kubernetes.io/instance: {{ .Release.Name }}
 app.kubernetes.io/component: {{ .Values.kdoctorAgent.name | trunc 63 | trimSuffix "-" }}
@@ -49,7 +49,7 @@ app.kubernetes.io/component: {{ .Values.kdoctorAgent.name | trunc 63 | trimSuffi
 kdoctorController Selector labels
 */}}
 {{- define "project.kdoctorController.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "project.name" . }}
+app.kubernetes.io/name: {{ .Values.kdoctorController.name | trunc 63 | trimSuffix "-" }}
 app.kubernetes.io/instance: {{ .Release.Name }}
 app.kubernetes.io/component: {{ .Values.kdoctorController.name | trunc 63 | trimSuffix "-" }}
 {{- end }}
diff --git a/charts/templates/daemonset.yaml b/charts/templates/daemonset.yaml
new file mode 100644
index 00000000..202ec660
--- /dev/null
+++ b/charts/templates/daemonset.yaml
@@ -0,0 +1,242 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: {{ .Values.kdoctorAgent.name | trunc 63 | trimSuffix "-" }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "project.kdoctorAgent.labels" . | nindent 4 }}
+    {{- if .Values.global.commonLabels }}
+    {{- include "tplvalues.render" ( dict "value" .Values.global.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.global.commonAnnotations }}
+  annotations:
+    {{- include "tplvalues.render" ( dict "value" .Values.global.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "project.kdoctorAgent.selectorLabels" . | nindent 6 }}
+  {{- with .Values.kdoctorAgent.updateStrategy }}
+  updateStrategy:
+    {{- toYaml .
| trim | nindent 4 }} + {{- end }} + template: + metadata: + {{- if or .Values.kdoctorAgent.podAnnotations .Values.global.commonAnnotations }} + annotations: + {{- if .Values.global.commonAnnotations }} + {{- include "tplvalues.render" ( dict "value" .Values.global.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.kdoctorAgent.podAnnotations }} + {{- include "tplvalues.render" ( dict "value" .Values.kdoctorAgent.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + labels: + {{- include "project.kdoctorAgent.selectorLabels" . | nindent 8 }} + {{- if .Values.global.commonLabels }} + {{- include "tplvalues.render" ( dict "value" .Values.global.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.kdoctorAgent.podLabels }} + {{- include "tplvalues.render" ( dict "value" .Values.kdoctorAgent.podLabels "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- if .Values.kdoctorAgent.image.imagePullSecrets }} + imagePullSecrets: + {{- with .Values.kdoctorAgent.image.imagePullSecrets }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} + {{- end }} + serviceAccountName: {{ .Values.kdoctorAgent.name | trunc 63 | trimSuffix "-" }} + priorityClassName: {{ default "system-node-critical" .Values.kdoctorAgent.priorityClassName }} + {{- if .Values.kdoctorAgent.hostnetwork }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + {{- else }} + hostNetwork: false + dnsPolicy: ClusterFirst + {{- end }} + restartPolicy: Always + {{- with .Values.kdoctorAgent.tolerations }} + tolerations: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.kdoctorAgent.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.kdoctorAgent.affinity }} + affinity: + {{- include "tplvalues.render" (dict "value" .Values.kdoctorAgent.affinity "context" $) | nindent 6 }} + {{- else }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + # Compatible with Kubernetes 1.12.x and 1.13.x + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + {{- end }} + containers: + - name: {{ .Values.kdoctorAgent.name | trunc 63 | trimSuffix "-" }} + image: {{ include "project.kdoctorAgent.image" . | quote }} + imagePullPolicy: {{ .Values.kdoctorAgent.image.pullPolicy }} + command: + - {{ .Values.kdoctorAgent.cmdBinName }} + args: + - --config-path=/tmp/config-map/conf.yml + - --tls-ca-cert=/etc/tls/ca.crt + - --tls-ca-key=/etc/tls/ca.key + - --general-agent=true + {{- if .Values.feature.enableIPv4 }} + - --service-ipv4-name={{ include "project.kdoctorAgent.serviceIpv4Name" . }} + {{- end }} + {{- if .Values.feature.enableIPv6 }} + - --service-ipv6-name={{ include "project.kdoctorAgent.serviceIpv6Name" . }} + {{- end }} + {{- with .Values.kdoctorAgent.extraArgs }} + {{- toYaml . 
| trim | nindent 8 }} + {{- end }} + ports: + {{- if .Values.kdoctorAgent.prometheus.enabled }} + - name: metrics + containerPort: {{ .Values.kdoctorAgent.prometheus.port }} + protocol: TCP + {{- end }} + {{- if or .Values.feature.enableIPv4 .Values.feature.enableIPv6 }} + - name: health + containerPort: {{ .Values.kdoctorAgent.httpServer.healthPort }} + protocol: TCP + - name: http + containerPort: {{ .Values.kdoctorAgent.httpServer.appHttpPort }} + protocol: TCP + - name: https + containerPort: {{ .Values.kdoctorAgent.httpServer.appHttpsPort }} + protocol: TCP + {{- end }} + {{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.Version }} + startupProbe: + httpGet: + path: /healthy/startup + port: {{ .Values.kdoctorAgent.httpServer.healthPort }} + scheme: HTTP + failureThreshold: {{ .Values.kdoctorAgent.httpServer.startupProbe.failureThreshold }} + periodSeconds: {{ .Values.kdoctorAgent.httpServer.startupProbe.periodSeconds }} + successThreshold: 1 + {{- end }} + livenessProbe: + httpGet: + path: /healthy/liveness + port: {{ .Values.kdoctorAgent.httpServer.healthPort }} + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: {{ .Values.kdoctorAgent.httpServer.livenessProbe.periodSeconds }} + successThreshold: 1 + failureThreshold: {{ .Values.kdoctorAgent.httpServer.livenessProbe.failureThreshold }} + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /healthy/readiness + port: {{ .Values.kdoctorAgent.httpServer.healthPort }} + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: {{ .Values.kdoctorAgent.httpServer.readinessProbe.periodSeconds }} + successThreshold: 1 + failureThreshold: {{ .Values.kdoctorAgent.httpServer.readinessProbe.failureThreshold }} + timeoutSeconds: 5 + {{- with .Values.kdoctorAgent.resources }} + resources: + {{- toYaml . | trim | nindent 12 }} + {{- end }} + env: + - name: ENV_LOG_LEVEL + value: {{ .Values.kdoctorAgent.debug.logLevel | quote }} + - name: ENV_ENABLED_METRIC + value: {{ .Values.kdoctorAgent.prometheus.enabled | quote }} + - name: ENV_METRIC_HTTP_PORT + value: {{ .Values.kdoctorAgent.prometheus.port | quote }} + - name: ENV_AGENT_HEALTH_HTTP_PORT + value: {{ .Values.kdoctorAgent.httpServer.healthPort | quote }} + - name: ENV_AGENT_APP_HTTP_PORT + value: {{ .Values.kdoctorAgent.httpServer.appHttpPort | quote }} + - name: ENV_AGENT_APP_HTTPS_PORT + value: {{ .Values.kdoctorAgent.httpServer.appHttpsPort | quote }} + - name: ENV_GOPS_LISTEN_PORT + value: {{ .Values.kdoctorAgent.debug.gopsPort | quote }} + - name: ENV_AGENT_GRPC_LISTEN_PORT + value: {{ .Values.kdoctorAgent.grpcServer.port | quote }} + - name: ENV_CLUSTER_DNS_DOMAIN + value: {{ .Values.global.clusterDnsDomain | quote }} + - name: ENV_ENABLE_AGGREGATE_AGENT_REPORT + value: {{ .Values.feature.aggregateReport.enabled | quote }} + {{- if .Values.feature.aggregateReport.enabled }} + - name: ENV_AGENT_REPORT_STORAGE_PATH + value: {{ .Values.feature.aggregateReport.agent.reportPath | quote }} + - name: ENV_CLEAN_AGED_REPORT_INTERVAL_IN_MINUTE + value: {{ .Values.feature.aggregateReport.cleanAgedReportIntervalInMinute | quote }} + {{- end }} + - name: ENV_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ENV_LOCAL_NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: ENV_LOCAL_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: ENV_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- with .Values.kdoctorAgent.extraEnv }} + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- with .Values.kdoctorAgent.securityContext }} + securityContext: + {{- toYaml . | nindent 10 }} + {{- end }} + volumeMounts: + - name: config-path + mountPath: /tmp/config-map + readOnly: true + - name: report-data + mountPath: /report + - name: tls + mountPath: /etc/tls + {{- if .Values.kdoctorAgent.extraVolumes }} + {{- include "tplvalues.render" ( dict "value" .Values.kdoctorAgent.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + volumes: + # To read the configuration from the config map + - name: config-path + configMap: + defaultMode: 0400 + name: {{ .Values.global.configName }} + - name: report-data + hostPath: + path: {{ .Values.kdoctorAgent.reportHostPath }} + type: DirectoryOrCreate + - name: tls + projected: + defaultMode: 0400 + sources: + - secret: + items: + - key: tls.key + path: ca.key + - key: tls.crt + path: ca.crt + name: {{ .Values.tls.ca.secretName }} + {{- if .Values.kdoctorAgent.extraVolumeMounts }} + {{- include "tplvalues.render" ( dict "value" .Values.kdoctorAgent.extraVolumeMounts "context" $ ) | nindent 6 }} + {{- end }} \ No newline at end of file diff --git a/charts/templates/deployment.yaml b/charts/templates/deployment.yaml index d0acdae3..61fc6d5f 100644 --- a/charts/templates/deployment.yaml +++ b/charts/templates/deployment.yaml @@ -148,8 +148,6 @@ spec: value: {{ .Values.kdoctorController.httpServer.port | quote }} - name: ENV_AGENT_GRPC_LISTEN_PORT value: {{ .Values.kdoctorAgent.grpcServer.port | quote }} - - name: ENV_AGENT_DAEMONSET_NAME - value: {{ .Values.kdoctorAgent.name | trunc 63 | trimSuffix "-" }} - name: ENV_ENABLE_AGGREGATE_AGENT_REPORT value: {{ .Values.feature.aggregateReport.enabled | quote }} {{- if .Values.feature.aggregateReport.enabled }} @@ -163,6 +161,18 @@ spec: value: {{ .Values.feature.aggregateReport.cleanAgedReportIntervalInMinute | quote }} - name: ENV_COLLECT_AGENT_REPORT_INTERVAL_IN_SECOND value: {{ .Values.feature.aggregateReport.controller.collectAgentReportIntervalInSecond | quote }} + - name: ENV_GENERAL_AGENT_NAME + value: {{ .Values.kdoctorAgent.name | trunc 63 | trimSuffix "-" }} + - name: ENV_GENERAL_AGENT_TYPE + value: DaemonSet + {{- if .Values.feature.enableIPv4 }} + - name: ENV_GENERAL_AGENT_SERVICE_V4_NAME + value: {{ include "project.kdoctorAgent.serviceIpv4Name" . }} + {{- end }} + {{- if .Values.feature.enableIPv6 }} + - name: ENV_GENERAL_AGENT_SERVICE_V6_NAME + value: {{ include "project.kdoctorAgent.serviceIpv6Name" . }} + {{- end }} {{- end }} - name: ENV_POD_NAME valueFrom: diff --git a/charts/templates/ingress.yaml b/charts/templates/ingress.yaml new file mode 100644 index 00000000..2029ae0d --- /dev/null +++ b/charts/templates/ingress.yaml @@ -0,0 +1,25 @@ +{{- if .Values.kdoctorAgent.ingress.enable }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "project.kdoctorAgent.ingressName" . }} + namespace: {{ .Release.Namespace | quote }} +spec: + {{- if .Values.kdoctorAgent.ingress.ingressClass }} + ingressClassName: {{ .Values.kdoctorAgent.ingress.ingressClass | quote }} + {{- end }} + rules: + - http: + paths: + - path: {{ .Values.kdoctorAgent.ingress.route | quote }} + pathType: Exact + backend: + service: + {{- if .Values.feature.enableIPv4 }} + name: {{ include "project.kdoctorAgent.serviceIpv4Name" . }} + {{- else }} + name: {{ include "project.kdoctorAgent.serviceIpv6Name" . 
}}
+                {{- end }}
+                port:
+                  number: {{ .Values.kdoctorAgent.httpServer.appHttpPort }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/templates/service.yaml b/charts/templates/service.yaml
index 474c3cea..f004abbc 100644
--- a/charts/templates/service.yaml
+++ b/charts/templates/service.yaml
@@ -1,3 +1,95 @@
+{{- if .Values.feature.enableIPv4 }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "project.kdoctorAgent.serviceIpv4Name" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  {{- if or .Values.global.commonAnnotations .Values.kdoctorAgent.service.annotations }}
+  annotations:
+    {{- if .Values.global.commonAnnotations }}
+    {{- include "tplvalues.render" ( dict "value" .Values.global.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.kdoctorAgent.service.annotations }}
+    {{- include "tplvalues.render" ( dict "value" .Values.kdoctorAgent.service.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+  labels:
+    {{- if .Values.global.commonLabels }}
+    {{- include "tplvalues.render" ( dict "value" .Values.global.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- include "project.kdoctorAgent.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.kdoctorAgent.service.type }}
+  ports:
+    {{- if .Values.kdoctorAgent.prometheus.enabled }}
+    - name: metrics
+      port: {{ .Values.kdoctorAgent.prometheus.port }}
+      targetPort: metrics
+      protocol: TCP
+    {{- end }}
+    {{- if .Values.feature.enableIPv4 }}
+    - name: http
+      port: {{ .Values.kdoctorAgent.httpServer.appHttpPort }}
+      targetPort: http
+      protocol: TCP
+    - name: https
+      port: {{ .Values.kdoctorAgent.httpServer.appHttpsPort }}
+      targetPort: https
+      protocol: TCP
+    {{- end }}
+  ipFamilyPolicy: SingleStack
+  ipFamilies:
+    - IPv4
+  selector:
+    {{- include "project.kdoctorAgent.selectorLabels" . | nindent 4 }}
+{{- end }}
+---
+{{- if .Values.feature.enableIPv6 }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "project.kdoctorAgent.serviceIpv6Name" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  {{- if or .Values.global.commonAnnotations .Values.kdoctorAgent.service.annotations }}
+  annotations:
+    {{- if .Values.global.commonAnnotations }}
+    {{- include "tplvalues.render" ( dict "value" .Values.global.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.kdoctorAgent.service.annotations }}
+    {{- include "tplvalues.render" ( dict "value" .Values.kdoctorAgent.service.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+  labels:
+    {{- if .Values.global.commonLabels }}
+    {{- include "tplvalues.render" ( dict "value" .Values.global.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- include "project.kdoctorAgent.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.kdoctorAgent.service.type }}
+  ports:
+    {{- if .Values.kdoctorAgent.prometheus.enabled }}
+    - name: metrics
+      port: {{ .Values.kdoctorAgent.prometheus.port }}
+      targetPort: metrics
+      protocol: TCP
+    {{- end }}
+    {{- if .Values.feature.enableIPv6 }}
+    - name: http
+      port: {{ .Values.kdoctorAgent.httpServer.appHttpPort }}
+      targetPort: http
+      protocol: TCP
+    - name: https
+      port: {{ .Values.kdoctorAgent.httpServer.appHttpsPort }}
+      targetPort: https
+      protocol: TCP
+    {{- end }}
+  ipFamilyPolicy: SingleStack
+  ipFamilies:
+    - IPv6
+  selector:
+    {{- include "project.kdoctorAgent.selectorLabels" . | nindent 4 }}
+{{- end }}
+---
 apiVersion: v1
 kind: Service
 metadata:
@@ -35,4 +127,4 @@ spec:
       targetPort: apiserver
       protocol: TCP
   selector:
-    {{- include "project.kdoctorController.selectorLabels" . | nindent 4 }}
+    {{- include "project.kdoctorController.selectorLabels" . | nindent 4 }}
\ No newline at end of file
diff --git a/cmd/agent/cmd/config.go b/cmd/agent/cmd/config.go
index 22ddbcde..63e6d986 100644
--- a/cmd/agent/cmd/config.go
+++ b/cmd/agent/cmd/config.go
@@ -76,6 +76,7 @@ func init() {
 	globalFlag.StringVar(&types.AgentConfig.ServiceV6Name, "service-ipv6-name", "", "agent IPv6 service name")
 
 	globalFlag.BoolVarP(&types.AgentConfig.AppMode, "app-mode", "A", false, "app mode")
+	globalFlag.BoolVarP(&types.AgentConfig.GeneralAgent, "general-agent", "G", false, "general agent")
 	globalFlag.BoolVarP(&types.AgentConfig.TlsInsecure, "tls-insecure", "K", true, "skip verify tls")
 	globalFlag.BoolVarP(&types.AgentConfig.AppDnsUpstream, "dns-upstream", "D", true, "core dns upstream")
 	globalFlag.StringVarP(&types.AgentConfig.TlsCaCertPath, "tls-ca-cert", "R", "/etc/tls/ca.crt", "ca file path")
diff --git a/docs/concepts/runtime-zh_CN.md b/docs/concepts/runtime-zh_CN.md
index 5d6cbc0e..f48f9c49 100644
--- a/docs/concepts/runtime-zh_CN.md
+++ b/docs/concepts/runtime-zh_CN.md
@@ -2,25 +2,36 @@
 
 [**English**](./runtime.md) | **简体中文**
 
-当下发任务 CR 后,kdoctor-controller 会根据 CR 中的 AgentSpec 生成对应的任务载体(DaemonSet 或 Deployment)当所有 Pod 就绪后,开始按照 Spec 中的任务定义执行任务,每一个任务独立使用一个载体。
+部署 kdoctor 后,会创建一个默认任务载体(DaemonSet)。当下发任务 CR 后,kdoctor-controller 会根据 CR 中是否定义了 AgentSpec 字段,生成对应的任务载体(DaemonSet 或 Deployment)或使用默认的载体资源,当所有 Pod 就绪后,开始按照 Spec 中的任务定义执行任务。
 
 ### 载体资源
 
-当任务 CR 下发后,kdocotr-controller 会创建如下资源进行任务。
+当任务 CR 下发后,kdoctor-controller 会创建或复用如下资源进行任务。
 
 ### 工作负载
 
-工作负载为 DaemonSet 或 Deployment,默认为 DaemonSet,负载中的每一个 Pod 根据任务配置进行的请求,并将执行结果落盘到 Pod 中,可通过 AgentSpec 中设置
-工作负载的销毁时间,默认任务执行完 60 分钟后,销毁工作负载,当删除 CR 任务时,工作负载会一并被删除。
+1. 默认工作负载
+   >默认工作负载(DaemonSet)在部署 kdoctor 后生成,在未定义 AgentSpec 时,使用此载体进行任务,此载体不会因为任务删除或结束而被删除。
+
+2. 新建工作负载
+   >工作负载为 DaemonSet 或 Deployment,默认为 DaemonSet,负载中的每一个 Pod 根据任务配置发起请求,并将执行结果落盘到 Pod 中,可通过 AgentSpec 设置
+   > 工作负载的销毁时间,默认任务执行完 60 分钟后,销毁工作负载,当删除 CR 任务时,工作负载会一并被删除。
 
 ### Service
 
-在创建工作负载时,kdoctor-controller 同时会根据 IP Family 的配置,创建对应的 service 并于工作负载的 pod 绑定。用于测试 service 网络连通性。与工作负载
-的销毁逻辑相同。
+1. 默认工作载体 Service
+   >与默认工作负载一样,在部署 kdoctor 后生成,与默认负载关联且不会因为任务删除或结束而被删除。
+2. 新建工作载体 Service
+   >在创建工作负载时,kdoctor-controller 同时会根据 IP Family 的配置,创建对应的 service 并与工作负载的 pod 绑定,用于测试 service 网络连通性,与工作负载
+   >的销毁逻辑相同。
 
 ### Ingress
 
-当任务为 NetReach 时,若测试目标包含 Ingress 时,会创建一个 Ingress,用于测试 Ingress 的网络联通性,与工作负载的销毁逻辑相同。
+1. 默认工作载体 Ingress
+   >与默认工作负载一样,在部署 kdoctor 后生成,与默认负载 service 关联且不会因为任务删除或结束而被删除。
+2. 新建工作载体 Ingress
+   >当任务为 NetReach 且测试目标包含 Ingress 时,会创建一个 Ingress,用于测试 Ingress 的网络连通性,与工作负载的销毁逻辑相同。
+
 
 ### 报告收取
diff --git a/pkg/k8s/apis/kdoctor.io/v1beta1/apphttphealthy_types.go b/pkg/k8s/apis/kdoctor.io/v1beta1/apphttphealthy_types.go
index 3add6c50..e55a6dee 100644
--- a/pkg/k8s/apis/kdoctor.io/v1beta1/apphttphealthy_types.go
+++ b/pkg/k8s/apis/kdoctor.io/v1beta1/apphttphealthy_types.go
@@ -11,8 +11,7 @@ type AppHttpHealthySpec struct {
 
 	// for the nested field, you should add the kubebuilder default tag even if the nested field properties own the default value.
 	// +kubebuilder:validation:Optional
-	// +kubebuilder:default={kind: DaemonSet, hostNetwork: false, terminationGracePeriodMinutes: 60}
-	AgentSpec AgentSpec `json:"agentSpec,omitempty"`
+	AgentSpec *AgentSpec `json:"agentSpec,omitempty"`
 
 	// +kubebuilder:validation:Optional
 	Schedule *SchedulePlan `json:"schedule,omitempty"`
diff --git a/pkg/k8s/apis/kdoctor.io/v1beta1/netdns_types.go b/pkg/k8s/apis/kdoctor.io/v1beta1/netdns_types.go
index 371d43f1..85768691 100644
--- a/pkg/k8s/apis/kdoctor.io/v1beta1/netdns_types.go
+++ b/pkg/k8s/apis/kdoctor.io/v1beta1/netdns_types.go
@@ -11,8 +11,7 @@ type NetdnsSpec struct {
 
 	// for the nested field, you should add the kubebuilder default tag even if the nested field properties own the default value.
 	// +kubebuilder:validation:Optional
-	// +kubebuilder:default={kind: DaemonSet, hostNetwork: false, terminationGracePeriodMinutes: 60}
-	AgentSpec AgentSpec `json:"agentSpec,omitempty"`
+	AgentSpec *AgentSpec `json:"agentSpec,omitempty"`
 
 	// +kubebuilder:validation:Optional
 	Schedule *SchedulePlan `json:"schedule,omitempty"`
diff --git a/pkg/k8s/apis/kdoctor.io/v1beta1/netreach_types.go b/pkg/k8s/apis/kdoctor.io/v1beta1/netreach_types.go
index 0601e73a..dcc12a4b 100644
--- a/pkg/k8s/apis/kdoctor.io/v1beta1/netreach_types.go
+++ b/pkg/k8s/apis/kdoctor.io/v1beta1/netreach_types.go
@@ -11,8 +11,7 @@ type NetReachSpec struct {
 
 	// for the nested field, you should add the kubebuilder default tag even if the nested field properties own the default value.
 	// +kubebuilder:validation:Optional
-	// +kubebuilder:default={kind: DaemonSet, hostNetwork: false, terminationGracePeriodMinutes: 60}
-	AgentSpec AgentSpec `json:"agentSpec,omitempty"`
+	AgentSpec *AgentSpec `json:"agentSpec,omitempty"`
 
 	// +kubebuilder:validation:Optional
 	Schedule *SchedulePlan `json:"schedule,omitempty"`
diff --git a/pkg/k8s/apis/kdoctor.io/v1beta1/zz_generated.deepcopy.go b/pkg/k8s/apis/kdoctor.io/v1beta1/zz_generated.deepcopy.go
index 39dcd001..aff34521 100644
--- a/pkg/k8s/apis/kdoctor.io/v1beta1/zz_generated.deepcopy.go
+++ b/pkg/k8s/apis/kdoctor.io/v1beta1/zz_generated.deepcopy.go
@@ -122,7 +122,11 @@ func (in *AppHttpHealthyList) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *AppHttpHealthySpec) DeepCopyInto(out *AppHttpHealthySpec) {
 	*out = *in
-	in.AgentSpec.DeepCopyInto(&out.AgentSpec)
+	if in.AgentSpec != nil {
+		in, out := &in.AgentSpec, &out.AgentSpec
+		*out = new(AgentSpec)
+		(*in).DeepCopyInto(*out)
+	}
 	if in.Schedule != nil {
 		in, out := &in.Schedule, &out.Schedule
 		*out = new(SchedulePlan)
@@ -357,7 +361,11 @@ func (in *NetReachList) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NetReachSpec) DeepCopyInto(out *NetReachSpec) {
 	*out = *in
-	in.AgentSpec.DeepCopyInto(&out.AgentSpec)
+	if in.AgentSpec != nil {
+		in, out := &in.AgentSpec, &out.AgentSpec
+		*out = new(AgentSpec)
+		(*in).DeepCopyInto(*out)
+	}
 	if in.Schedule != nil {
 		in, out := &in.Schedule, &out.Schedule
 		*out = new(SchedulePlan)
@@ -572,7 +580,11 @@ func (in *NetdnsRequest) DeepCopy() *NetdnsRequest {
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetdnsSpec) DeepCopyInto(out *NetdnsSpec) { *out = *in - in.AgentSpec.DeepCopyInto(&out.AgentSpec) + if in.AgentSpec != nil { + in, out := &in.AgentSpec, &out.AgentSpec + *out = new(AgentSpec) + (*in).DeepCopyInto(*out) + } if in.Schedule != nil { in, out := &in.Schedule, &out.Schedule *out = new(SchedulePlan) diff --git a/pkg/pluginManager/agentManager.go b/pkg/pluginManager/agentManager.go index 577a6d58..ac2069e3 100644 --- a/pkg/pluginManager/agentManager.go +++ b/pkg/pluginManager/agentManager.go @@ -81,12 +81,12 @@ func (s *pluginManager) RunAgentController() { logger.Sugar().Fatalf("failed to Initk8sObjManager, error=%v", e) } - if !slices.Contains(types.TaskKinds, types.AgentConfig.TaskKind) { + if !slices.Contains(types.TaskKinds, types.AgentConfig.TaskKind) && !types.AgentConfig.GeneralAgent { logger.Sugar().Fatalf("unsupported TaskKind %s in %v", types.AgentConfig.TaskKind, types.TaskKinds) } for name, plugin := range s.chainingPlugins { - if name != types.AgentConfig.TaskKind { + if name != types.AgentConfig.TaskKind && !types.AgentConfig.GeneralAgent { continue } @@ -115,9 +115,12 @@ func (s *pluginManager) RunAgentController() { time.Sleep(5 * time.Second) }() - err = checkTaskExist(mgr) - if nil != err { - s.logger.Sugar().Fatalf("failed to get agent task '%s/%s', error: %v", types.AgentConfig.TaskKind, types.AgentConfig.TaskName, err) + // general agent skip check + if !types.AgentConfig.GeneralAgent { + err = checkTaskExist(mgr) + if nil != err { + s.logger.Sugar().Fatalf("failed to get agent task '%s/%s', error: %v", types.AgentConfig.TaskKind, types.AgentConfig.TaskName, err) + } } } diff --git a/pkg/pluginManager/agentReconciler.go b/pkg/pluginManager/agentReconciler.go index e8041871..fc627aa5 100644 --- a/pkg/pluginManager/agentReconciler.go +++ b/pkg/pluginManager/agentReconciler.go @@ -42,12 +42,13 @@ func (s *pluginAgentReconciler) SetupWithManager(mgr ctrl.Manager) error { // or else, c.Queue.Forget(obj) func (s *pluginAgentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { // filter other tasks - if req.NamespacedName.Name != types.AgentConfig.TaskName { - s.logger.With(zap.String(types.AgentConfig.TaskKind, types.AgentConfig.TaskName)). - Sugar().Debugf("ignore Task %s", req.NamespacedName.Name) - return ctrl.Result{}, nil + if !types.AgentConfig.GeneralAgent { + if req.NamespacedName.Name != types.AgentConfig.TaskName { + s.logger.With(zap.String(types.AgentConfig.TaskKind, types.AgentConfig.TaskName)). 
+ Sugar().Debugf("ignore Task %s", req.NamespacedName.Name) + return ctrl.Result{}, nil + } } - // ------ add crd ------ switch s.crdKind { case KindNameNetReach: @@ -59,6 +60,13 @@ func (s *pluginAgentReconciler) Reconcile(ctx context.Context, req ctrl.Request) logger := s.logger.With(zap.String(instance.Kind, instance.Name)) logger.Sugar().Debugf("reconcile handle %v", instance) + + // filter work agent + if instance.Spec.AgentSpec != nil && types.AgentConfig.GeneralAgent { + s.logger.Sugar().Debugf("general agent ignore custom agent task %v", req) + return ctrl.Result{}, nil + } + if instance.DeletionTimestamp != nil { s.logger.Sugar().Debugf("ignore deleting task %v", req) return ctrl.Result{}, nil @@ -96,6 +104,13 @@ func (s *pluginAgentReconciler) Reconcile(ctx context.Context, req ctrl.Request) logger := s.logger.With(zap.String(instance.Kind, instance.Name)) logger.Sugar().Debugf("reconcile handle %v", instance) + + // filter work agent + if instance.Spec.AgentSpec != nil && types.AgentConfig.GeneralAgent { + s.logger.Sugar().Debugf("general agent ignore custom agent task %v", req) + return ctrl.Result{}, nil + } + if instance.DeletionTimestamp != nil { s.logger.Sugar().Debugf("ignore deleting task %v", req) return ctrl.Result{}, nil @@ -132,6 +147,13 @@ func (s *pluginAgentReconciler) Reconcile(ctx context.Context, req ctrl.Request) } logger := s.logger.With(zap.String(instance.Kind, instance.Name)) logger.Sugar().Debugf("reconcile handle %v", instance) + + // filter work agent + if instance.Spec.AgentSpec != nil && types.AgentConfig.GeneralAgent { + s.logger.Sugar().Debugf("general agent ignore custom agent task %v", req) + return ctrl.Result{}, nil + } + if instance.DeletionTimestamp != nil { s.logger.Sugar().Debugf("ignore deleting task %v", req) return ctrl.Result{}, nil diff --git a/pkg/pluginManager/apphttphealthy/webhook.go b/pkg/pluginManager/apphttphealthy/webhook.go index e3833b64..7db68ea7 100644 --- a/pkg/pluginManager/apphttphealthy/webhook.go +++ b/pkg/pluginManager/apphttphealthy/webhook.go @@ -65,11 +65,12 @@ func (s *PluginAppHttpHealthy) WebhookMutating(logger *zap.Logger, ctx context.C // agentSpec if true { - if req.Spec.AgentSpec.TerminationGracePeriodMinutes == nil { - req.Spec.AgentSpec.TerminationGracePeriodMinutes = &types.ControllerConfig.Configmap.AgentDefaultTerminationGracePeriodMinutes + if req.Spec.AgentSpec != nil { + if req.Spec.AgentSpec.TerminationGracePeriodMinutes == nil { + req.Spec.AgentSpec.TerminationGracePeriodMinutes = &types.ControllerConfig.Configmap.AgentDefaultTerminationGracePeriodMinutes + } } } - return nil } @@ -185,8 +186,10 @@ func (s *PluginAppHttpHealthy) WebhookValidateCreate(logger *zap.Logger, ctx con // validate AgentSpec if true { - if !slices.Contains(types.TaskRuntimes, r.Spec.AgentSpec.Kind) { - return apierrors.NewBadRequest(fmt.Sprintf("Invalid agent runtime kind %s", r.Spec.AgentSpec.Kind)) + if r.Spec.AgentSpec != nil { + if !slices.Contains(types.TaskRuntimes, r.Spec.AgentSpec.Kind) { + return apierrors.NewBadRequest(fmt.Sprintf("Invalid agent runtime kind %s", r.Spec.AgentSpec.Kind)) + } } } diff --git a/pkg/pluginManager/controllerReconciler.go b/pkg/pluginManager/controllerReconciler.go index 686d4bbe..16e78272 100644 --- a/pkg/pluginManager/controllerReconciler.go +++ b/pkg/pluginManager/controllerReconciler.go @@ -5,6 +5,7 @@ package pluginManager import ( "context" + "github.com/kdoctor-io/kdoctor/pkg/types" "reflect" "time" @@ -49,7 +50,7 @@ func (s *pluginControllerReconciler) Reconcile(ctx 
context.Context, req reconcil if err := s.client.Get(ctx, req.NamespacedName, &instance); err != nil { s.logger.Sugar().Errorf("unable to fetch obj , error=%v", err) // since we have OwnerReference for task corresponding runtime and service, we could just delete the tracker DB record directly - if errors.IsNotFound(err) && instance.Status.Resource != nil { + if errors.IsNotFound(err) && instance.DeletionTimestamp != nil && instance.Spec.AgentSpec != nil { s.tracker.DB.Delete(scheduler.BuildItem(*instance.Status.Resource, KindNameNetReach, instance.Name, nil)) } return ctrl.Result{}, client.IgnoreNotFound(err) @@ -84,8 +85,15 @@ func (s *pluginControllerReconciler) Reconcile(ctx context.Context, req reconcil } // the task corresponding agent pods have this unique label - runtimePodMatchLabels := client.MatchingLabels{ - s.runtimeUniqueMatchLabelKey: scheduler.UniqueMatchLabelValue(KindNameNetReach, instance.Name), + var runtimePodMatchLabels client.MatchingLabels + if instance.Spec.AgentSpec == nil { + runtimePodMatchLabels = client.MatchingLabels{ + scheduler.UniqueMatchLabelKey: types.ControllerConfig.GeneralAgentName, + } + } else { + runtimePodMatchLabels = client.MatchingLabels{ + s.runtimeUniqueMatchLabelKey: scheduler.UniqueMatchLabelValue(KindNameNetReach, instance.Name), + } } oldStatus := instance.Status.DeepCopy() @@ -107,8 +115,9 @@ func (s *pluginControllerReconciler) Reconcile(ctx context.Context, req reconcil } // update tracker database - if newStatus.FinishTime != nil { - deletionTime := newStatus.FinishTime.DeepCopy() + var deletionTime *metav1.Time + if newStatus.FinishTime != nil && instance.Spec.AgentSpec != nil { + deletionTime = newStatus.FinishTime.DeepCopy() if instance.Spec.AgentSpec.TerminationGracePeriodMinutes != nil { newTime := metav1.NewTime(deletionTime.Add(time.Duration(*instance.Spec.AgentSpec.TerminationGracePeriodMinutes) * time.Minute)) deletionTime = newTime.DeepCopy() @@ -120,6 +129,12 @@ func (s *pluginControllerReconciler) Reconcile(ctx context.Context, req reconcil logger.Error(err.Error()) return ctrl.Result{}, err } + } else if newStatus.FinishTime != nil && instance.Spec.AgentSpec == nil { + err := s.tracker.DB.Apply(scheduler.BuildItem(*instance.Status.Resource, KindNameNetReach, instance.Name, deletionTime)) + if nil != err { + logger.Error(err.Error()) + return ctrl.Result{}, err + } } } @@ -135,7 +150,7 @@ func (s *pluginControllerReconciler) Reconcile(ctx context.Context, req reconcil if err := s.client.Get(ctx, req.NamespacedName, &instance); err != nil { s.logger.Sugar().Errorf("unable to fetch obj , error=%v", err) // since we have OwnerReference for task corresponding runtime and service, we could just delete the tracker DB record directly - if errors.IsNotFound(err) && instance.DeletionTimestamp != nil { + if errors.IsNotFound(err) && instance.DeletionTimestamp != nil && instance.Spec.AgentSpec != nil { s.tracker.DB.Delete(scheduler.BuildItem(*instance.Status.Resource, KindNameAppHttpHealthy, instance.Name, nil)) } @@ -171,8 +186,15 @@ func (s *pluginControllerReconciler) Reconcile(ctx context.Context, req reconcil } // the task corresponding agent pods have this unique label - runtimePodMatchLabels := client.MatchingLabels{ - s.runtimeUniqueMatchLabelKey: scheduler.UniqueMatchLabelValue(KindNameAppHttpHealthy, instance.Name), + var runtimePodMatchLabels client.MatchingLabels + if instance.Spec.AgentSpec == nil { + runtimePodMatchLabels = client.MatchingLabels{ + scheduler.UniqueMatchLabelKey: types.ControllerConfig.GeneralAgentName, + 
} + } else { + runtimePodMatchLabels = client.MatchingLabels{ + s.runtimeUniqueMatchLabelKey: scheduler.UniqueMatchLabelValue(KindNameAppHttpHealthy, instance.Name), + } } oldStatus := instance.Status.DeepCopy() @@ -194,13 +216,21 @@ func (s *pluginControllerReconciler) Reconcile(ctx context.Context, req reconcil } // update tracker database - if newStatus.FinishTime != nil { - deletionTime := newStatus.FinishTime.DeepCopy() + var deletionTime *metav1.Time + if newStatus.FinishTime != nil && instance.Spec.AgentSpec != nil { + deletionTime = newStatus.FinishTime.DeepCopy() if instance.Spec.AgentSpec.TerminationGracePeriodMinutes != nil { newTime := metav1.NewTime(deletionTime.Add(time.Duration(*instance.Spec.AgentSpec.TerminationGracePeriodMinutes) * time.Minute)) deletionTime = newTime.DeepCopy() } logger.Sugar().Debugf("task finish time '%s' and runtime deletion time '%s'", newStatus.FinishTime, deletionTime) + // record the task resource to the tracker DB, and the tracker will update the task subresource resource status asynchronously + err := s.tracker.DB.Apply(scheduler.BuildItem(*instance.Status.Resource, KindNameAppHttpHealthy, instance.Name, deletionTime)) + if nil != err { + logger.Error(err.Error()) + return ctrl.Result{}, err + } + } else if newStatus.FinishTime != nil && instance.Spec.AgentSpec == nil { err := s.tracker.DB.Apply(scheduler.BuildItem(*instance.Status.Resource, KindNameAppHttpHealthy, instance.Name, deletionTime)) if nil != err { logger.Error(err.Error()) @@ -221,7 +251,7 @@ func (s *pluginControllerReconciler) Reconcile(ctx context.Context, req reconcil if err := s.client.Get(ctx, req.NamespacedName, &instance); err != nil { s.logger.Sugar().Errorf("unable to fetch obj , error=%v", err) // since we have OwnerReference for task corresponding runtime and service, we could just delete the tracker DB record directly - if errors.IsNotFound(err) && instance.DeletionTimestamp != nil { + if errors.IsNotFound(err) && instance.DeletionTimestamp != nil && instance.Spec.AgentSpec != nil { s.tracker.DB.Delete(scheduler.BuildItem(*instance.Status.Resource, KindNameNetdns, instance.Name, nil)) } return ctrl.Result{}, client.IgnoreNotFound(err) @@ -256,8 +286,15 @@ func (s *pluginControllerReconciler) Reconcile(ctx context.Context, req reconcil } // the task corresponding agent pods have this unique label - runtimePodMatchLabels := client.MatchingLabels{ - s.runtimeUniqueMatchLabelKey: scheduler.TaskRuntimeName(KindNameNetdns, instance.Name), + var runtimePodMatchLabels client.MatchingLabels + if instance.Spec.AgentSpec == nil { + runtimePodMatchLabels = client.MatchingLabels{ + scheduler.UniqueMatchLabelKey: types.ControllerConfig.GeneralAgentName, + } + } else { + runtimePodMatchLabels = client.MatchingLabels{ + s.runtimeUniqueMatchLabelKey: scheduler.UniqueMatchLabelValue(KindNameNetdns, instance.Name), + } } oldStatus := instance.Status.DeepCopy() @@ -279,13 +316,21 @@ func (s *pluginControllerReconciler) Reconcile(ctx context.Context, req reconcil } // update tracker database - if newStatus.FinishTime != nil { - deletionTime := newStatus.FinishTime.DeepCopy() + var deletionTime *metav1.Time + if newStatus.FinishTime != nil && instance.Spec.AgentSpec != nil { + deletionTime = newStatus.FinishTime.DeepCopy() if instance.Spec.AgentSpec.TerminationGracePeriodMinutes != nil { newTime := metav1.NewTime(deletionTime.Add(time.Duration(*instance.Spec.AgentSpec.TerminationGracePeriodMinutes) * time.Minute)) deletionTime = newTime.DeepCopy() } logger.Sugar().Debugf("task finish time 
'%s' and runtime deletion time '%s'", newStatus.FinishTime, deletionTime) + // record the task resource to the tracker DB, and the tracker will update the task subresource resource status asynchronously + err := s.tracker.DB.Apply(scheduler.BuildItem(*instance.Status.Resource, KindNameNetdns, instance.Name, deletionTime)) + if nil != err { + logger.Error(err.Error()) + return ctrl.Result{}, err + } + } else if newStatus.FinishTime != nil && instance.Spec.AgentSpec == nil { err := s.tracker.DB.Apply(scheduler.BuildItem(*instance.Status.Resource, KindNameNetdns, instance.Name, deletionTime)) if nil != err { logger.Error(err.Error()) diff --git a/pkg/pluginManager/controllerTools.go b/pkg/pluginManager/controllerTools.go index f1ca8108..37044dcc 100644 --- a/pkg/pluginManager/controllerTools.go +++ b/pkg/pluginManager/controllerTools.go @@ -339,24 +339,35 @@ func (s *pluginControllerReconciler) UpdateStatus(logger *zap.Logger, ctx contex } -func (s *pluginControllerReconciler) TaskResourceReconcile(ctx context.Context, taskKind string, ownerTask metav1.Object, agentSpec crd.AgentSpec, taskStatus *crd.TaskStatus, logger *zap.Logger) (*crd.TaskStatus, error) { +func (s *pluginControllerReconciler) TaskResourceReconcile(ctx context.Context, taskKind string, ownerTask metav1.Object, agentSpec *crd.AgentSpec, taskStatus *crd.TaskStatus, logger *zap.Logger) (*crd.TaskStatus, error) { var resource crd.TaskResource var err error var deletionTime *metav1.Time if taskStatus.Resource == nil { - logger.Sugar().Debugf("task '%s/%s' just created, try to initial its corresponding runtime resource", taskKind, ownerTask.GetName()) - newScheduler := scheduler.NewScheduler(s.client, s.apiReader, taskKind, ownerTask.GetName(), s.runtimeUniqueMatchLabelKey, logger) - // create the task corresponding resources(runtime,service) and record them to the task CR object subresource with 'Creating' status - resource, err = newScheduler.CreateTaskRuntimeIfNotExist(ctx, ownerTask, agentSpec) - if nil != err { - return nil, err + if agentSpec == nil { + resource.RuntimeName = types.ControllerConfig.GeneralAgentName + resource.RuntimeType = types.ControllerConfig.GeneralAgentType + if types.ControllerConfig.GereralAgentServiceV4Name != "" { + resource.ServiceNameV4 = &types.ControllerConfig.GereralAgentServiceV4Name + } + if types.ControllerConfig.GereralAgentServiceV6Name != "" { + resource.ServiceNameV6 = &types.ControllerConfig.GereralAgentServiceV6Name + } + resource.RuntimeStatus = crd.RuntimeCreated + } else { + logger.Sugar().Debugf("task '%s/%s' just created, try to initial its corresponding runtime resource", taskKind, ownerTask.GetName()) + newScheduler := scheduler.NewScheduler(s.client, s.apiReader, taskKind, ownerTask.GetName(), s.runtimeUniqueMatchLabelKey, logger) + // create the task corresponding resources(runtime,service) and record them to the task CR object subresource with 'Creating' status + resource, err = newScheduler.CreateTaskRuntimeIfNotExist(ctx, ownerTask, *agentSpec) + if nil != err { + return nil, err + } } - taskStatus.Resource = &resource } else { // we need to track it again, in order to avoid controller restart resource = *taskStatus.Resource - if taskStatus.FinishTime != nil { + if taskStatus.FinishTime != nil && agentSpec != nil { deletionTime = taskStatus.FinishTime.DeepCopy() if agentSpec.TerminationGracePeriodMinutes != nil { newTime := metav1.NewTime(deletionTime.Add(time.Duration(*agentSpec.TerminationGracePeriodMinutes) * time.Minute)) @@ -367,6 +378,8 @@ func (s 
*pluginControllerReconciler) TaskResourceReconcile(ctx context.Context, } } + taskStatus.Resource = &resource + // record the task resource to the tracker DB, and the tracker will update the task subresource resource status or delete corresponding runtime asynchronously err = s.tracker.DB.Apply(scheduler.BuildItem(resource, taskKind, ownerTask.GetName(), deletionTime)) if nil != err { diff --git a/pkg/pluginManager/netdns/webhook.go b/pkg/pluginManager/netdns/webhook.go index 467194a5..80a65fa9 100644 --- a/pkg/pluginManager/netdns/webhook.go +++ b/pkg/pluginManager/netdns/webhook.go @@ -27,10 +27,13 @@ func (s *PluginNetDns) WebhookMutating(logger *zap.Logger, ctx context.Context, } logger.Sugar().Infof("obj: %+v", r) + // agentSpec // agentSpec if true { - if r.Spec.AgentSpec.TerminationGracePeriodMinutes == nil { - r.Spec.AgentSpec.TerminationGracePeriodMinutes = &types.ControllerConfig.Configmap.AgentDefaultTerminationGracePeriodMinutes + if r.Spec.AgentSpec != nil { + if r.Spec.AgentSpec.TerminationGracePeriodMinutes == nil { + r.Spec.AgentSpec.TerminationGracePeriodMinutes = &types.ControllerConfig.Configmap.AgentDefaultTerminationGracePeriodMinutes + } } } // TODO: mutating default value @@ -106,11 +109,12 @@ func (s *PluginNetDns) WebhookValidateCreate(logger *zap.Logger, ctx context.Con // validate AgentSpec if true { - if !slices.Contains(types.TaskRuntimes, r.Spec.AgentSpec.Kind) { - return apierrors.NewBadRequest(fmt.Sprintf("Invalid agent runtime kind %s", r.Spec.AgentSpec.Kind)) + if r.Spec.AgentSpec != nil { + if !slices.Contains(types.TaskRuntimes, r.Spec.AgentSpec.Kind) { + return apierrors.NewBadRequest(fmt.Sprintf("Invalid agent runtime kind %s", r.Spec.AgentSpec.Kind)) + } } } - return nil } diff --git a/pkg/pluginManager/netreach/agentExecuteTask.go b/pkg/pluginManager/netreach/agentExecuteTask.go index 014bcac3..8d87b67b 100644 --- a/pkg/pluginManager/netreach/agentExecuteTask.go +++ b/pkg/pluginManager/netreach/agentExecuteTask.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "github.com/kdoctor-io/kdoctor/pkg/resource" + networkingv1 "k8s.io/api/networking/v1" "sync" "go.uber.org/zap" @@ -249,10 +250,19 @@ func (s *PluginNetReach) AgentExecuteTask(logger *zap.Logger, ctx context.Contex if *target.Ingress { if runtimeResource.ServiceNameV4 != nil { - agentIngress, e := k8sObjManager.GetK8sObjManager().GetIngress(ctx, *runtimeResource.ServiceNameV4, config.AgentConfig.PodNamespace) - if e != nil { - logger.Sugar().Errorf("failed to get v4 ingress , error=%v", e) + var agentIngress *networkingv1.Ingress + if runtimetype.AgentConfig.GeneralAgent { + agentIngress, e = k8sObjManager.GetK8sObjManager().GetIngress(ctx, runtimetype.AgentConfig.Configmap.AgentIngressName, config.AgentConfig.PodNamespace) + if e != nil { + logger.Sugar().Errorf("failed to get v4 ingress , error=%v", e) + } + } else { + agentIngress, e = k8sObjManager.GetK8sObjManager().GetIngress(ctx, *runtimeResource.ServiceNameV4, config.AgentConfig.PodNamespace) + if e != nil { + logger.Sugar().Errorf("failed to get v4 ingress , error=%v", e) + } } + if agentIngress != nil && len(agentIngress.Status.LoadBalancer.Ingress) > 0 { http := "http" if len(agentIngress.Spec.TLS) > 0 { diff --git a/pkg/pluginManager/netreach/webhook.go b/pkg/pluginManager/netreach/webhook.go index 071d2193..676e1fed 100644 --- a/pkg/pluginManager/netreach/webhook.go +++ b/pkg/pluginManager/netreach/webhook.go @@ -109,8 +109,10 @@ func (s *PluginNetReach) WebhookMutating(logger *zap.Logger, ctx context.Context // agentSpec if true { - if 
req.Spec.AgentSpec.TerminationGracePeriodMinutes == nil { - req.Spec.AgentSpec.TerminationGracePeriodMinutes = &types.ControllerConfig.Configmap.AgentDefaultTerminationGracePeriodMinutes + if req.Spec.AgentSpec != nil { + if req.Spec.AgentSpec.TerminationGracePeriodMinutes == nil { + req.Spec.AgentSpec.TerminationGracePeriodMinutes = &types.ControllerConfig.Configmap.AgentDefaultTerminationGracePeriodMinutes + } } } return nil @@ -191,8 +193,10 @@ func (s *PluginNetReach) WebhookValidateCreate(logger *zap.Logger, ctx context.C // validate AgentSpec if true { - if !slices.Contains(types.TaskRuntimes, r.Spec.AgentSpec.Kind) { - return apierrors.NewBadRequest(fmt.Sprintf("Invalid agent runtime kind %s", r.Spec.AgentSpec.Kind)) + if r.Spec.AgentSpec != nil { + if !slices.Contains(types.TaskRuntimes, r.Spec.AgentSpec.Kind) { + return apierrors.NewBadRequest(fmt.Sprintf("Invalid agent runtime kind %s", r.Spec.AgentSpec.Kind)) + } } } diff --git a/pkg/reportManager/worker.go b/pkg/reportManager/worker.go index f49c918a..bdfb01f0 100644 --- a/pkg/reportManager/worker.go +++ b/pkg/reportManager/worker.go @@ -8,7 +8,7 @@ import ( "fmt" "github.com/kdoctor-io/kdoctor/pkg/grpcManager" k8sObjManager "github.com/kdoctor-io/kdoctor/pkg/k8ObjManager" - crd "github.com/kdoctor-io/kdoctor/pkg/k8s/apis/kdoctor.io/v1beta1" + "github.com/kdoctor-io/kdoctor/pkg/scheduler" "github.com/kdoctor-io/kdoctor/pkg/types" "github.com/kdoctor-io/kdoctor/pkg/utils" "go.uber.org/zap" @@ -66,11 +66,6 @@ func (s *reportManager) syncReportFromOneAgent(ctx context.Context, logger *zap. // -- v := strings.Split(remoteFileName, "_") timeSuffix := v[len(v)-1] - taskName := v[1] - if !strings.Contains(podName, taskName) { - logger.Sugar().Debugf("task %s not task of pod %s ,skip sync report %s", taskName, podName, remoteFileName) - continue - } remoteFilePre := strings.TrimSuffix(remoteFileName, "_"+timeSuffix) // file name format: fmt.Sprintf("%s_%s_round%d_%s_%s", kindName, taskName, roundNumber, nodeName, suffix) t := time.Duration(types.ControllerConfig.ReportAgeInDay*24) * time.Hour @@ -88,7 +83,9 @@ func (s *reportManager) syncReportFromOneAgent(ctx context.Context, logger *zap. 
} func (s *reportManager) runControllerAggregateReportOnce(ctx context.Context, logger *zap.Logger, taskName string) error { - + var task scheduler.Item + var err error + var podIP k8sObjManager.PodIps // grpc client grpcClient := grpcManager.NewGrpcClient(s.logger.Named("grpc"), true) @@ -101,60 +98,57 @@ func (s *reportManager) runControllerAggregateReportOnce(ctx context.Context, lo logger.Sugar().Debugf("before sync, local report files: %v", localFileList) // get all runtime obj for _, v := range s.runtimeDB { + task, err = v.Get(taskName) + if err != nil { + logger.Sugar().Debugf(err.Error()) + continue + } else { + break + } + } + if err != nil { + return err + } - for _, m := range v.List() { - // only collect created runtime report - if m.RuntimeStatus != crd.RuntimeCreated { - logger.Sugar().Debugf("task %s runtime %s status %s not created finish", m.TaskName, m.RuntimeName, m.RuntimeStatus) - continue - } - if m.TaskName != taskName { - logger.Sugar().Debugf("this agent %s is not ccurrent sync task %s ,skip ", m.RuntimeName, taskName) - continue - } - var podIP k8sObjManager.PodIps - var err error - if m.RuntimeKind == types.KindDaemonSet { - podIP, err = k8sObjManager.GetK8sObjManager().ListDaemonsetPodIPs(context.Background(), m.RuntimeName, types.ControllerConfig.PodNamespace) - } - if m.RuntimeKind == types.KindDeployment { - podIP, err = k8sObjManager.GetK8sObjManager().ListDeploymentPodIPs(context.Background(), m.RuntimeName, types.ControllerConfig.PodNamespace) - } - logger.Sugar().Debugf("podIP : %v", podIP) - if err != nil { - m := fmt.Sprintf("failed to get kind %s name %s agent ip, error=%v", m.RuntimeKind, m.RuntimeName, err) - logger.Error(m) - // retry - return fmt.Errorf(m) - } + if task.RuntimeKind == types.KindDaemonSet { + podIP, err = k8sObjManager.GetK8sObjManager().ListDaemonsetPodIPs(context.Background(), task.RuntimeName, types.ControllerConfig.PodNamespace) + } + if task.RuntimeKind == types.KindDeployment { + podIP, err = k8sObjManager.GetK8sObjManager().ListDeploymentPodIPs(context.Background(), task.RuntimeName, types.ControllerConfig.PodNamespace) + } + if err != nil { + m := fmt.Sprintf("failed to get kind %s name %s agent ip, error=%v", task.RuntimeKind, task.RuntimeName, err) + logger.Error(m) + // retry + return fmt.Errorf(m) + } + logger.Sugar().Debugf("podIP : %v", podIP) - for podName, podIpInfo := range podIP { - // get pod ip - if len(podIpInfo) == 0 { - logger.Sugar().Errorf("failed to get agent %s ip ", podName) - continue - } - var podip string - if types.ControllerConfig.Configmap.EnableIPv4 { - podip = podIpInfo[0].IPv4 - } else { - podip = podIpInfo[0].IPv6 - } - if len(podip) == 0 { - logger.Sugar().Errorf("failed to get agent %s ip ", podName) - continue - } - - ip := net.ParseIP(podip) - var address string - if ip.To4() == nil { - address = fmt.Sprintf("[%s]:%d", podip, types.ControllerConfig.AgentGrpcListenPort) - } else { - address = fmt.Sprintf("%s:%d", podip, types.ControllerConfig.AgentGrpcListenPort) - } - s.syncReportFromOneAgent(ctx, logger, grpcClient, localFileList, podName, address) - } + for podName, podIpInfo := range podIP { + // get pod ip + if len(podIpInfo) == 0 { + logger.Sugar().Errorf("failed to get agent %s ip ", podName) + continue + } + var podip string + if types.ControllerConfig.Configmap.EnableIPv4 { + podip = podIpInfo[0].IPv4 + } else { + podip = podIpInfo[0].IPv6 + } + if len(podip) == 0 { + logger.Sugar().Errorf("failed to get agent %s ip ", podName) + continue + } + + ip := net.ParseIP(podip) + var 
address string + if ip.To4() == nil { + address = fmt.Sprintf("[%s]:%d", podip, types.ControllerConfig.AgentGrpcListenPort) + } else { + address = fmt.Sprintf("%s:%d", podip, types.ControllerConfig.AgentGrpcListenPort) } + s.syncReportFromOneAgent(ctx, logger, grpcClient, localFileList, podName, address) } return nil diff --git a/pkg/scheduler/cachedb.go b/pkg/scheduler/cachedb.go index 03b2bcf6..00b0554e 100644 --- a/pkg/scheduler/cachedb.go +++ b/pkg/scheduler/cachedb.go @@ -20,6 +20,7 @@ type DB interface { Apply(item Item) error List() []Item Delete(item Item) + Get(taskName string) (Item, error) } func NewDB(maxCap int, log *zap.Logger) DB { @@ -46,6 +47,14 @@ type RuntimeKey struct { RuntimeName string } +type task map[string]string + +func (t task) Join(new task) { + for k, v := range new { + t[k] = v + } +} + type Item struct { RuntimeKey @@ -54,8 +63,7 @@ type Item struct { ServiceNameV4 *string ServiceNameV6 *string - TaskKind string - TaskName string + Task task } func BuildItem(resource crd.TaskResource, taskKind, taskName string, deletionTime *metav1.Time) Item { @@ -68,8 +76,7 @@ func BuildItem(resource crd.TaskResource, taskKind, taskName string, deletionTim RuntimeDeletionTime: deletionTime, ServiceNameV4: resource.ServiceNameV4, ServiceNameV6: resource.ServiceNameV6, - TaskKind: taskKind, - TaskName: taskName, + Task: task{taskName: taskKind}, } return item @@ -91,6 +98,7 @@ func (d *Database) Apply(item Item) error { return nil } else { if !reflect.DeepEqual(old, item) { + item.Task.Join(old.Task) d.cache[item.RuntimeKey] = item d.Unlock() d.log.Sugar().Debugf("item %v has changed, the old one is %v, and the new one is %v", @@ -128,3 +136,18 @@ func (d *Database) Delete(item Item) { d.Unlock() d.log.Sugar().Debugf("delete item %v successfully", item.RuntimeKey) } + +func (d *Database) Get(taskName string) (Item, error) { + d.Lock() + defer d.Unlock() + var tmp Item + for _, v := range d.cache { + _, ok := v.Task[taskName] + if ok { + tmp = v + d.log.Sugar().Debugf("successfully get task %s item %v ", taskName, tmp) + return tmp, nil + } + } + return tmp, fmt.Errorf("failed get task %s,the task not exists", taskName) +} diff --git a/pkg/scheduler/tracing.go b/pkg/scheduler/tracing.go index 8e2935cc..ac28459a 100644 --- a/pkg/scheduler/tracing.go +++ b/pkg/scheduler/tracing.go @@ -84,7 +84,7 @@ func (t *Tracker) trace(ctx context.Context) { func (t *Tracker) signaling(item Item) { select { case t.itemSignal <- item: - t.log.Sugar().Debugf("sending signal to handle Task %s/%s", item.TaskKind, item.TaskName) + t.log.Sugar().Debugf("sending signal to handle Task %s", item.Task) case <-time.After(t.SignalTimeOutDuration): t.log.Sugar().Warnf("failed to send signal, itemSignal length %d, item %v will be dropped", len(t.itemSignal), item) @@ -116,7 +116,7 @@ func (t *Tracker) executor(ctx context.Context, workerIndex int) { } // 2. 
update status - innerLog.Sugar().Infof("try to update task %s/%s resource status from %s to %s", item.TaskKind, item.TaskName, item.RuntimeStatus, crd.RuntimeDeleted) + innerLog.Sugar().Infof("try to update task %s resource status from %s to %s", item.Task, item.RuntimeStatus, crd.RuntimeDeleted) err = t.updateRuntimeStatus(ctx, item, crd.RuntimeDeleted) if client.IgnoreNotFound(err) != nil { innerLog.Error(err.Error()) @@ -133,7 +133,7 @@ func (t *Tracker) executor(ctx context.Context, workerIndex int) { // update created if item.RuntimeStatus == crd.RuntimeCreating && runtime.IsReady(ctx) { - innerLog.Sugar().Infof("try to update task %s/%s resource status from %s to %s", item.TaskKind, item.TaskName, item.RuntimeStatus, crd.RuntimeCreated) + innerLog.Sugar().Infof("try to update task %s resource status from %s to %s", item.Task, item.RuntimeStatus, crd.RuntimeCreated) err := t.updateRuntimeStatus(ctx, item, crd.RuntimeCreated) if nil != err { innerLog.Error(err.Error()) @@ -161,69 +161,71 @@ func (t *Tracker) updateRuntimeStatus(ctx context.Context, item Item, status str RuntimeStatus: status, } - switch item.TaskKind { - case types.KindNameNetReach: - instance := crd.NetReach{} - err := t.apiReader.Get(ctx, k8types.NamespacedName{Name: item.TaskName}, &instance) - if nil != err { - return err - } + for taskName, taskKind := range item.Task { + switch taskKind { + case types.KindNameNetReach: + instance := crd.NetReach{} + err := t.apiReader.Get(ctx, k8types.NamespacedName{Name: taskName}, &instance) + if nil != err { + return err + } - // check the resource whether is already equal - if reflect.DeepEqual(instance.Status.Resource, resource) { - t.log.Sugar().Debugf("task %v resource already updatede, skip it", item.RuntimeKey) - return nil - } + // check the resource whether is already equal + if reflect.DeepEqual(instance.Status.Resource, resource) { + t.log.Sugar().Debugf("task %v resource already updatede, skip it", item.RuntimeKey) + return nil + } - t.log.Sugar().Debugf("task %v old resource is %v, the new resource is %v", item.RuntimeKey, *instance.Status.Resource, *resource) - instance.Status.Resource = resource - err = t.client.Status().Update(ctx, &instance) - if nil != err { - return err - } + t.log.Sugar().Debugf("task %v old resource is %v, the new resource is %v", item.RuntimeKey, *instance.Status.Resource, *resource) + instance.Status.Resource = resource + err = t.client.Status().Update(ctx, &instance) + if nil != err { + return err + } - case types.KindNameAppHttpHealthy: - instance := crd.AppHttpHealthy{} - err := t.apiReader.Get(ctx, k8types.NamespacedName{Name: item.TaskName}, &instance) - if nil != err { - return err - } + case types.KindNameAppHttpHealthy: + instance := crd.AppHttpHealthy{} + err := t.apiReader.Get(ctx, k8types.NamespacedName{Name: taskName}, &instance) + if nil != err { + return err + } - // check the resource whether is already equal - if reflect.DeepEqual(instance.Status.Resource, resource) { - t.log.Sugar().Debugf("task %v resource already updatede, skip it", item.RuntimeKey) - return nil - } + // check the resource whether is already equal + if reflect.DeepEqual(instance.Status.Resource, resource) { + t.log.Sugar().Debugf("task %v resource already updatede, skip it", item.RuntimeKey) + return nil + } - t.log.Sugar().Debugf("task %v old resource is %v, the new resource is %v", item.RuntimeKey, *instance.Status.Resource, *resource) - instance.Status.Resource = resource - err = t.client.Status().Update(ctx, &instance) - if nil != err { - return err 
- } + t.log.Sugar().Debugf("task %v old resource is %v, the new resource is %v", item.RuntimeKey, *instance.Status.Resource, *resource) + instance.Status.Resource = resource + err = t.client.Status().Update(ctx, &instance) + if nil != err { + return err + } - case types.KindNameNetdns: - instance := crd.Netdns{} - err := t.apiReader.Get(ctx, k8types.NamespacedName{Name: item.TaskName}, &instance) - if nil != err { - return err - } + case types.KindNameNetdns: + instance := crd.Netdns{} + err := t.apiReader.Get(ctx, k8types.NamespacedName{Name: taskName}, &instance) + if nil != err { + return err + } - // check the resource whether is already equal - if reflect.DeepEqual(instance.Status.Resource, resource) { - t.log.Sugar().Debugf("task %v resource already updatede, skip it", item.RuntimeKey) - return nil - } + // check the resource whether is already equal + if reflect.DeepEqual(instance.Status.Resource, resource) { + t.log.Sugar().Debugf("task %v resource already updatede, skip it", item.RuntimeKey) + return nil + } - t.log.Sugar().Debugf("task %v old resource is %v, the new resource is %v", item.RuntimeKey, *instance.Status.Resource, *resource) - instance.Status.Resource = resource - err = t.client.Status().Update(ctx, &instance) - if nil != err { - return err - } + t.log.Sugar().Debugf("task %v old resource is %v, the new resource is %v", item.RuntimeKey, *instance.Status.Resource, *resource) + instance.Status.Resource = resource + err = t.client.Status().Update(ctx, &instance) + if nil != err { + return err + } - default: - return fmt.Errorf("unsupported task '%s/%s'", item.TaskKind, item.TaskName) + default: + return fmt.Errorf("unsupported task '%s/%s'", taskKind, taskName) + } } return nil diff --git a/pkg/types/agent_config.go b/pkg/types/agent_config.go index fe7d24fe..7ce2534e 100644 --- a/pkg/types/agent_config.go +++ b/pkg/types/agent_config.go @@ -67,6 +67,7 @@ type AgentConfigStruct struct { TaskName string ServiceV4Name string ServiceV6Name string + GeneralAgent bool // from configmap Configmap ConfigmapConfig diff --git a/pkg/types/controller_config.go b/pkg/types/controller_config.go index e890eebe..484de169 100644 --- a/pkg/types/controller_config.go +++ b/pkg/types/controller_config.go @@ -20,6 +20,10 @@ var ControllerEnvMapping = []EnvMapping{ {"ENV_POD_NAMESPACE", "", &ControllerConfig.PodNamespace}, {"ENV_GOLANG_MAXPROCS", "8", &ControllerConfig.GolangMaxProcs}, {"ENV_AGENT_GRPC_LISTEN_PORT", "3000", &ControllerConfig.AgentGrpcListenPort}, + {"ENV_GENERAL_AGENT_NAME", "kdoctor-agent", &ControllerConfig.GeneralAgentName}, + {"ENV_GENERAL_AGENT_TYPE", "Daemonset", &ControllerConfig.GeneralAgentType}, + {"ENV_GENERAL_AGENT_SERVICE_V4_NAME", "", &ControllerConfig.GereralAgentServiceV4Name}, + {"ENV_GENERAL_AGENT_SERVICE_V6_NAME", "", &ControllerConfig.GereralAgentServiceV6Name}, {"ENV_ENABLE_AGGREGATE_AGENT_REPORT", "false", &ControllerConfig.EnableAggregateAgentReport}, {"ENV_CONTROLLER_REPORT_STORAGE_PATH", "/report", &ControllerConfig.DirPathControllerReport}, {"ENV_AGENT_REPORT_STORAGE_PATH", "", &ControllerConfig.DirPathAgentReport}, @@ -44,8 +48,12 @@ type ControllerConfigStruct struct { PyroscopeServerAddress string GolangMaxProcs int32 - PodName string - PodNamespace string + PodName string + PodNamespace string + GeneralAgentName string + GeneralAgentType string + GereralAgentServiceV4Name string + GereralAgentServiceV6Name string EnableAggregateAgentReport bool CleanAgedReportInMinute int32 diff --git a/test/docs/Runtime.md b/test/docs/Runtime.md index 
8b14037c..9c61d5fe 100644 --- a/test/docs/Runtime.md +++ b/test/docs/Runtime.md @@ -1,16 +1,22 @@ # E2E Cases for Task Runtime -| Case ID | Title | Priority | Smoke | Status | Other | -|---------|------------------------------------------------------------------------------------------------|----------|-------|--------|-------------| -| E00001 | Successfully testing Task NetReach Runtime DaemonSet Service Ingress creation | p1 | | done | | -| E00002 | Successfully testing Task NetAppHttpHealthy Runtime DaemonSet Service creation | p1 | | done | | -| E00003 | Successfully testing Task NetDns Runtime DaemonSet Service creation | p1 | | done | | -| E00004 | Successfully testing Task NetReach Runtime Deployment Service Ingress creation | p1 | | done | | -| E00005 | Successfully testing Task NetAppHttpHealthy Runtime Deployment Service creation | p1 | | done | | -| E00006 | Successfully testing Task NetDns Runtime Deployment Service creation | p1 | | done | | -| E00007 | Successfully testing cascading deletion with Task NetReach DaemonSet Service and Ingress | p1 | | done | | -| E00008 | Successfully testing cascading deletion with Task NetAppHttpHealthy DaemonSet Service | p1 | | done | | -| E00009 | Successfully testing cascading deletion with Task NetDns DaemonSet Service | p1 | | done | | -| E00010 | Successfully testing cascading deletion with Task NetReach Deployment Service and Ingress | p1 | | done | | -| E00011 | Successfully testing cascading deletion with Task NetAppHttpHealthy Deployment Service | p1 | | done | | -| E00012 | Successfully testing cascading deletion with Task NetDns Deployment Service | p1 | | done | | \ No newline at end of file +| Case ID | Title | Priority | Smoke | Status | Other | +|---------|-------------------------------------------------------------------------------------------|----------|-------|--------|-------------| +| E00001 | Successfully testing Task NetReach Runtime DaemonSet Service Ingress creation | p1 | | done | | +| E00002 | Successfully testing Task NetAppHttpHealthy Runtime DaemonSet Service creation | p1 | | done | | +| E00003 | Successfully testing Task NetDns Runtime DaemonSet Service creation | p1 | | done | | +| E00004 | Successfully testing Task NetReach Runtime Deployment Service Ingress creation | p1 | | done | | +| E00005 | Successfully testing Task NetAppHttpHealthy Runtime Deployment Service creation | p1 | | done | | +| E00006 | Successfully testing Task NetDns Runtime Deployment Service creation | p1 | | done | | +| E00007 | Successfully testing cascading deletion with Task NetReach DaemonSet Service and Ingress | p1 | | done | | +| E00008 | Successfully testing cascading deletion with Task NetAppHttpHealthy DaemonSet Service | p1 | | done | | +| E00009 | Successfully testing cascading deletion with Task NetDns DaemonSet Service | p1 | | done | | +| E00010 | Successfully testing cascading deletion with Task NetReach Deployment Service and Ingress | p1 | | done | | +| E00011 | Successfully testing cascading deletion with Task NetAppHttpHealthy Deployment Service | p1 | | done | | +| E00012 | Successfully testing cascading deletion with Task NetDns Deployment Service | p1 | | done | | +| E00013 | Successfully testing using default daemonSet as workload with Task NetReach | p1 | | done | | +| E00014 | Successfully testing using default daemonSet as workload with Task AppHttpHealthy | p1 | | done | | +| E00015 | Successfully testing using default daemonSet as workload with Task NetDns | p1 | | done | | +| E00016 | Successfully testing 
using default daemonSet as workload with more Task NetReach | p1 | | done | | +| E00017 | Successfully testing using default daemonSet as workload with more Task AppHttpHealthy | p1 | | done | | +| E00018 | Successfully testing using default daemonSet as workload with more Task NetDns | p1 | | done | | \ No newline at end of file diff --git a/test/e2e/apphttphealth/apphttphealth_test.go b/test/e2e/apphttphealth/apphttphealth_test.go index eb2b85b6..8b63a970 100644 --- a/test/e2e/apphttphealth/apphttphealth_test.go +++ b/test/e2e/apphttphealth/apphttphealth_test.go @@ -30,7 +30,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // agent agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -93,7 +93,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -156,7 +156,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -217,7 +217,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -281,7 +281,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -346,7 +346,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -409,7 +409,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -474,7 +474,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -536,7 +536,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // 
agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -598,7 +598,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -660,7 +660,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -722,7 +722,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -787,7 +787,7 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -835,4 +835,114 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { e = common.CheckRuntimeDeadLine(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, 120) Expect(e).NotTo(HaveOccurred(), "check task runtime resource delete") }) + + It("Successfully testing using default daemonSet as workload with Task AppHttpHealthy ", Label("E00014"), func() { + var e error + successRate := float64(1) + successMean := int64(1500) + crontab := "0 1" + appHttpHealthName := "apphttphealth-get" + tools.RandomName() + + appHttpHealth := new(v1beta1.AppHttpHealthy) + appHttpHealth.Name = appHttpHealthName + + // successCondition + successCondition := new(v1beta1.NetSuccessCondition) + successCondition.SuccessRate = &successRate + successCondition.MeanAccessDelayInMs = &successMean + appHttpHealth.Spec.SuccessCondition = successCondition + + // target + target := new(v1beta1.AppHttpHealthyTarget) + target.Method = "GET" + if net.ParseIP(testSvcIP).To4() == nil { + target.Host = fmt.Sprintf("http://[%s]:%d/?task=%s", testSvcIP, httpPort, appHttpHealthName) + } else { + target.Host = fmt.Sprintf("http://%s:%d?task=%s", testSvcIP, httpPort, appHttpHealthName) + } + appHttpHealth.Spec.Target = target + + // request + request := new(v1beta1.NetHttpRequest) + request.PerRequestTimeoutInMS = 2000 + request.QPS = 10 + request.DurationInSecond = 10 + appHttpHealth.Spec.Request = request + + // Schedule + Schedule := new(v1beta1.SchedulePlan) + Schedule.Schedule = &crontab + Schedule.RoundNumber = 1 + Schedule.RoundTimeoutMinute = 1 + appHttpHealth.Spec.Schedule = Schedule + + e = frame.CreateResource(appHttpHealth) + Expect(e).NotTo(HaveOccurred(), "create appHttpHealth resource") + + e = common.CheckRuntime(frame, appHttpHealth, pluginManager.KindNameAppHttpHealthy, 60) + Expect(e).NotTo(HaveOccurred(), 
"check task runtime spec") + + e = common.WaitKdoctorTaskDone(frame, appHttpHealth, pluginManager.KindNameAppHttpHealthy, 120) + Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") + + success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum, appHttpHealth) + Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") + + }) + + It("Successfully testing using default daemonSet as workload with Task AppHttpHealthy ", Label("E00017"), func() { + var e error + successRate := float64(1) + successMean := int64(1500) + crontab := "0 1" + appHttpHealthName := "apphttphealth-get" + tools.RandomName() + + appHttpHealth := new(v1beta1.AppHttpHealthy) + appHttpHealth.Name = appHttpHealthName + + // successCondition + successCondition := new(v1beta1.NetSuccessCondition) + successCondition.SuccessRate = &successRate + successCondition.MeanAccessDelayInMs = &successMean + appHttpHealth.Spec.SuccessCondition = successCondition + + // target + target := new(v1beta1.AppHttpHealthyTarget) + target.Method = "GET" + if net.ParseIP(testSvcIP).To4() == nil { + target.Host = fmt.Sprintf("http://[%s]:%d/?task=%s", testSvcIP, httpPort, appHttpHealthName) + } else { + target.Host = fmt.Sprintf("http://%s:%d?task=%s", testSvcIP, httpPort, appHttpHealthName) + } + appHttpHealth.Spec.Target = target + + // request + request := new(v1beta1.NetHttpRequest) + request.PerRequestTimeoutInMS = 2000 + request.QPS = 10 + request.DurationInSecond = 10 + appHttpHealth.Spec.Request = request + + // Schedule + Schedule := new(v1beta1.SchedulePlan) + Schedule.Schedule = &crontab + Schedule.RoundNumber = 1 + Schedule.RoundTimeoutMinute = 1 + appHttpHealth.Spec.Schedule = Schedule + + e = frame.CreateResource(appHttpHealth) + Expect(e).NotTo(HaveOccurred(), "create appHttpHealth resource") + + e = common.CheckRuntime(frame, appHttpHealth, pluginManager.KindNameAppHttpHealthy, 60) + Expect(e).NotTo(HaveOccurred(), "check task runtime spec") + + e = common.WaitKdoctorTaskDone(frame, appHttpHealth, pluginManager.KindNameAppHttpHealthy, 120) + Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") + + success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum, appHttpHealth) + Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") + + }) }) diff --git a/test/e2e/common/tools.go b/test/e2e/common/tools.go index 6bc6c8ce..53b6d260 100644 --- a/test/e2e/common/tools.go +++ b/test/e2e/common/tools.go @@ -596,7 +596,11 @@ func CheckRuntime(f *frame.Framework, task client.Object, taskKind string, timeo } // checkAgentSpec check agentSpec generate deployment or daemonSet is right -func checkAgentSpec(f *frame.Framework, task client.Object, agentSpec v1beta1.AgentSpec, taskStatus v1beta1.TaskStatus, taskKind string) error { +func checkAgentSpec(f *frame.Framework, task client.Object, agentSpec *v1beta1.AgentSpec, taskStatus v1beta1.TaskStatus, taskKind string) error { + + if agentSpec == nil { + return nil + } switch agentSpec.Kind { case kdoctor_types.KindDaemonSet: diff --git a/test/e2e/netdns/netdns_test.go b/test/e2e/netdns/netdns_test.go index d45b7a4b..40121c6d 100644 --- a/test/e2e/netdns/netdns_test.go +++ b/test/e2e/netdns/netdns_test.go @@ -30,7 +30,7 @@ var _ = Describe("testing netDns ", Label("netDns"), func() { // agentSpec agentSpec := 
new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - netDns.Spec.AgentSpec = *agentSpec + netDns.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -99,7 +99,7 @@ var _ = Describe("testing netDns ", Label("netDns"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - netDns.Spec.AgentSpec = *agentSpec + netDns.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -152,4 +152,126 @@ var _ = Describe("testing netDns ", Label("netDns"), func() { e = common.CheckRuntimeDeadLine(frame, netDnsName, pluginManager.KindNameNetdns, 120) Expect(e).NotTo(HaveOccurred(), "check task runtime resource delete") }) + + It("Successfully testing using default daemonSet as workload with Task NetDns ", Label("E00015"), func() { + var e error + successRate := float64(1) + successMean := int64(1500) + crontab := "0 1" + netDnsName := "netdns-e2e-" + tools.RandomName() + // netdns + netDns := new(v1beta1.Netdns) + netDns.Name = netDnsName + + // successCondition + successCondition := new(v1beta1.NetSuccessCondition) + successCondition.SuccessRate = &successRate + successCondition.MeanAccessDelayInMs = &successMean + netDns.Spec.SuccessCondition = successCondition + + // target + target := new(v1beta1.NetDnsTarget) + targetDns := new(v1beta1.NetDnsTargetDnsSpec) + targetDns.TestIPv4 = &common.TestIPv4 + targetDns.TestIPv6 = &common.TestIPv6 + targetDns.ServiceName = &KubeDnsName + targetDns.ServiceNamespace = &KubeDnsNamespace + target.NetDnsTargetDns = targetDns + netDns.Spec.Target = target + + // request + request := new(v1beta1.NetdnsRequest) + var perRequestTimeoutInMS = uint64(1000) + var qps = uint64(10) + var durationInSecond = uint64(10) + request.PerRequestTimeoutInMS = &perRequestTimeoutInMS + request.QPS = &qps + request.DurationInSecond = &durationInSecond + request.Domain = fmt.Sprintf(targetDomain, netDnsName) + protocol := "udp" + request.Protocol = &protocol + netDns.Spec.Request = request + + // Schedule + Schedule := new(v1beta1.SchedulePlan) + Schedule.Schedule = &crontab + Schedule.RoundNumber = 1 + Schedule.RoundTimeoutMinute = 1 + netDns.Spec.Schedule = Schedule + + e = frame.CreateResource(netDns) + Expect(e).NotTo(HaveOccurred(), "create netDns resource") + + e = common.CheckRuntime(frame, netDns, pluginManager.KindNameNetdns, 60) + Expect(e).NotTo(HaveOccurred(), "check task runtime spec") + + e = common.WaitKdoctorTaskDone(frame, netDns, pluginManager.KindNameNetdns, 120) + Expect(e).NotTo(HaveOccurred(), "wait netDns task finish") + + success, e := common.CompareResult(frame, netDnsName, pluginManager.KindNameNetdns, []string{}, reportNum, netDns) + Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).NotTo(BeFalse(), "compare report and task result") + + }) + + It("Successfully testing using default daemonSet as workload with more Task NetDns ", Label("E00018"), func() { + var e error + successRate := float64(1) + successMean := int64(1500) + crontab := "0 1" + netDnsName := "netdns-e2e-" + tools.RandomName() + // netdns + netDns := new(v1beta1.Netdns) + netDns.Name = netDnsName + + // successCondition + successCondition := new(v1beta1.NetSuccessCondition) + successCondition.SuccessRate = &successRate + successCondition.MeanAccessDelayInMs = &successMean + netDns.Spec.SuccessCondition = successCondition + + // target + target := new(v1beta1.NetDnsTarget) + targetDns := 
new(v1beta1.NetDnsTargetDnsSpec) + targetDns.TestIPv4 = &common.TestIPv4 + targetDns.TestIPv6 = &common.TestIPv6 + targetDns.ServiceName = &KubeDnsName + targetDns.ServiceNamespace = &KubeDnsNamespace + target.NetDnsTargetDns = targetDns + netDns.Spec.Target = target + + // request + request := new(v1beta1.NetdnsRequest) + var perRequestTimeoutInMS = uint64(1000) + var qps = uint64(10) + var durationInSecond = uint64(10) + request.PerRequestTimeoutInMS = &perRequestTimeoutInMS + request.QPS = &qps + request.DurationInSecond = &durationInSecond + request.Domain = fmt.Sprintf(targetDomain, netDnsName) + protocol := "udp" + request.Protocol = &protocol + netDns.Spec.Request = request + + // Schedule + Schedule := new(v1beta1.SchedulePlan) + Schedule.Schedule = &crontab + Schedule.RoundNumber = 1 + Schedule.RoundTimeoutMinute = 1 + netDns.Spec.Schedule = Schedule + + e = frame.CreateResource(netDns) + Expect(e).NotTo(HaveOccurred(), "create netDns resource") + + e = common.CheckRuntime(frame, netDns, pluginManager.KindNameNetdns, 60) + Expect(e).NotTo(HaveOccurred(), "check task runtime spec") + + e = common.WaitKdoctorTaskDone(frame, netDns, pluginManager.KindNameNetdns, 120) + Expect(e).NotTo(HaveOccurred(), "wait netDns task finish") + + success, e := common.CompareResult(frame, netDnsName, pluginManager.KindNameNetdns, []string{}, reportNum, netDns) + Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).NotTo(BeFalse(), "compare report and task result") + + }) }) diff --git a/test/e2e/netreach/netreach_test.go b/test/e2e/netreach/netreach_test.go index e2286157..9b43da68 100644 --- a/test/e2e/netreach/netreach_test.go +++ b/test/e2e/netreach/netreach_test.go @@ -27,7 +27,7 @@ var _ = Describe("testing netReach ", Label("netReach"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - netReach.Spec.AgentSpec = *agentSpec + netReach.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -83,4 +83,127 @@ var _ = Describe("testing netReach ", Label("netReach"), func() { Expect(e).NotTo(HaveOccurred(), "check task runtime resource delete") }) + It("Successfully testing using default daemonSet as workload with Task NetReach", Label("E00013"), func() { + var e error + successRate := float64(1) + successMean := int64(1500) + crontab := "0 1" + netReachName := "netreach-" + tools.RandomName() + + netReach := new(v1beta1.NetReach) + netReach.Name = netReachName + + // successCondition + successCondition := new(v1beta1.NetSuccessCondition) + successCondition.SuccessRate = &successRate + successCondition.MeanAccessDelayInMs = &successMean + netReach.Spec.SuccessCondition = successCondition + enable := true + disable := false + // target + target := new(v1beta1.NetReachTarget) + if !common.TestIPv4 && common.TestIPv6 { + target.Ingress = &disable + } else { + target.Ingress = &enable + } + target.LoadBalancer = &enable + target.ClusterIP = &enable + target.Endpoint = &enable + target.NodePort = &enable + target.MultusInterface = &disable + target.IPv4 = &common.TestIPv4 + target.IPv6 = &common.TestIPv6 + netReach.Spec.Target = target + + // request + request := new(v1beta1.NetHttpRequest) + request.PerRequestTimeoutInMS = 1000 + request.QPS = 10 + request.DurationInSecond = 10 + netReach.Spec.Request = request + + // Schedule + Schedule := new(v1beta1.SchedulePlan) + Schedule.Schedule = &crontab + Schedule.RoundNumber = 1 + Schedule.RoundTimeoutMinute = 1 + 
netReach.Spec.Schedule = Schedule + + e = frame.CreateResource(netReach) + Expect(e).NotTo(HaveOccurred(), "create netReach resource") + + e = common.CheckRuntime(frame, netReach, pluginManager.KindNameNetReach, 60) + Expect(e).NotTo(HaveOccurred(), "check task runtime spec") + + e = common.WaitKdoctorTaskDone(frame, netReach, pluginManager.KindNameNetReach, 120) + Expect(e).NotTo(HaveOccurred(), "wait netReach task finish") + + success, e := common.CompareResult(frame, netReachName, pluginManager.KindNameNetReach, []string{}, reportNum, netReach) + Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") + + }) + + It("Successfully testing using default daemonSet as workload with more Task NetReach", Label("E00016"), func() { + var e error + successRate := float64(1) + successMean := int64(1500) + crontab := "0 1" + netReachName := "netreach-" + tools.RandomName() + + netReach := new(v1beta1.NetReach) + netReach.Name = netReachName + + // successCondition + successCondition := new(v1beta1.NetSuccessCondition) + successCondition.SuccessRate = &successRate + successCondition.MeanAccessDelayInMs = &successMean + netReach.Spec.SuccessCondition = successCondition + enable := true + disable := false + // target + target := new(v1beta1.NetReachTarget) + if !common.TestIPv4 && common.TestIPv6 { + target.Ingress = &disable + } else { + target.Ingress = &enable + } + target.LoadBalancer = &enable + target.ClusterIP = &enable + target.Endpoint = &enable + target.NodePort = &enable + target.MultusInterface = &disable + target.IPv4 = &common.TestIPv4 + target.IPv6 = &common.TestIPv6 + netReach.Spec.Target = target + + // request + request := new(v1beta1.NetHttpRequest) + request.PerRequestTimeoutInMS = 1000 + request.QPS = 10 + request.DurationInSecond = 10 + netReach.Spec.Request = request + + // Schedule + Schedule := new(v1beta1.SchedulePlan) + Schedule.Schedule = &crontab + Schedule.RoundNumber = 1 + Schedule.RoundTimeoutMinute = 1 + netReach.Spec.Schedule = Schedule + + e = frame.CreateResource(netReach) + Expect(e).NotTo(HaveOccurred(), "create netReach resource") + + e = common.CheckRuntime(frame, netReach, pluginManager.KindNameNetReach, 60) + Expect(e).NotTo(HaveOccurred(), "check task runtime spec") + + e = common.WaitKdoctorTaskDone(frame, netReach, pluginManager.KindNameNetReach, 120) + Expect(e).NotTo(HaveOccurred(), "wait netReach task finish") + + success, e := common.CompareResult(frame, netReachName, pluginManager.KindNameNetReach, []string{}, reportNum, netReach) + Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") + + }) }) diff --git a/test/e2e/runtime/runtime_test.go b/test/e2e/runtime/runtime_test.go index 46a59d68..ca0eab9d 100644 --- a/test/e2e/runtime/runtime_test.go +++ b/test/e2e/runtime/runtime_test.go @@ -32,7 +32,7 @@ var _ = Describe("testing runtime ", Label("runtime"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - netReach.Spec.AgentSpec = *agentSpec + netReach.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -109,7 +109,7 @@ var _ = Describe("testing runtime ", Label("runtime"), func() { // agent agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := 
new(v1beta1.NetSuccessCondition) @@ -174,7 +174,7 @@ var _ = Describe("testing runtime ", Label("runtime"), func() { // agentSpec agentSpec := new(v1beta1.AgentSpec) agentSpec.TerminationGracePeriodMinutes = &termMin - netDns.Spec.AgentSpec = *agentSpec + netDns.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -254,7 +254,7 @@ var _ = Describe("testing runtime ", Label("runtime"), func() { agentSpec.TerminationGracePeriodMinutes = &termMin agentSpec.Kind = types.KindDeployment agentSpec.DeploymentReplicas = &replicas - netReach.Spec.AgentSpec = *agentSpec + netReach.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -333,7 +333,7 @@ var _ = Describe("testing runtime ", Label("runtime"), func() { agentSpec.TerminationGracePeriodMinutes = &termMin agentSpec.Kind = types.KindDeployment agentSpec.DeploymentReplicas = &replicas - appHttpHealth.Spec.AgentSpec = *agentSpec + appHttpHealth.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition) @@ -400,7 +400,7 @@ var _ = Describe("testing runtime ", Label("runtime"), func() { agentSpec.TerminationGracePeriodMinutes = &termMin agentSpec.Kind = types.KindDeployment agentSpec.DeploymentReplicas = &replicas - netDns.Spec.AgentSpec = *agentSpec + netDns.Spec.AgentSpec = agentSpec // successCondition successCondition := new(v1beta1.NetSuccessCondition)
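Illustration, not part of the patch: the new E00013-E00018 cases above all opt into the default kdoctor-agent DaemonSet by leaving Spec.AgentSpec nil. A minimal Go sketch of that pattern follows; the import path, the function name newDefaultAgentAppHttpHealthy, and the url parameter are assumed for the example, while the field names mirror the test code in this diff.

// Illustrative sketch only. The import path is assumed from the repository
// layout and may differ.
package example

import (
	"github.com/kdoctor-io/kdoctor/pkg/k8s/apis/kdoctor.io/v1beta1"
)

// newDefaultAgentAppHttpHealthy builds an AppHttpHealthy task that leaves
// Spec.AgentSpec nil, so the controller runs it on the shared kdoctor-agent
// DaemonSet instead of creating a dedicated per-task runtime, mirroring the
// E00014/E00017 cases above.
func newDefaultAgentAppHttpHealthy(name, url string) *v1beta1.AppHttpHealthy {
	successRate := float64(1)
	successMean := int64(1500)
	crontab := "0 1"

	task := new(v1beta1.AppHttpHealthy)
	task.Name = name
	// AgentSpec intentionally left nil: fall back to the default DaemonSet.
	task.Spec.SuccessCondition = &v1beta1.NetSuccessCondition{
		SuccessRate:         &successRate,
		MeanAccessDelayInMs: &successMean,
	}
	task.Spec.Target = &v1beta1.AppHttpHealthyTarget{
		Method: "GET",
		Host:   url,
	}
	task.Spec.Request = &v1beta1.NetHttpRequest{
		PerRequestTimeoutInMS: 2000,
		QPS:                   10,
		DurationInSecond:      10,
	}
	task.Spec.Schedule = &v1beta1.SchedulePlan{
		Schedule:           &crontab,
		RoundNumber:        1,
		RoundTimeoutMinute: 1,
	}
	return task
}

Omitting AgentSpec keeps per-task pod churn to zero, which is the design motivation for the shared runtime exercised by these cases.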