diff --git a/build/charts/antrea/conf/antrea-agent.conf b/build/charts/antrea/conf/antrea-agent.conf index 3d6dee19dbe..6bbe0824a23 100644 --- a/build/charts/antrea/conf/antrea-agent.conf +++ b/build/charts/antrea/conf/antrea-agent.conf @@ -24,9 +24,12 @@ featureGates: # be enabled, otherwise this flag will not take effect. {{- include "featureGate" (dict "featureGates" .Values.featureGates "name" "CleanupStaleUDPSvcConntrack" "default" true) }} -# Enable traceflow which provides packet tracing feature to diagnose network issue. +# Enable Traceflow which provides packet tracing feature to diagnose network issue. {{- include "featureGate" (dict "featureGates" .Values.featureGates "name" "Traceflow" "default" true) }} +# Enable PacketCapture feature which supports capturing packets to diagnose network issues. +{{- include "featureGate" (dict "featureGates" .Values.featureGates "name" "PacketCapture" "default" false) }} + # Enable NodePortLocal feature to make the Pods reachable externally through NodePort {{- include "featureGate" (dict "featureGates" .Values.featureGates "name" "NodePortLocal" "default" true) }} diff --git a/build/charts/antrea/crds/packetcapture.yaml b/build/charts/antrea/crds/packetcapture.yaml new file mode 100644 index 00000000000..48d7be27040 --- /dev/null +++ b/build/charts/antrea/crds/packetcapture.yaml @@ -0,0 +1,193 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: packetcaptures.crd.antrea.io + labels: + app: antrea +spec: + group: crd.antrea.io + versions: + - name: v1alpha1 + served: true + storage: true + additionalPrinterColumns: + - jsonPath: .status.phase + description: The phase of the PacketCapture. + name: Phase + type: string + - jsonPath: .spec.source.pod + description: The name of the source Pod. + name: Source-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.pod + description: The name of the destination Pod. + name: Destination-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.ip + description: The IP address of the destination. + name: Destination-IP + type: string + priority: 10 + - jsonPath: .spec.timeout + description: Timeout in seconds. 
+ name: Timeout + type: integer + priority: 10 + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + type: object + required: + - source + - captureConfig + - destination + anyOf: + - properties: + source: + required: [pod] + - properties: + destination: + required: [pod] + properties: + source: + type: object + oneOf: + - required: + - pod + - required: + - ip + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + destination: + type: object + oneOf: + - required: + - pod + - required: + - ip + - required: + - service + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + service: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + packet: + type: object + properties: + ipFamily: + type: string + enum: [IPv4, IPv6] + default: IPv4 + protocol: + x-kubernetes-int-or-string: true + transportHeader: + type: object + properties: + udp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + tcp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + flags: + type: integer + minimum: 0 + maximum: 255 + timeout: + type: integer + minimum: 1 + maximum: 300 + default: 60 + captureConfig: + type: object + anyOf: + - properties: + firstN: + required: [number] + properties: + firstN: + type: object + properties: + number: + type: integer + format: int32 + fileServer: + type: object + properties: + url: + type: string + pattern: 's{0,1}ftps{0,1}:\/\/[\w-_./]+:\d+' + status: + type: object + properties: + reason: + type: string + phase: + type: string + startTime: + type: string + numCapturedPackets: + type: integer + packetsFilePath: + type: string + subresources: + status: {} + scope: Cluster + names: + plural: packetcaptures + singular: packetcapture + kind: PacketCapture + shortNames: + - pcap diff --git a/build/charts/antrea/templates/agent/clusterrole.yaml b/build/charts/antrea/templates/agent/clusterrole.yaml index a2a74e45beb..f0810484241 100644 --- a/build/charts/antrea/templates/agent/clusterrole.yaml +++ b/build/charts/antrea/templates/agent/clusterrole.yaml @@ -39,6 +39,14 @@ rules: - pods/status verbs: - patch + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - antrea-packetcapture-fileserver-auth + verbs: + - get - apiGroups: - "" resources: @@ -160,6 +168,20 @@ rules: - patch - create - delete + - apiGroups: + - crd.antrea.io + resources: + - packetcaptures + verbs: + - get + - watch + - list + - apiGroups: + - crd.antrea.io + resources: + - packetcaptures/status + verbs: + - patch - apiGroups: - crd.antrea.io resources: diff --git a/build/yamls/antrea-aks.yml b/build/yamls/antrea-aks.yml index c1cbf2da1e2..09b506927c4 100644 --- a/build/yamls/antrea-aks.yml +++ b/build/yamls/antrea-aks.yml @@ -2896,6 +2896,202 @@ spec: shortNames: - nlm +--- +# Source: antrea/crds/packetcapture.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: packetcaptures.crd.antrea.io + labels: + app: antrea +spec: + group: crd.antrea.io + versions: + - name: v1alpha1 + served: true + storage: true + additionalPrinterColumns: + - 
jsonPath: .status.phase + description: The phase of the PacketCapture. + name: Phase + type: string + - jsonPath: .spec.source.pod + description: The name of the source Pod. + name: Source-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.pod + description: The name of the destination Pod. + name: Destination-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.ip + description: The IP address of the destination. + name: Destination-IP + type: string + priority: 10 + - jsonPath: .spec.timeout + description: Timeout in seconds. + name: Timeout + type: integer + priority: 10 + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + type: object + required: + - source + - captureConfig + - destination + anyOf: + - properties: + source: + required: [pod] + - properties: + destination: + required: [pod] + properties: + source: + type: object + oneOf: + - required: + - pod + - required: + - ip + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + destination: + type: object + oneOf: + - required: + - pod + - required: + - ip + - required: + - service + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + service: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + packet: + type: object + properties: + ipFamily: + type: string + enum: [IPv4, IPv6] + default: IPv4 + protocol: + x-kubernetes-int-or-string: true + transportHeader: + type: object + properties: + udp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + tcp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + flags: + type: integer + minimum: 0 + maximum: 255 + timeout: + type: integer + minimum: 1 + maximum: 300 + default: 60 + captureConfig: + type: object + anyOf: + - properties: + firstN: + required: [number] + properties: + firstN: + type: object + properties: + number: + type: integer + format: int32 + fileServer: + type: object + properties: + url: + type: string + pattern: 's{0,1}ftps{0,1}:\/\/[\w-_./]+:\d+' + status: + type: object + properties: + reason: + type: string + phase: + type: string + startTime: + type: string + numCapturedPackets: + type: integer + packetsFilePath: + type: string + subresources: + status: {} + scope: Cluster + names: + plural: packetcaptures + singular: packetcapture + kind: PacketCapture + shortNames: + - pcap + --- # Source: antrea/crds/supportbundlecollection.yaml apiVersion: apiextensions.k8s.io/v1 @@ -3770,9 +3966,12 @@ data: # be enabled, otherwise this flag will not take effect. # CleanupStaleUDPSvcConntrack: true - # Enable traceflow which provides packet tracing feature to diagnose network issue. + # Enable Traceflow which provides packet tracing feature to diagnose network issue. # Traceflow: true + # Enable PacketCapture feature which supports capturing packets to diagnose network issues. 
+ # PacketCapture: false + # Enable NodePortLocal feature to make the Pods reachable externally through NodePort # NodePortLocal: true @@ -4354,6 +4553,14 @@ rules: - pods/status verbs: - patch + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - antrea-packetcapture-fileserver-auth + verbs: + - get - apiGroups: - "" resources: @@ -4475,6 +4682,20 @@ rules: - patch - create - delete + - apiGroups: + - crd.antrea.io + resources: + - packetcaptures + verbs: + - get + - watch + - list + - apiGroups: + - crd.antrea.io + resources: + - packetcaptures/status + verbs: + - patch - apiGroups: - crd.antrea.io resources: @@ -5168,7 +5389,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 4325a243ab510df539883b6384a30cf8b04ff862796444a6c5c10999159479c5 + checksum/config: e2d1d8af083c88667ac4c22c87dea63e595b2f4f770190c32afb00c480440fe3 labels: app: antrea component: antrea-agent @@ -5406,7 +5627,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 4325a243ab510df539883b6384a30cf8b04ff862796444a6c5c10999159479c5 + checksum/config: e2d1d8af083c88667ac4c22c87dea63e595b2f4f770190c32afb00c480440fe3 labels: app: antrea component: antrea-controller diff --git a/build/yamls/antrea-crds.yml b/build/yamls/antrea-crds.yml index 6b6a2fb3091..494a33e5880 100644 --- a/build/yamls/antrea-crds.yml +++ b/build/yamls/antrea-crds.yml @@ -2873,6 +2873,200 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + name: packetcaptures.crd.antrea.io + labels: + app: antrea +spec: + group: crd.antrea.io + versions: + - name: v1alpha1 + served: true + storage: true + additionalPrinterColumns: + - jsonPath: .status.phase + description: The phase of the PacketCapture. + name: Phase + type: string + - jsonPath: .spec.source.pod + description: The name of the source Pod. + name: Source-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.pod + description: The name of the destination Pod. + name: Destination-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.ip + description: The IP address of the destination. + name: Destination-IP + type: string + priority: 10 + - jsonPath: .spec.timeout + description: Timeout in seconds. 
+ name: Timeout + type: integer + priority: 10 + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + type: object + required: + - source + - captureConfig + - destination + anyOf: + - properties: + source: + required: [pod] + - properties: + destination: + required: [pod] + properties: + source: + type: object + oneOf: + - required: + - pod + - required: + - ip + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + destination: + type: object + oneOf: + - required: + - pod + - required: + - ip + - required: + - service + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + service: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + packet: + type: object + properties: + ipFamily: + type: string + enum: [IPv4, IPv6] + default: IPv4 + protocol: + x-kubernetes-int-or-string: true + transportHeader: + type: object + properties: + udp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + tcp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + flags: + type: integer + minimum: 0 + maximum: 255 + timeout: + type: integer + minimum: 1 + maximum: 300 + default: 60 + captureConfig: + type: object + anyOf: + - properties: + firstN: + required: [number] + properties: + firstN: + type: object + properties: + number: + type: integer + format: int32 + fileServer: + type: object + properties: + url: + type: string + pattern: 's{0,1}ftps{0,1}:\/\/[\w-_./]+:\d+' + status: + type: object + properties: + reason: + type: string + phase: + type: string + startTime: + type: string + numCapturedPackets: + type: integer + packetsFilePath: + type: string + subresources: + status: {} + scope: Cluster + names: + plural: packetcaptures + singular: packetcapture + kind: PacketCapture + shortNames: + - pcap +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: name: supportbundlecollections.crd.antrea.io spec: diff --git a/build/yamls/antrea-eks.yml b/build/yamls/antrea-eks.yml index 1ec9870744a..8cde950c34e 100644 --- a/build/yamls/antrea-eks.yml +++ b/build/yamls/antrea-eks.yml @@ -2896,6 +2896,202 @@ spec: shortNames: - nlm +--- +# Source: antrea/crds/packetcapture.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: packetcaptures.crd.antrea.io + labels: + app: antrea +spec: + group: crd.antrea.io + versions: + - name: v1alpha1 + served: true + storage: true + additionalPrinterColumns: + - jsonPath: .status.phase + description: The phase of the PacketCapture. + name: Phase + type: string + - jsonPath: .spec.source.pod + description: The name of the source Pod. + name: Source-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.pod + description: The name of the destination Pod. + name: Destination-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.ip + description: The IP address of the destination. + name: Destination-IP + type: string + priority: 10 + - jsonPath: .spec.timeout + description: Timeout in seconds. 
+ name: Timeout + type: integer + priority: 10 + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + type: object + required: + - source + - captureConfig + - destination + anyOf: + - properties: + source: + required: [pod] + - properties: + destination: + required: [pod] + properties: + source: + type: object + oneOf: + - required: + - pod + - required: + - ip + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + destination: + type: object + oneOf: + - required: + - pod + - required: + - ip + - required: + - service + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + service: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + packet: + type: object + properties: + ipFamily: + type: string + enum: [IPv4, IPv6] + default: IPv4 + protocol: + x-kubernetes-int-or-string: true + transportHeader: + type: object + properties: + udp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + tcp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + flags: + type: integer + minimum: 0 + maximum: 255 + timeout: + type: integer + minimum: 1 + maximum: 300 + default: 60 + captureConfig: + type: object + anyOf: + - properties: + firstN: + required: [number] + properties: + firstN: + type: object + properties: + number: + type: integer + format: int32 + fileServer: + type: object + properties: + url: + type: string + pattern: 's{0,1}ftps{0,1}:\/\/[\w-_./]+:\d+' + status: + type: object + properties: + reason: + type: string + phase: + type: string + startTime: + type: string + numCapturedPackets: + type: integer + packetsFilePath: + type: string + subresources: + status: {} + scope: Cluster + names: + plural: packetcaptures + singular: packetcapture + kind: PacketCapture + shortNames: + - pcap + --- # Source: antrea/crds/supportbundlecollection.yaml apiVersion: apiextensions.k8s.io/v1 @@ -3770,9 +3966,12 @@ data: # be enabled, otherwise this flag will not take effect. # CleanupStaleUDPSvcConntrack: true - # Enable traceflow which provides packet tracing feature to diagnose network issue. + # Enable Traceflow which provides packet tracing feature to diagnose network issue. # Traceflow: true + # Enable PacketCapture feature which supports capturing packets to diagnose network issues. 
+ # PacketCapture: false + # Enable NodePortLocal feature to make the Pods reachable externally through NodePort # NodePortLocal: true @@ -4354,6 +4553,14 @@ rules: - pods/status verbs: - patch + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - antrea-packetcapture-fileserver-auth + verbs: + - get - apiGroups: - "" resources: @@ -4475,6 +4682,20 @@ rules: - patch - create - delete + - apiGroups: + - crd.antrea.io + resources: + - packetcaptures + verbs: + - get + - watch + - list + - apiGroups: + - crd.antrea.io + resources: + - packetcaptures/status + verbs: + - patch - apiGroups: - crd.antrea.io resources: @@ -5168,7 +5389,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 4325a243ab510df539883b6384a30cf8b04ff862796444a6c5c10999159479c5 + checksum/config: e2d1d8af083c88667ac4c22c87dea63e595b2f4f770190c32afb00c480440fe3 labels: app: antrea component: antrea-agent @@ -5407,7 +5628,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 4325a243ab510df539883b6384a30cf8b04ff862796444a6c5c10999159479c5 + checksum/config: e2d1d8af083c88667ac4c22c87dea63e595b2f4f770190c32afb00c480440fe3 labels: app: antrea component: antrea-controller diff --git a/build/yamls/antrea-gke.yml b/build/yamls/antrea-gke.yml index 5890d414a44..1542c175873 100644 --- a/build/yamls/antrea-gke.yml +++ b/build/yamls/antrea-gke.yml @@ -2896,6 +2896,202 @@ spec: shortNames: - nlm +--- +# Source: antrea/crds/packetcapture.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: packetcaptures.crd.antrea.io + labels: + app: antrea +spec: + group: crd.antrea.io + versions: + - name: v1alpha1 + served: true + storage: true + additionalPrinterColumns: + - jsonPath: .status.phase + description: The phase of the PacketCapture. + name: Phase + type: string + - jsonPath: .spec.source.pod + description: The name of the source Pod. + name: Source-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.pod + description: The name of the destination Pod. + name: Destination-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.ip + description: The IP address of the destination. + name: Destination-IP + type: string + priority: 10 + - jsonPath: .spec.timeout + description: Timeout in seconds. 
+ name: Timeout + type: integer + priority: 10 + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + type: object + required: + - source + - captureConfig + - destination + anyOf: + - properties: + source: + required: [pod] + - properties: + destination: + required: [pod] + properties: + source: + type: object + oneOf: + - required: + - pod + - required: + - ip + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + destination: + type: object + oneOf: + - required: + - pod + - required: + - ip + - required: + - service + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + service: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + packet: + type: object + properties: + ipFamily: + type: string + enum: [IPv4, IPv6] + default: IPv4 + protocol: + x-kubernetes-int-or-string: true + transportHeader: + type: object + properties: + udp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + tcp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + flags: + type: integer + minimum: 0 + maximum: 255 + timeout: + type: integer + minimum: 1 + maximum: 300 + default: 60 + captureConfig: + type: object + anyOf: + - properties: + firstN: + required: [number] + properties: + firstN: + type: object + properties: + number: + type: integer + format: int32 + fileServer: + type: object + properties: + url: + type: string + pattern: 's{0,1}ftps{0,1}:\/\/[\w-_./]+:\d+' + status: + type: object + properties: + reason: + type: string + phase: + type: string + startTime: + type: string + numCapturedPackets: + type: integer + packetsFilePath: + type: string + subresources: + status: {} + scope: Cluster + names: + plural: packetcaptures + singular: packetcapture + kind: PacketCapture + shortNames: + - pcap + --- # Source: antrea/crds/supportbundlecollection.yaml apiVersion: apiextensions.k8s.io/v1 @@ -3770,9 +3966,12 @@ data: # be enabled, otherwise this flag will not take effect. # CleanupStaleUDPSvcConntrack: true - # Enable traceflow which provides packet tracing feature to diagnose network issue. + # Enable Traceflow which provides packet tracing feature to diagnose network issue. # Traceflow: true + # Enable PacketCapture feature which supports capturing packets to diagnose network issues. 
+ # PacketCapture: false + # Enable NodePortLocal feature to make the Pods reachable externally through NodePort # NodePortLocal: true @@ -4354,6 +4553,14 @@ rules: - pods/status verbs: - patch + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - antrea-packetcapture-fileserver-auth + verbs: + - get - apiGroups: - "" resources: @@ -4475,6 +4682,20 @@ rules: - patch - create - delete + - apiGroups: + - crd.antrea.io + resources: + - packetcaptures + verbs: + - get + - watch + - list + - apiGroups: + - crd.antrea.io + resources: + - packetcaptures/status + verbs: + - patch - apiGroups: - crd.antrea.io resources: @@ -5168,7 +5389,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: f5cf00de39a27790a7e158a3eca79123de415b3b09d389ac984b74027bbfaade + checksum/config: 7e42a403d388e2ed556d9b41f4af83917eadd0863d4e2bef67353f5adb2ef6c3 labels: app: antrea component: antrea-agent @@ -5404,7 +5625,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: f5cf00de39a27790a7e158a3eca79123de415b3b09d389ac984b74027bbfaade + checksum/config: 7e42a403d388e2ed556d9b41f4af83917eadd0863d4e2bef67353f5adb2ef6c3 labels: app: antrea component: antrea-controller diff --git a/build/yamls/antrea-ipsec.yml b/build/yamls/antrea-ipsec.yml index 84eafd01dfe..2f0ea5db1a0 100644 --- a/build/yamls/antrea-ipsec.yml +++ b/build/yamls/antrea-ipsec.yml @@ -2896,6 +2896,202 @@ spec: shortNames: - nlm +--- +# Source: antrea/crds/packetcapture.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: packetcaptures.crd.antrea.io + labels: + app: antrea +spec: + group: crd.antrea.io + versions: + - name: v1alpha1 + served: true + storage: true + additionalPrinterColumns: + - jsonPath: .status.phase + description: The phase of the PacketCapture. + name: Phase + type: string + - jsonPath: .spec.source.pod + description: The name of the source Pod. + name: Source-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.pod + description: The name of the destination Pod. + name: Destination-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.ip + description: The IP address of the destination. + name: Destination-IP + type: string + priority: 10 + - jsonPath: .spec.timeout + description: Timeout in seconds. 
+ name: Timeout + type: integer + priority: 10 + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + type: object + required: + - source + - captureConfig + - destination + anyOf: + - properties: + source: + required: [pod] + - properties: + destination: + required: [pod] + properties: + source: + type: object + oneOf: + - required: + - pod + - required: + - ip + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + destination: + type: object + oneOf: + - required: + - pod + - required: + - ip + - required: + - service + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + service: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + packet: + type: object + properties: + ipFamily: + type: string + enum: [IPv4, IPv6] + default: IPv4 + protocol: + x-kubernetes-int-or-string: true + transportHeader: + type: object + properties: + udp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + tcp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + flags: + type: integer + minimum: 0 + maximum: 255 + timeout: + type: integer + minimum: 1 + maximum: 300 + default: 60 + captureConfig: + type: object + anyOf: + - properties: + firstN: + required: [number] + properties: + firstN: + type: object + properties: + number: + type: integer + format: int32 + fileServer: + type: object + properties: + url: + type: string + pattern: 's{0,1}ftps{0,1}:\/\/[\w-_./]+:\d+' + status: + type: object + properties: + reason: + type: string + phase: + type: string + startTime: + type: string + numCapturedPackets: + type: integer + packetsFilePath: + type: string + subresources: + status: {} + scope: Cluster + names: + plural: packetcaptures + singular: packetcapture + kind: PacketCapture + shortNames: + - pcap + --- # Source: antrea/crds/supportbundlecollection.yaml apiVersion: apiextensions.k8s.io/v1 @@ -3783,9 +3979,12 @@ data: # be enabled, otherwise this flag will not take effect. # CleanupStaleUDPSvcConntrack: true - # Enable traceflow which provides packet tracing feature to diagnose network issue. + # Enable Traceflow which provides packet tracing feature to diagnose network issue. # Traceflow: true + # Enable PacketCapture feature which supports capturing packets to diagnose network issues. 
+ # PacketCapture: false + # Enable NodePortLocal feature to make the Pods reachable externally through NodePort # NodePortLocal: true @@ -4367,6 +4566,14 @@ rules: - pods/status verbs: - patch + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - antrea-packetcapture-fileserver-auth + verbs: + - get - apiGroups: - "" resources: @@ -4488,6 +4695,20 @@ rules: - patch - create - delete + - apiGroups: + - crd.antrea.io + resources: + - packetcaptures + verbs: + - get + - watch + - list + - apiGroups: + - crd.antrea.io + resources: + - packetcaptures/status + verbs: + - patch - apiGroups: - crd.antrea.io resources: @@ -5181,7 +5402,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 9e94f199d125877d889ba73e053c95b342e89323d0423cde074ae074df379494 + checksum/config: 7d8b0a065c3db85e34e127fdf38b820b32712657900e3f8fe2703d4310c40632 checksum/ipsec-secret: d0eb9c52d0cd4311b6d252a951126bf9bea27ec05590bed8a394f0f792dcb2a4 labels: app: antrea @@ -5463,7 +5684,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 9e94f199d125877d889ba73e053c95b342e89323d0423cde074ae074df379494 + checksum/config: 7d8b0a065c3db85e34e127fdf38b820b32712657900e3f8fe2703d4310c40632 labels: app: antrea component: antrea-controller diff --git a/build/yamls/antrea.yml b/build/yamls/antrea.yml index 2d44748eaff..6d2deae09db 100644 --- a/build/yamls/antrea.yml +++ b/build/yamls/antrea.yml @@ -2896,6 +2896,202 @@ spec: shortNames: - nlm +--- +# Source: antrea/crds/packetcapture.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: packetcaptures.crd.antrea.io + labels: + app: antrea +spec: + group: crd.antrea.io + versions: + - name: v1alpha1 + served: true + storage: true + additionalPrinterColumns: + - jsonPath: .status.phase + description: The phase of the PacketCapture. + name: Phase + type: string + - jsonPath: .spec.source.pod + description: The name of the source Pod. + name: Source-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.pod + description: The name of the destination Pod. + name: Destination-Pod + type: string + priority: 10 + - jsonPath: .spec.destination.ip + description: The IP address of the destination. + name: Destination-IP + type: string + priority: 10 + - jsonPath: .spec.timeout + description: Timeout in seconds. 
+ name: Timeout + type: integer + priority: 10 + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + spec: + type: object + required: + - source + - captureConfig + - destination + anyOf: + - properties: + source: + required: [pod] + - properties: + destination: + required: [pod] + properties: + source: + type: object + oneOf: + - required: + - pod + - required: + - ip + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + destination: + type: object + oneOf: + - required: + - pod + - required: + - ip + - required: + - service + properties: + pod: + type: object + properties: + namespace: + type: string + name: + type: string + service: + type: object + properties: + namespace: + type: string + name: + type: string + ip: + type: string + oneOf: + - format: ipv4 + - format: ipv6 + packet: + type: object + properties: + ipFamily: + type: string + enum: [IPv4, IPv6] + default: IPv4 + protocol: + x-kubernetes-int-or-string: true + transportHeader: + type: object + properties: + udp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + tcp: + type: object + properties: + srcPort: + type: integer + minimum: 1 + maximum: 65535 + dstPort: + type: integer + minimum: 1 + maximum: 65535 + flags: + type: integer + minimum: 0 + maximum: 255 + timeout: + type: integer + minimum: 1 + maximum: 300 + default: 60 + captureConfig: + type: object + anyOf: + - properties: + firstN: + required: [number] + properties: + firstN: + type: object + properties: + number: + type: integer + format: int32 + fileServer: + type: object + properties: + url: + type: string + pattern: 's{0,1}ftps{0,1}:\/\/[\w-_./]+:\d+' + status: + type: object + properties: + reason: + type: string + phase: + type: string + startTime: + type: string + numCapturedPackets: + type: integer + packetsFilePath: + type: string + subresources: + status: {} + scope: Cluster + names: + plural: packetcaptures + singular: packetcapture + kind: PacketCapture + shortNames: + - pcap + --- # Source: antrea/crds/supportbundlecollection.yaml apiVersion: apiextensions.k8s.io/v1 @@ -3770,9 +3966,12 @@ data: # be enabled, otherwise this flag will not take effect. # CleanupStaleUDPSvcConntrack: true - # Enable traceflow which provides packet tracing feature to diagnose network issue. + # Enable Traceflow which provides packet tracing feature to diagnose network issue. # Traceflow: true + # Enable PacketCapture feature which supports capturing packets to diagnose network issues. 
+ # PacketCapture: false + # Enable NodePortLocal feature to make the Pods reachable externally through NodePort # NodePortLocal: true @@ -4354,6 +4553,14 @@ rules: - pods/status verbs: - patch + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - antrea-packetcapture-fileserver-auth + verbs: + - get - apiGroups: - "" resources: @@ -4475,6 +4682,20 @@ rules: - patch - create - delete + - apiGroups: + - crd.antrea.io + resources: + - packetcaptures + verbs: + - get + - watch + - list + - apiGroups: + - crd.antrea.io + resources: + - packetcaptures/status + verbs: + - patch - apiGroups: - crd.antrea.io resources: @@ -5168,7 +5389,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 8256bc0d365d60f16d0bdef14cf674be49d525ee1cd921e531f8bf7e521e1421 + checksum/config: 2b4d82bcb825d50926115bad2125097f85aed424bfc49147444314cad8b7826a labels: app: antrea component: antrea-agent @@ -5404,7 +5625,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 8256bc0d365d60f16d0bdef14cf674be49d525ee1cd921e531f8bf7e521e1421 + checksum/config: 2b4d82bcb825d50926115bad2125097f85aed424bfc49147444314cad8b7826a labels: app: antrea component: antrea-controller diff --git a/cmd/antrea-agent/agent.go b/cmd/antrea-agent/agent.go index 60bd61503aa..43a6b9226d1 100644 --- a/cmd/antrea-agent/agent.go +++ b/cmd/antrea-agent/agent.go @@ -45,6 +45,7 @@ import ( "antrea.io/antrea/pkg/agent/controller/networkpolicy" "antrea.io/antrea/pkg/agent/controller/networkpolicy/l7engine" "antrea.io/antrea/pkg/agent/controller/noderoute" + "antrea.io/antrea/pkg/agent/controller/packetcapture" "antrea.io/antrea/pkg/agent/controller/serviceexternalip" "antrea.io/antrea/pkg/agent/controller/traceflow" "antrea.io/antrea/pkg/agent/controller/trafficcontrol" @@ -117,6 +118,7 @@ func run(o *Options) error { informerFactory := informers.NewSharedInformerFactoryWithOptions(k8sClient, informerDefaultResync, informers.WithTransform(k8s.NewTrimmer(k8s.TrimNode))) crdInformerFactory := crdinformers.NewSharedInformerFactoryWithOptions(crdClient, informerDefaultResync, crdinformers.WithTransform(k8s.NewTrimmer())) traceflowInformer := crdInformerFactory.Crd().V1beta1().Traceflows() + packetCaptureInformer := crdInformerFactory.Crd().V1alpha1().PacketCaptures() egressInformer := crdInformerFactory.Crd().V1beta1().Egresses() externalIPPoolInformer := crdInformerFactory.Crd().V1beta1().ExternalIPPools() trafficControlInformer := crdInformerFactory.Crd().V1alpha2().TrafficControls() @@ -189,6 +191,7 @@ func run(o *Options) error { enableMulticlusterGW, groupIDAllocator, *o.config.EnablePrometheusMetrics, + features.DefaultFeatureGate.Enabled(features.PacketCapture), o.config.PacketInRate, ) @@ -650,6 +653,20 @@ func run(o *Options) error { o.enableAntreaProxy) } + var packetCaptureController *packetcapture.Controller + if features.DefaultFeatureGate.Enabled(features.PacketCapture) { + packetCaptureController = packetcapture.NewPacketCaptureController( + k8sClient, + crdClient, + serviceInformer, + endpointsInformer, + packetCaptureInformer, + ofClient, + ifaceStore, + nodeConfig, + ) + } + if err := antreaClientProvider.RunOnce(); err != nil { return err } @@ -808,6 +825,10 @@ func run(o *Options) error { go 
traceflowController.Run(stopCh)
 	}
 
+	if features.DefaultFeatureGate.Enabled(features.PacketCapture) {
+		go packetCaptureController.Run(stopCh)
+	}
+
 	if o.enableAntreaProxy {
 		go proxier.GetProxyProvider().Run(stopCh)
 
diff --git a/docs/api.md b/docs/api.md
index 4547cb66738..5000c95aad9 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -40,6 +40,7 @@ These are the CRDs currently available in `crd.antrea.io`.
 | `Group` | v1beta1 | v1.13.0 | N/A | N/A |
 | `NetworkPolicy` | v1beta1 | v1.13.0 | N/A | N/A |
 | `NodeLatencyMonitor` | v1alpha1 | v2.1.0 | N/A | N/A |
+| `PacketCapture` | v1alpha1 | v2.2 | N/A | N/A |
 | `SupportBundleCollection` | v1alpha1 | v1.10.0 | N/A | N/A |
 | `Tier` | v1beta1 | v1.13.0 | N/A | N/A |
 | `Traceflow` | v1beta1 | v1.13.0 | N/A | N/A |
diff --git a/docs/feature-gates.md b/docs/feature-gates.md
index 41da9eaac1a..ee8f5d78afa 100644
--- a/docs/feature-gates.md
+++ b/docs/feature-gates.md
@@ -62,6 +62,7 @@ edit the Agent configuration in the
 | `L7FlowExporter` | Agent | `false` | Alpha | v1.15 | N/A | N/A | Yes | |
 | `BGPPolicy` | Agent | `false` | Alpha | v2.1 | N/A | N/A | No | |
 | `NodeLatencyMonitor` | Agent | `false` | Alpha | v2.1 | N/A | N/A | No | |
+| `PacketCapture` | Agent | `false` | Alpha | v2.2 | N/A | N/A | No | |
 
 ## Description and Requirements of Features
 
@@ -531,3 +532,8 @@ experienced by Pod traffic.
 #### Requirements for this Feature
 
 - Linux Nodes only - the feature has not been tested on Windows Nodes yet.
+
+### PacketCapture
+
+`PacketCapture` allows users to capture live traffic packets from specified flows for further analysis.
+Refer to this [document](packetcapture-guide.md) for more information.
diff --git a/docs/packetcapture-guide.md b/docs/packetcapture-guide.md
new file mode 100644
index 00000000000..2ffafe91d1d
--- /dev/null
+++ b/docs/packetcapture-guide.md
@@ -0,0 +1,84 @@
+# Packet Capture User Guide
+
+Starting with Antrea v2.2, Antrea supports packet capture for network diagnosis.
+It can capture a specified number of packets from real traffic and upload them to a
+supported storage location. Users can create a PacketCapture CR to trigger
+packet capture on the target traffic flow.
+
+## Prerequisites
+
+The PacketCapture feature is disabled by default. To enable it, set the `PacketCapture`
+feature gate to `true` in the `antrea-config` ConfigMap for `antrea-agent`.
+
+```yaml
+  antrea-agent.conf: |
+    # FeatureGates is a map of feature names to bools that enable or disable experimental features.
+    featureGates:
+      # Enable the PacketCapture feature which supports capturing packets to diagnose network issues.
+      PacketCapture: true
+```
+
+## Start a new PacketCapture
+
+When starting a new packet capture, you can provide the following information to identify
+the target traffic flow:
+
+* Source Pod
+* Destination Pod, Service or IP address
+* Transport protocol (TCP/UDP/ICMP)
+* Transport ports
+
+You can start a new packet capture by creating a PacketCapture CR. An optional `fileServer` field can be
+specified to store the generated packets file. Before creating such a CR, a Secret named
+`antrea-packetcapture-fileserver-auth` located in the `kube-system` Namespace must exist and carry the
+authentication information for the target file server.
+You can create the Secret with the following `kubectl` command:
+
+```bash
+kubectl create secret generic antrea-packetcapture-fileserver-auth -n kube-system --from-literal=username='' --from-literal=password=''
+```
+
+If no `fileServer` field is present in the CR, the captured packets file will only exist in the
+antrea-agent Pod that performed the capture.
+
+Here is an example of a `PacketCapture` CR:
+
+```yaml
+apiVersion: crd.antrea.io/v1alpha1
+kind: PacketCapture
+metadata:
+  name: pc-test
+spec:
+  fileServer:
+    url: sftp://127.0.0.1:22/upload # define your own sftp url here.
+  timeout: 60
+  captureConfig:
+    firstN:
+      number: 5
+  source:
+    pod:
+      namespace: default
+      name: frontend
+  destination:
+    pod:
+      namespace: default
+      name: backend
+    # Destination can also be an IP address ('ip' field) or a Service name ('service' field); the 3 choices are mutually exclusive.
+  packet:
+    ipFamily: IPv4
+    protocol: TCP # numerical values are also supported, e.g. TCP (6), UDP (17), ICMP (1)
+    transportHeader:
+      tcp:
+        dstPort: 8080 # Destination port needs to be set when the protocol is TCP or UDP.
+status:
+  numCapturedPackets: 5
+  # path format: <antrea-agent Pod name>:<file path in the Pod>. If this file was uploaded to the target file server, the file name will be <PacketCapture UID>.pcapng.
+  packetsFilePath: antrea-agent-z4zgw:/tmp/antrea/packetcapture/packets/70bedae9-ba65-4f9f-bfac-59c1332e8132.pcapng
+```
+
+The CR above starts a new packet capture of TCP flows from a Pod named `frontend`
+to port 8080 of a Pod named `backend`. It will capture the first 5 packets that
+meet this criterion and upload them to the specified sftp server. Users can download
+the packets file from the sftp server (or from the local antrea-agent Pod) and analyze
+its contents with network diagnostic tools like Wireshark or tcpdump.
+
+Currently, at most `15` PacketCapture sessions can run concurrently.
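+
+To retrieve the captured packets when no file server was configured (or to double check the result
+locally), you can copy the pcapng file out of the antrea-agent Pod. The commands below are a rough
+sketch: the Pod name and file path are the illustrative values from the `status` example above, and
+the `antrea-agent` container name assumes a standard Antrea deployment in the `kube-system` Namespace.
+
+```bash
+# Check the capture result.
+kubectl get packetcapture pc-test
+# Copy the pcapng file from the antrea-agent Pod and inspect it locally.
+kubectl cp kube-system/antrea-agent-z4zgw:/tmp/antrea/packetcapture/packets/70bedae9-ba65-4f9f-bfac-59c1332e8132.pcapng pc-test.pcapng -c antrea-agent
+tcpdump -nr pc-test.pcapng
+```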
diff --git a/go.mod b/go.mod index 13d9568c55b..329a186c491 100644 --- a/go.mod +++ b/go.mod @@ -27,6 +27,7 @@ require ( github.com/go-logr/logr v1.4.2 github.com/gogo/protobuf v1.3.2 github.com/google/btree v1.1.3 + github.com/google/gopacket v1.1.19 github.com/google/uuid v1.6.0 github.com/hashicorp/memberlist v0.5.1 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.3.0 diff --git a/go.sum b/go.sum index 03a2c9dc81d..8749ab87591 100644 --- a/go.sum +++ b/go.sum @@ -366,6 +366,8 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= @@ -858,6 +860,8 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -1029,6 +1033,7 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= diff --git a/hack/.notableofcontents b/hack/.notableofcontents index 476abf7da99..5b29b5dea1b 100644 --- a/hack/.notableofcontents +++ b/hack/.notableofcontents @@ -38,6 +38,7 @@ docs/noencap-hybrid-modes.md docs/octant-plugin-installation.md docs/os-issues.md docs/ovs-offload.md +docs/packetcapture-guide.md docs/prometheus-integration.md docs/secondary-network.md docs/security.md diff --git a/pkg/agent/controller/networkpolicy/audit_logging.go 
b/pkg/agent/controller/networkpolicy/audit_logging.go index 26dce3b746a..114e866fa9d 100644 --- a/pkg/agent/controller/networkpolicy/audit_logging.go +++ b/pkg/agent/controller/networkpolicy/audit_logging.go @@ -233,7 +233,7 @@ func getNetworkPolicyInfo(pktIn *ofctrl.PacketIn, packet *binding.Packet, c *Con // Get disposition Allow or Drop. match = getMatchRegField(matchers, openflow.APDispositionField) - disposition, err := getInfoInReg(match, openflow.APDispositionField.GetRange().ToNXRange()) + disposition, err := openflow.GetInfoInReg(match, openflow.APDispositionField.GetRange().ToNXRange()) if err != nil { return fmt.Errorf("received error while unloading disposition from reg: %v", err) } @@ -241,7 +241,7 @@ func getNetworkPolicyInfo(pktIn *ofctrl.PacketIn, packet *binding.Packet, c *Con // Get layer 7 NetworkPolicy redirect action, if traffic is redirected, disposition log should be overwritten. if match = getMatchRegField(matchers, openflow.L7NPRegField); match != nil { - l7NPRegVal, err := getInfoInReg(match, openflow.L7NPRegField.GetRange().ToNXRange()) + l7NPRegVal, err := openflow.GetInfoInReg(match, openflow.L7NPRegField.GetRange().ToNXRange()) if err != nil { return fmt.Errorf("received error while unloading l7 NP redirect value from reg: %v", err) } @@ -252,7 +252,7 @@ func getNetworkPolicyInfo(pktIn *ofctrl.PacketIn, packet *binding.Packet, c *Con // Get K8s default deny action, if traffic is default deny, no conjunction could be matched. if match = getMatchRegField(matchers, openflow.APDenyRegMark.GetField()); match != nil { - apDenyRegVal, err := getInfoInReg(match, openflow.APDenyRegMark.GetField().GetRange().ToNXRange()) + apDenyRegVal, err := openflow.GetInfoInReg(match, openflow.APDenyRegMark.GetField().GetRange().ToNXRange()) if err != nil { return fmt.Errorf("received error while unloading deny mark from reg: %v", err) } @@ -269,7 +269,7 @@ func getNetworkPolicyInfo(pktIn *ofctrl.PacketIn, packet *binding.Packet, c *Con match = getMatch(matchers, tableID, disposition) // Get NetworkPolicy full name and OF priority of the conjunction. - conjID, err := getInfoInReg(match, nil) + conjID, err := openflow.GetInfoInReg(match, nil) if err != nil { return fmt.Errorf("received error while unloading conjunction id from reg: %v", err) } diff --git a/pkg/agent/controller/networkpolicy/packetin.go b/pkg/agent/controller/networkpolicy/packetin.go index ac7de7f95ea..1c1fd2e7403 100644 --- a/pkg/agent/controller/networkpolicy/packetin.go +++ b/pkg/agent/controller/networkpolicy/packetin.go @@ -21,7 +21,6 @@ import ( "net/netip" "time" - "antrea.io/libOpenflow/openflow15" "antrea.io/ofnet/ofctrl" "github.com/vmware/go-ipfix/pkg/registry" "k8s.io/klog/v2" @@ -91,18 +90,6 @@ func getMatch(matchers *ofctrl.Matchers, tableID uint8, disposition uint32) *ofc return nil } -// getInfoInReg unloads and returns data stored in the match field. 
-func getInfoInReg(regMatch *ofctrl.MatchField, rng *openflow15.NXRange) (uint32, error) { - regValue, ok := regMatch.GetValue().(*ofctrl.NXRegister) - if !ok { - return 0, errors.New("register value cannot be retrieved") - } - if rng != nil { - return ofctrl.GetUint32ValueWithRange(regValue.Data, rng), nil - } - return regValue.Data, nil -} - func (c *Controller) storeDenyConnection(pktIn *ofctrl.PacketIn) error { packet, err := binding.ParsePacketIn(pktIn) if err != nil { @@ -147,7 +134,7 @@ func (c *Controller) storeDenyConnection(pktIn *ofctrl.PacketIn) error { tableID := getPacketInTableID(pktIn) // Get disposition Allow, Drop or Reject match = getMatchRegField(matchers, openflow.APDispositionField) - id, err := getInfoInReg(match, openflow.APDispositionField.GetRange().ToNXRange()) + id, err := openflow.GetInfoInReg(match, openflow.APDispositionField.GetRange().ToNXRange()) if err != nil { return fmt.Errorf("error when getting disposition from reg: %v", err) } @@ -156,7 +143,7 @@ func (c *Controller) storeDenyConnection(pktIn *ofctrl.PacketIn) error { // Set match to corresponding ingress/egress reg according to disposition match = getMatch(matchers, tableID, id) if match != nil { - ruleID, err := getInfoInReg(match, nil) + ruleID, err := openflow.GetInfoInReg(match, nil) if err != nil { return fmt.Errorf("error when obtaining rule id from reg: %v", err) } @@ -223,7 +210,7 @@ func getPacketInTableID(pktIn *ofctrl.PacketIn) uint8 { tableID := pktIn.TableId matchers := pktIn.GetMatches() if match := getMatchRegField(matchers, openflow.PacketInTableField); match != nil { - tableVal, err := getInfoInReg(match, openflow.PacketInTableField.GetRange().ToNXRange()) + tableVal, err := openflow.GetInfoInReg(match, openflow.PacketInTableField.GetRange().ToNXRange()) if err == nil { return uint8(tableVal) } else { diff --git a/pkg/agent/controller/networkpolicy/reject.go b/pkg/agent/controller/networkpolicy/reject.go index 75de5abd6b5..4c2fcc95635 100644 --- a/pkg/agent/controller/networkpolicy/reject.go +++ b/pkg/agent/controller/networkpolicy/reject.go @@ -142,7 +142,7 @@ func (c *Controller) rejectRequest(pktIn *ofctrl.PacketIn) error { if c.antreaProxyEnabled { matches := pktIn.GetMatches() if match := getMatchRegField(matches, openflow.ServiceEPStateField); match != nil { - svcEpstate, err := getInfoInReg(match, openflow.ServiceEPStateField.GetRange().ToNXRange()) + svcEpstate, err := openflow.GetInfoInReg(match, openflow.ServiceEPStateField.GetRange().ToNXRange()) if err != nil { return false } @@ -343,7 +343,7 @@ func parseFlexibleIPAMStatus(pktIn *ofctrl.PacketIn, nodeConfig *config.NodeConf // The generated reject packet should have same ctZone with the incoming packet, otherwise the conntrack cannot work properly. matches := pktIn.GetMatches() if match := getMatchRegField(matches, openflow.CtZoneField); match != nil { - ctZone, err = getInfoInReg(match, openflow.CtZoneField.GetRange().ToNXRange()) + ctZone, err = openflow.GetInfoInReg(match, openflow.CtZoneField.GetRange().ToNXRange()) if err != nil { return false, false, 0, err } diff --git a/pkg/agent/controller/packetcapture/packetcapture_controller.go b/pkg/agent/controller/packetcapture/packetcapture_controller.go new file mode 100644 index 00000000000..736ff5c45f0 --- /dev/null +++ b/pkg/agent/controller/packetcapture/packetcapture_controller.go @@ -0,0 +1,767 @@ +// Copyright 2024 Antrea Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package packetcapture + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "os" + "path/filepath" + "sync" + "time" + + "antrea.io/libOpenflow/protocol" + "github.com/google/gopacket/layers" + "github.com/google/gopacket/pcapgo" + "github.com/spf13/afero" + "golang.org/x/time/rate" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + "antrea.io/antrea/pkg/agent/config" + "antrea.io/antrea/pkg/agent/interfacestore" + "antrea.io/antrea/pkg/agent/openflow" + "antrea.io/antrea/pkg/agent/util" + crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" + clientsetversioned "antrea.io/antrea/pkg/client/clientset/versioned" + crdinformers "antrea.io/antrea/pkg/client/informers/externalversions/crd/v1alpha1" + crdlisters "antrea.io/antrea/pkg/client/listers/crd/v1alpha1" + binding "antrea.io/antrea/pkg/ovs/openflow" + "antrea.io/antrea/pkg/util/ftp" +) + +type StorageProtocolType string + +const ( + sftpProtocol StorageProtocolType = "sftp" +) + +const ( + controllerName = "AntreaAgentPacketCaptureController" + resyncPeriod time.Duration = 0 + + minRetryDelay = 5 * time.Second + maxRetryDelay = 300 * time.Second + + defaultWorkers = 4 + + // 4bits in ovs reg4 + minTagNum uint8 = 1 + maxTagNum uint8 = 15 + + // reason for timeout + captureTimeoutReason = "PacketCapture timeout" + defaultTimeoutDuration = time.Second * time.Duration(crdv1alpha1.DefaultPacketCaptureTimeout) + timeoutCheckInterval = 10 * time.Second + + captureStatusUpdatePeriod = 10 * time.Second + + // PacketCapture uses a dedicated secret object to store auth info for file server. + // #nosec G101 + fileServerAuthSecretName = "antrea-packetcapture-fileserver-auth" + fileServerAuthSecretNamespace = "kube-system" +) + +var ( + packetDirectory = getPacketDirectory() + defaultFS = afero.NewOsFs() +) + +func getPacketDirectory() string { + return filepath.Join(os.TempDir(), "antrea", "packetcapture", "packets") +} + +type packetCaptureState struct { + // name is the PacketCapture name + name string + // tag is a node scope unique id for the PacketCapture. It will be written into ovs reg and parsed in packetIn handler + // to match with existing PacketCapture. + tag uint8 + // shouldCapturePackets means this node will be responsible for doing the actual packet capture job. + shouldCapturePackets bool + // numCapturedPackets record how many packets have been captured. Due to the RateLimiter, + // this maybe not be realtime data. + numCapturedPackets int32 + // maxNumCapturedPackets is target number limit for our capture. 
If numCapturedPackets=maxNumCapturedPackets, means + // the PacketCapture is finished successfully. + maxNumCapturedPackets int32 + // updateRateLimiter controls the frequency of the updates to PacketCapture status. + updateRateLimiter *rate.Limiter + // pcapngFile is the file object for the packet file. + pcapngFile afero.File + // pcapngWriter is the writer for the packet file. + pcapngWriter *pcapgo.NgWriter +} + +type Controller struct { + kubeClient clientset.Interface + crdClient clientsetversioned.Interface + serviceLister corelisters.ServiceLister + serviceListerSynced cache.InformerSynced + endpointLister corelisters.EndpointsLister + endpointSynced cache.InformerSynced + packetCaptureInformer crdinformers.PacketCaptureInformer + packetCaptureLister crdlisters.PacketCaptureLister + packetCaptureSynced cache.InformerSynced + ofClient openflow.Client + interfaceStore interfacestore.InterfaceStore + nodeConfig *config.NodeConfig + queue workqueue.TypedRateLimitingInterface[string] + runningPacketCapturesMutex sync.RWMutex + runningPacketCaptures map[uint8]*packetCaptureState + sftpUploader ftp.Uploader +} + +func NewPacketCaptureController( + kubeClient clientset.Interface, + crdClient clientsetversioned.Interface, + serviceInformer coreinformers.ServiceInformer, + endpointInformer coreinformers.EndpointsInformer, + packetCaptureInformer crdinformers.PacketCaptureInformer, + client openflow.Client, + interfaceStore interfacestore.InterfaceStore, + nodeConfig *config.NodeConfig, +) *Controller { + c := &Controller{ + kubeClient: kubeClient, + crdClient: crdClient, + packetCaptureInformer: packetCaptureInformer, + packetCaptureLister: packetCaptureInformer.Lister(), + packetCaptureSynced: packetCaptureInformer.Informer().HasSynced, + ofClient: client, + interfaceStore: interfaceStore, + nodeConfig: nodeConfig, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](minRetryDelay, maxRetryDelay), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "packetcapture"}, + ), + runningPacketCaptures: make(map[uint8]*packetCaptureState), + sftpUploader: &ftp.SftpUploader{}, + } + + packetCaptureInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addPacketCapture, + UpdateFunc: c.updatePacketCapture, + DeleteFunc: c.deletePacketCapture, + }, resyncPeriod) + + c.ofClient.RegisterPacketInHandler(uint8(openflow.PacketInCategoryPacketCapture), c) + + c.serviceLister = serviceInformer.Lister() + c.serviceListerSynced = serviceInformer.Informer().HasSynced + c.endpointLister = endpointInformer.Lister() + c.endpointSynced = endpointInformer.Informer().HasSynced + return c +} + +func (c *Controller) enqueuePacketCapture(pc *crdv1alpha1.PacketCapture) { + c.queue.Add(pc.Name) +} + +// Run will create defaultWorkers workers (go routines) which will process the PacketCapture events from the +// workqueue. +func (c *Controller) Run(stopCh <-chan struct{}) { + defer c.queue.ShutDown() + + klog.InfoS("Starting packetcapture controller", "name", controllerName) + defer klog.InfoS("Shutting down packetcapture controller", "name", controllerName) + + cacheSynced := []cache.InformerSynced{c.packetCaptureSynced, c.serviceListerSynced, c.endpointSynced} + if !cache.WaitForNamedCacheSync(controllerName, stopCh, cacheSynced...) { + return + } + + // Clean up existing packets files first. A successful PacketCapture will upload them to the target file server. 
+ // others are useless once we restart the controller. + if err := defaultFS.RemoveAll(packetDirectory); err != nil { + klog.ErrorS(err, "Remove packets dir error", "directory", packetDirectory) + } + err := defaultFS.MkdirAll(packetDirectory, 0755) + if err != nil { + klog.ErrorS(err, "Couldn't create directory for storing captured packets", "directory", packetDirectory) + return + } + + go wait.Until(c.checkPacketCaptureTimeout, timeoutCheckInterval, stopCh) + + for i := 0; i < defaultWorkers; i++ { + go wait.Until(c.worker, time.Second, stopCh) + } + <-stopCh +} + +func (c *Controller) checkPacketCaptureTimeout() { + c.runningPacketCapturesMutex.RLock() + pcs := make([]string, 0, len(c.runningPacketCaptures)) + for _, pcState := range c.runningPacketCaptures { + pcs = append(pcs, pcState.name) + } + c.runningPacketCapturesMutex.RUnlock() + for _, pcName := range pcs { + // Re-post all running PacketCapture requests to the work queue to + // be processed and checked for timeout. + c.queue.Add(pcName) + } +} + +func (c *Controller) addPacketCapture(obj interface{}) { + pc := obj.(*crdv1alpha1.PacketCapture) + klog.InfoS("Processing PacketCapture ADD event", "name", pc.Name) + c.enqueuePacketCapture(pc) +} + +func (c *Controller) updatePacketCapture(_, obj interface{}) { + pc := obj.(*crdv1alpha1.PacketCapture) + klog.InfoS("Processing PacketCapture UPDATE event", "name", pc.Name) + c.enqueuePacketCapture(pc) +} + +func (c *Controller) deletePacketCapture(obj interface{}) { + pc := obj.(*crdv1alpha1.PacketCapture) + klog.InfoS("Processing PacketCapture DELETE event", "name", pc.Name) + c.enqueuePacketCapture(pc) +} + +func uidToPath(uid string) string { + return filepath.Join(packetDirectory, uid+".pcapng") +} + +func (c *Controller) worker() { + for c.processPacketCaptureItem() { + } +} + +func (c *Controller) processPacketCaptureItem() bool { + key, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(key) + if err := c.syncPacketCapture(key); err == nil { + c.queue.Forget(key) + } else { + klog.ErrorS(err, "Error syncing PacketCapture, exiting", "key", key) + } + return true +} + +func (c *Controller) cleanupPacketCapture(pcName string) { + pcState := c.deletePacketCaptureState(pcName) + if pcState != nil { + err := c.ofClient.UninstallPacketCaptureFlows(pcState.tag) + if err != nil { + klog.ErrorS(err, "Error cleaning up flows for PacketCapture", "name", pcName) + } + if pcState.pcapngFile != nil { + if err := pcState.pcapngFile.Close(); err != nil { + klog.ErrorS(err, "Error closing pcap file", "name", pcName) + } + if err := defaultFS.Remove(pcState.pcapngFile.Name()); err != nil { + klog.ErrorS(err, "Error deleting pcap file", "name", pcName) + } + } + } +} + +func (c *Controller) deletePacketCaptureState(pcName string) *packetCaptureState { + c.runningPacketCapturesMutex.Lock() + defer c.runningPacketCapturesMutex.Unlock() + + for tag, state := range c.runningPacketCaptures { + if state.name == pcName { + delete(c.runningPacketCaptures, tag) + return state + } + } + return nil +} + +func getPacketFileAndWriter(uid string) (afero.File, *pcapgo.NgWriter, error) { + filePath := uidToPath(uid) + var file afero.File + if _, err := os.Stat(filePath); err == nil { + return nil, nil, fmt.Errorf("packet file already exists. 
this may be due to an unexpected termination") + } else if os.IsNotExist(err) { + file, err = defaultFS.Create(filePath) + if err != nil { + return nil, nil, fmt.Errorf("failed to create pcapng file: %w", err) + } + } else { + return nil, nil, fmt.Errorf("couldn't check if the file exists: %w", err) + } + writer, err := pcapgo.NewNgWriter(file, layers.LinkTypeEthernet) + if err != nil { + return nil, nil, fmt.Errorf("couldn't initialize pcap writer: %w", err) + } + return file, writer, nil +} + +func (c *Controller) startPacketCapture(pc *crdv1alpha1.PacketCapture, pcState *packetCaptureState) error { + var err error + defer func() { + if err != nil { + c.cleanupPacketCapture(pc.Name) + c.updatePacketCaptureStatus(pc, crdv1alpha1.PacketCaptureFailed, fmt.Sprintf("Node: %s, Error: %+v", c.nodeConfig.Name, err), 0) + } + }() + receiverOnly := false + senderOnly := false + var pod, ns string + + if pc.Spec.Source.Pod != nil { + pod = pc.Spec.Source.Pod.Name + ns = pc.Spec.Source.Pod.Namespace + if pc.Spec.Destination.Pod == nil { + senderOnly = true + } + } else { + pod = pc.Spec.Destination.Pod.Name + ns = pc.Spec.Destination.Pod.Namespace + receiverOnly = true + } + + podInterfaces := c.interfaceStore.GetContainerInterfacesByPod(pod, ns) + pcState.shouldCapturePackets = len(podInterfaces) > 0 + if !pcState.shouldCapturePackets { + return nil + } + var senderPacket *binding.Packet + var endpointPackets []binding.Packet + var ofPort uint32 + senderPacket, err = c.preparePacket(pc, podInterfaces[0], receiverOnly) + if err != nil { + return err + } + ofPort = uint32(podInterfaces[0].OFPort) + klog.V(2).InfoS("PacketCapture sender packet", "packet", *senderPacket) + if senderOnly && pc.Spec.Destination.Service != nil { + endpointPackets, err = c.genEndpointMatchPackets(pc) + if err != nil { + return fmt.Errorf("couldn't generate endpoint match packets: %w", err) + } + } + + f := func() error { + c.runningPacketCapturesMutex.Lock() + defer c.runningPacketCapturesMutex.Unlock() + pcState.maxNumCapturedPackets = pc.Spec.CaptureConfig.FirstN.Number + file, writer, err := getPacketFileAndWriter(string(pc.UID)) + if err != nil { + return err + } + pcState.shouldCapturePackets = len(podInterfaces) > 0 + pcState.pcapngFile = file + pcState.pcapngWriter = writer + pcState.updateRateLimiter = rate.NewLimiter(rate.Every(captureStatusUpdatePeriod), 1) + c.runningPacketCaptures[pcState.tag] = pcState + return nil + } + err = f() + if err != nil { + return err + } + + timeout := crdv1alpha1.DefaultPacketCaptureTimeout + if pc.Spec.Timeout != nil { + timeout = *pc.Spec.Timeout + } + klog.V(2).InfoS("Installing flow entries for PacketCapture", "name", pc.Name) + err = c.ofClient.InstallPacketCaptureFlows(pcState.tag, receiverOnly, senderPacket, endpointPackets, ofPort, timeout) + if err != nil { + klog.ErrorS(err, "Install flow entries failed for the PacketCapture", "name", pc.Name) + } + return err +} + +// genEndpointMatchPackets generates match packets (with destination Endpoint's IP/Port info) besides the normal match packet. +// these match packets will help the pipeline to capture the pod -> svc traffic. +// TODO: 1. support name based port name 2. 
dual-stack support +func (c *Controller) genEndpointMatchPackets(pc *crdv1alpha1.PacketCapture) ([]binding.Packet, error) { + var port int32 + if pc.Spec.Packet.TransportHeader.TCP != nil && pc.Spec.Packet.TransportHeader.TCP.DstPort != nil { + port = *pc.Spec.Packet.TransportHeader.TCP.DstPort + } else if pc.Spec.Packet.TransportHeader.UDP != nil && pc.Spec.Packet.TransportHeader.UDP.DstPort != nil { + port = *pc.Spec.Packet.TransportHeader.UDP.DstPort + } + var packets []binding.Packet + dstSvc, err := c.serviceLister.Services(pc.Spec.Destination.Service.Namespace).Get(pc.Spec.Destination.Service.Name) + if err != nil { + return nil, err + } + for _, item := range dstSvc.Spec.Ports { + if item.Port == port { + if item.TargetPort.Type == intstr.Int { + port = item.TargetPort.IntVal + } + } + } + dstEndpoint, err := c.endpointLister.Endpoints(pc.Spec.Destination.Service.Namespace).Get(pc.Spec.Destination.Service.Name) + if err != nil { + return nil, err + } + for _, item := range dstEndpoint.Subsets[0].Addresses { + packet := binding.Packet{} + packet.DestinationIP = net.ParseIP(item.IP) + if port != 0 { + packet.DestinationPort = uint16(port) + } + packet.IPProto = parseTargetProto(pc.Spec.Packet) + packets = append(packets, packet) + } + return packets, nil +} + +func (c *Controller) preparePacket(pc *crdv1alpha1.PacketCapture, intf *interfacestore.InterfaceConfig, receiverOnly bool) (*binding.Packet, error) { + packet := new(binding.Packet) + if pc.Spec.Packet == nil { + pc.Spec.Packet = &crdv1alpha1.Packet{} + } + packet.IsIPv6 = pc.Spec.Packet.IPFamily == v1.IPv6Protocol + + if receiverOnly { + if pc.Spec.Source.IP != nil { + packet.SourceIP = net.ParseIP(*pc.Spec.Source.IP) + } + packet.DestinationMAC = intf.MAC + } else if pc.Spec.Destination.IP != nil { + packet.DestinationIP = net.ParseIP(*pc.Spec.Destination.IP) + } else if pc.Spec.Destination.Pod != nil { + dstPodInterfaces := c.interfaceStore.GetContainerInterfacesByPod(pc.Spec.Destination.Pod.Name, pc.Spec.Destination.Pod.Namespace) + if len(dstPodInterfaces) > 0 { + if packet.IsIPv6 { + packet.DestinationIP = dstPodInterfaces[0].GetIPv6Addr() + } else { + packet.DestinationIP = dstPodInterfaces[0].GetIPv4Addr() + } + } else { + dstPod, err := c.kubeClient.CoreV1().Pods(pc.Spec.Destination.Pod.Namespace).Get(context.TODO(), pc.Spec.Destination.Pod.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get the destination pod %s/%s: %v", pc.Spec.Destination.Pod.Namespace, pc.Spec.Destination.Pod.Name, err) + } + podIPs := make([]net.IP, len(dstPod.Status.PodIPs)) + for i, ip := range dstPod.Status.PodIPs { + podIPs[i] = net.ParseIP(ip.IP) + } + if packet.IsIPv6 { + packet.DestinationIP, _ = util.GetIPWithFamily(podIPs, util.FamilyIPv6) + } else { + packet.DestinationIP = util.GetIPv4Addr(podIPs) + } + } + if packet.DestinationIP == nil { + if packet.IsIPv6 { + return nil, errors.New("destination Pod does not have an IPv6 address") + } + return nil, errors.New("destination Pod does not have an IPv4 address") + } + } else if pc.Spec.Destination.Service != nil { + dstSvc, err := c.serviceLister.Services(pc.Spec.Destination.Service.Namespace).Get(pc.Spec.Destination.Service.Name) + if err != nil { + return nil, fmt.Errorf("failed to get the destination service %s/%s: %v", pc.Spec.Destination.Service.Namespace, pc.Spec.Destination.Service.Name, err) + } + if dstSvc.Spec.ClusterIP == "" { + return nil, errors.New("the destination Service does not have a ClusterIP") + } + + packet.DestinationIP = 
net.ParseIP(dstSvc.Spec.ClusterIP) + if !packet.IsIPv6 { + packet.DestinationIP = packet.DestinationIP.To4() + if packet.DestinationIP == nil { + return nil, errors.New("the destination Service does not have an IPv4 address") + } + } else if packet.DestinationIP.To4() != nil { + return nil, errors.New("the destination Service does not have an IPv6 address") + } + } else { + return nil, errors.New("the destination is not specified") + } + + if pc.Spec.Packet.TransportHeader.TCP != nil { + if pc.Spec.Packet.TransportHeader.TCP.SrcPort != nil { + packet.SourcePort = uint16(*pc.Spec.Packet.TransportHeader.TCP.SrcPort) + } + if pc.Spec.Packet.TransportHeader.TCP.DstPort != nil { + packet.DestinationPort = uint16(*pc.Spec.Packet.TransportHeader.TCP.DstPort) + } + if pc.Spec.Packet.TransportHeader.TCP.Flags != nil { + packet.TCPFlags = uint8(*pc.Spec.Packet.TransportHeader.TCP.Flags) + } + } else if pc.Spec.Packet.TransportHeader.UDP != nil { + if pc.Spec.Packet.TransportHeader.UDP.SrcPort != nil { + packet.SourcePort = uint16(*pc.Spec.Packet.TransportHeader.UDP.SrcPort) + } + if pc.Spec.Packet.TransportHeader.UDP.DstPort != nil { + packet.DestinationPort = uint16(*pc.Spec.Packet.TransportHeader.UDP.DstPort) + } + } + packet.IPProto = parseTargetProto(pc.Spec.Packet) + return packet, nil +} + +func parseTargetProto(packet *crdv1alpha1.Packet) uint8 { + inputProto := packet.Protocol + if inputProto == nil { + if packet.IPFamily == v1.IPv4Protocol { + return protocol.Type_ICMP + } else { + return protocol.Type_IPv6ICMP + } + } + if inputProto.Type == intstr.Int { + return uint8(inputProto.IntVal) + } + + if inputProto.StrVal == "TCP" { + return protocol.Type_TCP + } else if inputProto.StrVal == "ICMP" { + return protocol.Type_ICMP + } else if inputProto.StrVal == "UDP" { + return protocol.Type_UDP + } else { + return protocol.Type_IPv6ICMP + } +} + +func (c *Controller) syncPacketCapture(pcName string) error { + startTime := time.Now() + defer func() { + klog.V(4).InfoS("Finished syncing PacketCapture", "name", pcName, "startTime", time.Since(startTime)) + }() + + pc, err := c.packetCaptureLister.Get(pcName) + if err != nil { + if apierrors.IsNotFound(err) { + c.cleanupPacketCapture(pcName) + return nil + } + return err + } + + switch pc.Status.Phase { + case "": + err = c.initPacketCapture(pc) + case crdv1alpha1.PacketCaptureRunning: + err = c.checkPacketCaptureStatus(pc) + default: + c.cleanupPacketCapture(pcName) + } + return err + +} + +// Allocates a tag. If the PacketCapture request has been allocated with a tag +// already, 0 is returned. If number of existing PacketCapture requests reaches +// the upper limit, an error is returned. +func (c *Controller) allocateTag(name string) (uint8, error) { + c.runningPacketCapturesMutex.Lock() + defer c.runningPacketCapturesMutex.Unlock() + + for _, state := range c.runningPacketCaptures { + if state != nil && state.name == name { + // The packetcapture request has been processed already. 
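+			// Returning 0 lets the caller know that no new tag was allocated for this request.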
+			return 0, nil
+		}
+	}
+	for i := minTagNum; i <= maxTagNum; i++ {
+		if _, ok := c.runningPacketCaptures[i]; !ok {
+			c.runningPacketCaptures[i] = &packetCaptureState{
+				name: name,
+				tag:  i,
+			}
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("the number of on-going PacketCapture operations already reached the upper limit: %d", maxTagNum)
+}
+
+func (c *Controller) getUploaderByProtocol(protocol StorageProtocolType) (ftp.Uploader, error) {
+	if protocol == sftpProtocol {
+		return c.sftpUploader, nil
+	}
+	return nil, fmt.Errorf("unsupported protocol %s", protocol)
+}
+
+func (c *Controller) generatePacketsPathForServer(name string) string {
+	return name + ".pcapng"
+}
+
+func getDefaultFileServerAuth() *crdv1alpha1.BundleServerAuthConfiguration {
+	return &crdv1alpha1.BundleServerAuthConfiguration{
+		AuthType: crdv1alpha1.BasicAuthentication,
+		AuthSecret: &v1.SecretReference{
+			Name:      fileServerAuthSecretName,
+			Namespace: fileServerAuthSecretNamespace,
+		},
+	}
+}
+
+func (c *Controller) uploadPackets(pc *crdv1alpha1.PacketCapture, outputFile afero.File) error {
+	if pc.Spec.FileServer == nil {
+		klog.V(2).Info("No fileServer specified in the PacketCapture, skipping upload of the packets file")
+		return nil
+	}
+	klog.V(2).InfoS("Uploading captured packets for PacketCapture", "name", pc.Name)
+	uploader, err := c.getUploaderByProtocol(sftpProtocol)
+	if err != nil {
+		return fmt.Errorf("failed to get the uploader for the packets file: %w", err)
+	}
+	authConfig := getDefaultFileServerAuth()
+	serverAuth, err := ftp.ParseBundleAuth(*authConfig, c.kubeClient)
+	if err != nil {
+		klog.ErrorS(err, "Failed to get authentication for the fileServer", "name", pc.Name, "authentication", authConfig)
+		return err
+	}
+	cfg := ftp.GenSSHClientConfig(serverAuth.BasicAuthentication.Username, serverAuth.BasicAuthentication.Password)
+	return uploader.Upload(pc.Spec.FileServer.URL, c.generatePacketsPathForServer(string(pc.UID)), cfg, outputFile)
+}
+
+// initPacketCapture marks the PacketCapture as running and allocates a tag for it, then starts the capture. The tag
+// serves as a unique ID for concurrent processing.
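+// A tag value of 0 returned by allocateTag means the PacketCapture is already being processed, in which case this call is a no-op.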
+func (c *Controller) initPacketCapture(pc *crdv1alpha1.PacketCapture) error { + tag, err := c.allocateTag(pc.Name) + if err != nil { + return err + } + if tag == 0 { + return nil + } + err = c.updatePacketCaptureStatus(pc, crdv1alpha1.PacketCaptureRunning, "", 0) + if err != nil { + c.deallocateTag(pc.Name, tag) + return err + } + return c.startPacketCapture(pc, c.runningPacketCaptures[tag]) +} + +func (c *Controller) updatePacketCaptureStatus(pc *crdv1alpha1.PacketCapture, phase crdv1alpha1.PacketCapturePhase, reason string, numCapturedPackets int32) error { + latestPC, err := c.packetCaptureLister.Get(pc.Name) + if err != nil { + return fmt.Errorf("get PacketCapture failed: %w", err) + } + type PacketCapture struct { + Status crdv1alpha1.PacketCaptureStatus `json:"status,omitempty"` + } + patchData := PacketCapture{Status: crdv1alpha1.PacketCaptureStatus{Phase: phase}} + if phase == crdv1alpha1.PacketCaptureRunning && pc.Status.StartTime == nil { + t := metav1.Now() + patchData.Status.StartTime = &t + } + if reason != "" { + patchData.Status.Reason = reason + } + if numCapturedPackets != 0 { + patchData.Status.NumCapturedPackets = &numCapturedPackets + } + patchData.Status.PacketsFilePath = latestPC.Status.PacketsFilePath + payloads, _ := json.Marshal(patchData) + _, err = c.crdClient.CrdV1alpha1().PacketCaptures().Patch(context.TODO(), pc.Name, types.MergePatchType, payloads, metav1.PatchOptions{}, "status") + return err +} + +// we also support only store the packets file in container, so add pod name here for users to +// know which pod the file is located. +func (c *Controller) setPacketsFilePathStatus(name, uid string) error { + type PacketCapture struct { + Status crdv1alpha1.PacketCaptureStatus `json:"status,omitempty"` + } + patchData := PacketCapture{ + Status: crdv1alpha1.PacketCaptureStatus{ + PacketsFilePath: os.Getenv("POD_NAME") + ":" + uidToPath(uid), + }, + } + payloads, _ := json.Marshal(patchData) + _, err := c.crdClient.CrdV1alpha1().PacketCaptures().Patch(context.TODO(), name, types.MergePatchType, payloads, metav1.PatchOptions{}, "status") + return err +} + +func (c *Controller) deallocateTag(name string, tag uint8) { + c.runningPacketCapturesMutex.Lock() + defer c.runningPacketCapturesMutex.Unlock() + if state, ok := c.runningPacketCaptures[tag]; ok { + if state != nil && name == state.name { + delete(c.runningPacketCaptures, tag) + } + } +} + +func (c *Controller) getTagForPacketCapture(name string) uint8 { + c.runningPacketCapturesMutex.RLock() + defer c.runningPacketCapturesMutex.RUnlock() + for tag, state := range c.runningPacketCaptures { + if state != nil && state.name == name { + // The packetcapture request has been processed already. 
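+			// Return the tag that was allocated for it so the caller can act on the same state.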
+ return tag + } + } + return 0 +} + +// checkPacketCaptureStatus is only called for PacketCaptures in the Running phase +func (c *Controller) checkPacketCaptureStatus(pc *crdv1alpha1.PacketCapture) error { + tag := c.getTagForPacketCapture(pc.Name) + if tag == 0 { + return nil + } + if checkPacketCaptureSucceeded(pc) { + c.deallocateTag(pc.Name, tag) + return c.updatePacketCaptureStatus(pc, crdv1alpha1.PacketCaptureSucceeded, "", 0) + } + + if isPacketCaptureTimeout(pc) { + c.deallocateTag(pc.Name, tag) + return c.updatePacketCaptureStatus(pc, crdv1alpha1.PacketCaptureFailed, captureTimeoutReason, 0) + } + return nil +} + +func checkPacketCaptureSucceeded(pc *crdv1alpha1.PacketCapture) bool { + succeeded := false + cfg := pc.Spec.CaptureConfig.FirstN + captured := pc.Status.NumCapturedPackets + if cfg != nil && captured != nil && *captured == cfg.Number { + succeeded = true + } + return succeeded +} + +func isPacketCaptureTimeout(pc *crdv1alpha1.PacketCapture) bool { + var timeout time.Duration + if pc.Spec.Timeout != nil { + timeout = time.Duration(*pc.Spec.Timeout) * time.Second + } else { + timeout = defaultTimeoutDuration + } + var startTime time.Time + if pc.Status.StartTime != nil { + startTime = pc.Status.StartTime.Time + } else { + klog.V(2).InfoS("StartTime field in PacketCapture Status should not be empty", "PacketCapture", klog.KObj(pc)) + startTime = pc.CreationTimestamp.Time + } + return startTime.Add(timeout).Before(time.Now()) +} diff --git a/pkg/agent/controller/packetcapture/packetcapture_controller_test.go b/pkg/agent/controller/packetcapture/packetcapture_controller_test.go new file mode 100644 index 00000000000..ba738074064 --- /dev/null +++ b/pkg/agent/controller/packetcapture/packetcapture_controller_test.go @@ -0,0 +1,1105 @@ +// Copyright 2024 Antrea Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package packetcapture + +import ( + "bytes" + "net" + "os" + "reflect" + "testing" + "time" + + "antrea.io/libOpenflow/protocol" + "github.com/google/gopacket/layers" + "github.com/google/gopacket/pcapgo" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/klog/v2" + + "antrea.io/antrea/pkg/agent/config" + "antrea.io/antrea/pkg/agent/interfacestore" + openflowtest "antrea.io/antrea/pkg/agent/openflow/testing" + "antrea.io/antrea/pkg/agent/util" + crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" + fakeversioned "antrea.io/antrea/pkg/client/clientset/versioned/fake" + crdinformers "antrea.io/antrea/pkg/client/informers/externalversions" + binding "antrea.io/antrea/pkg/ovs/openflow" + "antrea.io/antrea/pkg/util/k8s" +) + +var ( + pod1IPv4 = "192.168.10.10" + pod2IPv4 = "192.168.11.10" + ipv4 = "192.168.12.4" + ipv6 = "2001:db8::68" + service1IPv4 = "10.96.0.10" + dstIPv4 = "192.168.99.99" + pod1MAC, _ = net.ParseMAC("aa:bb:cc:dd:ee:0f") + pod2MAC, _ = net.ParseMAC("aa:bb:cc:dd:ee:00") + ofPortPod1 = uint32(1) + ofPortPod2 = uint32(2) + testTCPFlags = int32(11) + icmp6Proto = intstr.FromInt(58) + icmpProto = intstr.FromString("ICMP") + port80 int32 = 80 + port81 int32 = 81 + + pod1 = v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1", + Namespace: "default", + }, + Status: v1.PodStatus{ + PodIP: pod1IPv4, + }, + } + pod2 = v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2", + Namespace: "default", + }, + Status: v1.PodStatus{ + PodIP: pod2IPv4, + }, + } + pod3 = v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3", + Namespace: "default", + }, + } + + secret1 = v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fileServerAuthSecretName, + Namespace: fileServerAuthSecretNamespace, + }, + Data: map[string][]byte{ + "username": []byte("username"), + "password": []byte("password"), + }, + } + + service1 = v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-1", + Namespace: "default", + }, + Spec: v1.ServiceSpec{ + ClusterIP: service1IPv4, + }, + } +) + +type fakePacketCaptureController struct { + *Controller + kubeClient kubernetes.Interface + mockController *gomock.Controller + mockOFClient *openflowtest.MockClient + crdClient *fakeversioned.Clientset + crdInformerFactory crdinformers.SharedInformerFactory + informerFactory informers.SharedInformerFactory +} + +func newFakePacketCaptureController(t *testing.T, runtimeObjects []runtime.Object, initObjects []runtime.Object, nodeConfig *config.NodeConfig) *fakePacketCaptureController { + controller := gomock.NewController(t) + objs := []runtime.Object{ + &pod1, + &pod2, + &pod3, + &service1, + &secret1, + } + objs = append(objs, generateTestSecret()) + if runtimeObjects != nil { + objs = append(objs, runtimeObjects...) + } + kubeClient := fake.NewSimpleClientset(objs...) + mockOFClient := openflowtest.NewMockClient(controller) + crdClient := fakeversioned.NewSimpleClientset(initObjects...) 
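+	// Build informer factories on top of the fake clients so that the controller under test observes the objects registered above.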
+ crdInformerFactory := crdinformers.NewSharedInformerFactory(crdClient, 0) + packetCaptureInformer := crdInformerFactory.Crd().V1alpha1().PacketCaptures() + informerFactory := informers.NewSharedInformerFactory(kubeClient, 0) + serviceInformer := informerFactory.Core().V1().Services() + endpointInformer := informerFactory.Core().V1().Endpoints() + + ifaceStore := interfacestore.NewInterfaceStore() + addPodInterface(ifaceStore, pod1.Namespace, pod1.Name, pod1IPv4, pod1MAC.String(), int32(ofPortPod1)) + addPodInterface(ifaceStore, pod2.Namespace, pod2.Name, pod2IPv4, pod2MAC.String(), int32(ofPortPod2)) + + mockOFClient.EXPECT().RegisterPacketInHandler(gomock.Any(), gomock.Any()).Times(1) + pcController := NewPacketCaptureController( + kubeClient, + crdClient, + serviceInformer, + endpointInformer, + packetCaptureInformer, + mockOFClient, + ifaceStore, + nodeConfig, + ) + pcController.sftpUploader = &testUploader{} + + return &fakePacketCaptureController{ + Controller: pcController, + kubeClient: kubeClient, + mockController: controller, + mockOFClient: mockOFClient, + crdClient: crdClient, + crdInformerFactory: crdInformerFactory, + informerFactory: informerFactory, + } +} + +func addPodInterface(ifaceStore interfacestore.InterfaceStore, podNamespace, podName, podIP, podMac string, ofPort int32) { + containerName := k8s.NamespacedName(podNamespace, podName) + ifIPs := []net.IP{net.ParseIP(podIP)} + mac, _ := net.ParseMAC(podMac) + ifaceStore.AddInterface(&interfacestore.InterfaceConfig{ + IPs: ifIPs, + MAC: mac, + InterfaceName: util.GenerateContainerInterfaceName(podName, podNamespace, containerName), + ContainerInterfaceConfig: &interfacestore.ContainerInterfaceConfig{PodName: podName, PodNamespace: podNamespace, ContainerID: containerName}, + OVSPortConfig: &interfacestore.OVSPortConfig{OFPort: ofPort}, + }) +} + +func TestErrPacketCaptureCRD(t *testing.T) { + pc := &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pc", + UID: "uid", + }, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod2.Namespace, + Name: pod2.Name, + }, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 12, + }, + }, + Packet: &crdv1alpha1.Packet{ + IPFamily: v1.IPv4Protocol, + Protocol: &icmpProto, + }, + }, + Status: crdv1alpha1.PacketCaptureStatus{ + Phase: crdv1alpha1.PacketCaptureRunning, + }, + } + expectedPC := pc + reason := "failed" + expectedPC.Status.Phase = crdv1alpha1.PacketCaptureFailed + expectedPC.Status.Reason = reason + + pcc := newFakePacketCaptureController(t, nil, []runtime.Object{pc}, nil) + stopCh := make(chan struct{}) + defer close(stopCh) + pcc.crdInformerFactory.Start(stopCh) + pcc.crdInformerFactory.WaitForCacheSync(stopCh) + + err := pcc.updatePacketCaptureStatus(pc, crdv1alpha1.PacketCaptureFailed, reason, 0) + require.NoError(t, err) +} + +func TestPreparePacket(t *testing.T) { + pcs := []struct { + name string + pc *crdv1alpha1.PacketCapture + intf *interfacestore.InterfaceConfig + receiverOnly bool + expectedPacket *binding.Packet + expectedErr string + }{ + { + name: "empty destination", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc2", UID: "uid2"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, 
+ Name: pod1.Name, + }, + }, + }, + }, + expectedErr: "destination is not specified", + }, + { + name: "ipv4 tcp packet", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc3", UID: "uid3"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod2.Namespace, + Name: pod2.Name, + }, + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &intstr.IntOrString{Type: intstr.String, StrVal: "TCP"}, + TransportHeader: crdv1alpha1.TransportHeader{ + TCP: &crdv1alpha1.TCPHeader{ + SrcPort: &port80, + DstPort: &port81, + Flags: &testTCPFlags, + }, + }, + }, + }, + }, + expectedPacket: &binding.Packet{ + DestinationIP: net.ParseIP(pod2IPv4), + IPProto: protocol.Type_TCP, + SourcePort: 80, + DestinationPort: 81, + TCPFlags: 11, + }, + }, + { + name: "receiver only with source ip", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc4", UID: "uid4"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + IP: &ipv4, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Packet: &crdv1alpha1.Packet{ + IPFamily: v1.IPv4Protocol, + Protocol: &intstr.IntOrString{Type: intstr.String, StrVal: "ICMP"}, + }, + }, + }, + receiverOnly: true, + expectedPacket: &binding.Packet{ + SourceIP: net.ParseIP("192.168.12.4"), + DestinationMAC: pod1MAC, + IPProto: 1, + }, + }, + { + name: "destination Pod without IPv6 address", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc4", UID: "uid4"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod2.Namespace, + Name: pod2.Name, + }, + }, + Packet: &crdv1alpha1.Packet{ + IPFamily: v1.IPv6Protocol, + Protocol: &icmp6Proto, + }, + }, + }, + expectedErr: "destination Pod does not have an IPv6 address", + }, + { + name: "pod to ipv6 packet capture", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc5", UID: "uid5"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + IP: &ipv6, + }, + Packet: &crdv1alpha1.Packet{ + IPFamily: v1.IPv6Protocol, + Protocol: &icmp6Proto, + }, + }, + }, + expectedPacket: &binding.Packet{ + IsIPv6: true, + DestinationIP: net.ParseIP("2001:db8::68"), + IPProto: protocol.Type_IPv6ICMP, + }, + }, + { + name: "tcp packet without flags", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc6", UID: "uid6"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod2.Namespace, + Name: pod2.Name, + }, + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &intstr.IntOrString{Type: intstr.String, StrVal: "TCP"}, + TransportHeader: crdv1alpha1.TransportHeader{ + TCP: &crdv1alpha1.TCPHeader{ + SrcPort: &port80, + DstPort: &port81, + }, + }, + }, + }, + }, + expectedPacket: &binding.Packet{ + DestinationIP: net.ParseIP(pod2IPv4), + 
IPProto: protocol.Type_TCP, + SourcePort: 80, + DestinationPort: 81, + }, + }, + { + name: "udp packet", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc7", UID: "uid7"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod2.Namespace, + Name: pod2.Name, + }, + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &intstr.IntOrString{Type: intstr.String, StrVal: "UDP"}, + TransportHeader: crdv1alpha1.TransportHeader{ + UDP: &crdv1alpha1.UDPHeader{ + SrcPort: &port80, + DstPort: &port81, + }, + }, + }, + }, + }, + expectedPacket: &binding.Packet{ + DestinationIP: net.ParseIP(pod2IPv4), + IPProto: protocol.Type_UDP, + SourcePort: 80, + DestinationPort: 81, + }, + }, + { + name: "icmp packet", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc8", UID: "uid8"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod2.Namespace, + Name: pod2.Name, + }, + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &intstr.IntOrString{Type: intstr.String, StrVal: "ICMP"}, + }, + }, + }, + expectedPacket: &binding.Packet{ + DestinationIP: net.ParseIP(pod2IPv4), + IPProto: protocol.Type_ICMP, + }, + }, + { + name: "destination Pod unavailable", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc11", UID: "uid11"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Name: "unknown pod", + Namespace: "default", + }, + }, + }, + }, + expectedErr: "failed to get the destination pod default/unknown pod: pods \"unknown pod\"", + }, + { + name: "to service packet", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc12", UID: "uid12"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Service: &crdv1alpha1.ServiceReference{ + Name: service1.Name, + Namespace: service1.Namespace, + }, + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &intstr.IntOrString{Type: intstr.String, StrVal: "TCP"}, + TransportHeader: crdv1alpha1.TransportHeader{ + TCP: &crdv1alpha1.TCPHeader{ + SrcPort: &port80, + DstPort: &port81, + Flags: &testTCPFlags, + }, + }, + }, + }, + }, + expectedPacket: &binding.Packet{ + DestinationIP: net.ParseIP(service1IPv4).To4(), + IPProto: protocol.Type_TCP, + SourcePort: 80, + DestinationPort: 81, + TCPFlags: 11, + }, + }, + } + for _, pc := range pcs { + t.Run(pc.name, func(t *testing.T) { + pcc := newFakePacketCaptureController(t, nil, []runtime.Object{pc.pc}, nil) + podInterfaces := pcc.interfaceStore.GetContainerInterfacesByPod(pod1.Name, pod1.Namespace) + if pc.intf != nil { + podInterfaces[0] = pc.intf + } + stopCh := make(chan struct{}) + defer close(stopCh) + pcc.crdInformerFactory.Start(stopCh) + pcc.crdInformerFactory.WaitForCacheSync(stopCh) + pcc.informerFactory.Start(stopCh) + pcc.informerFactory.WaitForCacheSync(stopCh) + + pkt, err := pcc.preparePacket(pc.pc, podInterfaces[0], pc.receiverOnly) + if pc.expectedErr == "" { + require.NoError(t, err) + assert.Equal(t, pc.expectedPacket, pkt) + } else { + 
assert.ErrorContains(t, err, pc.expectedErr) + assert.Nil(t, pkt) + } + }) + } +} + +func TestSyncPacketCapture(t *testing.T) { + // create test os + defaultFS = afero.NewMemMapFs() + defaultFS.MkdirAll("/tmp/antrea/packetcapture/packets", 0755) + file, err := defaultFS.Create(uidToPath(testUID)) + if err != nil { + t.Fatal("create pcapng file error: ", err) + } + + testWriter, err := pcapgo.NewNgWriter(file, layers.LinkTypeEthernet) + if err != nil { + t.Fatal("create test pcapng writer failed: ", err) + } + + pcs := []struct { + name string + pc *crdv1alpha1.PacketCapture + existingState *packetCaptureState + newState *packetCaptureState + expectedCalls func(mockOFClient *openflowtest.MockClient) + }{ + { + name: "start packetcapture", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc1", UID: "uid1"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod2.Namespace, + Name: pod2.Name, + }, + }, + }, + }, + existingState: &packetCaptureState{ + name: "pc1", + tag: 1, + }, + newState: &packetCaptureState{ + name: "pc1", + tag: 1, + }, + }, + + { + name: "packetcapture in failed phase", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc1", UID: types.UID(testUID)}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod2.Namespace, + Name: pod2.Name, + }, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + }, + Status: crdv1alpha1.PacketCaptureStatus{ + Phase: crdv1alpha1.PacketCaptureFailed, + }, + }, + existingState: &packetCaptureState{ + name: "pc1", + pcapngFile: file, + pcapngWriter: testWriter, + tag: 1, + }, + expectedCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().UninstallPacketCaptureFlows(uint8(1)) + }, + }, + } + + for _, pc := range pcs { + t.Run(pc.name, func(t *testing.T) { + pcc := newFakePacketCaptureController(t, nil, []runtime.Object{pc.pc}, nil) + stopCh := make(chan struct{}) + defer close(stopCh) + pcc.crdInformerFactory.Start(stopCh) + pcc.crdInformerFactory.WaitForCacheSync(stopCh) + + if pc.existingState != nil { + pcc.runningPacketCaptures[pc.existingState.tag] = pc.existingState + } + + if pc.expectedCalls != nil { + pc.expectedCalls(pcc.mockOFClient) + } + + err := pcc.syncPacketCapture(pc.pc.Name) + require.NoError(t, err) + assert.Equal(t, pc.newState, pcc.runningPacketCaptures[pc.existingState.tag]) + }) + } +} + +// TestPacketCaptureControllerRun was used to validate the whole run process is working. It doesn't wait for +// the testing pc to finish. 
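+// It only checks that the controller starts and installs the expected capture flows.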
+func TestPacketCaptureControllerRun(t *testing.T) { + // create test os + defaultFS = afero.NewMemMapFs() + defaultFS.MkdirAll("/tmp/antrea/packetcapture/packets", 0755) + pc := struct { + name string + pc *crdv1alpha1.PacketCapture + newState *packetCaptureState + }{ + name: "start packetcapture", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc1", UID: "uid1"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod2.Namespace, + Name: pod2.Name, + }, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &icmpProto, + }, + }, + }, + newState: &packetCaptureState{tag: 1}, + } + + pcc := newFakePacketCaptureController(t, nil, []runtime.Object{pc.pc}, nil) + stopCh := make(chan struct{}) + defer close(stopCh) + pcc.crdInformerFactory.Start(stopCh) + pcc.crdInformerFactory.WaitForCacheSync(stopCh) + pcc.informerFactory.Start(stopCh) + pcc.informerFactory.WaitForCacheSync(stopCh) + pcc.mockOFClient.EXPECT().InstallPacketCaptureFlows(pc.newState.tag, false, + &binding.Packet{DestinationIP: net.ParseIP(pod2.Status.PodIP), IPProto: protocol.Type_ICMP}, + nil, ofPortPod1, crdv1alpha1.DefaultPacketCaptureTimeout) + go pcc.Run(stopCh) + time.Sleep(300 * time.Millisecond) +} + +func TestProcessPacketCaptureItem(t *testing.T) { + // create test os + defaultFS = afero.NewMemMapFs() + defaultFS.MkdirAll("/tmp/antrea/packetcapture/packets", 0755) + pc := struct { + pc *crdv1alpha1.PacketCapture + ofPort uint32 + receiverOnly bool + packet *binding.Packet + expected bool + }{ + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc1", UID: "uid1"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod2.Namespace, + Name: pod2.Name, + }, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &icmpProto, + }, + }, + }, + ofPort: ofPortPod1, + packet: &binding.Packet{ + DestinationIP: net.ParseIP(pod2IPv4), + IPProto: 1, + }, + expected: true, + } + + pcc := newFakePacketCaptureController(t, nil, []runtime.Object{pc.pc}, nil) + stopCh := make(chan struct{}) + defer close(stopCh) + pcc.crdInformerFactory.Start(stopCh) + pcc.crdInformerFactory.WaitForCacheSync(stopCh) + + pcc.mockOFClient.EXPECT().InstallPacketCaptureFlows(uint8(1), pc.receiverOnly, pc.packet, nil, pc.ofPort, crdv1alpha1.DefaultPacketCaptureTimeout) + pcc.enqueuePacketCapture(pc.pc) + got := pcc.processPacketCaptureItem() + assert.Equal(t, pc.expected, got) +} + +func TestStartPacketCapture(t *testing.T) { + defaultFS = afero.NewMemMapFs() + defaultFS.MkdirAll(packetDirectory, 0755) + tcs := []struct { + name string + pc *crdv1alpha1.PacketCapture + state *packetCaptureState + ofPort uint32 + receiverOnly bool + packet *binding.Packet + expectedCalls func(mockOFClient *openflowtest.MockClient) + nodeConfig *config.NodeConfig + expectedErr string + expectedErrLog string + }{ + { + name: "Pod-to-Pod PacketCapture", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc1", 
UID: "uid1"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod2.Namespace, + Name: pod2.Name, + }, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + Packet: &crdv1alpha1.Packet{ + IPFamily: v1.IPv4Protocol, + }, + }, + + Status: crdv1alpha1.PacketCaptureStatus{ + Phase: crdv1alpha1.PacketCaptureRunning, + }, + }, + state: &packetCaptureState{tag: 1}, + ofPort: ofPortPod1, + packet: &binding.Packet{ + SourceIP: net.ParseIP(pod1IPv4), + SourceMAC: pod1MAC, + DestinationIP: net.ParseIP(pod2IPv4), + DestinationMAC: pod2MAC, + IPProto: 1, + TTL: 64, + ICMPType: 8, + }, + expectedCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().InstallPacketCaptureFlows(uint8(1), false, + &binding.Packet{ + DestinationIP: net.ParseIP(pod2IPv4), + IPProto: 1, + }, + nil, ofPortPod1, crdv1alpha1.DefaultPacketCaptureTimeout) + }, + }, + { + name: "Pod-to-IPv4 packetcapture", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc1", UID: "uid2"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + IP: &dstIPv4, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + Packet: &crdv1alpha1.Packet{ + IPFamily: v1.IPv4Protocol, + }, + }, + Status: crdv1alpha1.PacketCaptureStatus{ + Phase: crdv1alpha1.PacketCaptureRunning, + }, + }, + state: &packetCaptureState{tag: 2}, + ofPort: ofPortPod1, + packet: &binding.Packet{ + SourceIP: net.ParseIP(pod1IPv4), + SourceMAC: pod1MAC, + DestinationIP: net.ParseIP(dstIPv4), + IPProto: 1, + TTL: 64, + ICMPType: 8, + }, + expectedCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().InstallPacketCaptureFlows(uint8(2), false, &binding.Packet{ + DestinationIP: net.ParseIP(dstIPv4), + IPProto: 1, + }, nil, ofPortPod1, crdv1alpha1.DefaultPacketCaptureTimeout) + }, + }, + } + + for _, tt := range tcs { + t.Run(tt.name, func(t *testing.T) { + tfc := newFakePacketCaptureController(t, nil, []runtime.Object{tt.pc}, tt.nodeConfig) + if tt.expectedCalls != nil { + tt.expectedCalls(tfc.mockOFClient) + } + + bufWriter := bytes.NewBuffer(nil) + klog.SetOutput(bufWriter) + klog.LogToStderr(false) + defer func() { + klog.SetOutput(os.Stderr) + klog.LogToStderr(true) + }() + + err := tfc.startPacketCapture(tt.pc, tt.state) + if tt.expectedErr != "" { + assert.ErrorContains(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + if tt.expectedErrLog != "" { + assert.Contains(t, bufWriter.String(), tt.expectedErrLog) + } + }) + } +} + +func TestPrepareEndpointsPackets(t *testing.T) { + pcs := []struct { + name string + pc *crdv1alpha1.PacketCapture + expectedPackets []binding.Packet + objs []runtime.Object + expectedErr string + }{ + { + name: "svc-not-exist", + expectedErr: "service \"svc1\" not found", + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc1", UID: "uid2"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Service: &crdv1alpha1.ServiceReference{ + 
Name: "svc1", + Namespace: pod1.Namespace, + }, + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &intstr.IntOrString{Type: intstr.String, StrVal: "TCP"}, + TransportHeader: crdv1alpha1.TransportHeader{ + TCP: &crdv1alpha1.TCPHeader{ + DstPort: &port80, + }, + }, + }, + }, + }, + }, + { + name: "ep-not-exist", + expectedErr: "endpoints \"svc1\" not found", + objs: []runtime.Object{&v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: pod1.Namespace, + Name: "svc1", + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: "http", + Port: 80, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8080, + }, + }, + }, + }, + }}, + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc2", UID: "uid2"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Service: &crdv1alpha1.ServiceReference{ + Namespace: pod1.Namespace, + Name: "svc1", + }, + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &intstr.IntOrString{Type: intstr.String, StrVal: "TCP"}, + TransportHeader: crdv1alpha1.TransportHeader{ + TCP: &crdv1alpha1.TCPHeader{ + DstPort: &port80, + }, + }, + }, + }, + }, + }, + { + name: "tcp-2-backends-svc", + expectedPackets: []binding.Packet{ + { + DestinationIP: net.ParseIP(pod1.Status.PodIP), + DestinationPort: 8080, + IPProto: protocol.Type_TCP, + }, + { + DestinationIP: net.ParseIP(pod2.Status.PodIP), + DestinationPort: 8080, + IPProto: protocol.Type_TCP, + }, + }, + objs: []runtime.Object{&v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: pod1.Namespace, + Name: "svc1", + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: "http", + Port: 80, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8080, + }, + }, + }, + }, + }, &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: pod1.Namespace, + Name: "svc1", + }, + Subsets: []v1.EndpointSubset{ + { + Addresses: []v1.EndpointAddress{ + { + IP: pod1.Status.PodIP, + }, + { + IP: pod2.Status.PodIP, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "http", + Port: 8080, + }, + }, + }, + }, + }}, + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{Name: "pc1", UID: "uid1"}, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: pod1.Namespace, + Name: pod1.Name, + }, + }, + Destination: crdv1alpha1.Destination{ + Service: &crdv1alpha1.ServiceReference{ + Name: "svc1", + Namespace: pod1.Namespace, + }, + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &intstr.IntOrString{Type: intstr.String, StrVal: "TCP"}, + TransportHeader: crdv1alpha1.TransportHeader{ + TCP: &crdv1alpha1.TCPHeader{ + DstPort: &port80, + }, + }, + }, + }, + }, + }, + } + + for _, pc := range pcs { + t.Run(pc.name, func(t *testing.T) { + pcc := newFakePacketCaptureController(t, pc.objs, []runtime.Object{pc.pc}, nil) + stopCh := make(chan struct{}) + defer close(stopCh) + pcc.crdInformerFactory.Start(stopCh) + pcc.crdInformerFactory.WaitForCacheSync(stopCh) + pcc.informerFactory.Start(stopCh) + pcc.informerFactory.WaitForCacheSync(stopCh) + + pkts, err := pcc.genEndpointMatchPackets(pc.pc) + if pc.expectedErr == "" { + require.NoError(t, err) + if !reflect.DeepEqual(pc.expectedPackets, pkts) { + t.Errorf("expected packets: %+v, got: %+v", pc.expectedPackets, pkts) + } + + } else { + assert.ErrorContains(t, err, pc.expectedErr) + assert.Nil(t, pkts) + } + }) + } +} diff --git 
a/pkg/agent/controller/packetcapture/packetin.go b/pkg/agent/controller/packetcapture/packetin.go new file mode 100644 index 00000000000..0149d3449b0 --- /dev/null +++ b/pkg/agent/controller/packetcapture/packetin.go @@ -0,0 +1,109 @@ +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package packetcapture + +import ( + "fmt" + "time" + + "antrea.io/libOpenflow/util" + "antrea.io/ofnet/ofctrl" + "github.com/google/gopacket" + "k8s.io/klog/v2" + + "antrea.io/antrea/pkg/agent/openflow" + crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" +) + +// HandlePacketIn processes PacketIn messages from the OFSwitch. If the register value match, it will be counted and captured. +// Once the total number reaches the target, the PacketCapture will be marked as Succeed. +func (c *Controller) HandlePacketIn(pktIn *ofctrl.PacketIn) error { + klog.V(4).InfoS("PacketIn for PacketCapture", "PacketIn", pktIn.PacketIn) + captureState, captureFinished, err := c.parsePacketIn(pktIn) + if err != nil { + return fmt.Errorf("parsePacketIn error: %w", err) + } + if captureFinished { + return nil + } + rawData := pktIn.Data.(*util.Buffer).Bytes() + ci := gopacket.CaptureInfo{ + Timestamp: time.Now(), + CaptureLength: len(rawData), + Length: len(rawData), + } + err = captureState.pcapngWriter.WritePacket(ci, rawData) + if err != nil { + return fmt.Errorf("couldn't write packet: %w", err) + } + reachTarget := captureState.numCapturedPackets == captureState.maxNumCapturedPackets + // use rate limiter to reduce the times we need to update status. + if reachTarget || captureState.updateRateLimiter.Allow() { + pc, err := c.packetCaptureLister.Get(captureState.name) + if err != nil { + return fmt.Errorf("get PacketCapture failed: %w", err) + } + // if reach the target. flush the file and upload it. + if reachTarget { + if err := captureState.pcapngWriter.Flush(); err != nil { + return err + } + if err := c.uploadPackets(pc, captureState.pcapngFile); err != nil { + return err + } + if err := c.setPacketsFilePathStatus(pc.Name, string(pc.UID)); err != nil { + return err + } + } + err = c.updatePacketCaptureStatus(pc, crdv1alpha1.PacketCaptureRunning, "", captureState.numCapturedPackets) + if err != nil { + return fmt.Errorf("failed to update the PacketCapture: %w", err) + } + klog.InfoS("Updated PacketCapture", "PacketCapture", klog.KObj(pc), "numCapturedPackets", captureState.numCapturedPackets) + } + return nil +} + +// parsePacketIn parses the packet-in message. If the value in register match with existing PacketCapture's state(tag), +// it will be counted. If the total count reach the target, the ovs flow will be uninstalled. 
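+// The returned captureFinished flag indicates that the target number of packets has already been captured, so the
+// caller should not write the packet to the pcapng file again.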
+func (c *Controller) parsePacketIn(pktIn *ofctrl.PacketIn) (_ *packetCaptureState, captureFinished bool, _ error) { + var tag uint8 + matchers := pktIn.GetMatches() + match := openflow.GetMatchFieldByRegID(matchers, openflow.PacketCaptureMark.GetRegID()) + if match != nil { + value, err := openflow.GetInfoInReg(match, openflow.PacketCaptureMark.GetRange().ToNXRange()) + if err != nil { + return nil, false, fmt.Errorf("failed to get PacketCapture tag from packet-in message: %w", err) + } + tag = uint8(value) + } + c.runningPacketCapturesMutex.Lock() + defer c.runningPacketCapturesMutex.Unlock() + pcState, exists := c.runningPacketCaptures[tag] + if !exists { + return nil, false, fmt.Errorf("PacketCapture for dataplane tag %d not found in cache", tag) + } + if pcState.numCapturedPackets == pcState.maxNumCapturedPackets { + return nil, true, nil + } + pcState.numCapturedPackets++ + if pcState.numCapturedPackets == pcState.maxNumCapturedPackets { + err := c.ofClient.UninstallPacketCaptureFlows(tag) + if err != nil { + return nil, false, fmt.Errorf("uninstall PacketCapture ovs flow failed: %v", err) + } + } + return pcState, false, nil +} diff --git a/pkg/agent/controller/packetcapture/packetin_test.go b/pkg/agent/controller/packetcapture/packetin_test.go new file mode 100644 index 00000000000..fce008ba3e2 --- /dev/null +++ b/pkg/agent/controller/packetcapture/packetin_test.go @@ -0,0 +1,266 @@ +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package packetcapture + +import ( + "context" + "encoding/binary" + "fmt" + "net" + "testing" + + "antrea.io/libOpenflow/openflow15" + "antrea.io/libOpenflow/protocol" + "antrea.io/libOpenflow/util" + "antrea.io/ofnet/ofctrl" + "github.com/google/gopacket/layers" + "github.com/google/gopacket/pcapgo" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/ssh" + "golang.org/x/time/rate" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "antrea.io/antrea/pkg/agent/config" + openflowtest "antrea.io/antrea/pkg/agent/openflow/testing" + crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" +) + +const ( + maxNum = 5 +) + +const ( + testTag = uint8(3) + testUID = "1-2-3-4" + testSFTPUrl = "sftp://127.0.0.1:22/root/packetcaptures" +) + +// generatePacketInMatchFromTag reverse the packetIn message/matcher -> REG4/tag value path +// to generate test matchers. It follows the following process: +// 1. shift bits to generate uint32, which represents data in REG4 and another REG (unrelated) +// 2. convert uint32 to bytes(bigEndian), which will be the Match value/mask. +// 3. generate MatchField from the bytes. 
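+// The tag occupies the highest 4 bits of the 32-bit register value, hence the shift by 28 in step 1.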
+func generatePacketInMatchFromTag(tag uint8) *openflow15.MatchField { + value := uint32(tag) << 28 + regID := 4 + data := make([]byte, 8) + binary.BigEndian.PutUint32(data, value) + + m := openflow15.MatchField{ + Class: openflow15.OXM_CLASS_PACKET_REGS, + Field: uint8(regID / 2), + HasMask: false, + Value: &openflow15.ByteArrayField{Data: data}, + } + return &m +} + +func genMatchers() []openflow15.MatchField { + // m := generateMatch(openflow.PacketCaptureMark.GetRegID(), testTagData) + matchers := []openflow15.MatchField{*generatePacketInMatchFromTag(testTag)} + return matchers +} + +func getTestPacketBytes(dstIP string) []byte { + ipPacket := &protocol.IPv4{ + Version: 0x4, + IHL: 5, + Protocol: uint8(8), + Length: 20, + NWSrc: net.IP(pod1IPv4), + NWDst: net.IP(dstIP), + } + ethernetPkt := protocol.NewEthernet() + ethernetPkt.HWSrc = pod1MAC + ethernetPkt.Ethertype = protocol.IPv4_MSG + ethernetPkt.Data = ipPacket + pktBytes, _ := ethernetPkt.MarshalBinary() + return pktBytes +} + +func generateTestPCState(name string, pcapngFile afero.File, writer *pcapgo.NgWriter, num int32) *packetCaptureState { + return &packetCaptureState{ + name: name, + maxNumCapturedPackets: maxNum, + numCapturedPackets: num, + tag: testTag, + pcapngWriter: writer, + pcapngFile: pcapngFile, + shouldCapturePackets: true, + updateRateLimiter: rate.NewLimiter(rate.Every(captureStatusUpdatePeriod), 1), + } +} + +func generatePacketCapture(name string) *crdv1alpha1.PacketCapture { + return &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + UID: testUID, + }, + Status: crdv1alpha1.PacketCaptureStatus{}, + Spec: crdv1alpha1.PacketCaptureSpec{ + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + FileServer: &crdv1alpha1.BundleFileServer{ + URL: testSFTPUrl, + }, + }, + } +} + +func generateTestSecret() *v1.Secret { + return &v1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "AAA", + Namespace: "default", + }, + Data: map[string][]byte{ + "username": []byte("AAA"), + "password": []byte("BBBCCC"), + }, + } +} + +type testUploader struct { + url string + fileName string +} + +func (uploader *testUploader) Upload(url string, fileName string, config *ssh.ClientConfig, outputFile afero.File) error { + if url != uploader.url { + return fmt.Errorf("expected url: %s for uploader, got: %s", uploader.url, url) + } + if fileName != uploader.fileName { + return fmt.Errorf("expected filename: %s for uploader, got: %s", uploader.fileName, fileName) + } + return nil +} + +func TestHandlePacketCapturePacketIn(t *testing.T) { + + invalidPktBytes := getTestPacketBytes("89.207.132.170") + pktBytesPodToPod := getTestPacketBytes(pod2IPv4) + + // create test os + defaultFS = afero.NewMemMapFs() + defaultFS.MkdirAll("/tmp/packetcapture/packets", 0755) + file, err := defaultFS.Create(uidToPath(testUID)) + if err != nil { + t.Fatal("create pcapng file error: ", err) + } + + testWriter, err := pcapgo.NewNgWriter(file, layers.LinkTypeEthernet) + if err != nil { + t.Fatal("create test pcapng writer failed: ", err) + } + + tests := []struct { + name string + networkConfig *config.NetworkConfig + nodeConfig *config.NodeConfig + pcState *packetCaptureState + pktIn *ofctrl.PacketIn + expectedPC *crdv1alpha1.PacketCapture + expectedErrStr string + expectedCalls func(mockOFClient *openflowtest.MockClient) + expectedNum int32 + expectedUploader *testUploader + }{ + { + name: "invalid 
packets", + pcState: generateTestPCState("pc-with-invalid-packet", nil, testWriter, 0), + expectedPC: generatePacketCapture("pc-with-invalid-packet"), + pktIn: &ofctrl.PacketIn{ + PacketIn: &openflow15.PacketIn{ + Data: util.NewBuffer(invalidPktBytes), + }, + }, + expectedErrStr: "parsePacketIn error: PacketCapture for dataplane tag 0 not found in cache", + }, + { + name: "not hitting target number", + pcState: generateTestPCState("pc-with-less-num", nil, testWriter, 1), + expectedPC: generatePacketCapture("pc-with-less-num"), + expectedNum: 2, + pktIn: &ofctrl.PacketIn{ + PacketIn: &openflow15.PacketIn{ + Data: util.NewBuffer(pktBytesPodToPod), + Match: openflow15.Match{ + Fields: genMatchers(), + }, + }, + }, + }, + { + name: "hit target number", + pcState: generateTestPCState("pc-with-max-num", file, testWriter, maxNum-1), + expectedPC: generatePacketCapture("pc-with-max-num"), + expectedNum: maxNum, + pktIn: &ofctrl.PacketIn{ + PacketIn: &openflow15.PacketIn{ + Data: util.NewBuffer(pktBytesPodToPod), + Match: openflow15.Match{ + Fields: genMatchers(), + }, + }, + }, + expectedCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().UninstallPacketCaptureFlows(testTag) + }, + expectedUploader: &testUploader{ + fileName: testUID + ".pcapng", + url: testSFTPUrl, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pcc := newFakePacketCaptureController(t, nil, []runtime.Object{tt.expectedPC}, &config.NodeConfig{Name: "node1"}) + if tt.expectedCalls != nil { + tt.expectedCalls(pcc.mockOFClient) + } + stopCh := make(chan struct{}) + defer close(stopCh) + pcc.crdInformerFactory.Start(stopCh) + pcc.crdInformerFactory.WaitForCacheSync(stopCh) + pcc.runningPacketCaptures[tt.pcState.tag] = tt.pcState + pcc.sftpUploader = tt.expectedUploader + + err := pcc.HandlePacketIn(tt.pktIn) + if err == nil { + assert.Equal(t, tt.expectedErrStr, "") + // check target num in status + pc, err := pcc.crdClient.CrdV1alpha1().PacketCaptures().Get(context.TODO(), tt.expectedPC.Name, metav1.GetOptions{}) + require.Nil(t, err) + assert.Equal(t, tt.expectedNum, *pc.Status.NumCapturedPackets) + } else { + assert.Equal(t, tt.expectedErrStr, err.Error()) + } + + }) + } +} diff --git a/pkg/agent/openflow/client.go b/pkg/agent/openflow/client.go index e6e77d0cebd..ac35d4c054f 100644 --- a/pkg/agent/openflow/client.go +++ b/pkg/agent/openflow/client.go @@ -234,9 +234,15 @@ type Client interface { // InstallTraceflowFlows installs flows for a Traceflow request. InstallTraceflowFlows(dataplaneTag uint8, liveTraffic, droppedOnly, receiverOnly bool, packet *binding.Packet, ofPort uint32, timeoutSeconds uint16) error + // InstallPacketCaptureFlows installs flows for a PacketCapture request. + InstallPacketCaptureFlows(dataplaneTag uint8, receiverOnly bool, packet *binding.Packet, endpointPackets []binding.Packet, ofPort uint32, timeoutSeconds uint16) error + // UninstallTraceflowFlows uninstalls flows for a Traceflow request. UninstallTraceflowFlows(dataplaneTag uint8) error + // UninstallPacketCaptureFlows uninstalls flows for a PacketCapture request. + UninstallPacketCaptureFlows(dataplaneTag uint8) error + // GetPolicyInfoFromConjunction returns the following policy information for the provided conjunction ID: // NetworkPolicy reference, OF priority, rule name, label // The boolean return value indicates whether the policy information was found. 
@@ -943,6 +949,7 @@ func (c *client) generatePipelines() { c.connectUplinkToBridge) c.activatedFeatures = append(c.activatedFeatures, c.featureService) c.traceableFeatures = append(c.traceableFeatures, c.featureService) + } if c.nodeType == config.ExternalNode { @@ -990,6 +997,11 @@ func (c *client) generatePipelines() { c.featureTraceflow = newFeatureTraceflow() c.activatedFeatures = append(c.activatedFeatures, c.featureTraceflow) + if c.enablePacketCapture { + c.featurePacketCapture = newFeaturePacketCapture(c.cookieAllocator, []binding.Protocol{binding.ProtocolIP, binding.ProtocolIPv6}, c.enableProxy, c.networkConfig, c.nodeConfig) + c.activatedFeatures = append(c.activatedFeatures, c.featurePacketCapture) + } + // Pipelines to generate. pipelineIDs := []binding.PipelineID{pipelineRoot, pipelineIP} if c.networkConfig.IPv4Enabled { @@ -1038,6 +1050,18 @@ func (c *client) generatePipelines() { } } +func (c *client) InstallPacketCaptureFlows(dataplaneTag uint8, receiverOnly bool, packet *binding.Packet, endpointPackets []binding.Packet, ofPort uint32, timeoutSeconds uint16) error { + cacheKey := fmt.Sprintf("%x", dataplaneTag) + flows := c.featurePacketCapture.genFlows(dataplaneTag, + c.ovsMetersAreSupported, + receiverOnly, + packet, + endpointPackets, + ofPort, + timeoutSeconds) + return c.addFlows(c.featurePacketCapture.cachedFlows, cacheKey, flows) +} + func (c *client) InstallSNATBypassServiceFlows(serviceCIDRs []*net.IPNet) error { var flows []binding.Flow for _, serviceCIDR := range serviceCIDRs { @@ -1255,6 +1279,11 @@ func (c *client) UninstallTraceflowFlows(dataplaneTag uint8) error { return c.deleteFlows(c.featureTraceflow.cachedFlows, cacheKey) } +func (c *client) UninstallPacketCaptureFlows(dataplaneTag uint8) error { + cacheKey := fmt.Sprintf("%x", dataplaneTag) + return c.deleteFlows(c.featurePacketCapture.cachedFlows, cacheKey) +} + // setBasePacketOutBuilder sets base IP properties of a packetOutBuilder which can have more packet data added. func setBasePacketOutBuilder(packetOutBuilder binding.PacketOutBuilder, srcMAC string, dstMAC string, srcIP string, dstIP string, inPort uint32, outPort uint32) (binding.PacketOutBuilder, error) { // Set ethernet header. diff --git a/pkg/agent/openflow/client_test.go b/pkg/agent/openflow/client_test.go index 658193b1220..ad4a2fc6928 100644 --- a/pkg/agent/openflow/client_test.go +++ b/pkg/agent/openflow/client_test.go @@ -94,6 +94,7 @@ type clientOptions struct { enableMulticluster bool enableL7NetworkPolicy bool enableL7FlowExporter bool + enablePacketCapture bool trafficEncryptionMode config.TrafficEncryptionModeType } @@ -168,6 +169,10 @@ func enableMulticluster(o *clientOptions) { o.enableMulticluster = true } +func enablePacketCapture(o *clientOptions) { + o.enablePacketCapture = true +} + func setTrafficEncryptionMode(trafficEncryptionMode config.TrafficEncryptionModeType) clientOptionsFn { return func(o *clientOptions) { o.trafficEncryptionMode = trafficEncryptionMode @@ -419,6 +424,7 @@ func newFakeClientWithBridge( o.enableMulticluster, NewGroupAllocator(), false, + o.enablePacketCapture, defaultPacketInRate) // Meters must be supported to enable Egress traffic shaping. 
@@ -1778,6 +1784,126 @@ func Test_client_InstallEgressQoS(t *testing.T) { require.False(t, ok) } +func Test_client_InstallPacketCaptureFlows(t *testing.T) { + type fields struct { + } + type args struct { + dataplaneTag uint8 + senderOnly bool + receiverOnly bool + packet *binding.Packet + endpointsPacket []binding.Packet + } + srcMAC, _ := net.ParseMAC("11:22:33:44:55:66") + dstMAC, _ := net.ParseMAC("11:22:33:44:55:77") + tests := []struct { + name string + fields fields + args args + wantErr bool + prepareFunc func(*gomock.Controller) *client + }{ + { + name: "packetcapture flow", + fields: fields{}, + args: args{ + dataplaneTag: 1, + packet: &binding.Packet{ + SourceMAC: srcMAC, + DestinationMAC: dstMAC, + SourceIP: net.ParseIP("1.2.3.4"), + DestinationIP: net.ParseIP("1.2.3.5"), + IPProto: 1, + TTL: 64, + }, + }, + wantErr: false, + prepareFunc: preparePacketCaptureFlow, + }, + { + name: "packetcapture flow with receiver only", + fields: fields{}, + args: args{ + dataplaneTag: 1, + receiverOnly: true, + packet: &binding.Packet{ + SourceMAC: srcMAC, + DestinationMAC: dstMAC, + SourceIP: net.ParseIP("1.2.3.4"), + DestinationIP: net.ParseIP("1.2.3.5"), + IPProto: 1, + TTL: 64, + }, + }, + wantErr: false, + prepareFunc: preparePacketCaptureFlow, + }, + { + name: "packetcapture flow with sender only", + fields: fields{}, + args: args{ + dataplaneTag: 1, + senderOnly: true, + packet: &binding.Packet{ + SourceMAC: srcMAC, + DestinationMAC: dstMAC, + SourceIP: net.ParseIP("1.2.3.4"), + DestinationIP: net.ParseIP("1.2.3.5"), + IPProto: 1, + TTL: 64, + }, + }, + wantErr: false, + prepareFunc: preparePacketCaptureFlow, + }, + { + name: "packetcapture flow with endpoints packets", + fields: fields{}, + args: args{ + dataplaneTag: 1, + senderOnly: true, + packet: &binding.Packet{ + SourceMAC: srcMAC, + DestinationMAC: dstMAC, + SourceIP: net.ParseIP("1.2.3.4"), + DestinationIP: net.ParseIP("1.2.3.5"), + IPProto: 1, + TTL: 64, + }, + endpointsPacket: []binding.Packet{ + { + SourceMAC: srcMAC, + DestinationMAC: dstMAC, + SourceIP: net.ParseIP("1.2.3.4"), + DestinationIP: net.ParseIP("1.2.3.6"), + IPProto: 1, + TTL: 64, + }, + { + SourceMAC: srcMAC, + DestinationMAC: dstMAC, + SourceIP: net.ParseIP("1.2.3.4"), + DestinationIP: net.ParseIP("1.2.3.7"), + IPProto: 1, + TTL: 64, + }, + }, + }, + wantErr: false, + prepareFunc: preparePacketCaptureFlow, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + c := tt.prepareFunc(ctrl) + if err := c.InstallPacketCaptureFlows(tt.args.dataplaneTag, tt.args.receiverOnly, tt.args.packet, nil, 0, 300); (err != nil) != tt.wantErr { + t.Errorf("InstallPacketCaptureFlows() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + func Test_client_InstallTraceflowFlows(t *testing.T) { type fields struct { } @@ -1918,6 +2044,32 @@ func Test_client_SendTraceflowPacket(t *testing.T) { } } +func preparePacketCaptureFlow(ctrl *gomock.Controller) *client { + m := opstest.NewMockOFEntryOperations(ctrl) + fc := newFakeClientWithBridge(m, true, false, config.K8sNode, config.TrafficEncapModeEncap, ovsoftest.NewMockBridge(ctrl), enablePacketCapture) + defer resetPipelines() + + m.EXPECT().AddAll(gomock.Any()).Return(nil).Times(1) + _, ipCIDR, _ := net.ParseCIDR("192.168.2.30/32") + flows, _ := EgressDefaultTable.ofTable.BuildFlow(priority100).Action().Drop().Done().GetBundleMessages(binding.AddMessage) + flowMsg := flows[0].GetMessage().(*openflow15.FlowMod) + ctx := &conjMatchFlowContext{ + dropFlow: flowMsg, + 
dropFlowEnableLogging: false, + conjunctiveMatch: &conjunctiveMatch{ + tableID: 1, + matchPairs: []matchPair{ + { + matchKey: MatchCTSrcIPNet, + matchValue: *ipCIDR, + }, + }, + }} + fc.featureNetworkPolicy.globalConjMatchFlowCache["mockContext"] = ctx + fc.featureNetworkPolicy.policyCache.Add(&policyRuleConjunction{metricFlows: []*openflow15.FlowMod{flowMsg}}) + return fc +} + func prepareTraceflowFlow(ctrl *gomock.Controller) *client { m := opstest.NewMockOFEntryOperations(ctrl) fc := newFakeClientWithBridge(m, true, false, config.K8sNode, config.TrafficEncapModeEncap, ovsoftest.NewMockBridge(ctrl)) @@ -2031,7 +2183,7 @@ func Test_client_setBasePacketOutBuilder(t *testing.T) { } func prepareSetBasePacketOutBuilder(ctrl *gomock.Controller, success bool) *client { - ofClient := NewClient(bridgeName, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, true, false, false, false, false, false, false, false, false, false, false, false, nil, false, defaultPacketInRate) + ofClient := NewClient(bridgeName, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, true, false, false, false, false, false, false, false, false, false, false, false, nil, false, false, defaultPacketInRate) m := ovsoftest.NewMockBridge(ctrl) ofClient.bridge = m bridge := binding.OFBridge{} diff --git a/pkg/agent/openflow/cookie/allocator.go b/pkg/agent/openflow/cookie/allocator.go index 3aef3db4c84..9d6dbdeaae2 100644 --- a/pkg/agent/openflow/cookie/allocator.go +++ b/pkg/agent/openflow/cookie/allocator.go @@ -39,6 +39,7 @@ const ( Multicluster Traceflow ExternalNodeConnectivity + PacketCapture ) func (c Category) String() string { @@ -61,6 +62,8 @@ func (c Category) String() string { return "Traceflow" case ExternalNodeConnectivity: return "ExternalNodeConnectivity" + case PacketCapture: + return "PacketCapture" default: return "Invalid" } diff --git a/pkg/agent/openflow/fields.go b/pkg/agent/openflow/fields.go index 78f0845a143..6d522e97eb8 100644 --- a/pkg/agent/openflow/fields.go +++ b/pkg/agent/openflow/fields.go @@ -150,6 +150,9 @@ var ( // reg4[28]: Mark to indicate that whether the traffic's source is a local Pod or the Node. FromLocalRegMark = binding.NewOneBitRegMark(4, 28) + // reg4[28..31]: Field mark the flow for packet capture case. + PacketCaptureMark = binding.NewRegField(4, 28, 31) + // reg5(NXM_NX_REG5) // Field to cache the Egress conjunction ID hit by TraceFlow packet. TFEgressConjIDField = binding.NewRegField(5, 0, 31) diff --git a/pkg/agent/openflow/framework.go b/pkg/agent/openflow/framework.go index d8c353ee2a2..c1e1680fe11 100644 --- a/pkg/agent/openflow/framework.go +++ b/pkg/agent/openflow/framework.go @@ -309,6 +309,10 @@ func (f *featureTraceflow) getRequiredTables() []*Table { return nil } +func (f *featurePacketCapture) getRequiredTables() []*Table { + return nil +} + func (f *featureExternalNodeConnectivity) getRequiredTables() []*Table { return []*Table{ ConntrackTable, diff --git a/pkg/agent/openflow/packetcapture.go b/pkg/agent/openflow/packetcapture.go new file mode 100644 index 00000000000..9583dff0508 --- /dev/null +++ b/pkg/agent/openflow/packetcapture.go @@ -0,0 +1,304 @@ +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openflow + +import ( + "net" + + "antrea.io/libOpenflow/openflow15" + "antrea.io/libOpenflow/protocol" + + "antrea.io/antrea/pkg/agent/config" + "antrea.io/antrea/pkg/agent/openflow/cookie" + binding "antrea.io/antrea/pkg/ovs/openflow" +) + +type featurePacketCapture struct { + cookieAllocator cookie.Allocator + cachedFlows *flowCategoryCache + ipProtocols []binding.Protocol + networkConfig *config.NetworkConfig + enableProxy bool + tunnelPort uint32 + gatewayPort uint32 + gatewayIPs map[binding.Protocol]net.IP +} + +func (f *featurePacketCapture) getFeatureName() string { + return "PacketCapture" +} + +func newFeaturePacketCapture(cookieAllocator cookie.Allocator, + ipProtocols []binding.Protocol, + enableProxy bool, + networkConfig *config.NetworkConfig, + nodeConfig *config.NodeConfig) *featurePacketCapture { + gatewayIPs := make(map[binding.Protocol]net.IP) + for _, ipProtocol := range ipProtocols { + if ipProtocol == binding.ProtocolIP { + gatewayIPs[ipProtocol] = nodeConfig.GatewayConfig.IPv4 + } else if ipProtocol == binding.ProtocolIPv6 { + gatewayIPs[ipProtocol] = nodeConfig.GatewayConfig.IPv6 + } + + } + return &featurePacketCapture{ + cachedFlows: newFlowCategoryCache(), + cookieAllocator: cookieAllocator, + ipProtocols: ipProtocols, + networkConfig: networkConfig, + enableProxy: enableProxy, + tunnelPort: nodeConfig.TunnelOFPort, + gatewayPort: nodeConfig.GatewayConfig.OFPort, + gatewayIPs: gatewayIPs, + } +} + +func (f *featurePacketCapture) initFlows() []*openflow15.FlowMod { + return []*openflow15.FlowMod{} +} + +func (f *featurePacketCapture) replayFlows() []*openflow15.FlowMod { + return []*openflow15.FlowMod{} +} + +func (f *featurePacketCapture) initGroups() []binding.OFEntry { + return nil +} + +func (f *featurePacketCapture) replayGroups() []binding.OFEntry { + return nil +} + +func (f *featurePacketCapture) replayMeters() []binding.OFEntry { + return nil +} + +// genFlows generates flows for packet capture. dataplaneTag is used as a mark for the target flow. +func (f *featurePacketCapture) genFlows(dataplaneTag uint8, + ovsMetersAreSupported, + receiverOnly bool, + packet *binding.Packet, + endpointPackets []binding.Packet, + ofPort uint32, + timeout uint16) []binding.Flow { + cookieID := f.cookieAllocator.Request(cookie.PacketCapture).Raw() + var flows []binding.Flow + tag := uint32(dataplaneTag) + var flowBuilder binding.FlowBuilder + if !receiverOnly { + // if not receiverOnly, ofPort is inPort + if endpointPackets == nil { + flowBuilder = ConntrackStateTable.ofTable.BuildFlow(priorityHigh). + Cookie(cookieID). + MatchInPort(ofPort). + MatchCTStateTrk(true). + Action().LoadToRegField(PacketCaptureMark, tag). + SetHardTimeout(timeout). + Action().GotoStage(stagePreRouting) + if packet.DestinationIP != nil { + flowBuilder = flowBuilder.MatchDstIP(packet.DestinationIP) + } + } else { + // handle pod -> svc case: + // generate flows to endpoints. + for _, epPacket := range endpointPackets { + tmpFlowBuilder := ConntrackStateTable.ofTable.BuildFlow(priorityHigh). + Cookie(cookieID). + MatchInPort(ofPort). + MatchCTStateTrk(true). 
+ Action().LoadRegMark(RewriteMACRegMark). + Action().LoadToRegField(PacketCaptureMark, tag). + SetHardTimeout(timeout). + Action().GotoStage(stageEgressSecurity) + tmpFlowBuilder.MatchDstIP(epPacket.DestinationIP) + flow := matchTransportHeader(packet, tmpFlowBuilder, endpointPackets).Done() + flows = append(flows, flow) + } + + // capture the first tracked packet for svc. + for _, ipProtocol := range f.ipProtocols { + tmpFlowBuilder := ConntrackStateTable.ofTable.BuildFlow(priorityHigh). + Cookie(cookieID). + MatchInPort(ofPort). + MatchProtocol(ipProtocol). + MatchCTStateNew(true). + MatchCTStateTrk(true). + Action().LoadRegMark(RewriteMACRegMark). + Action().LoadToRegField(PacketCaptureMark, tag). + SetHardTimeout(timeout). + Action().GotoStage(stagePreRouting) + tmpFlowBuilder.MatchDstIP(packet.DestinationIP) + tmpFlowBuilder = matchTransportHeader(packet, tmpFlowBuilder, nil) + flows = append(flows, tmpFlowBuilder.Done()) + } + + } + } else { + flowBuilder = L2ForwardingCalcTable.ofTable.BuildFlow(priorityHigh). + Cookie(cookieID). + MatchCTStateTrk(true). + MatchDstMAC(packet.DestinationMAC). + Action().LoadToRegField(TargetOFPortField, ofPort). + Action().LoadRegMark(OutputToOFPortRegMark). + Action().LoadToRegField(PacketCaptureMark, tag). + SetHardTimeout(timeout). + Action().GotoStage(stageIngressSecurity) + if packet.SourceIP != nil { + flowBuilder = flowBuilder.MatchSrcIP(packet.SourceIP) + } + } + + if flowBuilder != nil { + flow := matchTransportHeader(packet, flowBuilder, nil).Done() + flows = append(flows, flow) + } + + output := func(fb binding.FlowBuilder) binding.FlowBuilder { + return fb.Action().OutputToRegField(TargetOFPortField) + } + + sendToController := func(fb binding.FlowBuilder) binding.FlowBuilder { + if ovsMetersAreSupported { + fb = fb.Action().Meter(PacketInMeterIDTF) + } + fb = fb.Action().SendToController([]byte{uint8(PacketInCategoryPacketCapture)}, false) + return fb + } + + // This generates PacketCapture specific flows that outputs capture + // non-hairpin packets to OVS port and Antrea Agent after + // L2 forwarding calculation. + for _, ipProtocol := range f.ipProtocols { + if f.networkConfig.TrafficEncapMode.SupportsEncap() { + // SendToController and Output if output port is tunnel port. + fb := OutputTable.ofTable.BuildFlow(priorityNormal+3). + Cookie(cookieID). + MatchRegFieldWithValue(TargetOFPortField, f.tunnelPort). + MatchProtocol(ipProtocol). + MatchRegMark(OutputToOFPortRegMark). + MatchRegFieldWithValue(PacketCaptureMark, tag). + SetHardTimeout(timeout). + Action().OutputToRegField(TargetOFPortField) + fb = sendToController(fb) + flows = append(flows, fb.Done()) + // For injected packets, only SendToController if output port is local gateway. In encapMode, a PacketCapture + // packet going out of the gateway port (i.e. exiting the overlay) essentially means that the PacketCapture + // request is complete. + fb = OutputTable.ofTable.BuildFlow(priorityNormal+2). + Cookie(cookieID). + MatchRegFieldWithValue(TargetOFPortField, f.gatewayPort). + MatchProtocol(ipProtocol). + MatchRegMark(OutputToOFPortRegMark). + MatchRegFieldWithValue(PacketCaptureMark, tag). + SetHardTimeout(timeout) + fb = sendToController(fb) + fb = output(fb) + flows = append(flows, fb.Done()) + } else { + // SendToController and Output if output port is local gateway. Unlike in encapMode, inter-Node Pod-to-Pod + // traffic is expected to go out of the gateway port on the way to its destination. + fb := OutputTable.ofTable.BuildFlow(priorityNormal+2). + Cookie(cookieID). 
+ MatchRegFieldWithValue(TargetOFPortField, f.gatewayPort). + MatchProtocol(ipProtocol). + MatchRegMark(OutputToOFPortRegMark). + MatchRegFieldWithValue(PacketCaptureMark, tag). + SetHardTimeout(timeout). + Action().OutputToRegField(TargetOFPortField) + fb = sendToController(fb) + flows = append(flows, fb.Done()) + } + + gatewayIP := f.gatewayIPs[ipProtocol] + if gatewayIP != nil { + fb := OutputTable.ofTable.BuildFlow(priorityNormal+3). + Cookie(cookieID). + MatchRegFieldWithValue(TargetOFPortField, f.gatewayPort). + MatchProtocol(ipProtocol). + MatchDstIP(gatewayIP). + MatchRegMark(OutputToOFPortRegMark). + MatchRegFieldWithValue(PacketCaptureMark, tag). + SetHardTimeout(timeout) + fb = sendToController(fb) + fb = output(fb) + flows = append(flows, fb.Done()) + } + + fb := OutputTable.ofTable.BuildFlow(priorityNormal+2). + Cookie(cookieID). + MatchProtocol(ipProtocol). + MatchRegMark(OutputToOFPortRegMark). + MatchRegFieldWithValue(PacketCaptureMark, tag). + SetHardTimeout(timeout) + fb = sendToController(fb) + fb = output(fb) + flows = append(flows, fb.Done()) + } + + // This generates PacketCapture specific flows that outputs hairpin PacketCapture packets to OVS port and Antrea Agent after + // L2forwarding calculation. + for _, ipProtocol := range f.ipProtocols { + if f.enableProxy { + fb := OutputTable.ofTable.BuildFlow(priorityHigh+2). + Cookie(cookieID). + MatchProtocol(ipProtocol). + MatchCTMark(HairpinCTMark). + MatchRegFieldWithValue(PacketCaptureMark, uint32(dataplaneTag)). + SetHardTimeout(timeout) + fb = sendToController(fb) + fb = fb.Action().OutputToRegField(TargetOFPortField) + flows = append(flows, fb.Done()) + } + } + + return flows +} + +func matchTransportHeader(packet *binding.Packet, flowBuilder binding.FlowBuilder, endpointPackets []binding.Packet) binding.FlowBuilder { + // Match transport header + switch packet.IPProto { + case protocol.Type_ICMP: + flowBuilder = flowBuilder.MatchProtocol(binding.ProtocolICMP) + case protocol.Type_IPv6ICMP: + flowBuilder = flowBuilder.MatchProtocol(binding.ProtocolICMPv6) + case protocol.Type_TCP: + if packet.IsIPv6 { + flowBuilder = flowBuilder.MatchProtocol(binding.ProtocolTCPv6) + } else { + flowBuilder = flowBuilder.MatchProtocol(binding.ProtocolTCP) + } + case protocol.Type_UDP: + if packet.IsIPv6 { + flowBuilder = flowBuilder.MatchProtocol(binding.ProtocolUDPv6) + } else { + flowBuilder = flowBuilder.MatchProtocol(binding.ProtocolUDP) + } + default: + flowBuilder = flowBuilder.MatchIPProtocolValue(packet.IsIPv6, packet.IPProto) + } + if packet.IPProto == protocol.Type_TCP || packet.IPProto == protocol.Type_UDP { + if endpointPackets != nil && endpointPackets[0].DestinationPort != 0 { + flowBuilder = flowBuilder.MatchDstPort(endpointPackets[0].DestinationPort, nil) + } else if packet.DestinationPort != 0 { + flowBuilder = flowBuilder.MatchDstPort(packet.DestinationPort, nil) + } + if packet.SourcePort != 0 { + flowBuilder = flowBuilder.MatchSrcPort(packet.SourcePort, nil) + } + } + + return flowBuilder +} diff --git a/pkg/agent/openflow/packetin.go b/pkg/agent/openflow/packetin.go index 608f1759e34..6103d06d586 100644 --- a/pkg/agent/openflow/packetin.go +++ b/pkg/agent/openflow/packetin.go @@ -53,6 +53,8 @@ const ( // PacketInCategorySvcReject is used to process the Service packets not matching any // Endpoints within packetIn message. PacketInCategorySvcReject + // PacketInCategoryPacketCapture is used for packetIn messages related to capture. 
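// Flows generated by featurePacketCapture send copies of matching packets to the agent
// with this category as the first byte of the packet-in userdata (see SendToController in
// packetcapture.go); the PacketCapture controller's parsePacketIn then reads the dataplane
// tag back out of reg4 to associate each packet with a running capture.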
+ PacketInCategoryPacketCapture // PacketIn operations below are used to decide which operation(s) should be // executed by a handler. It(they) should be loaded in the second byte of the diff --git a/pkg/agent/openflow/pipeline.go b/pkg/agent/openflow/pipeline.go index a1eb6655d35..10c2e79d9e0 100644 --- a/pkg/agent/openflow/pipeline.go +++ b/pkg/agent/openflow/pipeline.go @@ -406,6 +406,7 @@ type client struct { enableL7FlowExporter bool enableMulticluster bool enablePrometheusMetrics bool + enablePacketCapture bool connectUplinkToBridge bool nodeType config.NodeType roundInfo types.RoundInfo @@ -425,6 +426,8 @@ type client struct { featureTraceflow *featureTraceflow traceableFeatures []traceableFeature + featurePacketCapture *featurePacketCapture + pipelines map[binding.PipelineID]binding.Pipeline // ofEntryOperations is a wrapper interface for operating multiple OpenFlow entries with action AddAll / ModifyAll / DeleteAll. @@ -940,14 +943,7 @@ func (f *featurePodConnectivity) flowsToTrace(dataplaneTag uint8, default: flowBuilder = flowBuilder.MatchIPProtocolValue(packet.IsIPv6, packet.IPProto) } - if packet.IPProto == protocol.Type_TCP || packet.IPProto == protocol.Type_UDP { - if packet.DestinationPort != 0 { - flowBuilder = flowBuilder.MatchDstPort(packet.DestinationPort, nil) - } - if packet.SourcePort != 0 { - flowBuilder = flowBuilder.MatchSrcPort(packet.SourcePort, nil) - } - } + flows = append(flows, flowBuilder.Done()) } @@ -2838,6 +2834,7 @@ func NewClient(bridgeName string, enableMulticluster bool, groupIDAllocator GroupAllocator, enablePrometheusMetrics bool, + enablePacketCapture bool, packetInRate int, ) *client { bridge := binding.NewOFBridge(bridgeName, mgmtAddr) @@ -2857,6 +2854,7 @@ func NewClient(bridgeName string, enableL7FlowExporter: enableL7FlowExporter, enableMulticluster: enableMulticluster, enablePrometheusMetrics: enablePrometheusMetrics, + enablePacketCapture: enablePacketCapture, connectUplinkToBridge: connectUplinkToBridge, pipelines: make(map[binding.PipelineID]binding.Pipeline), packetInHandlers: map[uint8]PacketInHandler{}, diff --git a/pkg/agent/openflow/pipeline_test.go b/pkg/agent/openflow/pipeline_test.go index 451abe7ccc1..3d094d15ca8 100644 --- a/pkg/agent/openflow/pipeline_test.go +++ b/pkg/agent/openflow/pipeline_test.go @@ -19,6 +19,7 @@ import ( "testing" "antrea.io/libOpenflow/openflow15" + "antrea.io/libOpenflow/protocol" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -301,3 +302,94 @@ func getGroupModLen(g *openflow15.GroupMod) uint32 { } return n } + +func TestMatchTransportHeader(t *testing.T) { + + testCases := []struct { + name string + packet *binding.Packet + endpointPackets []binding.Packet + expectCalls func(ctrl *gomock.Controller, builder *openflowtest.MockFlowBuilder) *openflowtest.MockFlowBuilder + }{ + { + name: "tcp proto", + packet: &binding.Packet{ + IPProto: protocol.Type_TCP, + }, + expectCalls: func(ctrl *gomock.Controller, builder *openflowtest.MockFlowBuilder) *openflowtest.MockFlowBuilder { + builder.EXPECT().MatchProtocol(binding.ProtocolTCP) + return builder + + }, + }, + { + name: "udp proto", + packet: &binding.Packet{ + IPProto: protocol.Type_UDP, + }, + expectCalls: func(ctrl *gomock.Controller, builder *openflowtest.MockFlowBuilder) *openflowtest.MockFlowBuilder { + builder.EXPECT().MatchProtocol(binding.ProtocolUDP) + return builder + }, + }, + { + name: "ipv6-tcp", + packet: &binding.Packet{ + IPProto: protocol.Type_TCP, + IsIPv6: true, + }, + expectCalls: 
func(ctrl *gomock.Controller, builder *openflowtest.MockFlowBuilder) *openflowtest.MockFlowBuilder { + builder.EXPECT().MatchProtocol(binding.ProtocolTCPv6) + return builder + }, + }, + { + name: "udp-with-src-and-dst-port", + packet: &binding.Packet{ + IPProto: protocol.Type_UDP, + SourcePort: 1000, + DestinationPort: 53, + }, + expectCalls: func(ctrl *gomock.Controller, builder *openflowtest.MockFlowBuilder) *openflowtest.MockFlowBuilder { + builder.EXPECT().MatchProtocol(binding.ProtocolUDP).Return(builder).AnyTimes() + builder.EXPECT().MatchDstPort(uint16(53), nil).Return(builder).AnyTimes() + builder.EXPECT().MatchSrcPort(uint16(1000), nil).Return(builder).AnyTimes() + return builder + }, + }, + { + name: "with endpoints packets", + packet: &binding.Packet{ + IPProto: protocol.Type_TCP, + SourcePort: 1000, + DestinationPort: 53, + }, + endpointPackets: []binding.Packet{ + { + IPProto: protocol.Type_TCP, + SourcePort: 1000, + DestinationPort: 54, + }, + }, + expectCalls: func(ctrl *gomock.Controller, builder *openflowtest.MockFlowBuilder) *openflowtest.MockFlowBuilder { + builder.EXPECT().MatchProtocol(binding.ProtocolTCP).Return(builder).AnyTimes() + builder.EXPECT().MatchDstPort(uint16(54), nil).Return(builder).AnyTimes() + builder.EXPECT().MatchSrcPort(uint16(1000), nil).Return(builder).AnyTimes() + return builder + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + fakeOfTable := openflowtest.NewMockTable(ctrl) + ConntrackStateTable.ofTable = fakeOfTable + defer func() { + ConntrackStateTable.ofTable = nil + }() + testBuilder := openflowtest.NewMockFlowBuilder(ctrl) + tc.expectCalls(ctrl, testBuilder) + matchTransportHeader(tc.packet, testBuilder, tc.endpointPackets) + }) + } +} diff --git a/pkg/agent/openflow/testing/mock_openflow.go b/pkg/agent/openflow/testing/mock_openflow.go index 96dd46af1ea..52c966ad747 100644 --- a/pkg/agent/openflow/testing/mock_openflow.go +++ b/pkg/agent/openflow/testing/mock_openflow.go @@ -420,6 +420,20 @@ func (mr *MockClientMockRecorder) InstallNodeFlows(arg0, arg1, arg2, arg3, arg4 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallNodeFlows", reflect.TypeOf((*MockClient)(nil).InstallNodeFlows), arg0, arg1, arg2, arg3, arg4) } +// InstallPacketCaptureFlows mocks base method. +func (m *MockClient) InstallPacketCaptureFlows(arg0 byte, arg1 bool, arg2 *openflow0.Packet, arg3 []openflow0.Packet, arg4 uint32, arg5 uint16) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InstallPacketCaptureFlows", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(error) + return ret0 +} + +// InstallPacketCaptureFlows indicates an expected call of InstallPacketCaptureFlows. +func (mr *MockClientMockRecorder) InstallPacketCaptureFlows(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallPacketCaptureFlows", reflect.TypeOf((*MockClient)(nil).InstallPacketCaptureFlows), arg0, arg1, arg2, arg3, arg4, arg5) +} + // InstallPodFlows mocks base method. func (m *MockClient) InstallPodFlows(arg0 string, arg1 []net.IP, arg2 net.HardwareAddr, arg3 uint32, arg4 uint16, arg5 *uint32) error { m.ctrl.T.Helper() @@ -960,6 +974,20 @@ func (mr *MockClientMockRecorder) UninstallNodeFlows(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UninstallNodeFlows", reflect.TypeOf((*MockClient)(nil).UninstallNodeFlows), arg0) } +// UninstallPacketCaptureFlows mocks base method. 
+func (m *MockClient) UninstallPacketCaptureFlows(arg0 byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UninstallPacketCaptureFlows", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UninstallPacketCaptureFlows indicates an expected call of UninstallPacketCaptureFlows. +func (mr *MockClientMockRecorder) UninstallPacketCaptureFlows(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UninstallPacketCaptureFlows", reflect.TypeOf((*MockClient)(nil).UninstallPacketCaptureFlows), arg0) +} + // UninstallPodFlows mocks base method. func (m *MockClient) UninstallPodFlows(arg0 string) error { m.ctrl.T.Helper() diff --git a/pkg/agent/supportbundlecollection/support_bundle_controller.go b/pkg/agent/supportbundlecollection/support_bundle_controller.go index 4d037c820b2..e8dd9ca28c3 100644 --- a/pkg/agent/supportbundlecollection/support_bundle_controller.go +++ b/pkg/agent/supportbundlecollection/support_bundle_controller.go @@ -17,16 +17,11 @@ package supportbundlecollection import ( "context" "fmt" - "io" - "net/url" - "path" "reflect" "sync" "time" - "github.com/pkg/sftp" "github.com/spf13/afero" - "golang.org/x/crypto/ssh" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/wait" @@ -43,18 +38,15 @@ import ( "antrea.io/antrea/pkg/querier" "antrea.io/antrea/pkg/support" "antrea.io/antrea/pkg/util/compress" + "antrea.io/antrea/pkg/util/ftp" "antrea.io/antrea/pkg/util/k8s" ) type ProtocolType string const ( - sftpProtocol ProtocolType = "sftp" - - controllerName = "SupportBundleCollectionController" - - uploadToFileServerTries = 5 - uploadToFileServerRetryDelay = 5 * time.Second + sftpProtocol ProtocolType = "sftp" + controllerName string = "SupportBundleCollectionController" ) var ( @@ -78,7 +70,7 @@ type SupportBundleController struct { npq querier.AgentNetworkPolicyInfoQuerier v4Enabled bool v6Enabled bool - sftpUploader uploader + sftpUploader ftp.Uploader } func NewSupportBundleController(nodeName string, @@ -103,7 +95,7 @@ func NewSupportBundleController(nodeName string, npq: npq, v4Enabled: v4Enabled, v6Enabled: v6Enabled, - sftpUploader: &sftpUploader{}, + sftpUploader: &ftp.SftpUploader{}, } return c } @@ -301,100 +293,20 @@ func (c *SupportBundleController) uploadSupportBundle(supportBundle *cpv1b2.Supp if err != nil { return fmt.Errorf("failed to upload support bundle while getting uploader: %v", err) } - if _, err := outputFile.Seek(0, 0); err != nil { - return fmt.Errorf("failed to upload support bundle to file server while setting offset: %v", err) - } - // fileServer.URL should be like: 10.92.23.154:22/path or sftp://10.92.23.154:22/path - parsedURL, err := parseUploadUrl(supportBundle.FileServer.URL) - if err != nil { - return fmt.Errorf("failed to upload support bundle while parsing upload URL: %v", err) - } - triesLeft := uploadToFileServerTries - var uploadErr error - for triesLeft > 0 { - if uploadErr = c.uploadToFileServer(uploader, supportBundle.Name, parsedURL, &supportBundle.Authentication, outputFile); uploadErr == nil { - return nil - } - triesLeft-- - if triesLeft == 0 { - return fmt.Errorf("failed to upload support bundle after %d attempts", uploadToFileServerTries) - } - klog.InfoS("Failed to upload support bundle", "UploadError", uploadErr, "TriesLeft", triesLeft) - time.Sleep(uploadToFileServerRetryDelay) - } - return nil -} -func parseUploadUrl(uploadUrl string) (*url.URL, error) { - parsedURL, err := url.Parse(uploadUrl) - if err != nil { - 
parsedURL, err = url.Parse("sftp://" + uploadUrl) - if err != nil { - return nil, err - } - } - if parsedURL.Scheme != "sftp" { - return nil, fmt.Errorf("not sftp protocol") - } - return parsedURL, nil + fileName := c.nodeName + "_" + supportBundle.Name + ".tar.gz" + serverAuth := supportBundle.Authentication.BasicAuthentication + cfg := ftp.GenSSHClientConfig(serverAuth.Username, serverAuth.Password) + return uploader.Upload(supportBundle.FileServer.URL, fileName, cfg, outputFile) } -func (c *SupportBundleController) uploadToFileServer(up uploader, bundleName string, parsedURL *url.URL, serverAuth *cpv1b2.BundleServerAuthConfiguration, tarGzFile io.Reader) error { - joinedPath := path.Join(parsedURL.Path, c.nodeName+"_"+bundleName+".tar.gz") - cfg := &ssh.ClientConfig{ - User: serverAuth.BasicAuthentication.Username, - Auth: []ssh.AuthMethod{ssh.Password(serverAuth.BasicAuthentication.Password)}, - // #nosec G106: skip host key check here and users can specify their own checks if needed - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - Timeout: time.Second, - } - return up.upload(parsedURL.Host, joinedPath, cfg, tarGzFile) -} - -func (c *SupportBundleController) getUploaderByProtocol(protocol ProtocolType) (uploader, error) { +func (c *SupportBundleController) getUploaderByProtocol(protocol ProtocolType) (ftp.Uploader, error) { if protocol == sftpProtocol { return c.sftpUploader, nil } return nil, fmt.Errorf("unsupported protocol %s", protocol) } -type uploader interface { - upload(addr string, path string, config *ssh.ClientConfig, tarGzFile io.Reader) error -} - -type sftpUploader struct { -} - -func (uploader *sftpUploader) upload(address string, path string, config *ssh.ClientConfig, tarGzFile io.Reader) error { - conn, err := ssh.Dial("tcp", address, config) - if err != nil { - return fmt.Errorf("error when connecting to fs server: %w", err) - } - sftpClient, err := sftp.NewClient(conn) - if err != nil { - return fmt.Errorf("error when setting up sftp client: %w", err) - } - defer func() { - if err := sftpClient.Close(); err != nil { - klog.ErrorS(err, "Error when closing sftp client") - } - }() - targetFile, err := sftpClient.Create(path) - if err != nil { - return fmt.Errorf("error when creating target file on remote: %v", err) - } - defer func() { - if err := targetFile.Close(); err != nil { - klog.ErrorS(err, "Error when closing target file on remote") - } - }() - if written, err := io.Copy(targetFile, tarGzFile); err != nil { - return fmt.Errorf("error when copying target file: %v, written: %d", err, written) - } - klog.InfoS("Successfully upload file to path", "filePath", path) - return nil -} - func (c *SupportBundleController) updateSupportBundleCollectionStatus(key string, complete bool, genErr error) error { antreaClient, err := c.antreaClientGetter.GetAntreaClient() if err != nil { diff --git a/pkg/agent/supportbundlecollection/support_bundle_controller_test.go b/pkg/agent/supportbundlecollection/support_bundle_controller_test.go index 72b24d317bd..e38f2189ac7 100644 --- a/pkg/agent/supportbundlecollection/support_bundle_controller_test.go +++ b/pkg/agent/supportbundlecollection/support_bundle_controller_test.go @@ -16,7 +16,6 @@ package supportbundlecollection import ( "fmt" - "io" "testing" "github.com/spf13/afero" @@ -37,6 +36,7 @@ import ( "antrea.io/antrea/pkg/ovs/ovsctl" "antrea.io/antrea/pkg/querier" "antrea.io/antrea/pkg/support" + "antrea.io/antrea/pkg/util/ftp" ) type fakeController struct { @@ -69,7 +69,7 @@ func TestSupportBundleCollectionAdd(t *testing.T) { 
supportBundleCollection *cpv1b2.SupportBundleCollection expectedCompleted bool agentDumper *mockAgentDumper - uploader uploader + uploader ftp.Uploader }{ { name: "Add SupportBundleCollection", @@ -90,7 +90,7 @@ func TestSupportBundleCollectionAdd(t *testing.T) { supportBundleCollection: generateSupportbundleCollection("supportBundle3", "https://10.220.175.92:22/root/supportbundle"), expectedCompleted: false, agentDumper: &mockAgentDumper{}, - uploader: &testUploader{}, + uploader: &testFailedUploader{}, }, { name: "Add SupportBundleCollection with retry logics", @@ -198,7 +198,7 @@ func TestSupportBundleCollectionDelete(t *testing.T) { type testUploader struct { } -func (uploader *testUploader) upload(address string, path string, config *ssh.ClientConfig, tarGzFile io.Reader) error { +func (uploader *testUploader) Upload(url string, fileName string, config *ssh.ClientConfig, outputFile afero.File) error { klog.Info("Called test uploader") return nil } @@ -206,7 +206,7 @@ func (uploader *testUploader) upload(address string, path string, config *ssh.Cl type testFailedUploader struct { } -func (uploader *testFailedUploader) upload(address string, path string, config *ssh.ClientConfig, tarGzFile io.Reader) error { +func (uploader *testFailedUploader) Upload(url string, fileName string, config *ssh.ClientConfig, outputFile afero.File) error { klog.Info("Called test uploader for failed case") return fmt.Errorf("uploader failed") } diff --git a/pkg/apis/crd/v1alpha1/register.go b/pkg/apis/crd/v1alpha1/register.go index ecefd26a924..fc46da806a3 100644 --- a/pkg/apis/crd/v1alpha1/register.go +++ b/pkg/apis/crd/v1alpha1/register.go @@ -57,8 +57,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { &NodeLatencyMonitorList{}, &BGPPolicy{}, &BGPPolicyList{}, + &PacketCapture{}, + &PacketCaptureList{}, ) - metav1.AddToGroupVersion( scheme, SchemeGroupVersion, diff --git a/pkg/apis/crd/v1alpha1/types.go b/pkg/apis/crd/v1alpha1/types.go index 378d3a5c58c..dae7672bfd1 100644 --- a/pkg/apis/crd/v1alpha1/types.go +++ b/pkg/apis/crd/v1alpha1/types.go @@ -17,6 +17,7 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) // IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed @@ -354,3 +355,129 @@ type BGPPeer struct { // a restart before deleting stale routes. The range of the value is from 1 to 3600, and the default value is 120. GracefulRestartTimeSeconds *int32 `json:"gracefulRestartTimeSeconds,omitempty"` } + +type PodReference struct { + Namespace string `json:"namespace,omitempty"` + Name string `json:"name,omitempty"` +} + +type ServiceReference struct { + Namespace string `json:"namespace,omitempty"` + Name string `json:"name,omitempty"` +} + +// Source describes the source spec of the packetcapture. +type Source struct { + // Pod is the source pod, + Pod *PodReference `json:"pod,omitempty"` + // IP is the source IPv4 or IPv6 address. + IP *string `json:"ip,omitempty"` +} + +// Destination describes the destination spec of the PacketCapture. +type Destination struct { + // Pod is the destination Pod, exclusive with destination Service. + Pod *PodReference `json:"pod,omitempty"` + // Service is the destination Service, exclusive with destination Pod. + Service *ServiceReference `json:"service,omitempty"` + // IP is the destination IPv4 or IPv6 address. + IP *string `json:"ip,omitempty"` +} + +// TransportHeader describes spec of a TransportHeader. 
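// For example, capturing only TCP traffic to destination port 80 corresponds to
// TransportHeader{TCP: &TCPHeader{DstPort: &dstPort}} with dstPort = 80, set inside the
// Packet filter defined below.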
+type TransportHeader struct { + UDP *UDPHeader `json:"udp,omitempty"` + TCP *TCPHeader `json:"tcp,omitempty"` +} + +// UDPHeader describes spec of a UDP header. +type UDPHeader struct { + // SrcPort is the source port. + SrcPort *int32 `json:"srcPort,omitempty"` + // DstPort is the destination port. + DstPort *int32 `json:"dstPort,omitempty"` +} + +// TCPHeader describes spec of a TCP header. +type TCPHeader struct { + // SrcPort is the source port. + SrcPort *int32 `json:"srcPort,omitempty"` + // DstPort is the destination port. + DstPort *int32 `json:"dstPort,omitempty"` + // Flags are flags in the header. + Flags *int32 `json:"flags,omitempty"` +} + +// Packet includes header info. +type Packet struct { + // IPFamily is the filter's IP family. Default to `IPv4`. + IPFamily v1.IPFamily `json:"ipFamily,omitempty"` + // Protocol represents the transport protocol. Default is to not filter on protocol. + Protocol *intstr.IntOrString `json:"protocol,omitempty"` + TransportHeader TransportHeader `json:"transportHeader"` +} + +// PacketCaptureFirstNConfig contains the config for the FirstN type capture. The only supported parameter is +// `Number` at the moment, meaning capturing the first specified number of packets in a flow. +type PacketCaptureFirstNConfig struct { + Number int32 `json:"number"` +} + +const DefaultPacketCaptureTimeout uint16 = 60 + +type PacketCapturePhase string + +const ( + PacketCaptureRunning PacketCapturePhase = "Running" + PacketCaptureSucceeded PacketCapturePhase = "Succeeded" + PacketCaptureFailed PacketCapturePhase = "Failed" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PacketCaptureList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []PacketCapture `json:"items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PacketCapture struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec PacketCaptureSpec `json:"spec"` + Status PacketCaptureStatus `json:"status"` +} + +type CaptureConfig struct { + FirstN *PacketCaptureFirstNConfig `json:"firstN,omitempty"` +} + +type PacketCaptureSpec struct { + Timeout *uint16 `json:"timeout,omitempty"` + CaptureConfig CaptureConfig `json:"captureConfig"` + Source Source `json:"source"` + Destination Destination `json:"destination"` + Packet *Packet `json:"packet,omitempty"` + // FileServer specifies the sftp url config for the fileServer. Captured packets will be uploaded to this server. + FileServer *BundleFileServer `json:"fileServer,omitempty"` +} + +type PacketCaptureStatus struct { + Phase PacketCapturePhase `json:"phase"` + // Reason records the failure reason when the capture fails. + Reason string `json:"reason"` + // NumCapturedPackets records how many packets have been captured. If it reaches the target number, the capture + // can be considered as finished. + NumCapturedPackets *int32 `json:"numCapturedPackets,omitempty"` + // PacketsFilePath is the file path where the captured packets are stored. The format is: ":". + // If `.spec.FileServer` is present, this file will also be uploaded to the targeted location. + PacketsFilePath string `json:"packetsFilePath"` + // StartTime is the time when this capture sessions starts. 
+ StartTime *metav1.Time `json:"startTime,omitempty"` +} diff --git a/pkg/apis/crd/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/crd/v1alpha1/zz_generated.deepcopy.go index a45bdeca9c0..ef467e3b948 100644 --- a/pkg/apis/crd/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/crd/v1alpha1/zz_generated.deepcopy.go @@ -23,6 +23,7 @@ import ( corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -266,6 +267,58 @@ func (in *BundleServerAuthConfiguration) DeepCopy() *BundleServerAuthConfigurati return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureConfig) DeepCopyInto(out *CaptureConfig) { + *out = *in + if in.FirstN != nil { + in, out := &in.FirstN, &out.FirstN + *out = new(PacketCaptureFirstNConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureConfig. +func (in *CaptureConfig) DeepCopy() *CaptureConfig { + if in == nil { + return nil + } + out := new(CaptureConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Destination) DeepCopyInto(out *Destination) { + *out = *in + if in.Pod != nil { + in, out := &in.Pod, &out.Pod + *out = new(PodReference) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + **out = **in + } + if in.IP != nil { + in, out := &in.IP, &out.IP + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Destination. +func (in *Destination) DeepCopy() *Destination { + if in == nil { + return nil + } + out := new(Destination) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EgressAdvertisement) DeepCopyInto(out *EgressAdvertisement) { *out = *in @@ -536,6 +589,164 @@ func (in *NodeLatencyMonitorSpec) DeepCopy() *NodeLatencyMonitorSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Packet) DeepCopyInto(out *Packet) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(intstr.IntOrString) + **out = **in + } + in.TransportHeader.DeepCopyInto(&out.TransportHeader) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Packet. +func (in *Packet) DeepCopy() *Packet { + if in == nil { + return nil + } + out := new(Packet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketCapture) DeepCopyInto(out *PacketCapture) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCapture. 
+func (in *PacketCapture) DeepCopy() *PacketCapture { + if in == nil { + return nil + } + out := new(PacketCapture) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketCapture) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketCaptureFirstNConfig) DeepCopyInto(out *PacketCaptureFirstNConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureFirstNConfig. +func (in *PacketCaptureFirstNConfig) DeepCopy() *PacketCaptureFirstNConfig { + if in == nil { + return nil + } + out := new(PacketCaptureFirstNConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketCaptureList) DeepCopyInto(out *PacketCaptureList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PacketCapture, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureList. +func (in *PacketCaptureList) DeepCopy() *PacketCaptureList { + if in == nil { + return nil + } + out := new(PacketCaptureList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketCaptureList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketCaptureSpec) DeepCopyInto(out *PacketCaptureSpec) { + *out = *in + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(uint16) + **out = **in + } + in.CaptureConfig.DeepCopyInto(&out.CaptureConfig) + in.Source.DeepCopyInto(&out.Source) + in.Destination.DeepCopyInto(&out.Destination) + if in.Packet != nil { + in, out := &in.Packet, &out.Packet + *out = new(Packet) + (*in).DeepCopyInto(*out) + } + if in.FileServer != nil { + in, out := &in.FileServer, &out.FileServer + *out = new(BundleFileServer) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureSpec. +func (in *PacketCaptureSpec) DeepCopy() *PacketCaptureSpec { + if in == nil { + return nil + } + out := new(PacketCaptureSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketCaptureStatus) DeepCopyInto(out *PacketCaptureStatus) { + *out = *in + if in.NumCapturedPackets != nil { + in, out := &in.NumCapturedPackets, &out.NumCapturedPackets + *out = new(int32) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureStatus. 
+func (in *PacketCaptureStatus) DeepCopy() *PacketCaptureStatus { + if in == nil { + return nil + } + out := new(PacketCaptureStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodAdvertisement) DeepCopyInto(out *PodAdvertisement) { *out = *in @@ -552,6 +763,22 @@ func (in *PodAdvertisement) DeepCopy() *PodAdvertisement { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodReference) DeepCopyInto(out *PodReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReference. +func (in *PodReference) DeepCopy() *PodReference { + if in == nil { + return nil + } + out := new(PodReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceAdvertisement) DeepCopyInto(out *ServiceAdvertisement) { *out = *in @@ -573,6 +800,48 @@ func (in *ServiceAdvertisement) DeepCopy() *ServiceAdvertisement { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Source) DeepCopyInto(out *Source) { + *out = *in + if in.Pod != nil { + in, out := &in.Pod, &out.Pod + *out = new(PodReference) + **out = **in + } + if in.IP != nil { + in, out := &in.IP, &out.IP + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Source. +func (in *Source) DeepCopy() *Source { + if in == nil { + return nil + } + out := new(Source) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SupportBundleCollection) DeepCopyInto(out *SupportBundleCollection) { *out = *in @@ -702,6 +971,37 @@ func (in *SupportBundleCollectionStatus) DeepCopy() *SupportBundleCollectionStat return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPHeader) DeepCopyInto(out *TCPHeader) { + *out = *in + if in.SrcPort != nil { + in, out := &in.SrcPort, &out.SrcPort + *out = new(int32) + **out = **in + } + if in.DstPort != nil { + in, out := &in.DstPort, &out.DstPort + *out = new(int32) + **out = **in + } + if in.Flags != nil { + in, out := &in.Flags, &out.Flags + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPHeader. +func (in *TCPHeader) DeepCopy() *TCPHeader { + if in == nil { + return nil + } + out := new(TCPHeader) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TLSProtocol) DeepCopyInto(out *TLSProtocol) { *out = *in @@ -717,3 +1017,55 @@ func (in *TLSProtocol) DeepCopy() *TLSProtocol { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportHeader) DeepCopyInto(out *TransportHeader) { + *out = *in + if in.UDP != nil { + in, out := &in.UDP, &out.UDP + *out = new(UDPHeader) + (*in).DeepCopyInto(*out) + } + if in.TCP != nil { + in, out := &in.TCP, &out.TCP + *out = new(TCPHeader) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportHeader. +func (in *TransportHeader) DeepCopy() *TransportHeader { + if in == nil { + return nil + } + out := new(TransportHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UDPHeader) DeepCopyInto(out *UDPHeader) { + *out = *in + if in.SrcPort != nil { + in, out := &in.SrcPort, &out.SrcPort + *out = new(int32) + **out = **in + } + if in.DstPort != nil { + in, out := &in.DstPort, &out.DstPort + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPHeader. +func (in *UDPHeader) DeepCopy() *UDPHeader { + if in == nil { + return nil + } + out := new(UDPHeader) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/apiserver/handlers/featuregates/handler_test.go b/pkg/apiserver/handlers/featuregates/handler_test.go index 401a14d9562..54ec43c13a4 100644 --- a/pkg/apiserver/handlers/featuregates/handler_test.go +++ b/pkg/apiserver/handlers/featuregates/handler_test.go @@ -73,6 +73,7 @@ func Test_getGatesResponse(t *testing.T) { {Component: "agent", Name: "NodeLatencyMonitor", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "NodeNetworkPolicy", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "NodePortLocal", Status: "Enabled", Version: "GA"}, + {Component: "agent", Name: "PacketCapture", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "SecondaryNetwork", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "ServiceExternalIP", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "ServiceTrafficDistribution", Status: "Enabled", Version: "BETA"}, diff --git a/pkg/client/clientset/versioned/typed/crd/v1alpha1/crd_client.go b/pkg/client/clientset/versioned/typed/crd/v1alpha1/crd_client.go index bcff19f9bd7..c0780d228ec 100644 --- a/pkg/client/clientset/versioned/typed/crd/v1alpha1/crd_client.go +++ b/pkg/client/clientset/versioned/typed/crd/v1alpha1/crd_client.go @@ -29,6 +29,7 @@ type CrdV1alpha1Interface interface { BGPPoliciesGetter ExternalNodesGetter NodeLatencyMonitorsGetter + PacketCapturesGetter SupportBundleCollectionsGetter } @@ -49,6 +50,10 @@ func (c *CrdV1alpha1Client) NodeLatencyMonitors() NodeLatencyMonitorInterface { return newNodeLatencyMonitors(c) } +func (c *CrdV1alpha1Client) PacketCaptures() PacketCaptureInterface { + return newPacketCaptures(c) +} + func (c *CrdV1alpha1Client) SupportBundleCollections() SupportBundleCollectionInterface { return newSupportBundleCollections(c) } diff --git a/pkg/client/clientset/versioned/typed/crd/v1alpha1/fake/fake_crd_client.go b/pkg/client/clientset/versioned/typed/crd/v1alpha1/fake/fake_crd_client.go index 4d6c869b949..34b1c00ff7e 100644 --- 
a/pkg/client/clientset/versioned/typed/crd/v1alpha1/fake/fake_crd_client.go +++ b/pkg/client/clientset/versioned/typed/crd/v1alpha1/fake/fake_crd_client.go @@ -38,6 +38,10 @@ func (c *FakeCrdV1alpha1) NodeLatencyMonitors() v1alpha1.NodeLatencyMonitorInter return &FakeNodeLatencyMonitors{c} } +func (c *FakeCrdV1alpha1) PacketCaptures() v1alpha1.PacketCaptureInterface { + return &FakePacketCaptures{c} +} + func (c *FakeCrdV1alpha1) SupportBundleCollections() v1alpha1.SupportBundleCollectionInterface { return &FakeSupportBundleCollections{c} } diff --git a/pkg/client/clientset/versioned/typed/crd/v1alpha1/fake/fake_packetcapture.go b/pkg/client/clientset/versioned/typed/crd/v1alpha1/fake/fake_packetcapture.go new file mode 100644 index 00000000000..86a20fdd7d8 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/crd/v1alpha1/fake/fake_packetcapture.go @@ -0,0 +1,136 @@ +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePacketCaptures implements PacketCaptureInterface +type FakePacketCaptures struct { + Fake *FakeCrdV1alpha1 +} + +var packetcapturesResource = v1alpha1.SchemeGroupVersion.WithResource("packetcaptures") + +var packetcapturesKind = v1alpha1.SchemeGroupVersion.WithKind("PacketCapture") + +// Get takes name of the packetCapture, and returns the corresponding packetCapture object, and an error if there is any. +func (c *FakePacketCaptures) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PacketCapture, err error) { + emptyResult := &v1alpha1.PacketCapture{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(packetcapturesResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.PacketCapture), err +} + +// List takes label and field selectors, and returns the list of PacketCaptures that match those selectors. +func (c *FakePacketCaptures) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PacketCaptureList, err error) { + emptyResult := &v1alpha1.PacketCaptureList{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootListActionWithOptions(packetcapturesResource, packetcapturesKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.PacketCaptureList{ListMeta: obj.(*v1alpha1.PacketCaptureList).ListMeta} + for _, item := range obj.(*v1alpha1.PacketCaptureList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested packetCaptures. +func (c *FakePacketCaptures) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(packetcapturesResource, opts)) +} + +// Create takes the representation of a packetCapture and creates it. Returns the server's representation of the packetCapture, and an error, if there is any. +func (c *FakePacketCaptures) Create(ctx context.Context, packetCapture *v1alpha1.PacketCapture, opts v1.CreateOptions) (result *v1alpha1.PacketCapture, err error) { + emptyResult := &v1alpha1.PacketCapture{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(packetcapturesResource, packetCapture, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.PacketCapture), err +} + +// Update takes the representation of a packetCapture and updates it. Returns the server's representation of the packetCapture, and an error, if there is any. +func (c *FakePacketCaptures) Update(ctx context.Context, packetCapture *v1alpha1.PacketCapture, opts v1.UpdateOptions) (result *v1alpha1.PacketCapture, err error) { + emptyResult := &v1alpha1.PacketCapture{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(packetcapturesResource, packetCapture, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.PacketCapture), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePacketCaptures) UpdateStatus(ctx context.Context, packetCapture *v1alpha1.PacketCapture, opts v1.UpdateOptions) (result *v1alpha1.PacketCapture, err error) { + emptyResult := &v1alpha1.PacketCapture{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(packetcapturesResource, "status", packetCapture, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.PacketCapture), err +} + +// Delete takes name of the packetCapture and deletes it. Returns an error if one occurs. +func (c *FakePacketCaptures) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(packetcapturesResource, name, opts), &v1alpha1.PacketCapture{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePacketCaptures) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(packetcapturesResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.PacketCaptureList{}) + return err +} + +// Patch applies the patch and returns the patched packetCapture. 
+func (c *FakePacketCaptures) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PacketCapture, err error) { + emptyResult := &v1alpha1.PacketCapture{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(packetcapturesResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.PacketCapture), err +} diff --git a/pkg/client/clientset/versioned/typed/crd/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/crd/v1alpha1/generated_expansion.go index 0631615e701..fdcf058ec7e 100644 --- a/pkg/client/clientset/versioned/typed/crd/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/crd/v1alpha1/generated_expansion.go @@ -22,4 +22,6 @@ type ExternalNodeExpansion interface{} type NodeLatencyMonitorExpansion interface{} +type PacketCaptureExpansion interface{} + type SupportBundleCollectionExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/crd/v1alpha1/packetcapture.go b/pkg/client/clientset/versioned/typed/crd/v1alpha1/packetcapture.go new file mode 100644 index 00000000000..9c52682070c --- /dev/null +++ b/pkg/client/clientset/versioned/typed/crd/v1alpha1/packetcapture.go @@ -0,0 +1,67 @@ +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" + scheme "antrea.io/antrea/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// PacketCapturesGetter has a method to return a PacketCaptureInterface. +// A group's client should implement this interface. +type PacketCapturesGetter interface { + PacketCaptures() PacketCaptureInterface +} + +// PacketCaptureInterface has methods to work with PacketCapture resources. +type PacketCaptureInterface interface { + Create(ctx context.Context, packetCapture *v1alpha1.PacketCapture, opts v1.CreateOptions) (*v1alpha1.PacketCapture, error) + Update(ctx context.Context, packetCapture *v1alpha1.PacketCapture, opts v1.UpdateOptions) (*v1alpha1.PacketCapture, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, packetCapture *v1alpha1.PacketCapture, opts v1.UpdateOptions) (*v1alpha1.PacketCapture, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PacketCapture, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PacketCaptureList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PacketCapture, err error) + PacketCaptureExpansion +} + +// packetCaptures implements PacketCaptureInterface +type packetCaptures struct { + *gentype.ClientWithList[*v1alpha1.PacketCapture, *v1alpha1.PacketCaptureList] +} + +// newPacketCaptures returns a PacketCaptures +func newPacketCaptures(c *CrdV1alpha1Client) *packetCaptures { + return &packetCaptures{ + gentype.NewClientWithList[*v1alpha1.PacketCapture, *v1alpha1.PacketCaptureList]( + "packetcaptures", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.PacketCapture { return &v1alpha1.PacketCapture{} }, + func() *v1alpha1.PacketCaptureList { return &v1alpha1.PacketCaptureList{} }), + } +} diff --git a/pkg/client/informers/externalversions/crd/v1alpha1/interface.go b/pkg/client/informers/externalversions/crd/v1alpha1/interface.go index e69100682ba..244bbe860f2 100644 --- a/pkg/client/informers/externalversions/crd/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/crd/v1alpha1/interface.go @@ -28,6 +28,8 @@ type Interface interface { ExternalNodes() ExternalNodeInformer // NodeLatencyMonitors returns a NodeLatencyMonitorInformer. NodeLatencyMonitors() NodeLatencyMonitorInformer + // PacketCaptures returns a PacketCaptureInformer. + PacketCaptures() PacketCaptureInformer // SupportBundleCollections returns a SupportBundleCollectionInformer. SupportBundleCollections() SupportBundleCollectionInformer } @@ -58,6 +60,11 @@ func (v *version) NodeLatencyMonitors() NodeLatencyMonitorInformer { return &nodeLatencyMonitorInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// PacketCaptures returns a PacketCaptureInformer. +func (v *version) PacketCaptures() PacketCaptureInformer { + return &packetCaptureInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // SupportBundleCollections returns a SupportBundleCollectionInformer. func (v *version) SupportBundleCollections() SupportBundleCollectionInformer { return &supportBundleCollectionInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/informers/externalversions/crd/v1alpha1/packetcapture.go b/pkg/client/informers/externalversions/crd/v1alpha1/packetcapture.go new file mode 100644 index 00000000000..1995048a4c9 --- /dev/null +++ b/pkg/client/informers/externalversions/crd/v1alpha1/packetcapture.go @@ -0,0 +1,87 @@ +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" + versioned "antrea.io/antrea/pkg/client/clientset/versioned" + internalinterfaces "antrea.io/antrea/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "antrea.io/antrea/pkg/client/listers/crd/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PacketCaptureInformer provides access to a shared informer and lister for +// PacketCaptures. +type PacketCaptureInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.PacketCaptureLister +} + +type packetCaptureInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPacketCaptureInformer constructs a new informer for PacketCapture type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPacketCaptureInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPacketCaptureInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPacketCaptureInformer constructs a new informer for PacketCapture type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
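+//
+// A minimal sketch of the preferred factory-based access, assuming the generated
+// externalversions factory package and an already constructed versioned
+// clientset (crdClient and the resync period below are illustrative only):
+//
+//	factory := externalversions.NewSharedInformerFactory(crdClient, 12*time.Hour)
+//	pcInformer := factory.Crd().V1alpha1().PacketCaptures().Informer()
+//	pcLister := factory.Crd().V1alpha1().PacketCaptures().Lister()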
+func NewFilteredPacketCaptureInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CrdV1alpha1().PacketCaptures().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CrdV1alpha1().PacketCaptures().Watch(context.TODO(), options) + }, + }, + &crdv1alpha1.PacketCapture{}, + resyncPeriod, + indexers, + ) +} + +func (f *packetCaptureInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPacketCaptureInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *packetCaptureInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&crdv1alpha1.PacketCapture{}, f.defaultInformer) +} + +func (f *packetCaptureInformer) Lister() v1alpha1.PacketCaptureLister { + return v1alpha1.NewPacketCaptureLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 07c1d724cbc..d8325bbf33f 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -59,6 +59,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Crd().V1alpha1().ExternalNodes().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("nodelatencymonitors"): return &genericInformer{resource: resource.GroupResource(), informer: f.Crd().V1alpha1().NodeLatencyMonitors().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("packetcaptures"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Crd().V1alpha1().PacketCaptures().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("supportbundlecollections"): return &genericInformer{resource: resource.GroupResource(), informer: f.Crd().V1alpha1().SupportBundleCollections().Informer()}, nil diff --git a/pkg/client/listers/crd/v1alpha1/expansion_generated.go b/pkg/client/listers/crd/v1alpha1/expansion_generated.go index 6d1c92155c1..ebe5ff42e87 100644 --- a/pkg/client/listers/crd/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/crd/v1alpha1/expansion_generated.go @@ -32,6 +32,10 @@ type ExternalNodeNamespaceListerExpansion interface{} // NodeLatencyMonitorLister. type NodeLatencyMonitorListerExpansion interface{} +// PacketCaptureListerExpansion allows custom methods to be added to +// PacketCaptureLister. +type PacketCaptureListerExpansion interface{} + // SupportBundleCollectionListerExpansion allows custom methods to be added to // SupportBundleCollectionLister. 
type SupportBundleCollectionListerExpansion interface{} diff --git a/pkg/client/listers/crd/v1alpha1/packetcapture.go b/pkg/client/listers/crd/v1alpha1/packetcapture.go new file mode 100644 index 00000000000..d052771c46f --- /dev/null +++ b/pkg/client/listers/crd/v1alpha1/packetcapture.go @@ -0,0 +1,46 @@ +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// PacketCaptureLister helps list PacketCaptures. +// All objects returned here must be treated as read-only. +type PacketCaptureLister interface { + // List lists all PacketCaptures in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.PacketCapture, err error) + // Get retrieves the PacketCapture from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.PacketCapture, error) + PacketCaptureListerExpansion +} + +// packetCaptureLister implements the PacketCaptureLister interface. +type packetCaptureLister struct { + listers.ResourceIndexer[*v1alpha1.PacketCapture] +} + +// NewPacketCaptureLister returns a new PacketCaptureLister. +func NewPacketCaptureLister(indexer cache.Indexer) PacketCaptureLister { + return &packetCaptureLister{listers.New[*v1alpha1.PacketCapture](indexer, v1alpha1.Resource("packetcapture"))} +} diff --git a/pkg/controller/supportbundlecollection/controller.go b/pkg/controller/supportbundlecollection/controller.go index d7d10ec0bde..5a4dd81ee13 100644 --- a/pkg/controller/supportbundlecollection/controller.go +++ b/pkg/controller/supportbundlecollection/controller.go @@ -15,7 +15,6 @@ package supportbundlecollection import ( - "bytes" "context" "fmt" "reflect" @@ -45,6 +44,7 @@ import ( crdinformers "antrea.io/antrea/pkg/client/informers/externalversions/crd/v1alpha1" crdlisters "antrea.io/antrea/pkg/client/listers/crd/v1alpha1" "antrea.io/antrea/pkg/controller/types" + "antrea.io/antrea/pkg/util/ftp" "antrea.io/antrea/pkg/util/k8s" ) @@ -391,7 +391,7 @@ func (c *Controller) createInternalSupportBundleCollection(bundle *v1alpha1.Supp } nodeSpan := nodeNames.Union(externalNodeNames) // Get authentication from the Secret provided in authentication field in the CRD - authentication, err := c.parseBundleAuth(bundle.Spec.Authentication) + authentication, err := ftp.ParseBundleAuth(bundle.Spec.Authentication, c.kubeClient) if err != nil { klog.ErrorS(err, "Failed to get authentication defined in the SupportBundleCollection CR", "name", bundle.Name, "authentication", bundle.Spec.Authentication) return nil, err @@ -512,60 +512,6 @@ func (c *Controller) deleteInternalSupportBundleCollection(key string) error { return nil } -// parseBundleAuth returns the authentication from the Secret provided in BundleServerAuthConfiguration. 
-// The authentication is stored in the Secret Data with a key decided by the AuthType, and encoded using base64. -func (c *Controller) parseBundleAuth(authentication v1alpha1.BundleServerAuthConfiguration) (*controlplane.BundleServerAuthConfiguration, error) { - secretReference := authentication.AuthSecret - if secretReference == nil { - return nil, fmt.Errorf("authentication is not specified") - } - secret, err := c.kubeClient.CoreV1().Secrets(secretReference.Namespace).Get(context.TODO(), secretReference.Name, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("unable to get Secret with name %s in Namespace %s: %v", secretReference.Name, secretReference.Namespace, err) - } - parseAuthValue := func(secretData map[string][]byte, key string) (string, error) { - authValue, found := secret.Data[key] - if !found { - return "", fmt.Errorf("not found authentication in Secret %s/%s with key %s", secretReference.Namespace, secretReference.Name, key) - } - return bytes.NewBuffer(authValue).String(), nil - } - switch authentication.AuthType { - case v1alpha1.APIKey: - value, err := parseAuthValue(secret.Data, secretKeyWithAPIKey) - if err != nil { - return nil, err - } - return &controlplane.BundleServerAuthConfiguration{ - APIKey: value, - }, nil - case v1alpha1.BearerToken: - value, err := parseAuthValue(secret.Data, secretKeyWithBearerToken) - if err != nil { - return nil, err - } - return &controlplane.BundleServerAuthConfiguration{ - BearerToken: value, - }, nil - case v1alpha1.BasicAuthentication: - username, err := parseAuthValue(secret.Data, secretKeyWithUsername) - if err != nil { - return nil, err - } - password, err := parseAuthValue(secret.Data, secretKeyWithPassword) - if err != nil { - return nil, err - } - return &controlplane.BundleServerAuthConfiguration{ - BasicAuthentication: &controlplane.BasicAuthentication{ - Username: username, - Password: password, - }, - }, nil - } - return nil, fmt.Errorf("unsupported authentication type %s", authentication.AuthType) -} - // addInternalSupportBundleCollection adds internalBundle into supportBundleCollectionStore, and creates a // supportBundleCollectionAppliedTo resource to maintain the SupportBundleCollection's required Nodes or ExternalNodes. 
func (c *Controller) addInternalSupportBundleCollection( diff --git a/pkg/controller/supportbundlecollection/controller_test.go b/pkg/controller/supportbundlecollection/controller_test.go index 298d8cefbea..ce20db48a39 100644 --- a/pkg/controller/supportbundlecollection/controller_test.go +++ b/pkg/controller/supportbundlecollection/controller_test.go @@ -664,111 +664,6 @@ type secretConfig struct { data map[string][]byte } -func TestParseBundleAuth(t *testing.T) { - ns := "ns-auth" - apiKey := testKeyString - token := testTokenString - usr := "user" - pwd := "pwd123456" - var secretObjects []runtime.Object - for _, s := range prepareSecrets(ns, []secretConfig{ - {name: "s1", data: map[string][]byte{secretKeyWithAPIKey: []byte(apiKey)}}, - {name: "s2", data: map[string][]byte{secretKeyWithBearerToken: []byte(token)}}, - {name: "s3", data: map[string][]byte{secretKeyWithUsername: []byte(usr), secretKeyWithPassword: []byte(pwd)}}, - {name: "invalid-base64", data: map[string][]byte{secretKeyWithAPIKey: []byte("invalid string to decode with base64")}}, - {name: "invalid-secret", data: map[string][]byte{"unknown": []byte(apiKey)}}, - }) { - secretObjects = append(secretObjects, s) - } - - testClient := newTestClient(secretObjects, nil) - controller := newController(testClient) - stopCh := make(chan struct{}) - testClient.start(stopCh) - - testClient.waitForSync(stopCh) - - for _, tc := range []struct { - authentication v1alpha1.BundleServerAuthConfiguration - expectedError string - expectedAuth *controlplane.BundleServerAuthConfiguration - }{ - { - authentication: v1alpha1.BundleServerAuthConfiguration{ - AuthType: v1alpha1.APIKey, - AuthSecret: &corev1.SecretReference{ - Namespace: ns, - Name: "s1", - }, - }, - expectedAuth: &controlplane.BundleServerAuthConfiguration{ - APIKey: testKeyString, - }, - }, - { - authentication: v1alpha1.BundleServerAuthConfiguration{ - AuthType: v1alpha1.BearerToken, - AuthSecret: &corev1.SecretReference{ - Namespace: ns, - Name: "s2", - }, - }, - expectedAuth: &controlplane.BundleServerAuthConfiguration{ - BearerToken: testTokenString, - }, - }, - { - authentication: v1alpha1.BundleServerAuthConfiguration{ - AuthType: v1alpha1.BasicAuthentication, - AuthSecret: &corev1.SecretReference{ - Namespace: ns, - Name: "s3", - }, - }, - expectedAuth: &controlplane.BundleServerAuthConfiguration{ - BasicAuthentication: &controlplane.BasicAuthentication{ - Username: usr, - Password: pwd, - }, - }, - }, - { - authentication: v1alpha1.BundleServerAuthConfiguration{ - AuthType: v1alpha1.BearerToken, - AuthSecret: &corev1.SecretReference{ - Namespace: ns, - Name: "invalid-secret", - }, - }, - expectedError: fmt.Sprintf("not found authentication in Secret %s/invalid-secret with key %s", ns, secretKeyWithBearerToken), - }, - { - authentication: v1alpha1.BundleServerAuthConfiguration{ - AuthType: v1alpha1.BearerToken, - AuthSecret: &corev1.SecretReference{ - Namespace: ns, - Name: "not-exist", - }, - }, - expectedError: fmt.Sprintf("unable to get Secret with name not-exist in Namespace %s", ns), - }, - { - authentication: v1alpha1.BundleServerAuthConfiguration{ - AuthType: v1alpha1.APIKey, - AuthSecret: nil, - }, - expectedError: "authentication is not specified", - }, - } { - auth, err := controller.parseBundleAuth(tc.authentication) - if tc.expectedError != "" { - assert.Contains(t, err.Error(), tc.expectedError) - } else { - assert.Equal(t, tc.expectedAuth, auth) - } - } -} - func TestCreateAndDeleteInternalSupportBundleCollection(t *testing.T) { coreObjects, crdObjects := 
prepareTopology() testClient := newTestClient(coreObjects, crdObjects) diff --git a/pkg/features/antrea_features.go b/pkg/features/antrea_features.go index 8dc612a9340..0c8086f4c8b 100644 --- a/pkg/features/antrea_features.go +++ b/pkg/features/antrea_features.go @@ -73,6 +73,10 @@ const ( // Allows to trace path from a generated packet. Traceflow featuregate.Feature = "Traceflow" + // alpha: v2.2 + // Allows to capture packets for a flow. + PacketCapture featuregate.Feature = "PacketCapture" + // alpha: v0.9 // Flow exporter exports IPFIX flow records of Antrea flows seen in conntrack module. FlowExporter featuregate.Feature = "FlowExporter" @@ -196,6 +200,7 @@ var ( ServiceTrafficDistribution: {Default: true, PreRelease: featuregate.Beta}, CleanupStaleUDPSvcConntrack: {Default: true, PreRelease: featuregate.Beta}, Traceflow: {Default: true, PreRelease: featuregate.Beta}, + PacketCapture: {Default: false, PreRelease: featuregate.Alpha}, AntreaIPAM: {Default: false, PreRelease: featuregate.Alpha}, FlowExporter: {Default: false, PreRelease: featuregate.Alpha}, NetworkPolicyStats: {Default: true, PreRelease: featuregate.Beta}, @@ -244,6 +249,7 @@ var ( SupportBundleCollection, TopologyAwareHints, Traceflow, + PacketCapture, TrafficControl, EgressTrafficShaping, EgressSeparateSubnet, @@ -301,6 +307,7 @@ var ( NodeNetworkPolicy: {}, L7FlowExporter: {}, NodeLatencyMonitor: {}, + PacketCapture: {}, } // supportedFeaturesOnExternalNode records the features supported on an external // Node. Antrea Agent checks the enabled features if it is running on an diff --git a/pkg/util/ftp/auth.go b/pkg/util/ftp/auth.go new file mode 100644 index 00000000000..2900c0e4812 --- /dev/null +++ b/pkg/util/ftp/auth.go @@ -0,0 +1,102 @@ +// Copyright 2024 Antrea Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ftp + +import ( + "bytes" + "context" + "fmt" + "time" + + "golang.org/x/crypto/ssh" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + + "antrea.io/antrea/pkg/apis/controlplane" + crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" +) + +const ( + secretKeyWithAPIKey = "apikey" + secretKeyWithBearerToken = "token" + secretKeyWithUsername = "username" + secretKeyWithPassword = "password" +) + +// GenSSHClientConfig generates ssh.ClientConfig from username and password +func GenSSHClientConfig(username, password string) *ssh.ClientConfig { + cfg := &ssh.ClientConfig{ + User: username, + Auth: []ssh.AuthMethod{ssh.Password(password)}, + // #nosec G106: skip host key check here and users can specify their own checks if needed + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + Timeout: time.Second, + } + return cfg +} + +// ParseBundleAuth returns the authentication from the Secret provided in BundleServerAuthConfiguration. +// The authentication is stored in the Secret Data with a key decided by the AuthType, and encoded using base64. 
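+//
+// For illustration only (the Secret name, Namespace and values below are
+// examples, not requirements of this function), a Secret for the
+// BasicAuthentication type is expected to provide the "username" and
+// "password" keys:
+//
+//	apiVersion: v1
+//	kind: Secret
+//	metadata:
+//	  name: antrea-packetcapture-fileserver-auth
+//	  namespace: kube-system
+//	stringData:
+//	  username: foo
+//	  password: pass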
+func ParseBundleAuth(authentication crdv1alpha1.BundleServerAuthConfiguration, kubeClient clientset.Interface) (*controlplane.BundleServerAuthConfiguration, error) { + secretReference := authentication.AuthSecret + if secretReference == nil { + return nil, fmt.Errorf("authentication is not specified") + } + secret, err := kubeClient.CoreV1().Secrets(secretReference.Namespace).Get(context.TODO(), secretReference.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("unable to get Secret with name %s in Namespace %s: %v", secretReference.Name, secretReference.Namespace, err) + } + parseAuthValue := func(secretData map[string][]byte, key string) (string, error) { + authValue, found := secret.Data[key] + if !found { + return "", fmt.Errorf("not found authentication in Secret %s/%s with key %s", secretReference.Namespace, secretReference.Name, key) + } + return bytes.NewBuffer(authValue).String(), nil + } + switch authentication.AuthType { + case crdv1alpha1.APIKey: + value, err := parseAuthValue(secret.Data, secretKeyWithAPIKey) + if err != nil { + return nil, err + } + return &controlplane.BundleServerAuthConfiguration{ + APIKey: value, + }, nil + case crdv1alpha1.BearerToken: + value, err := parseAuthValue(secret.Data, secretKeyWithBearerToken) + if err != nil { + return nil, err + } + return &controlplane.BundleServerAuthConfiguration{ + BearerToken: value, + }, nil + case crdv1alpha1.BasicAuthentication: + username, err := parseAuthValue(secret.Data, secretKeyWithUsername) + if err != nil { + return nil, err + } + password, err := parseAuthValue(secret.Data, secretKeyWithPassword) + if err != nil { + return nil, err + } + return &controlplane.BundleServerAuthConfiguration{ + BasicAuthentication: &controlplane.BasicAuthentication{ + Username: username, + Password: password, + }, + }, nil + } + return nil, fmt.Errorf("unsupported authentication type %s", authentication.AuthType) +} diff --git a/pkg/util/ftp/auth_test.go b/pkg/util/ftp/auth_test.go new file mode 100644 index 00000000000..f2d4a3dc0bf --- /dev/null +++ b/pkg/util/ftp/auth_test.go @@ -0,0 +1,183 @@ +// Copyright 2024 Antrea Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ftp + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + + "antrea.io/antrea/pkg/apis/controlplane" + "antrea.io/antrea/pkg/apis/crd/v1alpha1" +) + +const ( + informerDefaultResync = 30 * time.Second + + testKeyString = "it is a valid API key" + testTokenString = "it is a valid token" +) + +type secretConfig struct { + name string + data map[string][]byte +} + +func prepareSecrets(ns string, secretConfigs []secretConfig) []*corev1.Secret { + secrets := make([]*corev1.Secret, 0) + for _, s := range secretConfigs { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.name, + Namespace: ns, + }, + Data: s.data, + } + secrets = append(secrets, secret) + } + return secrets +} + +type testClient struct { + client kubernetes.Interface + informerFactory informers.SharedInformerFactory +} + +func (c *testClient) start(stopCh <-chan struct{}) { + c.informerFactory.Start(stopCh) +} + +func (c *testClient) waitForSync(stopCh <-chan struct{}) { + c.informerFactory.WaitForCacheSync(stopCh) +} + +func newTestClient(coreObjects []runtime.Object, crdObjects []runtime.Object) *testClient { + client := fake.NewSimpleClientset(coreObjects...) + return &testClient{ + client: client, + informerFactory: informers.NewSharedInformerFactory(client, informerDefaultResync), + } +} + +func TestParseBundleAuth(t *testing.T) { + ns := "ns-auth" + apiKey := testKeyString + token := testTokenString + usr := "user" + pwd := "pwd123456" + var secretObjects []runtime.Object + for _, s := range prepareSecrets(ns, []secretConfig{ + {name: "s1", data: map[string][]byte{secretKeyWithAPIKey: []byte(apiKey)}}, + {name: "s2", data: map[string][]byte{secretKeyWithBearerToken: []byte(token)}}, + {name: "s3", data: map[string][]byte{secretKeyWithUsername: []byte(usr), secretKeyWithPassword: []byte(pwd)}}, + {name: "invalid-base64", data: map[string][]byte{secretKeyWithAPIKey: []byte("invalid string to decode with base64")}}, + {name: "invalid-secret", data: map[string][]byte{"unknown": []byte(apiKey)}}, + }) { + secretObjects = append(secretObjects, s) + } + + testClient := newTestClient(secretObjects, nil) + stopCh := make(chan struct{}) + testClient.start(stopCh) + testClient.waitForSync(stopCh) + + for _, tc := range []struct { + authentication v1alpha1.BundleServerAuthConfiguration + expectedError string + expectedAuth *controlplane.BundleServerAuthConfiguration + }{ + { + authentication: v1alpha1.BundleServerAuthConfiguration{ + AuthType: v1alpha1.APIKey, + AuthSecret: &corev1.SecretReference{ + Namespace: ns, + Name: "s1", + }, + }, + expectedAuth: &controlplane.BundleServerAuthConfiguration{ + APIKey: testKeyString, + }, + }, + { + authentication: v1alpha1.BundleServerAuthConfiguration{ + AuthType: v1alpha1.BearerToken, + AuthSecret: &corev1.SecretReference{ + Namespace: ns, + Name: "s2", + }, + }, + expectedAuth: &controlplane.BundleServerAuthConfiguration{ + BearerToken: testTokenString, + }, + }, + { + authentication: v1alpha1.BundleServerAuthConfiguration{ + AuthType: v1alpha1.BasicAuthentication, + AuthSecret: &corev1.SecretReference{ + Namespace: ns, + Name: "s3", + }, + }, + expectedAuth: &controlplane.BundleServerAuthConfiguration{ + BasicAuthentication: &controlplane.BasicAuthentication{ + Username: usr, + Password: pwd, + }, + }, + }, + { + 
authentication: v1alpha1.BundleServerAuthConfiguration{ + AuthType: v1alpha1.BearerToken, + AuthSecret: &corev1.SecretReference{ + Namespace: ns, + Name: "invalid-secret", + }, + }, + expectedError: fmt.Sprintf("not found authentication in Secret %s/invalid-secret with key %s", ns, secretKeyWithBearerToken), + }, + { + authentication: v1alpha1.BundleServerAuthConfiguration{ + AuthType: v1alpha1.BearerToken, + AuthSecret: &corev1.SecretReference{ + Namespace: ns, + Name: "not-exist", + }, + }, + expectedError: fmt.Sprintf("unable to get Secret with name not-exist in Namespace %s", ns), + }, + { + authentication: v1alpha1.BundleServerAuthConfiguration{ + AuthType: v1alpha1.APIKey, + AuthSecret: nil, + }, + expectedError: "authentication is not specified", + }, + } { + auth, err := ParseBundleAuth(tc.authentication, testClient.client) + if tc.expectedError != "" { + assert.Contains(t, err.Error(), tc.expectedError) + } else { + assert.Equal(t, tc.expectedAuth, auth) + } + } +} diff --git a/pkg/util/ftp/ftp.go b/pkg/util/ftp/ftp.go new file mode 100644 index 00000000000..f1e8fb2ea1d --- /dev/null +++ b/pkg/util/ftp/ftp.go @@ -0,0 +1,106 @@ +// Copyright 2024 Antrea Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ftp + +import ( + "fmt" + "io" + "net/url" + "path" + "time" + + "github.com/pkg/sftp" + "github.com/spf13/afero" + "golang.org/x/crypto/ssh" + "k8s.io/klog/v2" +) + +const ( + uploadToFileServerTries = 5 + uploadToFileServerRetryDelay = 5 * time.Second +) + +func ParseFTPUploadUrl(uploadUrl string) (*url.URL, error) { + parsedURL, err := url.Parse(uploadUrl) + if err != nil { + return nil, err + } + if parsedURL.Scheme != "sftp" { + return nil, fmt.Errorf("not sftp protocol") + } + return parsedURL, nil +} + +type Uploader interface { + // Upload uploads a file to the target sftp address using ssh config. 
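+	//
+	// A minimal usage sketch (the server address, credentials and file name are
+	// illustrative; outputFile is any afero.File holding the data to send):
+	//
+	//	uploader := &SftpUploader{}
+	//	cfg := GenSSHClientConfig("foo", "pass")
+	//	err := uploader.Upload("sftp://10.92.23.154:22/upload", "capture.pcapng", cfg, outputFile)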
+	Upload(url string, fileName string, config *ssh.ClientConfig, outputFile afero.File) error
+}
+
+type SftpUploader struct {
+}
+
+func (uploader *SftpUploader) Upload(url string, fileName string, config *ssh.ClientConfig, outputFile afero.File) error {
+	if _, err := outputFile.Seek(0, 0); err != nil {
+		return fmt.Errorf("failed to upload to file server while setting offset: %v", err)
+	}
+	// The url is expected to be in the sftp form, e.g. sftp://10.92.23.154:22/path.
+	parsedURL, err := ParseFTPUploadUrl(url)
+	if err != nil {
+		return fmt.Errorf("failed to parse upload URL: %w", err)
+	}
+	joinedPath := path.Join(parsedURL.Path, fileName)
+
+	triesLeft := uploadToFileServerTries
+	var uploadErr error
+	for triesLeft > 0 {
+		if uploadErr = upload(parsedURL.Host, joinedPath, config, outputFile); uploadErr == nil {
+			return nil
+		}
+		triesLeft--
+		if triesLeft == 0 {
+			return fmt.Errorf("failed to upload file after %d attempts", uploadToFileServerTries)
+		}
+		klog.InfoS("Failed to upload file", "UploadError", uploadErr, "TriesLeft", triesLeft)
+		time.Sleep(uploadToFileServerRetryDelay)
+	}
+	return nil
+}
+
+func upload(address string, path string, config *ssh.ClientConfig, file io.Reader) error {
+	conn, err := ssh.Dial("tcp", address, config)
+	if err != nil {
+		return fmt.Errorf("error when connecting to fs server: %w", err)
+	}
+	// Close the underlying SSH connection as well; closing the sftp client alone does not release it.
+	defer conn.Close()
+	sftpClient, err := sftp.NewClient(conn)
+	if err != nil {
+		return fmt.Errorf("error when setting up sftp client: %w", err)
+	}
+	defer func() {
+		if err := sftpClient.Close(); err != nil {
+			klog.ErrorS(err, "Error when closing sftp client")
+		}
+	}()
+	targetFile, err := sftpClient.Create(path)
+	if err != nil {
+		return fmt.Errorf("error when creating target file on remote: %v", err)
+	}
+	defer func() {
+		if err := targetFile.Close(); err != nil {
+			klog.ErrorS(err, "Error when closing target file on remote")
+		}
+	}()
+	if written, err := io.Copy(targetFile, file); err != nil {
+		return fmt.Errorf("error when copying target file: %v, written: %d", err, written)
+	}
+	klog.InfoS("Successfully uploaded file", "filePath", path)
+	return nil
+}
diff --git a/pkg/util/ftp/ftp_test.go b/pkg/util/ftp/ftp_test.go
new file mode 100644
index 00000000000..df70dc7a4d8
--- /dev/null
+++ b/pkg/util/ftp/ftp_test.go
@@ -0,0 +1,58 @@
+// Copyright 2024 Antrea Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ftp
+
+import (
+	"net/url"
+	"reflect"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestParseFTPUploadUrl(t *testing.T) {
+	cases := []struct {
+		url           string
+		expectedError string
+		expectedURL   url.URL
+	}{
+		{
+			url: "sftp://127.0.0.1:22/path",
+			expectedURL: url.URL{
+				Scheme: "sftp",
+				Host:   "127.0.0.1:22",
+				Path:   "/path",
+			},
+		},
+		{
+			url:           "https://127.0.0.1:22/root/supportbundle",
+			expectedError: "not sftp protocol",
+		},
+	}
+
+	for _, tc := range cases {
+		uploadUrl, err := ParseFTPUploadUrl(tc.url)
+		if tc.expectedError == "" {
+			assert.NoError(t, err)
+			if !reflect.DeepEqual(tc.expectedURL, *uploadUrl) {
+				t.Errorf("expected %v, got %v", tc.expectedURL, *uploadUrl)
+			}
+		} else {
+			assert.Equal(t, tc.expectedError, err.Error())
+		}
+	}
+}
diff --git a/test/e2e/packetcapture_test.go b/test/e2e/packetcapture_test.go
new file mode 100644
index 00000000000..297e742177d
--- /dev/null
+++ b/test/e2e/packetcapture_test.go
@@ -0,0 +1,682 @@
+// Copyright 2024 Antrea Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package e2e + +import ( + "context" + "fmt" + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + + crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" + agentconfig "antrea.io/antrea/pkg/config/agent" + controllerconfig "antrea.io/antrea/pkg/config/controller" + "antrea.io/antrea/pkg/features" +) + +var ( + pcSecretNamespace = "kube-system" + // #nosec G101 + pcSecretName = "antrea-packetcapture-fileserver-auth" + tcpServerPodName = "tcp-server" + pcToolboxPodName = "toolbox" + udpServerPodName = "udp-server" + nonExistPodName = "non-existing-pod" + dstServiceName = "svc" + dstServiceIP = "" + + tcpProto = intstr.FromInt(6) + icmpProto = intstr.FromInt(1) + udpProto = intstr.FromInt(17) + icmp6Proto = intstr.FromInt(58) + + testServerPort int32 = 80 +) + +type pcTestCase struct { + name string + pc *crdv1alpha1.PacketCapture + expectedPhase crdv1alpha1.PacketCapturePhase + expectedReason string + expectedNum int32 + // required IP version, skip if not match. + ipVersion int + // Source Pod to run ping for live-traffic PacketCapture. + srcPod string +} + +func genSFTPService() *v1.Service { + selector := map[string]string{"app": "sftp"} + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sftp", + Labels: selector, + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeNodePort, + Selector: selector, + Ports: []v1.ServicePort{ + { + Port: 22, + TargetPort: intstr.FromInt32(22), + NodePort: 30010, + }, + }, + }, + } +} + +func genSFTPDeployment() *appsv1.Deployment { + replicas := int32(1) + selector := map[string]string{"app": "sftp"} + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sftp", + Labels: selector, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: selector, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sftp", + Labels: selector, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "sftp", + Image: "antrea/sftp", + ImagePullPolicy: v1.PullIfNotPresent, + Args: []string{"foo:pass:::upload"}, + }, + }, + }, + }, + }, + } +} + +func createUDPServerPod(name string, ns string, portNum int32, serverNode string) error { + port := v1.ContainerPort{Name: fmt.Sprintf("port-%d", portNum), ContainerPort: portNum} + return NewPodBuilder(name, ns, agnhostImage). + OnNode(serverNode). + WithContainerName("agnhost"). + WithArgs([]string{"serve-hostname", "--udp", "--http=false", "--port", fmt.Sprint(portNum)}). + WithPorts([]v1.ContainerPort{port}). + Create(testData) +} + +// TestPacketCapture is the top-level test which contains all subtests for +// PacketCapture related test cases, so they can share setup, teardown. 
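+//
+// For reference, the subtests below build PacketCapture CRs that are
+// equivalent to the following YAML (names and values are illustrative and
+// mirror the "ipv4-tcp" case):
+//
+//	apiVersion: crd.antrea.io/v1alpha1
+//	kind: PacketCapture
+//	metadata:
+//	  name: toolbox-to-tcp-server
+//	spec:
+//	  source:
+//	    pod:
+//	      namespace: default
+//	      name: toolbox
+//	  destination:
+//	    pod:
+//	      namespace: default
+//	      name: tcp-server
+//	  captureConfig:
+//	    firstN:
+//	      number: 5
+//	  fileServer:
+//	    url: sftp://<control-plane-node-ip>:30010/upload
+//	  packet:
+//	    ipFamily: IPv4
+//	    protocol: 6
+//	    transportHeader:
+//	      tcp:
+//	        dstPort: 80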
+func TestPacketCapture(t *testing.T) { + data, err := setupTest(t) + if err != nil { + t.Fatalf("Error when setting up test: %v", err) + } + defer teardownTest(t, data) + + var previousAgentPacketCaptureEnableState bool + var previousControllerPacketCaptureEnableState bool + + ac := func(config *agentconfig.AgentConfig) { + previousAgentPacketCaptureEnableState = config.FeatureGates[string(features.PacketCapture)] + config.FeatureGates[string(features.PacketCapture)] = true + } + cc := func(config *controllerconfig.ControllerConfig) { + previousControllerPacketCaptureEnableState = config.FeatureGates[string(features.PacketCapture)] + config.FeatureGates[string(features.PacketCapture)] = true + } + if err := data.mutateAntreaConfigMap(cc, ac, true, true); err != nil { + t.Fatalf("Failed to enable PacketCapture flag: %v", err) + } + defer func() { + ac := func(config *agentconfig.AgentConfig) { + config.FeatureGates[string(features.PacketCapture)] = previousAgentPacketCaptureEnableState + } + cc := func(config *controllerconfig.ControllerConfig) { + config.FeatureGates[string(features.PacketCapture)] = previousControllerPacketCaptureEnableState + } + if err := data.mutateAntreaConfigMap(cc, ac, true, true); err != nil { + t.Errorf("Failed to disable PacketCapture flag: %v", err) + } + }() + + // setup sftp server for test. + secretUserName := "foo" + secretPassword := "pass" + _, err = data.clientset.AppsV1().Deployments(data.testNamespace).Create(context.TODO(), genSFTPDeployment(), metav1.CreateOptions{}) + require.NoError(t, err) + _, err = data.clientset.CoreV1().Services(data.testNamespace).Create(context.TODO(), genSFTPService(), metav1.CreateOptions{}) + require.NoError(t, err) + failOnError(data.waitForDeploymentReady(t, data.testNamespace, "sftp", defaultTimeout), t) + + sec := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pcSecretName, + Namespace: pcSecretNamespace, + }, + Data: map[string][]byte{ + "username": []byte(secretUserName), + "password": []byte(secretPassword), + }, + } + _, err = data.clientset.CoreV1().Secrets(pcSecretNamespace).Create(context.TODO(), sec, metav1.CreateOptions{}) + require.NoError(t, err) + defer data.clientset.CoreV1().Secrets(pcSecretNamespace).Delete(context.TODO(), pcSecretName, metav1.DeleteOptions{}) + + t.Run("testPacketCaptureBasic", func(t *testing.T) { + testPacketCaptureBasic(t, data) + }) + t.Run("testPacketCapture", func(t *testing.T) { + testPacketCapture(t, data) + }) +} + +func testPacketCapture(t *testing.T, data *TestData) { + nodeIdx := 0 + if len(clusterInfo.windowsNodes) != 0 { + nodeIdx = clusterInfo.windowsNodes[0] + } + node1 := nodeName(nodeIdx) + + err := data.createServerPodWithLabels(tcpServerPodName, data.testNamespace, serverPodPort, nil) + require.NoError(t, err) + err = data.createToolboxPodOnNode(pcToolboxPodName, data.testNamespace, node1, false) + require.NoError(t, err) + + svc, cleanup := data.createAgnhostServiceAndBackendPods(t, dstServiceName, data.testNamespace, node1, v1.ServiceTypeClusterIP) + defer cleanup() + t.Logf("%s Service is ready", dstServiceName) + dstServiceIP = svc.Spec.ClusterIP + + podIPs := waitForPodIPs(t, data, []PodInfo{ + {tcpServerPodName, getOSString(), "", data.testNamespace}, + {pcToolboxPodName, getOSString(), "", data.testNamespace}, + }) + + // Give a little time for Windows containerd Nodes to set up OVS. + // Containerd configures port asynchronously, which could cause execution time of installing flow longer than docker. 
+ time.Sleep(time.Second * 1) + + tcpServerPodIP := podIPs[tcpServerPodName].IPv4.String() + + testcases := []pcTestCase{ + { + name: "to-ipv4-ip", + ipVersion: 4, + srcPod: pcToolboxPodName, + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{ + Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, pcToolboxPodName, data.testNamespace, tcpServerPodName)), + }, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: data.testNamespace, + Name: pcToolboxPodName, + }, + }, + Destination: crdv1alpha1.Destination{ + IP: &tcpServerPodIP, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + FileServer: &crdv1alpha1.BundleFileServer{ + URL: fmt.Sprintf("sftp://%s:30010/upload", controlPlaneNodeIPv4()), + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &tcpProto, + IPFamily: v1.IPv4Protocol, + TransportHeader: crdv1alpha1.TransportHeader{ + TCP: &crdv1alpha1.TCPHeader{ + DstPort: &testServerPort, + }, + }, + }, + }, + }, + + expectedPhase: crdv1alpha1.PacketCaptureSucceeded, + expectedNum: 5, + }, + { + name: "to-svc", + ipVersion: 4, + srcPod: pcToolboxPodName, + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{ + Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, pcToolboxPodName, data.testNamespace, tcpServerPodName)), + }, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: data.testNamespace, + Name: pcToolboxPodName, + }, + }, + Destination: crdv1alpha1.Destination{ + Service: &crdv1alpha1.ServiceReference{ + Name: dstServiceName, + Namespace: data.testNamespace, + }, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + FileServer: &crdv1alpha1.BundleFileServer{ + URL: fmt.Sprintf("sftp://%s:30010/upload", controlPlaneNodeIPv4()), + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &tcpProto, + IPFamily: v1.IPv4Protocol, + TransportHeader: crdv1alpha1.TransportHeader{ + TCP: &crdv1alpha1.TCPHeader{ + DstPort: &testServerPort, + }, + }, + }, + }, + }, + + expectedPhase: crdv1alpha1.PacketCaptureSucceeded, + expectedNum: 5, + }, + } + t.Run("testPacketCapture", func(t *testing.T) { + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + runPacketCaptureTest(t, data, tc) + }) + } + }) + +} + +// testPacketCaptureTCP verifies if PacketCapture can capture tcp packets. this function only contains basic +// cases with pod-to-pod. 
+func testPacketCaptureBasic(t *testing.T, data *TestData) { + nodeIdx := 0 + if len(clusterInfo.windowsNodes) != 0 { + nodeIdx = clusterInfo.windowsNodes[0] + } + node1 := nodeName(nodeIdx) + + node1Pods, _, _ := createTestAgnhostPods(t, data, 3, data.testNamespace, node1) + err := createUDPServerPod(udpServerPodName, data.testNamespace, serverPodPort, node1) + defer data.DeletePodAndWait(defaultTimeout, udpServerPodName, data.testNamespace) + require.NoError(t, err) + // test tcp server pod + err = data.createServerPodWithLabels(tcpServerPodName, data.testNamespace, serverPodPort, nil) + defer data.DeletePodAndWait(defaultTimeout, tcpServerPodName, data.testNamespace) + require.NoError(t, err) + err = data.createToolboxPodOnNode(pcToolboxPodName, data.testNamespace, node1, false) + defer data.DeletePodAndWait(defaultTimeout, pcToolboxPodName, data.testNamespace) + require.NoError(t, err) + + // Give a little time for Windows containerd Nodes to set up OVS. + // Containerd configures port asynchronously, which could cause execution time of installing flow longer than docker. + time.Sleep(time.Second * 1) + + testcases := []pcTestCase{ + { + name: "ipv4-tcp", + ipVersion: 4, + srcPod: pcToolboxPodName, + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{ + Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, pcToolboxPodName, data.testNamespace, tcpServerPodName)), + }, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: data.testNamespace, + Name: pcToolboxPodName, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: data.testNamespace, + Name: tcpServerPodName, + }, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + FileServer: &crdv1alpha1.BundleFileServer{ + URL: fmt.Sprintf("sftp://%s:30010/upload", controlPlaneNodeIPv4()), + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &tcpProto, + IPFamily: v1.IPv4Protocol, + TransportHeader: crdv1alpha1.TransportHeader{ + TCP: &crdv1alpha1.TCPHeader{ + DstPort: &testServerPort, + }, + }, + }, + }, + }, + expectedPhase: crdv1alpha1.PacketCaptureSucceeded, + expectedNum: 5, + }, + { + name: "ipv4-udp", + ipVersion: 4, + srcPod: pcToolboxPodName, + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{ + Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, pcToolboxPodName, data.testNamespace, udpServerPodName)), + }, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: data.testNamespace, + Name: pcToolboxPodName, + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: data.testNamespace, + Name: udpServerPodName, + }, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + FileServer: &crdv1alpha1.BundleFileServer{ + URL: fmt.Sprintf("sftp://%s:30010/upload", controlPlaneNodeIPv4()), + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &udpProto, + IPFamily: v1.IPv4Protocol, + TransportHeader: crdv1alpha1.TransportHeader{ + UDP: &crdv1alpha1.UDPHeader{ + DstPort: &testServerPort, + }, + }, + }, + }, + }, + expectedPhase: crdv1alpha1.PacketCaptureSucceeded, + expectedNum: 5, + }, + { + name: "ipv4-icmp", + ipVersion: 4, + srcPod: node1Pods[0], + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{ + Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", 
data.testNamespace, node1Pods[0], data.testNamespace, node1Pods[1])), + }, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: data.testNamespace, + Name: node1Pods[0], + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: data.testNamespace, + Name: node1Pods[1], + }, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + FileServer: &crdv1alpha1.BundleFileServer{ + URL: fmt.Sprintf("sftp://%s:30010/upload", controlPlaneNodeIPv4()), + }, + Packet: &crdv1alpha1.Packet{ + Protocol: &icmpProto, + IPFamily: v1.IPv4Protocol, + }, + }, + }, + expectedPhase: crdv1alpha1.PacketCaptureSucceeded, + expectedNum: 5, + }, + { + name: "ipv6-icmp", + ipVersion: 6, + srcPod: node1Pods[0], + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{ + Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-ipv6", data.testNamespace, node1Pods[0], data.testNamespace, node1Pods[1])), + }, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: data.testNamespace, + Name: node1Pods[0], + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: data.testNamespace, + Name: node1Pods[1], + }, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + FileServer: &crdv1alpha1.BundleFileServer{ + URL: fmt.Sprintf("sftp://%s:30010/upload", controlPlaneNodeIPv4()), + }, + Packet: &crdv1alpha1.Packet{ + IPFamily: v1.IPv6Protocol, + Protocol: &icmp6Proto, + }, + }, + }, + expectedPhase: crdv1alpha1.PacketCaptureSucceeded, + expectedNum: 5, + }, + { + + name: "non-exist-pod", + ipVersion: 4, + srcPod: node1Pods[0], + pc: &crdv1alpha1.PacketCapture{ + ObjectMeta: metav1.ObjectMeta{ + Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, node1Pods[0], data.testNamespace, nonExistPodName)), + }, + Spec: crdv1alpha1.PacketCaptureSpec{ + Source: crdv1alpha1.Source{ + Pod: &crdv1alpha1.PodReference{ + Namespace: data.testNamespace, + Name: node1Pods[0], + }, + }, + Destination: crdv1alpha1.Destination{ + Pod: &crdv1alpha1.PodReference{ + Namespace: data.testNamespace, + Name: nonExistPodName, + }, + }, + CaptureConfig: crdv1alpha1.CaptureConfig{ + FirstN: &crdv1alpha1.PacketCaptureFirstNConfig{ + Number: 5, + }, + }, + FileServer: &crdv1alpha1.BundleFileServer{ + URL: fmt.Sprintf("sftp://%s:30010/upload", controlPlaneNodeIPv4()), + }, + }, + }, + expectedPhase: crdv1alpha1.PacketCaptureFailed, + expectedReason: fmt.Sprintf("Node: %s, Error: failed to get the destination pod %s/%s: pods \"%s\" not found", node1, data.testNamespace, nonExistPodName, nonExistPodName), + }, + } + t.Run("testPacketCaptureBasic", func(t *testing.T) { + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + runPacketCaptureTest(t, data, tc) + }) + } + }) +} + +func getOSString() string { + if len(clusterInfo.windowsNodes) != 0 { + return "windows" + } else { + return "linux" + } +} + +func runPacketCaptureTest(t *testing.T, data *TestData, tc pcTestCase) { + switch tc.ipVersion { + case 4: + skipIfNotIPv4Cluster(t) + case 6: + skipIfNotIPv6Cluster(t) + } + // wait for toolbox + waitForPodIPs(t, data, []PodInfo{{pcToolboxPodName, getOSString(), "", data.testNamespace}}) + + dstPodName := "" + if tc.pc.Spec.Destination.Pod != nil { + dstPodName = 
tc.pc.Spec.Destination.Pod.Name
+	}
+	var dstPodIPs *PodIPs
+	if dstPodName != nonExistPodName && dstPodName != "" {
+		// Wait for the Pods to be ready first, or the PacketCapture will skip installing flows.
+		podIPs := waitForPodIPs(t, data, []PodInfo{{dstPodName, getOSString(), "", data.testNamespace}})
+		dstPodIPs = podIPs[dstPodName]
+	}
+
+	if _, err := data.crdClient.CrdV1alpha1().PacketCaptures().Create(context.TODO(), tc.pc, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Error when creating PacketCapture: %v", err)
+	}
+	defer func() {
+		if err := data.crdClient.CrdV1alpha1().PacketCaptures().Delete(context.TODO(), tc.pc.Name, metav1.DeleteOptions{}); err != nil {
+			t.Errorf("Error when deleting PacketCapture: %v", err)
+		}
+	}()
+
+	if dstPodName != nonExistPodName {
+		srcPod := tc.srcPod
+		if dstIP := tc.pc.Spec.Destination.IP; dstIP != nil {
+			ip := net.ParseIP(*dstIP)
+			if ip.To4() != nil {
+				dstPodIPs = &PodIPs{IPv4: &ip}
+			} else {
+				dstPodIPs = &PodIPs{IPv6: &ip}
+			}
+		} else if tc.pc.Spec.Destination.Service != nil {
+			ip := net.ParseIP(dstServiceIP)
+			if ip.To4() != nil {
+				dstPodIPs = &PodIPs{IPv4: &ip}
+			} else {
+				dstPodIPs = &PodIPs{IPv6: &ip}
+			}
+		}
+		// Give a little time for Nodes to install OVS flows.
+		time.Sleep(time.Second * 2)
+		protocol := tc.pc.Spec.Packet.Protocol.IntVal
+		server := dstPodIPs.IPv4.String()
+		if tc.ipVersion == 6 {
+			server = dstPodIPs.IPv6.String()
+		}
+		// Send an ICMP echo packet from the source Pod to the destination.
+		if protocol == protocolICMP || protocol == protocolICMPv6 {
+			if err := data.RunPingCommandFromTestPod(PodInfo{srcPod, getOSString(), "", data.testNamespace},
+				data.testNamespace, dstPodIPs, agnhostContainerName, 10, 0, false); err != nil {
+				t.Logf("Ping(%d) '%s' -> '%v' failed: ERROR (%v)", protocol, srcPod, *dstPodIPs, err)
+			}
+		} else if protocol == protocolTCP {
+			for i := 1; i <= 5; i++ {
+				if err := data.runNetcatCommandFromTestPodWithProtocol(tc.srcPod, data.testNamespace, toolboxContainerName, server, serverPodPort, "tcp"); err != nil {
+					t.Logf("Netcat(TCP) '%s' -> '%v' failed: ERROR (%v)", srcPod, server, err)
+				}
+			}
+		} else if protocol == protocolUDP {
+			for i := 1; i <= 5; i++ {
+				if err := data.runNetcatCommandFromTestPodWithProtocol(tc.srcPod, data.testNamespace, toolboxContainerName, server, serverPodPort, "udp"); err != nil {
+					t.Logf("Netcat(UDP) '%s' -> '%v' failed: ERROR (%v)", srcPod, server, err)
+				}
+			}
+		}
+	}
+
+	pc, err := data.waitForPacketCapture(t, tc.pc.Name, tc.expectedPhase)
+	if err != nil {
+		t.Fatalf("Error: Get PacketCapture failed: %v", err)
+	}
+	if tc.expectedPhase == crdv1alpha1.PacketCaptureFailed {
+		if pc.Status.Reason != tc.expectedReason {
+			t.Fatalf("Error: PacketCapture Error Reason should be %v, but got %s", tc.expectedReason, pc.Status.Reason)
+		}
+	}
+	captured := pc.Status.NumCapturedPackets
+	if captured == nil || *captured != tc.expectedNum {
+		got := "nil"
+		if captured != nil {
+			// Format the captured packet count as a decimal string; string(*captured) would interpret the
+			// int32 as a rune and produce an unrelated character.
+			got = fmt.Sprint(*captured)
+		}
+		if captured != nil || tc.expectedNum != 0 {
+			t.Fatalf("Error: PacketCapture captured packets count should be %v, but got %v", tc.expectedNum, got)
+		}
+	}
+
+}
+
+func (data *TestData) waitForPacketCapture(t *testing.T, name string, phase crdv1alpha1.PacketCapturePhase) (*crdv1alpha1.PacketCapture, error) {
+	var pc *crdv1alpha1.PacketCapture
+	var err error
+	timeout := 15 * time.Second
+	if err = wait.PollUntilContextTimeout(context.Background(), defaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		pc, err = 
data.crdClient.CrdV1alpha1().PacketCaptures().Get(ctx, name, metav1.GetOptions{}) + if err != nil || pc.Status.Phase != phase { + return false, nil + } + return true, nil + }); err != nil { + if pc != nil { + t.Errorf("Latest PacketCapture status: %s %v", pc.Name, pc.Status) + } + return nil, err + } + return pc, nil +} diff --git a/test/integration/agent/openflow_test.go b/test/integration/agent/openflow_test.go index a6d7fe72582..4732972c9aa 100644 --- a/test/integration/agent/openflow_test.go +++ b/test/integration/agent/openflow_test.go @@ -120,7 +120,7 @@ func TestConnectivityFlows(t *testing.T) { antrearuntime.WindowsOS = runtime.GOOS } - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, false, false, false, false, false, groupIDAllocator, false, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) defer func() { @@ -176,7 +176,7 @@ func TestAntreaFlexibleIPAMConnectivityFlows(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, true, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, true, false, false, false, false, groupIDAllocator, false, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) defer func() { @@ -239,7 +239,7 @@ func TestReplayFlowsConnectivityFlows(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, false, false, false, false, false, groupIDAllocator, false, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) @@ -281,7 +281,7 @@ func TestReplayFlowsNetworkPolicyFlows(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) @@ -466,7 +466,7 @@ func TestNetworkPolicyFlows(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, false, false, false, false, false, false, false, false, false, 
false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br)) @@ -580,7 +580,7 @@ func TestIPv6ConnectivityFlows(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, false, false, false, false, false, groupIDAllocator, false, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) @@ -621,7 +621,7 @@ func TestProxyServiceFlowsAntreaPolicyDisabled(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br)) @@ -711,7 +711,7 @@ func TestProxyServiceFlowsAntreaPoilcyEnabled(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, true, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, true, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br)) @@ -1793,7 +1793,7 @@ func testEgressMarkFlows(t *testing.T, trafficShaping bool) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), false, false, false, true, trafficShaping, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), false, false, false, true, trafficShaping, false, false, false, false, false, false, false, false, groupIDAllocator, false, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br)) @@ -1850,7 +1850,7 @@ func TestTrafficControlFlows(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), false, false, false, false, false, false, false, false, false, false, true, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), 
false, false, false, false, false, false, false, false, false, false, true, false, false, groupIDAllocator, false, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br))