diff --git a/ci/kind/test-e2e-kind.sh b/ci/kind/test-e2e-kind.sh index 43301936b6a..19f9913e753 100755 --- a/ci/kind/test-e2e-kind.sh +++ b/ci/kind/test-e2e-kind.sh @@ -201,7 +201,7 @@ COMMON_IMAGES_LIST=("registry.k8s.io/e2e-test-images/agnhost:2.29" \ "projects.registry.vmware.com/antrea/nginx:1.21.6-alpine" \ "projects.registry.vmware.com/antrea/toolbox:1.1-0") -FLOW_VISIBILITY_IMAGE_LIST=("projects.registry.vmware.com/antrea/ipfix-collector:v0.6.2" \ +FLOW_VISIBILITY_IMAGE_LIST=("projects.registry.vmware.com/antrea/ipfix-collector:v0.8.2" \ "projects.registry.vmware.com/antrea/clickhouse-operator:0.21.0" \ "projects.registry.vmware.com/antrea/metrics-exporter:0.21.0" \ "projects.registry.vmware.com/antrea/clickhouse-server:23.4") diff --git a/go.mod b/go.mod index cf54ac7e22f..04d22eb8a4a 100644 --- a/go.mod +++ b/go.mod @@ -49,17 +49,17 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 - github.com/ti-mo/conntrack v0.4.0 - github.com/vishvananda/netlink v1.1.1-0.20211101163509-b10eb8fe5cf6 - github.com/vmware/go-ipfix v0.7.0 - go.uber.org/mock v0.3.0 - golang.org/x/crypto v0.14.0 - golang.org/x/mod v0.13.0 - golang.org/x/net v0.17.0 - golang.org/x/sync v0.4.0 - golang.org/x/sys v0.13.0 - golang.org/x/time v0.3.0 - golang.org/x/tools v0.14.0 + github.com/ti-mo/conntrack v0.5.0 + github.com/vishvananda/netlink v1.2.1-beta.2 + github.com/vmware/go-ipfix v0.8.2 + go.uber.org/mock v0.4.0 + golang.org/x/crypto v0.17.0 + golang.org/x/mod v0.14.0 + golang.org/x/net v0.19.0 + golang.org/x/sync v0.5.0 + golang.org/x/sys v0.15.0 + golang.org/x/time v0.5.0 + golang.org/x/tools v0.16.1 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20210506160403-92e472f520a5 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 @@ -156,7 +156,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/josharian/native v1.0.0 // indirect + github.com/josharian/native v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.15.14 // indirect github.com/kr/fs v0.1.0 // indirect @@ -167,8 +167,8 @@ require ( github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mdlayher/genetlink v1.0.0 // indirect - github.com/mdlayher/netlink v1.4.0 // indirect - github.com/mdlayher/socket v0.2.1 // indirect + github.com/mdlayher/netlink v1.7.2 // indirect + github.com/mdlayher/socket v0.4.1 // indirect github.com/mitchellh/go-wordwrap v1.0.0 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect @@ -195,8 +195,8 @@ require ( github.com/segmentio/asm v1.2.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect - github.com/ti-mo/netfilter v0.3.1 // indirect - github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect + github.com/ti-mo/netfilter v0.5.0 // indirect + github.com/vishvananda/netns v0.0.4 // indirect github.com/xlab/treeprint v1.1.0 // indirect gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f // indirect go.etcd.io/etcd/api/v3 v3.5.5 // indirect @@ -218,8 +218,8 @@ require ( go.uber.org/multierr v1.9.0 // indirect go.uber.org/zap v1.24.0 // indirect golang.org/x/oauth2 v0.12.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 
// indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.zx2c4.com/wireguard v0.0.0-20210427022245-097af6e1351b // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index 5e9e9aa00c2..1ffa23643d7 100644 --- a/go.sum +++ b/go.sum @@ -714,8 +714,9 @@ github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUB github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk= github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= @@ -723,7 +724,6 @@ github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqo github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw= github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= -github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b h1:c3NTyLNozICy8B4mlMXemD3z/gXgQzVXZS/HqT+i3do= github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -806,7 +806,6 @@ github.com/mdlayher/arp v0.0.0-20220221190821-c37aaafac7f9 h1:LxldC/UdEeJ+j3i/g5 github.com/mdlayher/arp v0.0.0-20220221190821-c37aaafac7f9/go.mod h1:kfOoFJuHWp76v1RgZCb9/gVUc7XdY877S2uVYbNliGc= github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE= github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118/go.mod h1:ZFUnHIVchZ9lJoWoEGUg8Q3M4U8aNNWA3CVSUTkW4og= -github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43 h1:WgyLFv10Ov49JAQI/ZLUkCZ7VJS3r74hwFIGXJsgZlY= github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= @@ -816,17 +815,18 @@ github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= 
-github.com/mdlayher/netlink v1.1.2-0.20201013204415-ded538f7f4be/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8= github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= -github.com/mdlayher/netlink v1.4.0 h1:n3ARR+Fm0dDv37dj5wSWZXDKcy+U0zwcXS3zKMnSiT0= github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= github.com/mdlayher/packet v1.0.0 h1:InhZJbdShQYt6XV2GPj5XHxChzOfhJJOMbvnGAmOfQ8= github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= -github.com/mdlayher/socket v0.2.1 h1:F2aaOwb53VsBE+ebRS9bLd7yPOfYUMC8lOODdCBDY6w= github.com/mdlayher/socket v0.2.1/go.mod h1:QLlNPkFR88mRUNQIzRBMfXxwKal8H7u1h3bL1CV+f0E= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= @@ -1090,10 +1090,10 @@ github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/ti-mo/conntrack v0.4.0 h1:6TZXNqhsJmeBl1Pyzg43Y0V1Nx8jyZ4dpOtItCVXE+8= -github.com/ti-mo/conntrack v0.4.0/go.mod h1:L0vkIzG/TECsuVYMMlID9QWmZQLjyP9gDq8XKTlbg4Q= -github.com/ti-mo/netfilter v0.3.1 h1:+ZTmeTx+64Jw2N/1gmqm42kruDWjQ90SMjWEB1e6VDs= -github.com/ti-mo/netfilter v0.3.1/go.mod h1:t/5HvCCHA1LAYj/AZF2fWcJ23BQTA7lzTPCuwwi7xQY= +github.com/ti-mo/conntrack v0.5.0 h1:OWiWm18gx6IA0c8FvLuXpcvHUsR0Cyw6FIFIZtYJ2W4= +github.com/ti-mo/conntrack v0.5.0/go.mod h1:xTW+s2bugPtNnx58p1yyz+UADwho2cZFom6SsK0UTw0= +github.com/ti-mo/netfilter v0.5.0 h1:MZmsUw5bFRecOb0AeyjOPxTHg4UxYzyEs0Ek/6Lxoy8= +github.com/ti-mo/netfilter v0.5.0/go.mod h1:nt+8B9hx/QpqHr7Hazq+2qMCCA8u2OTkyc/7+U9ARz8= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1109,15 +1109,16 @@ github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= 
-github.com/vishvananda/netlink v1.1.1-0.20211101163509-b10eb8fe5cf6 h1:167a2omrzz+nN9Of6lN/0yOB9itzw+IOioRThNZ30jA= github.com/vishvananda/netlink v1.1.1-0.20211101163509-b10eb8fe5cf6/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vmware/go-ipfix v0.7.0 h1:7dOth2p5eL01GKzyXg2sibJcD9Fhb8KeLrn/ysctiwE= -github.com/vmware/go-ipfix v0.7.0/go.mod h1:Y3YKMFN/Nec6QwmXcDae+uy6xuDgbejwRAZv9RTzS9c= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/vmware/go-ipfix v0.8.2 h1:7pnmXZpI0995psJgno4Bur5fr9PCxGQuKjCI/RYurzA= +github.com/vmware/go-ipfix v0.8.2/go.mod h1:NvEehcpptPOTBaLSkMA+88l2Oe8YNelVBdvj8PA/1d0= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -1203,8 +1204,8 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= -go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo= -go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= @@ -1240,8 +1241,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1278,8 +1279,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1325,7 +1326,6 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1342,8 +1342,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1368,8 +1368,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1446,7 +1446,6 @@ golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201017003518-b09fb700fbb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1486,15 +1485,15 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1507,16 +1506,16 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1584,8 +1583,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/agent/controller/networkpolicy/packetin.go b/pkg/agent/controller/networkpolicy/packetin.go index d0d3aa3b2c7..7d7561ebf6d 100644 --- a/pkg/agent/controller/networkpolicy/packetin.go +++ b/pkg/agent/controller/networkpolicy/packetin.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "net" + "net/netip" "time" "antrea.io/libOpenflow/openflow15" @@ -107,6 +108,7 @@ func (c *Controller) storeDenyConnection(pktIn *ofctrl.PacketIn) error { if err != nil { return fmt.Errorf("error in parsing packetIn: %v", err) } + matchers := pktIn.GetMatches() // Get 5-tuple information tuple := flowexporter.Tuple{ @@ -125,6 +127,15 @@ func (c *Controller) storeDenyConnection(pktIn *ofctrl.PacketIn) error { denyConn.FlowKey = tuple denyConn.DestinationServiceAddress = tuple.DestinationAddress denyConn.DestinationServicePort = tuple.DestinationPort + denyConn.Mark = getCTMarkValue(matchers) + dstSvcAddress 
:= getCTNwDstValue(matchers) + dstSvcPort := getCTTpDstValue(matchers) + if dstSvcAddress.IsValid() { + denyConn.DestinationServiceAddress = dstSvcAddress + } + if dstSvcPort != 0 { + denyConn.DestinationServicePort = dstSvcPort + } // No need to obtain connection info again if it already exists in denyConnectionStore. if conn, exist := c.denyConnStore.GetConnByKey(flowexporter.NewConnectionKey(&denyConn)); exist { @@ -132,7 +143,6 @@ func (c *Controller) storeDenyConnection(pktIn *ofctrl.PacketIn) error { return nil } - matchers := pktIn.GetMatches() var match *ofctrl.MatchField // Get table ID tableID := getPacketInTableID(pktIn) @@ -224,3 +234,47 @@ func getPacketInTableID(pktIn *ofctrl.PacketIn) uint8 { } return tableID } + +func getCTMarkValue(matchers *ofctrl.Matchers) uint32 { + ctMark := matchers.GetMatchByName("NXM_NX_CT_MARK") + if ctMark == nil { + return 0 + } + ctMarkValue, ok := ctMark.GetValue().(uint32) + if !ok { + return 0 + } + return ctMarkValue +} + +func getCTNwDstValue(matchers *ofctrl.Matchers) netip.Addr { + nwDst := matchers.GetMatchByName("NXM_NX_CT_NW_DST") + if nwDst != nil { + if nwDstValue, ok := nwDst.GetValue().(net.IP); ok { + if ip, ok := netip.AddrFromSlice(nwDstValue.To4()); ok { + return ip + } + } + } + nwDst = matchers.GetMatchByName("NXM_NX_CT_IPV6_DST") + if nwDst != nil { + if nwDstValue, ok := nwDst.GetValue().(net.IP); ok { + if ip, ok := netip.AddrFromSlice(nwDstValue.To16()); ok { + return ip + } + } + } + return netip.Addr{} +} + +func getCTTpDstValue(matchers *ofctrl.Matchers) uint16 { + port := matchers.GetMatchByName("NXM_NX_CT_TP_DST") + if port == nil { + return 0 + } + portValue, ok := port.GetValue().(uint16) + if !ok { + return 0 + } + return portValue +} diff --git a/pkg/agent/flowexporter/connections/connections.go b/pkg/agent/flowexporter/connections/connections.go index 926d804cd90..1f61abc5eb9 100644 --- a/pkg/agent/flowexporter/connections/connections.go +++ b/pkg/agent/flowexporter/connections/connections.go @@ -108,9 +108,6 @@ func (cs *connectionStore) fillPodInfo(conn *flowexporter.Connection) { srcPod, srcFound := cs.podStore.GetPodByIPAndTime(srcIP, conn.StartTime) dstPod, dstFound := cs.podStore.GetPodByIPAndTime(dstIP, conn.StartTime) - if !srcFound && !dstFound { - klog.Warningf("Cannot map any of the IP %s or %s to a local Pod", srcIP, dstIP) - } if srcFound { conn.SourcePodName = srcPod.Name conn.SourcePodNamespace = srcPod.Namespace @@ -125,10 +122,10 @@ func (cs *connectionStore) fillServiceInfo(conn *flowexporter.Connection, servic // resolve destination Service information if cs.antreaProxier != nil { servicePortName, exists := cs.antreaProxier.GetServiceByIP(serviceStr) - if !exists { - klog.Warningf("Could not retrieve the Service info from antrea-agent-proxier for the serviceStr: %s", serviceStr) - } else { + if exists { conn.DestinationServicePortName = servicePortName.String() + } else { + klog.InfoS("Could not retrieve the Service info from antrea-agent-proxier", "serviceStr", serviceStr) } } } diff --git a/pkg/agent/flowexporter/connections/conntrack_connections.go b/pkg/agent/flowexporter/connections/conntrack_connections.go index 1f729ff062d..76f2f7a7fae 100644 --- a/pkg/agent/flowexporter/connections/conntrack_connections.go +++ b/pkg/agent/flowexporter/connections/conntrack_connections.go @@ -253,6 +253,12 @@ func (cs *ConntrackConnectionStore) AddOrUpdateConn(conn *flowexporter.Connectio klog.V(4).InfoS("Antrea flow updated", "connection", existingConn) } else { cs.fillPodInfo(conn) + if 
conn.SourcePodName == "" && conn.DestinationPodName == "" { + // We don't add connections to connection map or expirePriorityQueue if we can't find the pod + // information for both srcPod and dstPod + klog.V(5).InfoS("Skip this connection as we cannot map any of the connection IPs to a local Pod", "srcIP", conn.FlowKey.SourceAddress.String(), "dstIP", conn.FlowKey.DestinationAddress.String()) + return + } if conn.Mark&openflow.ServiceCTMark.GetRange().ToNXRange().ToUint32Mask() == openflow.ServiceCTMark.GetValue() { clusterIP := conn.DestinationServiceAddress.String() svcPort := conn.DestinationServicePort diff --git a/pkg/agent/flowexporter/connections/deny_connections.go b/pkg/agent/flowexporter/connections/deny_connections.go index 0f60ff0daeb..6645c3a66a6 100644 --- a/pkg/agent/flowexporter/connections/deny_connections.go +++ b/pkg/agent/flowexporter/connections/deny_connections.go @@ -23,6 +23,7 @@ import ( "antrea.io/antrea/pkg/agent/flowexporter" "antrea.io/antrea/pkg/agent/flowexporter/priorityqueue" "antrea.io/antrea/pkg/agent/metrics" + "antrea.io/antrea/pkg/agent/openflow" "antrea.io/antrea/pkg/agent/proxy" "antrea.io/antrea/pkg/util/ip" "antrea.io/antrea/pkg/util/podstore" @@ -96,9 +97,17 @@ func (ds *DenyConnectionStore) AddOrUpdateConn(conn *flowexporter.Connection, ti conn.OriginalBytes = bytes conn.OriginalPackets = uint64(1) ds.fillPodInfo(conn) + if conn.SourcePodName == "" && conn.DestinationPodName == "" { + // We don't add connections to connection map or expirePriorityQueue if we can't find the pod + // information for both srcPod and dstPod + klog.V(5).InfoS("Skip this connection as we cannot map any of the connection IPs to a local Pod", "srcIP", conn.FlowKey.SourceAddress.String(), "dstIP", conn.FlowKey.DestinationAddress.String()) + return + } protocolStr := ip.IPProtocolNumberToString(conn.FlowKey.Protocol, "UnknownProtocol") serviceStr := fmt.Sprintf("%s:%d/%s", conn.DestinationServiceAddress, conn.DestinationServicePort, protocolStr) - ds.fillServiceInfo(conn, serviceStr) + if conn.Mark&openflow.ServiceCTMark.GetRange().ToNXRange().ToUint32Mask() == openflow.ServiceCTMark.GetValue() { + ds.fillServiceInfo(conn, serviceStr) + } metrics.TotalDenyConnections.Inc() conn.IsActive = true ds.connections[connKey] = conn diff --git a/pkg/agent/flowexporter/connections/deny_connections_test.go b/pkg/agent/flowexporter/connections/deny_connections_test.go index de9766ae311..26b18ba65f7 100644 --- a/pkg/agent/flowexporter/connections/deny_connections_test.go +++ b/pkg/agent/flowexporter/connections/deny_connections_test.go @@ -30,6 +30,7 @@ import ( "antrea.io/antrea/pkg/agent/flowexporter" "antrea.io/antrea/pkg/agent/metrics" + "antrea.io/antrea/pkg/agent/openflow" proxytest "antrea.io/antrea/pkg/agent/proxy/testing" podstoretest "antrea.io/antrea/pkg/util/podstore/testing" k8sproxy "antrea.io/antrea/third_party/proxy" @@ -49,48 +50,83 @@ func TestDenyConnectionStore_AddOrUpdateConn(t *testing.T) { Port: "255", Protocol: v1.ProtocolTCP, } - // flow for testing adding and updating - testFlow := flowexporter.Connection{ - StopTime: refTime.Add(-(time.Second * 20)), - StartTime: refTime.Add(-(time.Second * 20)), - FlowKey: tuple, - DestinationServiceAddress: tuple.DestinationAddress, - DestinationServicePort: tuple.DestinationPort, - OriginalBytes: uint64(60), - OriginalPackets: uint64(1), - IsActive: true, + tc := []struct { + name string + // flow for testing adding and updating + testFlow flowexporter.Connection + isSvc bool + }{ + { + name: "Flow not through service", + 
testFlow: flowexporter.Connection{ + StopTime: refTime.Add(-(time.Second * 20)), + StartTime: refTime.Add(-(time.Second * 20)), + FlowKey: tuple, + DestinationServiceAddress: tuple.DestinationAddress, + DestinationServicePort: tuple.DestinationPort, + OriginalBytes: uint64(60), + OriginalPackets: uint64(1), + IsActive: true, + Mark: 0, + }, + isSvc: false, + }, { + name: "Flow through service", + testFlow: flowexporter.Connection{ + StopTime: refTime.Add(-(time.Second * 20)), + StartTime: refTime.Add(-(time.Second * 20)), + FlowKey: tuple, + DestinationServiceAddress: tuple.DestinationAddress, + DestinationServicePort: tuple.DestinationPort, + OriginalBytes: uint64(60), + OriginalPackets: uint64(1), + IsActive: true, + Mark: openflow.ServiceCTMark.GetValue(), + }, + isSvc: true, + }, } - mockPodStore := podstoretest.NewMockInterface(ctrl) - mockProxier := proxytest.NewMockProxier(ctrl) - protocol, _ := lookupServiceProtocol(tuple.Protocol) - serviceStr := fmt.Sprintf("%s:%d/%s", tuple.DestinationAddress.String(), tuple.DestinationPort, protocol) - mockProxier.EXPECT().GetServiceByIP(serviceStr).Return(servicePortName, true) - mockPodStore.EXPECT().GetPodByIPAndTime(tuple.SourceAddress.String(), gomock.Any()).Return(nil, false) - mockPodStore.EXPECT().GetPodByIPAndTime(tuple.DestinationAddress.String(), gomock.Any()).Return(nil, false) + for _, c := range tc { + t.Run(c.name, func(t *testing.T) { + // Reset the metrics. + metrics.TotalDenyConnections.Set(0) + mockPodStore := podstoretest.NewMockInterface(ctrl) + mockProxier := proxytest.NewMockProxier(ctrl) + protocol, _ := lookupServiceProtocol(tuple.Protocol) + serviceStr := fmt.Sprintf("%s:%d/%s", tuple.DestinationAddress.String(), tuple.DestinationPort, protocol) + if c.isSvc { + mockProxier.EXPECT().GetServiceByIP(serviceStr).Return(servicePortName, true) + } + mockPodStore.EXPECT().GetPodByIPAndTime(tuple.SourceAddress.String(), gomock.Any()).Return(pod1, true) + mockPodStore.EXPECT().GetPodByIPAndTime(tuple.DestinationAddress.String(), gomock.Any()).Return(pod1, true) - denyConnStore := NewDenyConnectionStore(mockPodStore, mockProxier, testFlowExporterOptions) + denyConnStore := NewDenyConnectionStore(mockPodStore, mockProxier, testFlowExporterOptions) - denyConnStore.AddOrUpdateConn(&testFlow, refTime.Add(-(time.Second * 20)), uint64(60)) - expConn := testFlow - expConn.DestinationServicePortName = servicePortName.String() - actualConn, ok := denyConnStore.GetConnByKey(flowexporter.NewConnectionKey(&testFlow)) - assert.Equal(t, ok, true, "deny connection should be there in deny connection store") - assert.Equal(t, expConn, *actualConn, "deny connections should be equal") - assert.Equal(t, 1, denyConnStore.connectionStore.expirePriorityQueue.Len(), "Length of the expire priority queue should be 1") - assert.Equal(t, refTime.Add(-(time.Second * 20)), actualConn.LastExportTime, "LastExportTime should be set to StartTime during Add") - checkDenyConnectionMetrics(t, len(denyConnStore.connections)) + denyConnStore.AddOrUpdateConn(&c.testFlow, refTime.Add(-(time.Second * 20)), uint64(60)) + expConn := c.testFlow + if c.isSvc { + expConn.DestinationServicePortName = servicePortName.String() + } + actualConn, ok := denyConnStore.GetConnByKey(flowexporter.NewConnectionKey(&c.testFlow)) + assert.Equal(t, ok, true, "deny connection should be there in deny connection store") + assert.Equal(t, expConn, *actualConn, "deny connections should be equal") + assert.Equal(t, 1, denyConnStore.connectionStore.expirePriorityQueue.Len(), "Length of the 
expire priority queue should be 1") + assert.Equal(t, refTime.Add(-(time.Second * 20)), actualConn.LastExportTime, "LastExportTime should be set to StartTime during Add") + checkDenyConnectionMetrics(t, len(denyConnStore.connections)) - denyConnStore.AddOrUpdateConn(&testFlow, refTime.Add(-(time.Second * 10)), uint64(60)) - expConn.OriginalBytes = uint64(120) - expConn.OriginalPackets = uint64(2) - expConn.StopTime = refTime.Add(-(time.Second * 10)) - actualConn, ok = denyConnStore.GetConnByKey(flowexporter.NewConnectionKey(&testFlow)) - assert.Equal(t, ok, true, "deny connection should be there in deny connection store") - assert.Equal(t, expConn, *actualConn, "deny connections should be equal") - assert.True(t, actualConn.IsActive) - assert.Equal(t, 1, denyConnStore.connectionStore.expirePriorityQueue.Len()) - assert.Equal(t, refTime.Add(-(time.Second * 20)), actualConn.LastExportTime, "LastExportTime should not be changed during Update") - checkDenyConnectionMetrics(t, len(denyConnStore.connections)) + denyConnStore.AddOrUpdateConn(&c.testFlow, refTime.Add(-(time.Second * 10)), uint64(60)) + expConn.OriginalBytes = uint64(120) + expConn.OriginalPackets = uint64(2) + expConn.StopTime = refTime.Add(-(time.Second * 10)) + actualConn, ok = denyConnStore.GetConnByKey(flowexporter.NewConnectionKey(&c.testFlow)) + assert.Equal(t, ok, true, "deny connection should be there in deny connection store") + assert.Equal(t, expConn, *actualConn, "deny connections should be equal") + assert.True(t, actualConn.IsActive) + assert.Equal(t, 1, denyConnStore.connectionStore.expirePriorityQueue.Len()) + assert.Equal(t, refTime.Add(-(time.Second * 20)), actualConn.LastExportTime, "LastExportTime should not be changed during Update") + checkDenyConnectionMetrics(t, len(denyConnStore.connections)) + }) + } } func checkDenyConnectionMetrics(t *testing.T, numConns int) { diff --git a/pkg/agent/flowexporter/exporter/exporter.go b/pkg/agent/flowexporter/exporter/exporter.go index fb853f895c5..5d7c6322252 100644 --- a/pkg/agent/flowexporter/exporter/exporter.go +++ b/pkg/agent/flowexporter/exporter/exporter.go @@ -256,8 +256,14 @@ func (exp *FlowExporter) Run(stopCh <-chan struct{}) { func (exp *FlowExporter) sendFlowRecords() (time.Duration, error) { currTime := time.Now() var expireTime1, expireTime2 time.Duration - exp.expiredConns, expireTime1 = exp.conntrackConnStore.GetExpiredConns(exp.expiredConns, currTime, maxConnsToExport) + // We export records from denyConnStore first, then conntrackConnStore. We enforce the ordering to handle a + // special case: for an inter-node connection with egress drop network policy, both conntrackConnStore and + // denyConnStore from the same Node will send out records to Flow Aggregator. If the record from conntrackConnStore + // arrives FA first, FA will not be able to capture the deny network policy metadata, and it will keep waiting + // for a record from destination Node to finish flow correlation until timeout. Later on we probably should + // consider doing a record deduplication between conntrackConnStore and denyConnStore before exporting records. exp.expiredConns, expireTime2 = exp.denyConnStore.GetExpiredConns(exp.expiredConns, currTime, maxConnsToExport) + exp.expiredConns, expireTime1 = exp.conntrackConnStore.GetExpiredConns(exp.expiredConns, currTime, maxConnsToExport) // Select the shorter time out among two connection stores to do the next round of export. 
nextExpireTime := getMinTime(expireTime1, expireTime2) for i := range exp.expiredConns { diff --git a/pkg/agent/flowexporter/exporter/exporter_test.go b/pkg/agent/flowexporter/exporter/exporter_test.go index 695c9229837..9d46a9dc667 100644 --- a/pkg/agent/flowexporter/exporter/exporter_test.go +++ b/pkg/agent/flowexporter/exporter/exporter_test.go @@ -501,7 +501,8 @@ func getDenyConnection(isIPv6 bool, protoID uint8) *flowexporter.Connection { tuple = flowexporter.Tuple{SourceAddress: srcIP, DestinationAddress: dstIP, Protocol: protoID, SourcePort: 65280, DestinationPort: 255} } conn := &flowexporter.Connection{ - FlowKey: tuple, + FlowKey: tuple, + SourcePodName: "pod", } return conn } diff --git a/pkg/antctl/transform/common/transform.go b/pkg/antctl/transform/common/transform.go index bf0a5df5acc..9f9a1dd7e4c 100644 --- a/pkg/antctl/transform/common/transform.go +++ b/pkg/antctl/transform/common/transform.go @@ -57,6 +57,10 @@ func Int64ToString(val int64) string { return strconv.Itoa(int(val)) } +func BoolToString(val bool) string { + return strconv.FormatBool(val) +} + func GenerateTableElementWithSummary(list []string, maxColumnLength int) string { element := "" sort.Strings(list) diff --git a/pkg/flowaggregator/apiserver/handlers/recordmetrics/handler.go b/pkg/flowaggregator/apiserver/handlers/recordmetrics/handler.go index 960de52d3d9..b41448f08a0 100644 --- a/pkg/flowaggregator/apiserver/handlers/recordmetrics/handler.go +++ b/pkg/flowaggregator/apiserver/handlers/recordmetrics/handler.go @@ -26,10 +26,14 @@ import ( // Response is the response struct of recordmetrics command. type Response struct { - NumRecordsExported int64 `json:"numRecordsExported,omitempty"` - NumRecordsReceived int64 `json:"numRecordsReceived,omitempty"` - NumFlows int64 `json:"numFlows,omitempty"` - NumConnToCollector int64 `json:"numConnToCollector,omitempty"` + NumRecordsExported int64 `json:"numRecordsExported,omitempty"` + NumRecordsReceived int64 `json:"numRecordsReceived,omitempty"` + NumFlows int64 `json:"numFlows,omitempty"` + NumConnToCollector int64 `json:"numConnToCollector,omitempty"` + WithClickHouseExporter bool `json:"withClickHouseExporter,omitempty"` + WithS3Exporter bool `json:"withS3Exporter,omitempty"` + WithLogExporter bool `json:"withLogExporter,omitempty"` + WithIPFIXExporter bool `json:"withIPFIXExporter,omitempty"` } // HandleFunc returns the function which can handle the /recordmetrics API request. 
@@ -37,10 +41,14 @@ func HandleFunc(faq querier.FlowAggregatorQuerier) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { metrics := faq.GetRecordMetrics() metricsResponse := Response{ - NumRecordsExported: metrics.NumRecordsExported, - NumRecordsReceived: metrics.NumRecordsReceived, - NumFlows: metrics.NumFlows, - NumConnToCollector: metrics.NumConnToCollector, + NumRecordsExported: metrics.NumRecordsExported, + NumRecordsReceived: metrics.NumRecordsReceived, + NumFlows: metrics.NumFlows, + NumConnToCollector: metrics.NumConnToCollector, + WithClickHouseExporter: metrics.WithClickHouseExporter, + WithS3Exporter: metrics.WithS3Exporter, + WithLogExporter: metrics.WithLogExporter, + WithIPFIXExporter: metrics.WithIPFIXExporter, } err := json.NewEncoder(w).Encode(metricsResponse) if err != nil { @@ -51,7 +59,7 @@ func HandleFunc(faq querier.FlowAggregatorQuerier) http.HandlerFunc { } func (r Response) GetTableHeader() []string { - return []string{"RECORDS-EXPORTED", "RECORDS-RECEIVED", "FLOWS", "EXPORTERS-CONNECTED"} + return []string{"RECORDS-EXPORTED", "RECORDS-RECEIVED", "FLOWS", "EXPORTERS-CONNECTED", "CLICKHOUSE-EXPORTER", "S3-EXPORTER", "LOG-EXPORTER", "IPFIX-EXPORTER"} } func (r Response) GetTableRow(maxColumnLength int) []string { @@ -60,6 +68,10 @@ func (r Response) GetTableRow(maxColumnLength int) []string { common.Int64ToString(r.NumRecordsReceived), common.Int64ToString(r.NumFlows), common.Int64ToString(r.NumConnToCollector), + common.BoolToString(r.WithClickHouseExporter), + common.BoolToString(r.WithS3Exporter), + common.BoolToString(r.WithLogExporter), + common.BoolToString(r.WithIPFIXExporter), } } diff --git a/pkg/flowaggregator/apiserver/handlers/recordmetrics/handler_test.go b/pkg/flowaggregator/apiserver/handlers/recordmetrics/handler_test.go index eb1af81495f..45a699e7fa1 100644 --- a/pkg/flowaggregator/apiserver/handlers/recordmetrics/handler_test.go +++ b/pkg/flowaggregator/apiserver/handlers/recordmetrics/handler_test.go @@ -31,10 +31,14 @@ func TestRecordMetricsQuery(t *testing.T) { ctrl := gomock.NewController(t) faq := queriertest.NewMockFlowAggregatorQuerier(ctrl) faq.EXPECT().GetRecordMetrics().Return(querier.Metrics{ - NumRecordsExported: 20, - NumRecordsReceived: 15, - NumFlows: 30, - NumConnToCollector: 1, + NumRecordsExported: 20, + NumRecordsReceived: 15, + NumFlows: 30, + NumConnToCollector: 1, + WithClickHouseExporter: true, + WithS3Exporter: true, + WithLogExporter: true, + WithIPFIXExporter: true, }) handler := HandleFunc(faq) @@ -48,12 +52,16 @@ func TestRecordMetricsQuery(t *testing.T) { err = json.Unmarshal(recorder.Body.Bytes(), &received) assert.Nil(t, err) assert.Equal(t, Response{ - NumRecordsExported: 20, - NumRecordsReceived: 15, - NumFlows: 30, - NumConnToCollector: 1, + NumRecordsExported: 20, + NumRecordsReceived: 15, + NumFlows: 30, + NumConnToCollector: 1, + WithClickHouseExporter: true, + WithS3Exporter: true, + WithLogExporter: true, + WithIPFIXExporter: true, }, received) - assert.Equal(t, received.GetTableRow(0), []string{"20", "15", "30", "1"}) + assert.Equal(t, received.GetTableRow(0), []string{"20", "15", "30", "1", "true", "true", "true", "true"}) } diff --git a/pkg/flowaggregator/exporter/testing/mock_exporter.go b/pkg/flowaggregator/exporter/testing/mock_exporter.go index b19c002b0f9..8307816b5a8 100644 --- a/pkg/flowaggregator/exporter/testing/mock_exporter.go +++ b/pkg/flowaggregator/exporter/testing/mock_exporter.go @@ -1,4 +1,4 @@ -// Copyright 2023 Antrea Authors +// Copyright 2024 Antrea Authors // // 
Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/pkg/flowaggregator/flowaggregator.go b/pkg/flowaggregator/flowaggregator.go index e674c1e6f63..e11ea72945b 100644 --- a/pkg/flowaggregator/flowaggregator.go +++ b/pkg/flowaggregator/flowaggregator.go @@ -553,10 +553,14 @@ func (fa *flowAggregator) GetFlowRecords(flowKey *ipfixintermediate.FlowKey) []m func (fa *flowAggregator) GetRecordMetrics() querier.Metrics { return querier.Metrics{ - NumRecordsExported: fa.numRecordsExported, - NumRecordsReceived: fa.collectingProcess.GetNumRecordsReceived(), - NumFlows: fa.aggregationProcess.GetNumFlows(), - NumConnToCollector: fa.collectingProcess.GetNumConnToCollector(), + NumRecordsExported: fa.numRecordsExported, + NumRecordsReceived: fa.collectingProcess.GetNumRecordsReceived(), + NumFlows: fa.aggregationProcess.GetNumFlows(), + NumConnToCollector: fa.collectingProcess.GetNumConnToCollector(), + WithClickHouseExporter: fa.clickHouseExporter != nil, + WithS3Exporter: fa.s3Exporter != nil, + WithLogExporter: fa.logExporter != nil, + WithIPFIXExporter: fa.ipfixExporter != nil, } } diff --git a/pkg/flowaggregator/flowaggregator_test.go b/pkg/flowaggregator/flowaggregator_test.go index 392ce829618..99e1951a50a 100644 --- a/pkg/flowaggregator/flowaggregator_test.go +++ b/pkg/flowaggregator/flowaggregator_test.go @@ -725,17 +725,29 @@ func TestFlowAggregator_GetRecordMetrics(t *testing.T) { ctrl := gomock.NewController(t) mockCollectingProcess := ipfixtesting.NewMockIPFIXCollectingProcess(ctrl) mockAggregationProcess := ipfixtesting.NewMockIPFIXAggregationProcess(ctrl) + mockIPFIXExporter := exportertesting.NewMockInterface(ctrl) + mockClickHouseExporter := exportertesting.NewMockInterface(ctrl) + mockS3Exporter := exportertesting.NewMockInterface(ctrl) + mockLogExporter := exportertesting.NewMockInterface(ctrl) want := querier.Metrics{ - NumRecordsExported: 1, - NumRecordsReceived: 1, - NumFlows: 1, - NumConnToCollector: 1, + NumRecordsExported: 1, + NumRecordsReceived: 1, + NumFlows: 1, + NumConnToCollector: 1, + WithClickHouseExporter: true, + WithS3Exporter: true, + WithLogExporter: true, + WithIPFIXExporter: true, } fa := &flowAggregator{ collectingProcess: mockCollectingProcess, aggregationProcess: mockAggregationProcess, numRecordsExported: 1, + clickHouseExporter: mockClickHouseExporter, + s3Exporter: mockS3Exporter, + logExporter: mockLogExporter, + ipfixExporter: mockIPFIXExporter, } mockCollectingProcess.EXPECT().GetNumRecordsReceived().Return(int64(1)) diff --git a/pkg/flowaggregator/querier/querier.go b/pkg/flowaggregator/querier/querier.go index de694375a1d..349f5ed9fbd 100644 --- a/pkg/flowaggregator/querier/querier.go +++ b/pkg/flowaggregator/querier/querier.go @@ -19,10 +19,14 @@ import ( ) type Metrics struct { - NumRecordsExported int64 - NumRecordsReceived int64 - NumFlows int64 - NumConnToCollector int64 + NumRecordsExported int64 + NumRecordsReceived int64 + NumFlows int64 + NumConnToCollector int64 + WithClickHouseExporter bool + WithS3Exporter bool + WithLogExporter bool + WithIPFIXExporter bool } type FlowAggregatorQuerier interface { diff --git a/test/e2e-secondary-network/framework.go b/test/e2e-secondary-network/framework.go new file mode 100644 index 00000000000..f87e789f575 --- /dev/null +++ b/test/e2e-secondary-network/framework.go @@ -0,0 +1,50 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this 
file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "log" + "time" + + antreae2e "antrea.io/antrea/test/e2e" +) + +type TestData struct { + e2eTestData *antreae2e.TestData + logsDirForTestCase string +} + +const ( + busyboxImage = "projects.registry.vmware.com/antrea/busybox" + defaultInterval = 1 * time.Second +) + +var testData *TestData + +type ClusterInfo struct { + controlPlaneNodeName string +} + +var clusterInfo ClusterInfo + +func (data *TestData) createClient(kubeconfigPath string) error { + e2edata = &antreae2e.TestData{} + if err := e2edata.CreateClient(kubeconfigPath); err != nil { + log.Fatalf("Error when creating K8s ClientSet: %v", err) + return err + } + data.e2eTestData = e2edata + return nil +} diff --git a/test/e2e-secondary-network/main_test.go b/test/e2e-secondary-network/main_test.go new file mode 100644 index 00000000000..968513812b1 --- /dev/null +++ b/test/e2e-secondary-network/main_test.go @@ -0,0 +1,95 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package main under directory cmd parses and validates user input, +// instantiates and initializes objects imported from pkg, and runs +// the process + +package e2e + +import ( + "flag" + "log" + "os" + "path" + "testing" + + antreae2e "antrea.io/antrea/test/e2e" +) + +type TestOptions struct { + logsExportDir string + enableAntreaIPAM bool + skipCases string + linuxVMs string +} + +var e2edata *antreae2e.TestData +var testOptions TestOptions +var homeDir, _ = os.UserHomeDir() + +// setupLogging creates a temporary directory to export the test logs if necessary. If a directory +// was provided by the user, it checks that the directory exists. 
+func (tOptions *TestOptions) setupLogging() func() { + if tOptions.logsExportDir == "" { + name, err := os.MkdirTemp("", "antrea-e2e-secondary-test-") + if err != nil { + log.Fatalf("Error when creating temporary directory to export logs: %v", err) + } + log.Printf("Test logs (if any) will be exported under the '%s' directory", name) + tOptions.logsExportDir = name + // we will delete the temporary directory if no logs are exported + return func() { + if empty, _ := antreae2e.IsDirEmpty(name); empty { + log.Printf("Removing empty logs directory '%s'", name) + _ = os.Remove(name) + } else { + log.Printf("Logs exported under '%s', it is your responsibility to delete the directory when you no longer need it", name) + } + } + } + fInfo, err := os.Stat(tOptions.logsExportDir) + if err != nil { + log.Fatalf("Cannot stat provided directory '%s': %v", tOptions.logsExportDir, err) + } + if !fInfo.Mode().IsDir() { + log.Fatalf("'%s' is not a valid directory", tOptions.logsExportDir) + } + // no-op cleanup function + return func() {} +} + +func testMain(m *testing.M) int { + flag.StringVar(&testOptions.logsExportDir, "logs-export-dir", "", "Export directory for test logs") + flag.BoolVar(&testOptions.enableAntreaIPAM, "antrea-ipam", false, "Run tests with AntreaIPAM") + flag.StringVar(&testOptions.skipCases, "skip", "", "Key words to skip cases") + flag.StringVar(&testOptions.linuxVMs, "linuxVMs", "", "hostname of Linux VMs") + flag.Parse() + + cleanupLogging := testOptions.setupLogging() + defer cleanupLogging() + + testData = &TestData{} + log.Println("Creating K8s ClientSet") + kubeconfigPath := path.Join(homeDir, ".kube", "secondary_network_cluster", "config") + if err := testData.createClient(kubeconfigPath); err != nil { + log.Fatalf("Error when creating K8s ClientSet: %v", err) + } + ret := m.Run() + return ret +} + +func TestMain(m *testing.M) { + os.Exit(testMain(m)) +} diff --git a/test/e2e-secondary-network/secondary_network_test.go b/test/e2e-secondary-network/secondary_network_test.go new file mode 100644 index 00000000000..e5caff7cd77 --- /dev/null +++ b/test/e2e-secondary-network/secondary_network_test.go @@ -0,0 +1,245 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "fmt" + "log" + "net" + "os" + "strings" + "testing" + "time" + + logs "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + antreae2e "antrea.io/antrea/test/e2e" +) + +// Structure to extract and store the secondary network configuration information parsed from secondary-network-configuration.yml. 
+type PodConfig struct { + InterfaceType struct { + interfaceType string `yaml:"interfacetype"` + } `yaml:"interface_type"` + SriovConf struct { + networkInterface string `yaml:"networkinterface"` + numberOfVfs int `yaml:"numberofvfs"` + } `yaml:"sriov_conf"` + VirNet struct { + totalNumberOfVirtualNetworks int `yaml:"totalnumberofvirtualnetworks"` + virtualNetworknames []string `yaml:"virtualnetworknames"` + } `yaml:"vir_net"` + CreatePod struct { + numberOfPods int `yaml:"numberofpods"` + describe [][]interface{} `yaml:"describe"` + } `yaml:"create_pod"` +} + +var service PodConfig + +// Structure for extracting the variables for describing the Pod from the secondary-network-configuration.yml file. +type describePodInfo struct { + nameOfPods string + countOfVirtualNetworksPerPod int + nameOfVirtualNetworkPerPod []string + nameOfInterfacePerPod []string +} + +var podData []describePodInfo +var totalNumberOfPods int +var interfaceType string + +const ( + secondaryNetworkConfigYAML = "./infra/secondary-network-configuration.yml" + nameSpace = "kube-system" + ctrName = "busyboxpod" + testPodName = "testsecpod" + osType = "linux" + count = 5 + size = 40 + defaultTimeout = 10 * time.Second + reqName = "intel.com/intel_sriov_netdevice" + resNum = 3 +) +const ( + podName = iota + podVNsCount + podVirtualNetwork + podInterfaceName +) + +// setupTestWithSecondaryNetworkConfig sets up all the prerequisites for running the test, including ensuring that Antrea is enabled and running, extracting the Pod and secondary network interface information, and setting the log directory for the test. +func (data *TestData) setupTestWithSecondaryNetworkConfig(tb testing.TB) (*TestData, error) { + // Extracting the Pods information from the secondary_network_configuration.yml file. + if err := data.extractPodsInfo(); err != nil { + tb.Errorf("Error in extracting Pods info from secondary-network-configuration.yml : %v", err) + return nil, err + } + // Set log directory for test execution. + if err := data.e2eTestData.SetupLogDirectoryForTest(tb.Name()); err != nil { + tb.Errorf("Error creating logs directory '%s': %v", data.logsDirForTestCase, err) + return nil, err + } + return data, nil +} + +// extractPodsInfo extracts the Pod and secondary network interface information for the creation of Pods from the secondary-network-configuration.yml file. +func (data *TestData) extractPodsInfo() error { + var errYamlUnmarshal error + _, err := os.Stat(secondaryNetworkConfigYAML) + if err != nil { + return fmt.Errorf("Parsing of the Pod configuration file failed") + + } + secondaryNetworkConfigYAML, _ := os.ReadFile(secondaryNetworkConfigYAML) + errYamlUnmarshal = yaml.Unmarshal(secondaryNetworkConfigYAML, &service) + if errYamlUnmarshal != nil { + return fmt.Errorf("Parsing %s failed", secondaryNetworkConfigYAML) + } + interfaceType = service.InterfaceType.interfaceType + totalNumberOfPods = service.CreatePod.numberOfPods + for _, s := range service.CreatePod.describe { + output := describePodInfo{nameOfPods: s[podName].(string), countOfVirtualNetworksPerPod: s[podVNsCount].(int), nameOfVirtualNetworkPerPod: strings.Split(s[podVirtualNetwork].(string), ","), nameOfInterfacePerPod: strings.Split(s[podInterfaceName].(string), ",")} + podData = append(podData, output) + } + return nil +} + +// formAnnotationStringOfPod forms the annotation string used in the generation of each Pod YAML file.
+// formAnnotationStringOfPod forms the secondary-network annotation string used when creating each test Pod.
+func (data *TestData) formAnnotationStringOfPod(pod int) string {
+    var annotationString = ""
+    for xPodVN := 0; xPodVN < podData[pod].countOfVirtualNetworksPerPod; xPodVN++ {
+        var podNetworkSpec = "{\"name\": \"" + podData[pod].nameOfVirtualNetworkPerPod[xPodVN] + "\" ,\"interface\": \"" + podData[pod].nameOfInterfacePerPod[xPodVN] + "\" , \"type\": \"" + interfaceType + "\"}"
+        if annotationString == "" {
+            annotationString = "[" + podNetworkSpec
+        } else {
+            annotationString = annotationString + "," + podNetworkSpec
+        }
+    }
+    annotationString = annotationString + "]"
+    return annotationString
+}
+
+// createPodOnNode creates the test Pods with the required annotations, as per the parsed Pod information, using the NewPodBuilder API.
+func (data *TestData) createPodOnNode(t *testing.T, ns string, nodeName string) error {
+    for xPod := 0; xPod < totalNumberOfPods; xPod++ {
+        if err := data.createPodForSecondaryNetwork(ns, nodeName, xPod, testPodName, resNum); err != nil {
+            return fmt.Errorf("error in creating Pods: %v", err)
+        }
+    }
+    return nil
+}
+
+// getSecondaryInterface returns the IP address configured on the given secondary interface of the specified Pod.
+func (data *TestData) getSecondaryInterface(targetPod int, targetInterface int) (string, error) {
+    cmd := []string{"/bin/sh", "-c", fmt.Sprintf("ip addr show %s | grep \"inet\" | awk '{print $2}' | cut -d/ -f1", podData[targetPod].nameOfInterfacePerPod[targetInterface])}
+    stdout, _, err := data.e2eTestData.RunCommandFromPod(nameSpace, podData[targetPod].nameOfPods, ctrName, cmd)
+    stdout = strings.TrimSuffix(stdout, "\n")
+    if stdout == "" {
+        log.Fatalf("Error: Interface %s not found on %s. err: %v", podData[targetPod].nameOfInterfacePerPod[targetInterface], podData[targetPod].nameOfPods, err)
+    }
+    return stdout, nil
+}
+
+// checkSubnet checks whether the IP address to be pinged is in the same subnet as the Pod from which it is pinged.
+func (data *TestData) checkSubnet(t *testing.T, sourcePod int, targetPod int, targetInterface int) (bool, error) {
+    for podCheckForSubnet := 0; podCheckForSubnet < podData[sourcePod].countOfVirtualNetworksPerPod; podCheckForSubnet++ {
+        if podData[sourcePod].nameOfVirtualNetworkPerPod[podCheckForSubnet] == podData[targetPod].nameOfVirtualNetworkPerPod[targetInterface] {
+            _, err := data.getSecondaryInterface(sourcePod, podCheckForSubnet)
+            if err != nil {
+                t.Logf("Error in ping: interface %s for the source test Pod %s not created", podData[sourcePod].nameOfInterfacePerPod[podCheckForSubnet], podData[sourcePod].nameOfPods)
+                return false, err
+            }
+        }
+    }
+    return true, nil
+}
+
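+// As an illustration (hypothetical network and interface names, not part of this change), for a Pod attached to
+// two virtual networks, formAnnotationStringOfPod above produces a "k8s.v1.cni.cncf.io/networks" annotation value
+// such as:
+//
+//   [{"name": "net-a" ,"interface": "eth1" , "type": "sriov"},{"name": "net-b" ,"interface": "eth2" , "type": "sriov"}]
+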
+// pingBetweenInterfaces iterates over all the created Pods and pings a target Pod if the IP address of its
+// secondary network interface is in the same subnet as the source Pod. The target Pod is first waited on to be
+// running before the ping is attempted.
+func (data *TestData) pingBetweenInterfaces(t *testing.T) error {
+    for sourcePod := 0; sourcePod < totalNumberOfPods; sourcePod++ {
+        for targetPod := 0; targetPod < totalNumberOfPods; targetPod++ {
+            for targetInterface := 0; targetInterface < podData[targetPod].countOfVirtualNetworksPerPod; targetInterface++ {
+                if podData[targetPod].nameOfPods == podData[sourcePod].nameOfPods {
+                    continue
+                }
+                _, err := data.e2eTestData.PodWaitFor(defaultTimeout, podData[targetPod].nameOfPods, nameSpace, func(pod *corev1.Pod) (bool, error) {
+                    return pod.Status.Phase == corev1.PodRunning, nil
+                })
+                if err != nil {
+                    t.Logf("Error when waiting for the target Pod to be running: %s", podData[targetPod].nameOfPods)
+                }
+
+                flag, _ := data.checkSubnet(t, sourcePod, targetPod, targetInterface)
+                if flag {
+                    secondaryIpAddress, _ := data.getSecondaryInterface(targetPod, targetInterface)
+                    ip := net.ParseIP(secondaryIpAddress)
+                    if ip != nil {
+                        var IPToPing antreae2e.PodIPs
+                        if ip.To4() != nil {
+                            IPToPing = antreae2e.PodIPs{IPv4: &ip}
+                        } else {
+                            IPToPing = antreae2e.PodIPs{IPv6: &ip}
+                        }
+                        err := data.e2eTestData.RunPingCommandFromTestPod(antreae2e.PodInfo{Name: podData[sourcePod].nameOfPods, OS: osType, NodeName: clusterInfo.controlPlaneNodeName, Namespace: nameSpace}, nameSpace, &IPToPing, ctrName, count, size)
+                        if err == nil {
+                            logs.Infof("Ping '%s' -> '%s' (Interface: %s, IP Address: %s): OK", podData[sourcePod].nameOfPods, podData[targetPod].nameOfPods, podData[targetPod].nameOfInterfacePerPod[targetInterface], secondaryIpAddress)
+                        } else {
+                            t.Logf("Ping '%s' -> '%s' (Interface: %s, IP Address: %s): ERROR (%v)", podData[sourcePod].nameOfPods, podData[targetPod].nameOfPods, podData[targetPod].nameOfInterfacePerPod[targetInterface], secondaryIpAddress, err)
+                        }
+                    } else {
+                        t.Logf("Error in ping: target interface %s of Pod %s not created", podData[targetPod].nameOfInterfacePerPod[targetInterface], podData[targetPod].nameOfPods)
+                    }
+                }
+            }
+        }
+    }
+    return nil
+}
+
+// createPodForSecondaryNetwork is a wrapper that creates a Pod with the secondary-network annotation, command,
+// Node, container name, and resource requests and limits, using the NewPodBuilder API.
+func (data *TestData) createPodForSecondaryNetwork(ns string, nodeName string, podNum int, testPodName string, resNum int64) error {
+    computeResources := resource.NewQuantity(resNum, resource.DecimalSI)
+    return antreae2e.NewPodBuilder(podData[podNum].nameOfPods, ns, busyboxImage).OnNode(nodeName).WithContainerName(ctrName).WithCommand([]string{"sleep", "infinity"}).WithAnnotations(
+        map[string]string{
+            "k8s.v1.cni.cncf.io/networks": data.formAnnotationStringOfPod(podNum),
+        }).WithLabels(
+        map[string]string{
+            "App": testPodName,
+        }).WithResources(corev1.ResourceList{reqName: *computeResources}, corev1.ResourceList{reqName: *computeResources}).Create(data.e2eTestData)
+}
+
+func TestNativeSecondaryNetwork(t *testing.T) {
+    // Once setupTestWithSecondaryNetworkConfig is successful, we have all the prerequisites enabled and running.
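+    // The test runs two subtests: it first creates the Pods described in secondary-network-configuration.yml
+    // on the control-plane Node, then pings between the secondary interfaces of every pair of Pods that share
+    // a virtual network.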
+ _, err := testData.setupTestWithSecondaryNetworkConfig(t) + if err != nil { + t.Logf("Error when setupTestWithSecondaryNetworkConfig: %v", err) + } + t.Run("testCreateTestPodOnNode", func(t *testing.T) { + testData.createPodOnNode(t, nameSpace, clusterInfo.controlPlaneNodeName) + }) + t.Run("testpingBetweenInterfaces", func(t *testing.T) { + err := testData.pingBetweenInterfaces(t) + if err != nil { + t.Logf("Error when pinging between interfaces: %v", err) + } + }) +} diff --git a/test/e2e/antreaipam_test.go b/test/e2e/antreaipam_test.go index 5b3ab9acf2c..ff4a34df9a7 100644 --- a/test/e2e/antreaipam_test.go +++ b/test/e2e/antreaipam_test.go @@ -255,42 +255,42 @@ func TestAntreaIPAM(t *testing.T) { func testAntreaIPAMPodConnectivitySameNode(t *testing.T, data *TestData) { numPods := 2 // Two AntreaIPAM Pods, can be increased - podInfos := make([]podInfo, numPods) - for idx := range podInfos { - podInfos[idx].name = randName(fmt.Sprintf("test-antrea-ipam-pod-%d-", idx)) - podInfos[idx].namespace = testAntreaIPAMNamespace + PodInfos := make([]PodInfo, numPods) + for idx := range PodInfos { + PodInfos[idx].Name = randName(fmt.Sprintf("test-antrea-ipam-pod-%d-", idx)) + PodInfos[idx].Namespace = testAntreaIPAMNamespace } // One Per-Node IPAM Pod - podInfos = append(podInfos, podInfo{ - name: randName("test-pod-0-"), - namespace: data.testNamespace, + PodInfos = append(PodInfos, PodInfo{ + Name: randName("test-pod-0-"), + Namespace: data.testNamespace, }) workerNode := workerNodeName(1) t.Logf("Creating %d agnhost Pods on '%s'", numPods+1, workerNode) - for i := range podInfos { - podInfos[i].os = clusterInfo.nodesOS[workerNode] - if err := data.createAgnhostPodOnNodeWithAnnotations(podInfos[i].name, podInfos[i].namespace, workerNode, nil); err != nil { - t.Fatalf("Error when creating agnhost test Pod '%s': %v", podInfos[i], err) + for i := range PodInfos { + PodInfos[i].OS = clusterInfo.nodesOS[workerNode] + if err := data.createAgnhostPodOnNodeWithAnnotations(PodInfos[i].Name, PodInfos[i].Namespace, workerNode, nil); err != nil { + t.Fatalf("Error when creating agnhost test Pod '%s': %v", PodInfos[i], err) } - defer deletePodWrapper(t, data, podInfos[i].namespace, podInfos[i].name) + defer deletePodWrapper(t, data, PodInfos[i].Namespace, PodInfos[i].Name) } - data.runPingMesh(t, podInfos, agnhostContainerName) + data.runPingMesh(t, PodInfos, agnhostContainerName) } func testAntreaIPAMPodConnectivityDifferentNodes(t *testing.T, data *TestData) { maxNodes := 3 - var podInfos []podInfo + var PodInfos []PodInfo for _, namespace := range []string{data.testNamespace, testAntreaIPAMNamespace, testAntreaIPAMNamespace11, testAntreaIPAMNamespace12} { createdPodInfos, deletePods := createPodsOnDifferentNodes(t, data, namespace, "differentnodes") defer deletePods() if len(createdPodInfos) > maxNodes { createdPodInfos = createdPodInfos[:maxNodes] } - podInfos = append(podInfos, createdPodInfos...) + PodInfos = append(PodInfos, createdPodInfos...) 
} - data.runPingMesh(t, podInfos, agnhostContainerName) + data.runPingMesh(t, PodInfos, agnhostContainerName) } func testAntreaIPAMStatefulSet(t *testing.T, data *TestData, dedicatedIPPoolKey *string) { @@ -345,7 +345,7 @@ func testAntreaIPAMStatefulSet(t *testing.T, data *TestData, dedicatedIPPoolKey if err != nil { t.Fatalf("Error when waiting Pod IPs: %v", err) } - isBelongTo, ipAddressState, err := checkIPPoolAllocation(t, data, ipPoolName, podIPs.ipv4.String()) + isBelongTo, ipAddressState, err := checkIPPoolAllocation(t, data, ipPoolName, podIPs.IPv4.String()) if err != nil { t.Fatalf("Error when checking IPPoolAllocation: %v", err) } diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go index e63556d48c7..a4f67128f17 100644 --- a/test/e2e/antreapolicy_test.go +++ b/test/e2e/antreapolicy_test.go @@ -2251,13 +2251,13 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser testcases = append(testcases, []podToAddrTestStep{ { Pod(clientNamespace + "/agnhost-client"), - server0IP.ipv4.String(), + server0IP.IPv4.String(), 80, Rejected, }, { Pod(clientNamespace + "/agnhost-client"), - server1IP.ipv4.String(), + server1IP.IPv4.String(), 80, Rejected, }, @@ -2267,13 +2267,13 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser testcases = append(testcases, []podToAddrTestStep{ { Pod(clientNamespace + "/agnhost-client"), - server0IP.ipv6.String(), + server0IP.IPv6.String(), 80, Rejected, }, { Pod(clientNamespace + "/agnhost-client"), - server1IP.ipv6.String(), + server1IP.IPv6.String(), 80, Rejected, }, @@ -2726,7 +2726,7 @@ func testAuditLoggingK8sService(t *testing.T, data *TestData) { // generate some traffic that wget the nginx service var wg sync.WaitGroup oneProbe := func(ns, pod string, matcher *auditLogMatcher) { - for _, ip := range serverIP.ipStrings { + for _, ip := range serverIP.IPStrings { ip := ip matcher.AddProbeAddr(appliedToRef, ns, pod, ip, serverPort) wg.Add(1) @@ -3505,13 +3505,13 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { ipv4Testcases := []podToAddrTestStep{ { Pod(namespaces["x"] + "/" + client0Name), - serverIP.ipv4.String(), + serverIP.IPv4.String(), 80, Dropped, }, { Pod(namespaces["x"] + "/" + client1Name), - serverIP.ipv4.String(), + serverIP.IPv4.String(), 80, Connected, }, @@ -3523,13 +3523,13 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { ipv6Testcases := []podToAddrTestStep{ { Pod(namespaces["x"] + "/" + client0Name), - serverIP.ipv6.String(), + serverIP.IPv6.String(), 80, Dropped, }, { Pod(namespaces["x"] + "/" + client1Name), - serverIP.ipv6.String(), + serverIP.IPv6.String(), 80, Connected, }, @@ -3640,13 +3640,13 @@ func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { ipv4TestCases := []podToAddrTestStep{ { Pod(namespaces["z"] + "/" + clientName), - serverIP0.ipv4.String(), + serverIP0.IPv4.String(), 80, Dropped, }, { Pod(namespaces["z"] + "/" + clientName), - serverIP1.ipv4.String(), + serverIP1.IPv4.String(), 80, Connected, }, @@ -3657,13 +3657,13 @@ func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { ipv6TestCases := []podToAddrTestStep{ { Pod(namespaces["z"] + "/" + clientName), - serverIP0.ipv6.String(), + serverIP0.IPv6.String(), 80, Dropped, }, { Pod(namespaces["z"] + "/" + clientName), - serverIP1.ipv6.String(), + serverIP1.IPv6.String(), 80, Connected, }, @@ -3714,13 +3714,13 @@ func testACNPICMPSupport(t *testing.T, data *TestData) { testcases = append(testcases, []podToAddrTestStep{ { 
Pod(fmt.Sprintf("%s/%s", data.testNamespace, clientName)), - server0IP.ipv4.String(), + server0IP.IPv4.String(), -1, Rejected, }, { Pod(fmt.Sprintf("%s/%s", data.testNamespace, clientName)), - server1IP.ipv4.String(), + server1IP.IPv4.String(), -1, Dropped, }, @@ -3730,13 +3730,13 @@ func testACNPICMPSupport(t *testing.T, data *TestData) { testcases = append(testcases, []podToAddrTestStep{ { Pod(fmt.Sprintf("%s/%s", data.testNamespace, clientName)), - server0IP.ipv6.String(), + server0IP.IPv6.String(), -1, Rejected, }, { Pod(fmt.Sprintf("%s/%s", data.testNamespace, clientName)), - server1IP.ipv6.String(), + server1IP.IPv6.String(), -1, Dropped, }, @@ -4692,11 +4692,11 @@ func testANNPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) { // the first IP packet sent on a tunnel is always dropped because of a missing ARP entry. // So we need to "warm-up" the tunnel. if clusterInfo.podV4NetworkCIDR != "" { - cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())} + cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.IPv4.String())} data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) } if clusterInfo.podV6NetworkCIDR != "" { - cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())} + cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.IPv6.String())} data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) } var annp = &crdv1beta1.NetworkPolicy{ @@ -4753,14 +4753,14 @@ func testANNPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) { wg.Add(1) go func() { if clusterInfo.podV4NetworkCIDR != "" { - cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 80", serverIPs.ipv4.String())} - cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 443", serverIPs.ipv4.String())} + cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 80", serverIPs.IPv4.String())} + cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 443", serverIPs.IPv4.String())} data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd2) } if clusterInfo.podV6NetworkCIDR != "" { - cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 80", serverIPs.ipv6.String())} - cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 443", serverIPs.ipv6.String())} + cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 80", serverIPs.IPv6.String())} + cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 443", serverIPs.IPv6.String())} data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd2) } @@ -4827,11 +4827,11 @@ func testAntreaClusterNetworkPolicyStats(t *testing.T, data *TestData) { // the first IP packet sent on a tunnel is always dropped because of a missing ARP entry. // So we need to "warm-up" the tunnel. 
if clusterInfo.podV4NetworkCIDR != "" { - cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())} + cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.IPv4.String())} data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) } if clusterInfo.podV6NetworkCIDR != "" { - cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())} + cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.IPv6.String())} data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) } var acnp = &crdv1beta1.ClusterNetworkPolicy{ @@ -4888,14 +4888,14 @@ func testAntreaClusterNetworkPolicyStats(t *testing.T, data *TestData) { wg.Add(1) go func() { if clusterInfo.podV4NetworkCIDR != "" { - cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 800", serverIPs.ipv4.String())} - cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 4430", serverIPs.ipv4.String())} + cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 800", serverIPs.IPv4.String())} + cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 4430", serverIPs.IPv4.String())} data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd2) } if clusterInfo.podV6NetworkCIDR != "" { - cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 800", serverIPs.ipv6.String())} - cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 4430", serverIPs.ipv6.String())} + cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 800", serverIPs.IPv6.String())} + cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 4430", serverIPs.IPv6.String())} data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd2) } diff --git a/test/e2e/bandwidth_test.go b/test/e2e/bandwidth_test.go index 3e95a8b0117..5e3f1b738d4 100644 --- a/test/e2e/bandwidth_test.go +++ b/test/e2e/bandwidth_test.go @@ -24,8 +24,6 @@ import ( v1 "k8s.io/api/core/v1" ) -const iperfPort = 5201 - // TestBandwidth is the top-level test which contains all subtests for // Bandwidth related test cases so they can share setup, teardown. func TestBandwidth(t *testing.T) { @@ -82,7 +80,7 @@ func testBenchmarkBandwidthIntraNode(t *testing.T, data *TestData) { if err != nil { t.Fatalf("Error when getting the perftest server Pod's IP: %v", err) } - podBIP := podBIPs.ipv4.String() + podBIP := podBIPs.IPv4.String() stdout, _, err := data.RunCommandFromPod(data.testNamespace, "perftest-a", "toolbox", []string{"bash", "-c", fmt.Sprintf("iperf3 -c %s|grep sender|awk '{print $7,$8}'", podBIP)}) if err != nil { t.Fatalf("Error when running iperf3 client: %v", err) @@ -196,11 +194,11 @@ func testPodTrafficShaping(t *testing.T, data *TestData) { // Allow a certain deviation. 
assert.InEpsilon(t, actualBandwidth, tt.expectedBandwidth, 0.1) } - if podIPs.ipv4 != nil { - runIperf([]string{"bash", "-c", fmt.Sprintf("iperf3 -c %s -f m -O 1|grep sender|awk '{print $7}'", podIPs.ipv4.String())}) + if podIPs.IPv4 != nil { + runIperf([]string{"bash", "-c", fmt.Sprintf("iperf3 -c %s -f m -O 1|grep sender|awk '{print $7}'", podIPs.IPv4.String())}) } - if podIPs.ipv6 != nil { - runIperf([]string{"bash", "-c", fmt.Sprintf("iperf3 -6 -c %s -f m -O 1|grep sender|awk '{print $7}'", podIPs.ipv6.String())}) + if podIPs.IPv6 != nil { + runIperf([]string{"bash", "-c", fmt.Sprintf("iperf3 -6 -c %s -f m -O 1|grep sender|awk '{print $7}'", podIPs.IPv6.String())}) } }) } diff --git a/test/e2e/basic_test.go b/test/e2e/basic_test.go index ccb69995d90..b9258a2ca7a 100644 --- a/test/e2e/basic_test.go +++ b/test/e2e/basic_test.go @@ -78,13 +78,13 @@ func testPodAssignIP(t *testing.T, data *TestData, namespace string, podV4Networ podV4NetworkCIDR = clusterInfo.podV4NetworkCIDR } if podV4NetworkCIDR != "" { - checkPodIP(t, podV4NetworkCIDR, podIPs.ipv4) + checkPodIP(t, podV4NetworkCIDR, podIPs.IPv4) } if podV6NetworkCIDR == "" { podV6NetworkCIDR = clusterInfo.podV6NetworkCIDR } if podV6NetworkCIDR != "" { - checkPodIP(t, podV6NetworkCIDR, podIPs.ipv6) + checkPodIP(t, podV6NetworkCIDR, podIPs.IPv6) } } } @@ -849,7 +849,7 @@ func testGratuitousARP(t *testing.T, data *TestData, namespace string) { // be sent 100ms after processing CNI ADD request. time.Sleep(100 * time.Millisecond) - cmd := []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=ARPSpoofGuard,arp,arp_spa=%s", podIP.ipv4.String())} + cmd := []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=ARPSpoofGuard,arp,arp_spa=%s", podIP.IPv4.String())} stdout, _, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) if err != nil { t.Fatalf("Error when querying openflow: %v", err) diff --git a/test/e2e/connectivity_test.go b/test/e2e/connectivity_test.go index f6bbf5e199e..f2d42363384 100644 --- a/test/e2e/connectivity_test.go +++ b/test/e2e/connectivity_test.go @@ -72,14 +72,14 @@ func TestConnectivity(t *testing.T) { }) } -func waitForPodIPs(t *testing.T, data *TestData, podInfos []podInfo) map[string]*PodIPs { +func waitForPodIPs(t *testing.T, data *TestData, podInfos []PodInfo) map[string]*PodIPs { t.Logf("Waiting for Pods to be ready and retrieving IPs") podIPs := make(map[string]*PodIPs) for _, pi := range podInfos { - podName := pi.name + podName := pi.Name podNamespace := data.testNamespace - if pi.namespace != "" { - podNamespace = pi.namespace + if pi.Namespace != "" { + podNamespace = pi.Namespace } if podIP, err := data.podWaitForIPs(defaultTimeout, podName, podNamespace); err != nil { t.Fatalf("Error when waiting for IP for Pod '%s': %v", podName, err) @@ -93,27 +93,27 @@ func waitForPodIPs(t *testing.T, data *TestData, podInfos []podInfo) map[string] // runPingMesh runs a ping mesh between all the provided Pods after first retrieving their IP // addresses. 
-func (data *TestData) runPingMesh(t *testing.T, podInfos []podInfo, ctrname string) { +func (data *TestData) runPingMesh(t *testing.T, podInfos []PodInfo, ctrname string) { podIPs := waitForPodIPs(t, data, podInfos) t.Logf("Ping mesh test between all Pods") for _, pi1 := range podInfos { for _, pi2 := range podInfos { - if pi1.name == pi2.name { + if pi1.Name == pi2.Name { continue } podNamespace := data.testNamespace - if pi1.namespace != "" { - podNamespace = pi1.namespace + if pi1.Namespace != "" { + podNamespace = pi1.Namespace } pod2Namespace := data.testNamespace - if pi2.namespace != "" { - pod2Namespace = pi2.namespace + if pi2.Namespace != "" { + pod2Namespace = pi2.Namespace } - if err := data.runPingCommandFromTestPod(pi1, podNamespace, podIPs[pi2.name], ctrname, pingCount, 0); err != nil { - t.Errorf("Ping '%s' -> '%s': ERROR (%v)", k8s.NamespacedName(podNamespace, pi1.name), k8s.NamespacedName(pod2Namespace, pi2.name), err) + if err := data.RunPingCommandFromTestPod(pi1, podNamespace, podIPs[pi2.Name], ctrname, pingCount, 0); err != nil { + t.Errorf("Ping '%s' -> '%s': ERROR (%v)", k8s.NamespacedName(podNamespace, pi1.Name), k8s.NamespacedName(pod2Namespace, pi2.Name), err) } else { - t.Logf("Ping '%s' -> '%s': OK", k8s.NamespacedName(podNamespace, pi1.name), k8s.NamespacedName(pod2Namespace, pi2.name)) + t.Logf("Ping '%s' -> '%s': OK", k8s.NamespacedName(podNamespace, pi1.Name), k8s.NamespacedName(pod2Namespace, pi2.Name)) } } } @@ -121,9 +121,9 @@ func (data *TestData) runPingMesh(t *testing.T, podInfos []podInfo, ctrname stri func (data *TestData) testPodConnectivitySameNode(t *testing.T) { numPods := 2 // can be increased - podInfos := make([]podInfo, numPods) + podInfos := make([]PodInfo, numPods) for idx := range podInfos { - podInfos[idx].name = randName(fmt.Sprintf("test-pod-%d-", idx)) + podInfos[idx].Name = randName(fmt.Sprintf("test-pod-%d-", idx)) } // If there are Windows Nodes, set workerNode to one of them. workerNode := workerNodeName(1) @@ -133,11 +133,11 @@ func (data *TestData) testPodConnectivitySameNode(t *testing.T) { t.Logf("Creating %d agnhost Pods on '%s'", numPods, workerNode) for i := range podInfos { - podInfos[i].os = clusterInfo.nodesOS[workerNode] - if err := data.createAgnhostPodOnNode(podInfos[i].name, data.testNamespace, workerNode, false); err != nil { + podInfos[i].OS = clusterInfo.nodesOS[workerNode] + if err := data.createAgnhostPodOnNode(podInfos[i].Name, data.testNamespace, workerNode, false); err != nil { t.Fatalf("Error when creating agnhost test Pod '%s': %v", podInfos[i], err) } - defer deletePodWrapper(t, data, data.testNamespace, podInfos[i].name) + defer deletePodWrapper(t, data, data.testNamespace, podInfos[i].Name) } data.runPingMesh(t, podInfos, agnhostContainerName) @@ -189,7 +189,7 @@ func testHostPortPodConnectivity(t *testing.T, data *TestData) { // Pods as well as a function which will delete the Pods when called. Since Pods can be on Nodes of different OSes, podInfo // slice instead of PodName slice is used to inform caller of correct commands and options. Linux and Windows Pods are // alternating in this podInfo slice so that the test can cover different connectivity cases between different OSes. 
-func createPodsOnDifferentNodes(t *testing.T, data *TestData, namespace, tag string) (podInfos []podInfo, cleanup func() error) { +func createPodsOnDifferentNodes(t *testing.T, data *TestData, namespace, tag string) (podInfos []PodInfo, cleanup func() error) { dsName := "connectivity-test" + tag _, deleteDaemonSet, err := data.createDaemonSet(dsName, namespace, agnhostContainerName, agnhostImage, []string{"sleep", "3600"}, nil) if err != nil { @@ -199,7 +199,7 @@ func createPodsOnDifferentNodes(t *testing.T, data *TestData, namespace, tag str t.Fatalf("Error when waiting for DaemonSet Pods to get IPs: %v", err) } - piMap := map[string][]podInfo{"linux": {}, "windows": {}} + piMap := map[string][]PodInfo{"linux": {}, "windows": {}} getDaemonSetPods := func() (*corev1.PodList, error) { return data.clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{ @@ -213,7 +213,7 @@ func createPodsOnDifferentNodes(t *testing.T, data *TestData, namespace, tag str for _, p := range pods.Items { os := clusterInfo.nodesOS[p.Spec.NodeName] - piMap[os] = append(piMap[os], podInfo{p.Name, os, p.Spec.NodeName, namespace}) + piMap[os] = append(piMap[os], PodInfo{p.Name, os, p.Spec.NodeName, namespace}) } var linIdx, winIdx int for linIdx != len(piMap["linux"]) && winIdx != len(piMap["windows"]) { @@ -343,7 +343,7 @@ func testOVSRestartSameNode(t *testing.T, data *TestData, namespace string) { // that restarting Antrea takes less than that time. Unfortunately, the arping // utility in busybox does not let us choose a smaller interval than 1 second. count := 25 - cmd := fmt.Sprintf("arping -c %d %s", count, podIPs[1].ipv4.String()) + cmd := fmt.Sprintf("arping -c %d %s", count, podIPs[1].IPv4.String()) stdout, stderr, err := data.RunCommandFromPod(namespace, podNames[0], busyboxContainerName, strings.Fields(cmd)) if err != nil { return fmt.Errorf("error when running arping command: %v - stdout: %s - stderr: %s", err, stdout, stderr) @@ -389,20 +389,20 @@ func testOVSRestartSameNode(t *testing.T, data *TestData, namespace string) { // still works. 
func testOVSFlowReplay(t *testing.T, data *TestData, namespace string) { numPods := 2 - podInfos := make([]podInfo, numPods) + podInfos := make([]PodInfo, numPods) for i := range podInfos { - podInfos[i].name = randName(fmt.Sprintf("test-pod-%d-", i)) - podInfos[i].namespace = namespace + podInfos[i].Name = randName(fmt.Sprintf("test-pod-%d-", i)) + podInfos[i].Namespace = namespace } workerNode := workerNodeName(1) t.Logf("Creating %d busybox test Pods on '%s'", numPods, workerNode) for i := range podInfos { - podInfos[i].os = clusterInfo.nodesOS[workerNode] - if err := data.createBusyboxPodOnNode(podInfos[i].name, namespace, workerNode, false); err != nil { - t.Fatalf("Error when creating busybox test Pod '%s': %v", podInfos[i].name, err) + podInfos[i].OS = clusterInfo.nodesOS[workerNode] + if err := data.createBusyboxPodOnNode(podInfos[i].Name, namespace, workerNode, false); err != nil { + t.Fatalf("Error when creating busybox test Pod '%s': %v", podInfos[i].Name, err) } - defer deletePodWrapper(t, data, namespace, podInfos[i].name) + defer deletePodWrapper(t, data, namespace, podInfos[i].Name) } data.runPingMesh(t, podInfos, busyboxContainerName) @@ -514,8 +514,8 @@ func testPingLargeMTU(t *testing.T, data *TestData) { podIPs := waitForPodIPs(t, data, podInfos) pingSize := 2000 - t.Logf("Running ping with size %d between Pods %s and %s", pingSize, podInfos[0].name, podInfos[1].name) - if err := data.runPingCommandFromTestPod(podInfos[0], data.testNamespace, podIPs[podInfos[1].name], agnhostContainerName, pingCount, pingSize); err != nil { + t.Logf("Running ping with size %d between Pods %s and %s", pingSize, podInfos[0].Name, podInfos[1].Name) + if err := data.RunPingCommandFromTestPod(podInfos[0], data.testNamespace, podIPs[podInfos[1].Name], agnhostContainerName, pingCount, pingSize); err != nil { t.Error(err) } } diff --git a/test/e2e/fixtures.go b/test/e2e/fixtures.go index 34eb5ac91a7..45af883bccd 100644 --- a/test/e2e/fixtures.go +++ b/test/e2e/fixtures.go @@ -216,7 +216,7 @@ func createDirectory(path string) error { return os.Mkdir(path, 0700) } -func (data *TestData) setupLogDirectoryForTest(testName string) error { +func (data *TestData) SetupLogDirectoryForTest(testName string) error { // sanitize the testName: it can contain '/' if the test is a subtest testName = strings.ReplaceAll(testName, string(filepath.Separator), "_") path := filepath.Join(testOptions.logsExportDir, testName) @@ -232,7 +232,7 @@ func (data *TestData) setupLogDirectoryForTest(testName string) error { } func setupTest(tb testing.TB) (*TestData, error) { - if err := testData.setupLogDirectoryForTest(tb.Name()); err != nil { + if err := testData.SetupLogDirectoryForTest(tb.Name()); err != nil { tb.Errorf("Error creating logs directory '%s': %v", testData.logsDirForTestCase, err) return nil, err } @@ -276,15 +276,15 @@ func setupTestForFlowAggregator(tb testing.TB, o flowVisibilityTestOptions) (*Te tb.Errorf("Error when creating the ipfix collector Pod: %v", err) } ipfixCollectorIP, err := testData.podWaitForIPs(defaultTimeout, "ipfix-collector", testData.testNamespace) - if err != nil || len(ipfixCollectorIP.ipStrings) == 0 { + if err != nil || len(ipfixCollectorIP.IPStrings) == 0 { tb.Errorf("Error when waiting to get ipfix collector Pod IP: %v", err) return nil, v4Enabled, v6Enabled, err } var ipStr string - if v6Enabled && ipfixCollectorIP.ipv6 != nil { - ipStr = ipfixCollectorIP.ipv6.String() + if v6Enabled && ipfixCollectorIP.IPv6 != nil { + ipStr = ipfixCollectorIP.IPv6.String() } else { - ipStr = 
ipfixCollectorIP.ipv4.String() + ipStr = ipfixCollectorIP.IPv4.String() } ipfixCollectorAddr := fmt.Sprintf("%s:tcp", net.JoinHostPort(ipStr, ipfixCollectorPort)) diff --git a/test/e2e/flowaggregator_test.go b/test/e2e/flowaggregator_test.go index 6afe450c5a8..f5d48d7174b 100644 --- a/test/e2e/flowaggregator_test.go +++ b/test/e2e/flowaggregator_test.go @@ -19,7 +19,6 @@ import ( "encoding/json" "fmt" "net" - "regexp" "strconv" "strings" "testing" @@ -30,14 +29,17 @@ import ( ipfixregistry "github.com/vmware/go-ipfix/pkg/registry" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/utils/strings/slices" "antrea.io/antrea/pkg/agent/config" "antrea.io/antrea/pkg/agent/openflow" "antrea.io/antrea/pkg/antctl" "antrea.io/antrea/pkg/antctl/runtime" secv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1" + "antrea.io/antrea/pkg/flowaggregator/apiserver/handlers/recordmetrics" "antrea.io/antrea/test/e2e/utils" ) @@ -152,14 +154,23 @@ var ( // exporting to the flow aggregator at time 2s, 4s, 6s, 8s, 10s, and 12s after iperf traffic begins. // Since flow aggregator will aggregate records based on 5-tuple connection key and active timeout is 3.5 seconds, // we expect 3 records at time 5.5s, 9s, and 12.5s after iperf traffic begins. - expectedNumDataRecords = 3 + expectedNumDataRecords = 3 + podAIPs, podBIPs, podCIPs, podDIPs, podEIPs *PodIPs + serviceNames = []string{"perftest-a", "perftest-b", "perftest-c", "perftest-d", "perftest-e"} + podNames = serviceNames ) type testFlow struct { - srcIP string - dstIP string - srcPodName string - dstPodName string + srcIP string + dstIP string + srcPodName string + dstPodName string + svcIP string + checkDstSvc bool +} + +type IPFIXCollectorResponse struct { + FlowRecords []string `json:"flowRecords"` } func TestFlowAggregatorSecureConnection(t *testing.T) { @@ -203,6 +214,11 @@ func TestFlowAggregatorSecureConnection(t *testing.T) { if err != nil { t.Fatalf("Error when setting up test: %v", err) } + // Check recordmetrics of Flow Aggregator to make sure Antrea-agent Pods/ClickHouse/IPFIX collector and Flow Aggregator + // are correctly connected + if err := getAndCheckFlowAggregatorMetrics(t, data); err != nil { + t.Fatalf("Error when checking metrics of Flow Aggregator: %v", err) + } t.Run(o.name, func(t *testing.T) { defer func() { teardownTest(t, data) @@ -210,15 +226,15 @@ func TestFlowAggregatorSecureConnection(t *testing.T) { // of Flow Aggregator has been exported. 
teardownFlowAggregator(t, data) }() - podAIPs, podBIPs, _, _, _, err := createPerftestPods(data) + podAIPs, podBIPs, _, _, _, err = createPerftestPods(data) if err != nil { t.Fatalf("Error when creating perftest Pods: %v", err) } if v4Enabled { - checkIntraNodeFlows(t, data, podAIPs, podBIPs, false) + checkIntraNodeFlows(t, data, podAIPs, podBIPs, false, "") } if v6Enabled { - checkIntraNodeFlows(t, data, podAIPs, podBIPs, false) + checkIntraNodeFlows(t, data, podAIPs, podBIPs, true, "") } }) } @@ -234,6 +250,9 @@ func TestFlowAggregator(t *testing.T) { if err != nil { t.Fatalf("Error when setting up test: %v", err) } + if err := getAndCheckFlowAggregatorMetrics(t, data); err != nil { + t.Fatalf("Error when checking metrics of Flow Aggregator: %v", err) + } defer func() { teardownTest(t, data) // Execute teardownFlowAggregator later than teardownTest to ensure that the log @@ -246,22 +265,22 @@ func TestFlowAggregator(t *testing.T) { t.Fatalf("Error when creating Kubernetes utils client: %v", err) } - podAIPs, podBIPs, podCIPs, podDIPs, podEIPs, err := createPerftestPods(data) + podAIPs, podBIPs, podCIPs, podDIPs, podEIPs, err = createPerftestPods(data) if err != nil { t.Fatalf("Error when creating perftest Pods: %v", err) } if v4Enabled { - t.Run("IPv4", func(t *testing.T) { testHelper(t, data, podAIPs, podBIPs, podCIPs, podDIPs, podEIPs, false) }) + t.Run("IPv4", func(t *testing.T) { testHelper(t, data, false) }) } if v6Enabled { - t.Run("IPv6", func(t *testing.T) { testHelper(t, data, podAIPs, podBIPs, podCIPs, podDIPs, podEIPs, true) }) + t.Run("IPv6", func(t *testing.T) { testHelper(t, data, true) }) } } -func checkIntraNodeFlows(t *testing.T, data *TestData, podAIPs, podBIPs *PodIPs, isIPv6 bool) { +func checkIntraNodeFlows(t *testing.T, data *TestData, podAIPs, podBIPs *PodIPs, isIPv6 bool, labelFilter string) { np1, np2 := deployK8sNetworkPolicies(t, data, "perftest-a", "perftest-b") defer func() { if np1 != nil { @@ -276,14 +295,14 @@ func checkIntraNodeFlows(t *testing.T, data *TestData, podAIPs, podBIPs *PodIPs, } }() if !isIPv6 { - checkRecordsForFlows(t, data, podAIPs.ipv4.String(), podBIPs.ipv4.String(), isIPv6, true, false, true, false) + checkRecordsForFlows(t, data, podAIPs.IPv4.String(), podBIPs.IPv4.String(), isIPv6, true, false, true, false, labelFilter) } else { - checkRecordsForFlows(t, data, podAIPs.ipv6.String(), podBIPs.ipv6.String(), isIPv6, true, false, true, false) + checkRecordsForFlows(t, data, podAIPs.IPv6.String(), podBIPs.IPv6.String(), isIPv6, true, false, true, false, labelFilter) } } -func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs, podEIPs *PodIPs, isIPv6 bool) { - svcB, svcC, err := createPerftestServices(data, isIPv6) +func testHelper(t *testing.T, data *TestData, isIPv6 bool) { + _, svcB, svcC, svcD, svcE, err := createPerftestServices(data, isIPv6) if err != nil { t.Fatalf("Error when creating perftest Services: %v", err) } @@ -295,7 +314,13 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // and their flow information is exported as IPFIX flow records. // K8s network policies are being tested here. t.Run("IntraNodeFlows", func(t *testing.T) { - checkIntraNodeFlows(t, data, podAIPs, podBIPs, isIPv6) + label := "IntraNodeFlows" + // As we use the same perftest Pods to generate traffic across all test cases, there's a potential for collecting + // records from previous subtests. 
To mitigate this, we add a different label to perftest Pods during each subtest + // before initiating traffic. This label is then employed as a filter when collecting records from either the + // ClickHouse or the IPFIX collector Pod. + addLabelToTestPods(t, data, label, podNames) + checkIntraNodeFlows(t, data, podAIPs, podBIPs, isIPv6, label) }) // IntraNodeDenyConnIngressANP tests the case, where Pods are deployed on same Node with an Antrea ingress deny policy rule @@ -303,6 +328,8 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // perftest-a -> perftest-b (Ingress reject), perftest-a -> perftest-d (Ingress drop) t.Run("IntraNodeDenyConnIngressANP", func(t *testing.T) { skipIfAntreaPolicyDisabled(t) + label := "IntraNodeDenyConnIngressANP" + addLabelToTestPods(t, data, label, podNames) anp1, anp2 := deployDenyAntreaNetworkPolicies(t, data, "perftest-a", "perftest-b", "perftest-d", controlPlaneNodeName(), controlPlaneNodeName(), true) defer func() { if anp1 != nil { @@ -325,11 +352,11 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs dstPodName: "perftest-d", } if !isIPv6 { - testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.ipv4.String(), podBIPs.ipv4.String(), podAIPs.ipv4.String(), podDIPs.ipv4.String() - checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, true) + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv4.String(), podBIPs.IPv4.String(), podAIPs.IPv4.String(), podDIPs.IPv4.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, true, false, label) } else { - testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.ipv6.String(), podBIPs.ipv6.String(), podAIPs.ipv6.String(), podDIPs.ipv6.String() - checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, true) + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv6.String(), podBIPs.IPv6.String(), podAIPs.IPv6.String(), podDIPs.IPv6.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, true, false, label) } }) @@ -338,6 +365,8 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // perftest-a (Egress reject) -> perftest-b , perftest-a (Egress drop) -> perftest-d t.Run("IntraNodeDenyConnEgressANP", func(t *testing.T) { skipIfAntreaPolicyDisabled(t) + label := "IntraNodeDenyConnEgressANP" + addLabelToTestPods(t, data, label, podNames) anp1, anp2 := deployDenyAntreaNetworkPolicies(t, data, "perftest-a", "perftest-b", "perftest-d", controlPlaneNodeName(), controlPlaneNodeName(), false) defer func() { if anp1 != nil { @@ -360,11 +389,11 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs dstPodName: "perftest-d", } if !isIPv6 { - testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.ipv4.String(), podBIPs.ipv4.String(), podAIPs.ipv4.String(), podDIPs.ipv4.String() - checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, true) + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv4.String(), podBIPs.IPv4.String(), podAIPs.IPv4.String(), podDIPs.IPv4.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, true, false, label) } else { - testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.ipv6.String(), podBIPs.ipv6.String(), podAIPs.ipv6.String(), podDIPs.ipv6.String() - checkRecordsForDenyFlows(t, data, 
testFlow1, testFlow2, isIPv6, true, true) + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv6.String(), podBIPs.IPv6.String(), podAIPs.IPv6.String(), podDIPs.IPv6.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, true, false, label) } }) @@ -373,6 +402,8 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // perftest-a -> perftest-b (Ingress deny), perftest-d (Egress deny) -> perftest-a t.Run("IntraNodeDenyConnNP", func(t *testing.T) { skipIfAntreaPolicyDisabled(t) + label := "IntraNodeDenyConnNP" + addLabelToTestPods(t, data, label, podNames) np1, np2 := deployDenyNetworkPolicies(t, data, "perftest-b", "perftest-d", controlPlaneNodeName(), controlPlaneNodeName()) defer func() { if np1 != nil { @@ -395,11 +426,95 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs dstPodName: "perftest-a", } if !isIPv6 { - testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.ipv4.String(), podBIPs.ipv4.String(), podDIPs.ipv4.String(), podAIPs.ipv4.String() - checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, false) + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv4.String(), podBIPs.IPv4.String(), podDIPs.IPv4.String(), podAIPs.IPv4.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, false, false, label) + } else { + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv6.String(), podBIPs.IPv6.String(), podDIPs.IPv6.String(), podAIPs.IPv6.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, false, false, label) + } + }) + + // IntraNodeDenyConnIngressANPThroughSvc tests the case, where Pods are deployed on same Node with an Antrea + // ingress deny policy rule applied to destination Pod (one reject rule, one drop rule) and their flow information + // is exported as IPFIX flow records. The test also verify if the service information is well filled in the record. 
+ // perftest-a -> svcB -> perftest-b (Ingress reject), perftest-a -> svcD ->perftest-d (Ingress drop) + t.Run("IntraNodeDenyConnIngressANPThroughSvc", func(t *testing.T) { + skipIfAntreaPolicyDisabled(t) + label := "IntraNodeDenyConnIngressANPThroughSvc" + addLabelToTestPods(t, data, label, podNames) + anp1, anp2 := deployDenyAntreaNetworkPolicies(t, data, "perftest-a", "perftest-b", "perftest-d", controlPlaneNodeName(), controlPlaneNodeName(), true) + defer func() { + if anp1 != nil { + if err = data.deleteAntreaNetworkpolicy(anp1); err != nil { + t.Errorf("Error when deleting Antrea Network Policy: %v", err) + } + } + if anp2 != nil { + if err = data.deleteAntreaNetworkpolicy(anp2); err != nil { + t.Errorf("Error when deleting Antrea Network Policy: %v", err) + } + } + }() + testFlow1 := testFlow{ + srcPodName: "perftest-a", + dstPodName: "perftest-b", + svcIP: svcB.Spec.ClusterIP, + checkDstSvc: true, + } + testFlow2 := testFlow{ + srcPodName: "perftest-a", + dstPodName: "perftest-d", + svcIP: svcD.Spec.ClusterIP, + checkDstSvc: true, + } + if !isIPv6 { + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv4.String(), podBIPs.IPv4.String(), podAIPs.IPv4.String(), podDIPs.IPv4.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, true, true, label) } else { - testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.ipv6.String(), podBIPs.ipv6.String(), podDIPs.ipv6.String(), podAIPs.ipv6.String() - checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, false) + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv6.String(), podBIPs.IPv6.String(), podAIPs.IPv6.String(), podDIPs.IPv6.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, true, true, label) + } + }) + + // IntraNodeDenyConnEgressANPThroughSvc tests the case, where Pods are deployed on same Node with an Antrea + // egress deny policy rule applied to source Pod (one reject rule, one drop rule) and their flow information + // is exported as IPFIX flow records. The test also verify if the service information is well filled in the record. 
+ // perftest-a (Egress reject) -> svcB ->perftest-b, perftest-a (Egress drop) -> svcD -> perftest-d + t.Run("IntraNodeDenyConnEgressANPThroughSvc", func(t *testing.T) { + skipIfAntreaPolicyDisabled(t) + label := "IntraNodeDenyConnEgressANPThroughSvc" + addLabelToTestPods(t, data, label, podNames) + anp1, anp2 := deployDenyAntreaNetworkPolicies(t, data, "perftest-a", "perftest-b", "perftest-d", controlPlaneNodeName(), controlPlaneNodeName(), false) + defer func() { + if anp1 != nil { + if err = data.deleteAntreaNetworkpolicy(anp1); err != nil { + t.Errorf("Error when deleting Antrea Network Policy: %v", err) + } + } + if anp2 != nil { + if err = data.deleteAntreaNetworkpolicy(anp2); err != nil { + t.Errorf("Error when deleting Antrea Network Policy: %v", err) + } + } + }() + testFlow1 := testFlow{ + srcPodName: "perftest-a", + dstPodName: "perftest-b", + svcIP: svcB.Spec.ClusterIP, + checkDstSvc: true, + } + testFlow2 := testFlow{ + srcPodName: "perftest-a", + dstPodName: "perftest-d", + svcIP: svcD.Spec.ClusterIP, + checkDstSvc: true, + } + if !isIPv6 { + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv4.String(), podBIPs.IPv4.String(), podAIPs.IPv4.String(), podDIPs.IPv4.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, true, true, label) + } else { + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv6.String(), podBIPs.IPv6.String(), podAIPs.IPv6.String(), podDIPs.IPv6.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, true, true, true, label) } }) @@ -408,6 +523,8 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // Antrea network policies are being tested here. t.Run("InterNodeFlows", func(t *testing.T) { skipIfAntreaPolicyDisabled(t) + label := "InterNodeFlows" + addLabelToTestPods(t, data, label, podNames) anp1, anp2 := deployAntreaNetworkPolicies(t, data, "perftest-a", "perftest-c", controlPlaneNodeName(), workerNodeName(1)) defer func() { if anp1 != nil { @@ -418,9 +535,9 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs } }() if !isIPv6 { - checkRecordsForFlows(t, data, podAIPs.ipv4.String(), podCIPs.ipv4.String(), isIPv6, false, false, false, true) + checkRecordsForFlows(t, data, podAIPs.IPv4.String(), podCIPs.IPv4.String(), isIPv6, false, false, false, true, label) } else { - checkRecordsForFlows(t, data, podAIPs.ipv6.String(), podCIPs.ipv6.String(), isIPv6, false, false, false, true) + checkRecordsForFlows(t, data, podAIPs.IPv6.String(), podCIPs.IPv6.String(), isIPv6, false, false, false, true, label) } }) @@ -429,6 +546,8 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // perftest-a -> perftest-c (Ingress reject), perftest-a -> perftest-e (Ingress drop) t.Run("InterNodeDenyConnIngressANP", func(t *testing.T) { skipIfAntreaPolicyDisabled(t) + label := "InterNodeDenyConnIngressANP" + addLabelToTestPods(t, data, label, podNames) anp1, anp2 := deployDenyAntreaNetworkPolicies(t, data, "perftest-a", "perftest-c", "perftest-e", controlPlaneNodeName(), workerNodeName(1), true) defer func() { if anp1 != nil { @@ -451,11 +570,11 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs dstPodName: "perftest-e", } if !isIPv6 { - testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.ipv4.String(), podCIPs.ipv4.String(), podAIPs.ipv4.String(), podEIPs.ipv4.String() - checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, 
isIPv6, false, true) + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv4.String(), podCIPs.IPv4.String(), podAIPs.IPv4.String(), podEIPs.IPv4.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, true, false, label) } else { - testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.ipv6.String(), podCIPs.ipv6.String(), podAIPs.ipv6.String(), podEIPs.ipv6.String() - checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, true) + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv6.String(), podCIPs.IPv6.String(), podAIPs.IPv6.String(), podEIPs.IPv6.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, true, false, label) } }) @@ -464,6 +583,8 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // perftest-a (Egress reject) -> perftest-c, perftest-a (Egress drop)-> perftest-e t.Run("InterNodeDenyConnEgressANP", func(t *testing.T) { skipIfAntreaPolicyDisabled(t) + label := "InterNodeDenyConnEgressANP" + addLabelToTestPods(t, data, label, podNames) anp1, anp2 := deployDenyAntreaNetworkPolicies(t, data, "perftest-a", "perftest-c", "perftest-e", controlPlaneNodeName(), workerNodeName(1), false) defer func() { if anp1 != nil { @@ -486,11 +607,11 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs dstPodName: "perftest-e", } if !isIPv6 { - testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.ipv4.String(), podCIPs.ipv4.String(), podAIPs.ipv4.String(), podEIPs.ipv4.String() - checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, true) + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv4.String(), podCIPs.IPv4.String(), podAIPs.IPv4.String(), podEIPs.IPv4.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, true, false, label) } else { - testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.ipv6.String(), podCIPs.ipv6.String(), podAIPs.ipv6.String(), podEIPs.ipv6.String() - checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, true) + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv6.String(), podCIPs.IPv6.String(), podAIPs.IPv6.String(), podEIPs.IPv6.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, true, false, label) } }) @@ -499,6 +620,8 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // perftest-a -> perftest-c (Ingress deny), perftest-b (Egress deny) -> perftest-e t.Run("InterNodeDenyConnNP", func(t *testing.T) { skipIfAntreaPolicyDisabled(t) + label := "InterNodeDenyConnNP" + addLabelToTestPods(t, data, label, podNames) np1, np2 := deployDenyNetworkPolicies(t, data, "perftest-c", "perftest-b", workerNodeName(1), controlPlaneNodeName()) defer func() { if np1 != nil { @@ -521,11 +644,100 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs dstPodName: "perftest-e", } if !isIPv6 { - testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.ipv4.String(), podCIPs.ipv4.String(), podBIPs.ipv4.String(), podEIPs.ipv4.String() - checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, false) + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv4.String(), podCIPs.IPv4.String(), podBIPs.IPv4.String(), podEIPs.IPv4.String() + checkRecordsForDenyFlows(t, data, 
testFlow1, testFlow2, isIPv6, false, false, false, label) } else { - testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.ipv6.String(), podCIPs.ipv6.String(), podBIPs.ipv6.String(), podEIPs.ipv6.String() - checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, false) + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv6.String(), podCIPs.IPv6.String(), podBIPs.IPv6.String(), podEIPs.IPv6.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, false, false, label) + } + }) + + // InterNodeDenyConnIngressANPThroughSvc tests the case, where Pods are deployed on different Node with an Antrea + // ingress deny policy rule applied to destination Pod (one reject rule, one drop rule) and their flow information + // is exported as IPFIX flow records. The test also verify if the service information is well filled in the record. + // perftest-a -> svcC -> perftest-c (Ingress reject), perftest-a -> svcE -> perftest-e (Ingress drop) + t.Run("InterNodeDenyConnIngressANPThroughSvc", func(t *testing.T) { + skipIfAntreaPolicyDisabled(t) + label := "InterNodeDenyConnIngressANPThroughSvc" + addLabelToTestPods(t, data, label, podNames) + anp1, anp2 := deployDenyAntreaNetworkPolicies(t, data, "perftest-a", "perftest-c", "perftest-e", controlPlaneNodeName(), workerNodeName(1), true) + defer func() { + if anp1 != nil { + if err = data.deleteAntreaNetworkpolicy(anp1); err != nil { + t.Errorf("Error when deleting Antrea Network Policy: %v", err) + } + } + if anp2 != nil { + if err = data.deleteAntreaNetworkpolicy(anp2); err != nil { + t.Errorf("Error when deleting Antrea Network Policy: %v", err) + } + } + }() + // In theory, it's not possible to retrieve service information for these two flows because the packets are + // either rejected or dropped in other nodes. Nevertheless, we can still observe the connection being recorded + // in the conntrack table on the source node in cases of drop. This results in the aggregation process still + // occurring within our flow-aggregator. Consequently, we can still see the service information when dealing + // with inter-node traffic subject to an ingress drop network policy + testFlow1 := testFlow{ + srcPodName: "perftest-a", + dstPodName: "perftest-c", + svcIP: svcC.Spec.ClusterIP, + checkDstSvc: false, + } + testFlow2 := testFlow{ + srcPodName: "perftest-a", + dstPodName: "perftest-e", + svcIP: svcE.Spec.ClusterIP, + checkDstSvc: true, + } + if !isIPv6 { + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv4.String(), podCIPs.IPv4.String(), podAIPs.IPv4.String(), podEIPs.IPv4.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, true, true, label) + } else { + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv6.String(), podCIPs.IPv6.String(), podAIPs.IPv6.String(), podEIPs.IPv6.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, true, true, label) + } + }) + + // InterNodeDenyConnEgressANPThroughSvc tests the case, where Pods are deployed on different Node with an Antrea + // egress deny policy rule applied to source Pod (one reject rule, one drop rule) and their flow information + // is exported as IPFIX flow records. The test also verify if the service information is well filled in the record. 
+ // perftest-a (Egress reject) -> svcC -> perftest-c, perftest-a (Egress drop) -> svcE -> perftest-e + t.Run("InterNodeDenyConnEgressANPThroughSvc", func(t *testing.T) { + skipIfAntreaPolicyDisabled(t) + label := "InterNodeDenyConnEgressANPThroughSvc" + addLabelToTestPods(t, data, label, podNames) + anp1, anp2 := deployDenyAntreaNetworkPolicies(t, data, "perftest-a", "perftest-c", "perftest-e", controlPlaneNodeName(), workerNodeName(1), false) + defer func() { + if anp1 != nil { + if err = data.deleteAntreaNetworkpolicy(anp1); err != nil { + t.Errorf("Error when deleting Antrea Network Policy: %v", err) + } + } + if anp2 != nil { + if err = data.deleteAntreaNetworkpolicy(anp2); err != nil { + t.Errorf("Error when deleting Antrea Network Policy: %v", err) + } + } + }() + testFlow1 := testFlow{ + srcPodName: "perftest-a", + dstPodName: "perftest-c", + svcIP: svcC.Spec.ClusterIP, + checkDstSvc: true, + } + testFlow2 := testFlow{ + srcPodName: "perftest-a", + dstPodName: "perftest-e", + svcIP: svcE.Spec.ClusterIP, + checkDstSvc: true, + } + if !isIPv6 { + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv4.String(), podCIPs.IPv4.String(), podAIPs.IPv4.String(), podEIPs.IPv4.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, true, true, label) + } else { + testFlow1.srcIP, testFlow1.dstIP, testFlow2.srcIP, testFlow2.dstIP = podAIPs.IPv6.String(), podCIPs.IPv6.String(), podAIPs.IPv6.String(), podEIPs.IPv6.String() + checkRecordsForDenyFlows(t, data, testFlow1, testFlow2, isIPv6, false, true, true, label) } }) @@ -545,6 +757,8 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // Deploy the client Pod on the control-plane node clientName, clientIPs, clientCleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", nodeName(0), data.testNamespace, false) defer clientCleanupFunc() + label := "ToExternalEgressOnSourceNode" + addLabelToTestPods(t, data, label, []string{clientName}) // Create an Egress and the Egress IP is assigned to the Node running the client Pods var egressNodeIP string @@ -560,14 +774,13 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs } t.Logf("Egress %s is realized with Egress IP %s", egress.Name, egressNodeIP) defer data.crdClient.CrdV1beta1().Egresses().Delete(context.TODO(), egress.Name, metav1.DeleteOptions{}) - if !isIPv6 { - if clientIPs.ipv4 != nil && serverIPs.ipv4 != nil { - checkRecordsForToExternalFlows(t, data, nodeName(0), clientName, clientIPs.ipv4.String(), serverIPs.ipv4.String(), serverPodPort, isIPv6, egress.Name, egressNodeIP) + if clientIPs.IPv4 != nil && serverIPs.IPv4 != nil { + checkRecordsForToExternalFlows(t, data, nodeName(0), clientName, clientIPs.IPv4.String(), serverIPs.IPv4.String(), serverPodPort, isIPv6, egress.Name, egressNodeIP, label) } } else { - if clientIPs.ipv6 != nil && serverIPs.ipv6 != nil { - checkRecordsForToExternalFlows(t, data, nodeName(0), clientName, clientIPs.ipv6.String(), serverIPs.ipv6.String(), serverPodPort, isIPv6, egress.Name, egressNodeIP) + if clientIPs.IPv6 != nil && serverIPs.IPv6 != nil { + checkRecordsForToExternalFlows(t, data, nodeName(0), clientName, clientIPs.IPv6.String(), serverIPs.IPv6.String(), serverPodPort, isIPv6, egress.Name, egressNodeIP, label) } } }) @@ -585,6 +798,8 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // Deploy the client Pod on the control-plane node clientName, clientIPs, clientCleanupFunc := 
createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", nodeName(0), data.testNamespace, false) defer clientCleanupFunc() + label := "ToExternalEgressOnOtherNode" + addLabelToTestPods(t, data, label, []string{clientName}) // Create an Egress and the Egress IP is assigned to the Node not running the client Pods var egressNodeIP string @@ -600,14 +815,13 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs } t.Logf("Egress %s is realized with Egress IP %s", egress.Name, egressNodeIP) defer data.crdClient.CrdV1beta1().Egresses().Delete(context.TODO(), egress.Name, metav1.DeleteOptions{}) - if !isIPv6 { - if clientIPs.ipv4 != nil && serverIPs.ipv4 != nil { - checkRecordsForToExternalFlows(t, data, nodeName(0), clientName, clientIPs.ipv4.String(), serverIPs.ipv4.String(), serverPodPort, isIPv6, egress.Name, egressNodeIP) + if clientIPs.IPv4 != nil && serverIPs.IPv4 != nil { + checkRecordsForToExternalFlows(t, data, nodeName(0), clientName, clientIPs.IPv4.String(), serverIPs.IPv4.String(), serverPodPort, isIPv6, egress.Name, egressNodeIP, label) } } else { - if clientIPs.ipv6 != nil && serverIPs.ipv6 != nil { - checkRecordsForToExternalFlows(t, data, nodeName(0), clientName, clientIPs.ipv6.String(), serverIPs.ipv6.String(), serverPodPort, isIPv6, egress.Name, egressNodeIP) + if clientIPs.IPv6 != nil && serverIPs.IPv6 != nil { + checkRecordsForToExternalFlows(t, data, nodeName(0), clientName, clientIPs.IPv6.String(), serverIPs.IPv6.String(), serverPodPort, isIPv6, egress.Name, egressNodeIP, label) } } }) @@ -618,14 +832,15 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // Deploy the client Pod on the control-plane node clientName, clientIPs, clientCleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", nodeName(0), data.testNamespace, false) defer clientCleanupFunc() - + label := "ToExternalFlows" + addLabelToTestPods(t, data, label, []string{clientName}) if !isIPv6 { - if clientIPs.ipv4 != nil && serverIPs.ipv4 != nil { - checkRecordsForToExternalFlows(t, data, nodeName(0), clientName, clientIPs.ipv4.String(), serverIPs.ipv4.String(), serverPodPort, isIPv6, "", "") + if clientIPs.IPv4 != nil && serverIPs.IPv4 != nil { + checkRecordsForToExternalFlows(t, data, nodeName(0), clientName, clientIPs.IPv4.String(), serverIPs.IPv4.String(), serverPodPort, isIPv6, "", "", label) } } else { - if clientIPs.ipv6 != nil && serverIPs.ipv6 != nil { - checkRecordsForToExternalFlows(t, data, nodeName(0), clientName, clientIPs.ipv6.String(), serverIPs.ipv6.String(), serverPodPort, isIPv6, "", "") + if clientIPs.IPv6 != nil && serverIPs.IPv6 != nil { + checkRecordsForToExternalFlows(t, data, nodeName(0), clientName, clientIPs.IPv6.String(), serverIPs.IPv6.String(), serverPodPort, isIPv6, "", "", label) } } }) @@ -633,28 +848,32 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // LocalServiceAccess tests the case, where Pod and Service are deployed on the same Node and their flow information is exported as IPFIX flow records. t.Run("LocalServiceAccess", func(t *testing.T) { skipIfProxyDisabled(t, data) + label := "LocalServiceAccess" + addLabelToTestPods(t, data, label, podNames) // In dual stack cluster, Service IP can be assigned as different IP family from specified. // In that case, source IP and destination IP will align with IP family of Service IP. // For IPv4-only and IPv6-only cluster, IP family of Service IP will be same as Pod IPs. 
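For reference, the isServiceIPv6 check that follows works because net.ParseIP(...).To4() returns nil for a valid IPv6 address; a minimal standalone sketch of that dual-stack family detection (the example ClusterIP values below are hypothetical):

package main

import (
	"fmt"
	"net"
)

// isIPv6Addr mirrors the ClusterIP family check used by these Service tests:
// To4() is nil for an IPv6 address and non-nil for an IPv4 address.
func isIPv6Addr(ip string) bool {
	parsed := net.ParseIP(ip)
	return parsed != nil && parsed.To4() == nil
}

func main() {
	fmt.Println(isIPv6Addr("10.96.0.10"))    // false: IPv4 ClusterIP
	fmt.Println(isIPv6Addr("fd00:10:96::a")) // true: IPv6 ClusterIP
}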
isServiceIPv6 := net.ParseIP(svcB.Spec.ClusterIP).To4() == nil if isServiceIPv6 { - checkRecordsForFlows(t, data, podAIPs.ipv6.String(), svcB.Spec.ClusterIP, isServiceIPv6, true, true, false, false) + checkRecordsForFlows(t, data, podAIPs.IPv6.String(), svcB.Spec.ClusterIP, isServiceIPv6, true, true, false, false, label) } else { - checkRecordsForFlows(t, data, podAIPs.ipv4.String(), svcB.Spec.ClusterIP, isServiceIPv6, true, true, false, false) + checkRecordsForFlows(t, data, podAIPs.IPv4.String(), svcB.Spec.ClusterIP, isServiceIPv6, true, true, false, false, label) } }) // RemoteServiceAccess tests the case, where Pod and Service are deployed on different Nodes and their flow information is exported as IPFIX flow records. t.Run("RemoteServiceAccess", func(t *testing.T) { skipIfProxyDisabled(t, data) + label := "RemoteServiceAccess" + addLabelToTestPods(t, data, label, podNames) // In dual stack cluster, Service IP can be assigned as different IP family from specified. // In that case, source IP and destination IP will align with IP family of Service IP. // For IPv4-only and IPv6-only cluster, IP family of Service IP will be same as Pod IPs. isServiceIPv6 := net.ParseIP(svcC.Spec.ClusterIP).To4() == nil if isServiceIPv6 { - checkRecordsForFlows(t, data, podAIPs.ipv6.String(), svcC.Spec.ClusterIP, isServiceIPv6, false, true, false, false) + checkRecordsForFlows(t, data, podAIPs.IPv6.String(), svcC.Spec.ClusterIP, isServiceIPv6, false, true, false, false, label) } else { - checkRecordsForFlows(t, data, podAIPs.ipv4.String(), svcC.Spec.ClusterIP, isServiceIPv6, false, true, false, false) + checkRecordsForFlows(t, data, podAIPs.IPv4.String(), svcC.Spec.ClusterIP, isServiceIPv6, false, true, false, false, label) } }) @@ -694,12 +913,12 @@ func checkAntctlGetFlowRecordsJson(t *testing.T, data *TestData, podName string, var cmdStr, srcIP, dstIP string // trigger a flow with iperf if !isIPv6 { - srcIP = podAIPs.ipv4.String() - dstIP = podBIPs.ipv4.String() + srcIP = podAIPs.IPv4.String() + dstIP = podBIPs.IPv4.String() cmdStr = fmt.Sprintf("iperf3 -c %s -t %d", dstIP, iperfTimeSecShort) } else { - srcIP = podAIPs.ipv6.String() - dstIP = podBIPs.ipv6.String() + srcIP = podAIPs.IPv6.String() + dstIP = podBIPs.IPv6.String() cmdStr = fmt.Sprintf("iperf3 -6 -c %s -t %d", dstIP, iperfTimeSecShort) } stdout, _, err := data.RunCommandFromPod(data.testNamespace, "perftest-a", "iperf", []string{"bash", "-c", cmdStr}) @@ -754,13 +973,16 @@ func checkAntctlRecord(t *testing.T, record map[string]interface{}, srcIP, dstIP assert.EqualValues(protocolIdentifierTCP, record["protocolIdentifier"], "The record from antctl does not have correct protocolIdentifier") } -func checkRecordsForFlows(t *testing.T, data *TestData, srcIP string, dstIP string, isIPv6 bool, isIntraNode bool, checkService bool, checkK8sNetworkPolicy bool, checkAntreaNetworkPolicy bool) { +func checkRecordsForFlows(t *testing.T, data *TestData, srcIP string, dstIP string, isIPv6 bool, isIntraNode bool, checkService bool, checkK8sNetworkPolicy bool, checkAntreaNetworkPolicy bool, labelFilter string) { var cmdStr string if !isIPv6 { cmdStr = fmt.Sprintf("iperf3 -c %s -t %d -b %s", dstIP, iperfTimeSec, iperfBandwidth) } else { cmdStr = fmt.Sprintf("iperf3 -6 -c %s -t %d -b %s", dstIP, iperfTimeSec, iperfBandwidth) } + if checkService { + cmdStr += fmt.Sprintf(" -p %d", iperfSvcPort) + } stdout, _, err := data.RunCommandFromPod(data.testNamespace, "perftest-a", "iperf", []string{"bash", "-c", cmdStr}) require.NoErrorf(t, err, "Error when running 
iperf3 client: %v", err) bwSlice, srcPort, _ := getBandwidthAndPorts(stdout) @@ -775,92 +997,84 @@ func checkRecordsForFlows(t *testing.T, data *TestData, srcIP string, dstIP stri t.Fatalf("Unit of the traffic bandwidth reported by iperf should be Mbits.") } - checkRecordsForFlowsCollector(t, data, srcIP, dstIP, srcPort, isIPv6, isIntraNode, checkService, checkK8sNetworkPolicy, checkAntreaNetworkPolicy, bandwidthInMbps) - checkRecordsForFlowsClickHouse(t, data, srcIP, dstIP, srcPort, isIntraNode, checkService, checkK8sNetworkPolicy, checkAntreaNetworkPolicy, bandwidthInMbps) + checkRecordsForFlowsCollector(t, data, srcIP, dstIP, srcPort, isIPv6, isIntraNode, checkService, checkK8sNetworkPolicy, checkAntreaNetworkPolicy, bandwidthInMbps, labelFilter) + checkRecordsForFlowsClickHouse(t, data, srcIP, dstIP, srcPort, isIntraNode, checkService, checkK8sNetworkPolicy, checkAntreaNetworkPolicy, bandwidthInMbps, labelFilter) } -func checkRecordsForFlowsCollector(t *testing.T, data *TestData, srcIP, dstIP, srcPort string, isIPv6, isIntraNode, checkService, checkK8sNetworkPolicy, checkAntreaNetworkPolicy bool, bandwidthInMbps float64) { - collectorOutput, recordSlices := getCollectorOutput(t, srcIP, dstIP, srcPort, checkService, true, isIPv6, data) +func checkRecordsForFlowsCollector(t *testing.T, data *TestData, srcIP, dstIP, srcPort string, isIPv6, isIntraNode, checkService, checkK8sNetworkPolicy, checkAntreaNetworkPolicy bool, bandwidthInMbps float64, labelFilter string) { + collectorOutput, recordSlices := getCollectorOutput(t, srcIP, dstIP, srcPort, checkService, true, isIPv6, data, labelFilter) + // Checking only data records as data records cannot be decoded without template + // record. + assert.GreaterOrEqualf(t, len(recordSlices), expectedNumDataRecords, "IPFIX collector should receive expected number of flow records. Considered records: %s \n Collector output: %s", recordSlices, collectorOutput) // Iterate over recordSlices and build some results to test with expected results - dataRecordsCount := 0 - src, dst := matchSrcAndDstAddress(srcIP, dstIP, checkService, isIPv6) for _, record := range recordSlices { - // Check the source port along with source and destination IPs as there - // are flow records for control flows during the iperf with same IPs - // and destination port. - if strings.Contains(record, src) && strings.Contains(record, dst) && strings.Contains(record, srcPort) { - dataRecordsCount = dataRecordsCount + 1 - // Check if record has both Pod name of source and destination Pod. + // Check if record has both Pod name of source and destination Pod. 
+ if isIntraNode { + checkPodAndNodeData(t, record, "perftest-a", controlPlaneNodeName(), "perftest-b", controlPlaneNodeName(), data.testNamespace) + checkFlowType(t, record, ipfixregistry.FlowTypeIntraNode) + } else { + checkPodAndNodeData(t, record, "perftest-a", controlPlaneNodeName(), "perftest-c", workerNodeName(1), data.testNamespace) + checkFlowType(t, record, ipfixregistry.FlowTypeInterNode) + } + assert := assert.New(t) + if checkService { if isIntraNode { - checkPodAndNodeData(t, record, "perftest-a", controlPlaneNodeName(), "perftest-b", controlPlaneNodeName(), data.testNamespace) - checkFlowType(t, record, ipfixregistry.FlowTypeIntraNode) + assert.Contains(record, data.testNamespace+"/perftest-b", "Record with ServiceIP does not have Service name") } else { - checkPodAndNodeData(t, record, "perftest-a", controlPlaneNodeName(), "perftest-c", workerNodeName(1), data.testNamespace) - checkFlowType(t, record, ipfixregistry.FlowTypeInterNode) - } - assert := assert.New(t) - if checkService { - if isIntraNode { - assert.Contains(record, data.testNamespace+"/perftest-b", "Record with ServiceIP does not have Service name") - } else { - assert.Contains(record, data.testNamespace+"/perftest-c", "Record with ServiceIP does not have Service name") - } - } - if checkK8sNetworkPolicy { - // Check if records have both ingress and egress network policies. - assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyName: %s", ingressAllowNetworkPolicyName), "Record does not have the correct NetworkPolicy name with the ingress rule") - assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have the correct NetworkPolicy Namespace with the ingress rule") - assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyType: %d", ipfixregistry.PolicyTypeK8sNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the ingress rule") - assert.Contains(record, fmt.Sprintf("egressNetworkPolicyName: %s", egressAllowNetworkPolicyName), "Record does not have the correct NetworkPolicy name with the egress rule") - assert.Contains(record, fmt.Sprintf("egressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have the correct NetworkPolicy Namespace with the egress rule") - assert.Contains(record, fmt.Sprintf("egressNetworkPolicyType: %d", ipfixregistry.PolicyTypeK8sNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the egress rule") - } - if checkAntreaNetworkPolicy { - // Check if records have both ingress and egress network policies. 
- assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyName: %s", ingressAntreaNetworkPolicyName), "Record does not have the correct NetworkPolicy name with the ingress rule") - assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have the correct NetworkPolicy Namespace with the ingress rule") - assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyType: %d", ipfixregistry.PolicyTypeAntreaNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the ingress rule") - assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyRuleName: %s", testIngressRuleName), "Record does not have the correct NetworkPolicy RuleName with the ingress rule") - assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyRuleAction: %d", ipfixregistry.NetworkPolicyRuleActionAllow), "Record does not have the correct NetworkPolicy RuleAction with the ingress rule") - assert.Contains(record, fmt.Sprintf("egressNetworkPolicyName: %s", egressAntreaNetworkPolicyName), "Record does not have the correct NetworkPolicy name with the egress rule") - assert.Contains(record, fmt.Sprintf("egressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have the correct NetworkPolicy Namespace with the egress rule") - assert.Contains(record, fmt.Sprintf("egressNetworkPolicyType: %d", ipfixregistry.PolicyTypeAntreaNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the egress rule") - assert.Contains(record, fmt.Sprintf("egressNetworkPolicyRuleName: %s", testEgressRuleName), "Record does not have the correct NetworkPolicy RuleName with the egress rule") - assert.Contains(record, fmt.Sprintf("egressNetworkPolicyRuleAction: %d", ipfixregistry.NetworkPolicyRuleActionAllow), "Record does not have the correct NetworkPolicy RuleAction with the egress rule") + assert.Contains(record, data.testNamespace+"/perftest-c", "Record with ServiceIP does not have Service name") } + } + if checkK8sNetworkPolicy { + // Check if records have both ingress and egress network policies. + assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyName: %s", ingressAllowNetworkPolicyName), "Record does not have the correct NetworkPolicy name with the ingress rule") + assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have the correct NetworkPolicy Namespace with the ingress rule") + assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyType: %d", ipfixregistry.PolicyTypeK8sNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the ingress rule") + assert.Contains(record, fmt.Sprintf("egressNetworkPolicyName: %s", egressAllowNetworkPolicyName), "Record does not have the correct NetworkPolicy name with the egress rule") + assert.Contains(record, fmt.Sprintf("egressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have the correct NetworkPolicy Namespace with the egress rule") + assert.Contains(record, fmt.Sprintf("egressNetworkPolicyType: %d", ipfixregistry.PolicyTypeK8sNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the egress rule") + } + if checkAntreaNetworkPolicy { + // Check if records have both ingress and egress network policies. 
+ assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyName: %s", ingressAntreaNetworkPolicyName), "Record does not have the correct NetworkPolicy name with the ingress rule") + assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have the correct NetworkPolicy Namespace with the ingress rule") + assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyType: %d", ipfixregistry.PolicyTypeAntreaNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the ingress rule") + assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyRuleName: %s", testIngressRuleName), "Record does not have the correct NetworkPolicy RuleName with the ingress rule") + assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyRuleAction: %d", ipfixregistry.NetworkPolicyRuleActionAllow), "Record does not have the correct NetworkPolicy RuleAction with the ingress rule") + assert.Contains(record, fmt.Sprintf("egressNetworkPolicyName: %s", egressAntreaNetworkPolicyName), "Record does not have the correct NetworkPolicy name with the egress rule") + assert.Contains(record, fmt.Sprintf("egressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have the correct NetworkPolicy Namespace with the egress rule") + assert.Contains(record, fmt.Sprintf("egressNetworkPolicyType: %d", ipfixregistry.PolicyTypeAntreaNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the egress rule") + assert.Contains(record, fmt.Sprintf("egressNetworkPolicyRuleName: %s", testEgressRuleName), "Record does not have the correct NetworkPolicy RuleName with the egress rule") + assert.Contains(record, fmt.Sprintf("egressNetworkPolicyRuleAction: %d", ipfixregistry.NetworkPolicyRuleActionAllow), "Record does not have the correct NetworkPolicy RuleAction with the egress rule") + } - // Skip the bandwidth check for the iperf control flow records which have 0 throughput. - if !strings.Contains(record, "throughput: 0") { - flowStartTime := int64(getUint64FieldFromRecord(t, record, "flowStartSeconds")) - exportTime := int64(getUint64FieldFromRecord(t, record, "flowEndSeconds")) - flowEndReason := int64(getUint64FieldFromRecord(t, record, "flowEndReason")) - var recBandwidth float64 - // flowEndReason == 3 means the end of flow detected - if exportTime >= flowStartTime+iperfTimeSec || flowEndReason == 3 { - // Check average bandwidth on the last record. - octetTotalCount := getUint64FieldFromRecord(t, record, "octetTotalCount") - recBandwidth = float64(octetTotalCount) * 8 / float64(iperfTimeSec) / 1000000 - } else { - // Check bandwidth with the field "throughput" except for the last record, - // as their throughput may be significantly lower than the average Iperf throughput. - throughput := getUint64FieldFromRecord(t, record, "throughput") - recBandwidth = float64(throughput) / 1000000 - } - t.Logf("Throughput check on record with flowEndSeconds-flowStartSeconds: %v, Iperf throughput: %.2f Mbits/s, IPFIX record throughput: %.2f Mbits/s", exportTime-flowStartTime, bandwidthInMbps, recBandwidth) - assert.InDeltaf(recBandwidth, bandwidthInMbps, bandwidthInMbps*0.15, "Difference between Iperf bandwidth and IPFIX record bandwidth should be lower than 15%%, record: %s", record) + // Skip the bandwidth check for the iperf control flow records which have 0 throughput. 
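The bandwidth check in the hunk below uses octetTotalCount to compute an average rate for the final record (flowEndReason == 3) and the throughput field for intermediate records; the conversion is plain bytes-to-megabits arithmetic, sketched here (the 10-second duration is only an illustrative value):

package main

import "fmt"

// bandwidthMbps converts a byte count accumulated over durationSec seconds into
// megabits per second, matching the record check: bytes * 8 / seconds / 1e6.
func bandwidthMbps(octetTotalCount uint64, durationSec int64) float64 {
	return float64(octetTotalCount) * 8 / float64(durationSec) / 1000000
}

func main() {
	// 12,500,000 bytes transferred in 10 seconds is 10.00 Mbits/s.
	fmt.Printf("%.2f Mbits/s\n", bandwidthMbps(12500000, 10))
}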
+ if !strings.Contains(record, "throughput: 0") { + flowStartTime := int64(getUint64FieldFromRecord(t, record, "flowStartSeconds")) + exportTime := int64(getUint64FieldFromRecord(t, record, "flowEndSeconds")) + flowEndReason := int64(getUint64FieldFromRecord(t, record, "flowEndReason")) + var recBandwidth float64 + // flowEndReason == 3 means the end of flow detected + if flowEndReason == 3 { + // Check average bandwidth on the last record. + octetTotalCount := getUint64FieldFromRecord(t, record, "octetTotalCount") + recBandwidth = float64(octetTotalCount) * 8 / float64(iperfTimeSec) / 1000000 + } else { + // Check bandwidth with the field "throughput" except for the last record, + // as their throughput may be significantly lower than the average Iperf throughput. + throughput := getUint64FieldFromRecord(t, record, "throughput") + recBandwidth = float64(throughput) / 1000000 } + t.Logf("Throughput check on record with flowEndSeconds-flowStartSeconds: %v, Iperf throughput: %.2f Mbits/s, IPFIX record throughput: %.2f Mbits/s", exportTime-flowStartTime, bandwidthInMbps, recBandwidth) + assert.InDeltaf(recBandwidth, bandwidthInMbps, bandwidthInMbps*0.15, "Difference between Iperf bandwidth and IPFIX record bandwidth should be lower than 15%%, record: %s", record) } } - // Checking only data records as data records cannot be decoded without template - // record. - assert.GreaterOrEqualf(t, dataRecordsCount, expectedNumDataRecords, "IPFIX collector should receive expected number of flow records. Considered records: %s \n Collector output: %s", recordSlices, collectorOutput) } -func checkRecordsForFlowsClickHouse(t *testing.T, data *TestData, srcIP, dstIP, srcPort string, isIntraNode, checkService, checkK8sNetworkPolicy, checkAntreaNetworkPolicy bool, bandwidthInMbps float64) { +func checkRecordsForFlowsClickHouse(t *testing.T, data *TestData, srcIP, dstIP, srcPort string, isIntraNode, checkService, checkK8sNetworkPolicy, checkAntreaNetworkPolicy bool, bandwidthInMbps float64, labelFilter string) { // Check the source port along with source and destination IPs as there // are flow records for control flows during the iperf with same IPs // and destination port. - clickHouseRecords := getClickHouseOutput(t, data, srcIP, dstIP, srcPort, checkService, true) + clickHouseRecords := getClickHouseOutput(t, data, srcIP, dstIP, srcPort, checkService, true, labelFilter) for _, record := range clickHouseRecords { // Check if record has both Pod name of source and destination Pod. @@ -908,7 +1122,7 @@ func checkRecordsForFlowsClickHouse(t *testing.T, data *TestData, srcIP, dstIP, exportTime := record.FlowEndSeconds.Unix() var recBandwidth float64 // flowEndReason == 3 means the end of flow detected - if exportTime >= flowStartTime+iperfTimeSec || record.FlowEndReason == 3 { + if record.FlowEndReason == 3 { octetTotalCount := record.OctetTotalCount recBandwidth = float64(octetTotalCount) * 8 / float64(exportTime-flowStartTime) / 1000000 } else { @@ -926,7 +1140,7 @@ func checkRecordsForFlowsClickHouse(t *testing.T, data *TestData, srcIP, dstIP, assert.GreaterOrEqualf(t, len(clickHouseRecords), expectedNumDataRecords, "ClickHouse should receive expected number of flow records. 
Considered records: %s", clickHouseRecords) } -func checkRecordsForToExternalFlows(t *testing.T, data *TestData, srcNodeName string, srcPodName string, srcIP string, dstIP string, dstPort int32, isIPv6 bool, egressName, egressIP string) { +func checkRecordsForToExternalFlows(t *testing.T, data *TestData, srcNodeName string, srcPodName string, srcIP string, dstIP string, dstPort int32, isIPv6 bool, egressName, egressIP, labelFilter string) { var cmd string if !isIPv6 { cmd = fmt.Sprintf("wget -O- %s:%d", dstIP, dstPort) @@ -935,63 +1149,72 @@ func checkRecordsForToExternalFlows(t *testing.T, data *TestData, srcNodeName st } stdout, stderr, err := data.RunCommandFromPod(data.testNamespace, srcPodName, busyboxContainerName, strings.Fields(cmd)) require.NoErrorf(t, err, "Error when running wget command, stdout: %s, stderr: %s", stdout, stderr) - - _, recordSlices := getCollectorOutput(t, srcIP, dstIP, "", false, false, isIPv6, data) + _, recordSlices := getCollectorOutput(t, srcIP, dstIP, "", false, false, isIPv6, data, labelFilter) for _, record := range recordSlices { - if strings.Contains(record, srcIP) && strings.Contains(record, dstIP) { - checkPodAndNodeData(t, record, srcPodName, srcNodeName, "", "", data.testNamespace) - checkFlowType(t, record, ipfixregistry.FlowTypeToExternal) - assert.NotContains(t, record, "octetDeltaCount: 0", "octetDeltaCount should be non-zero") - if egressName != "" { - checkEgressInfo(t, record, egressName, egressIP) - } + checkPodAndNodeData(t, record, srcPodName, srcNodeName, "", "", data.testNamespace) + checkFlowType(t, record, ipfixregistry.FlowTypeToExternal) + if egressName != "" { + checkEgressInfo(t, record, egressName, egressIP) } } - clickHouseRecords := getClickHouseOutput(t, data, srcIP, dstIP, "", false, false) + clickHouseRecords := getClickHouseOutput(t, data, srcIP, dstIP, "", false, false, labelFilter) for _, record := range clickHouseRecords { checkPodAndNodeDataClickHouse(data, t, record, srcPodName, srcNodeName, "", "") checkFlowTypeClickHouse(t, record, ipfixregistry.FlowTypeToExternal) - assert.Greater(t, record.OctetDeltaCount, uint64(0), "octetDeltaCount should be non-zero") if egressName != "" { checkEgressInfoClickHouse(t, record, egressName, egressIP) } } } -func checkRecordsForDenyFlows(t *testing.T, data *TestData, testFlow1, testFlow2 testFlow, isIPv6, isIntraNode, isANP bool) { +func checkRecordsForDenyFlows(t *testing.T, data *TestData, testFlow1, testFlow2 testFlow, isIPv6, isIntraNode, isANP, useSvcIP bool, labelFilter string) { var cmdStr1, cmdStr2 string if !isIPv6 { - cmdStr1 = fmt.Sprintf("iperf3 -c %s -n 1", testFlow1.dstIP) - cmdStr2 = fmt.Sprintf("iperf3 -c %s -n 1", testFlow2.dstIP) + if useSvcIP { + cmdStr1 = fmt.Sprintf("iperf3 -c %s -p %d -n 1", testFlow1.svcIP, iperfSvcPort) + cmdStr2 = fmt.Sprintf("iperf3 -c %s -p %d -n 1", testFlow2.svcIP, iperfSvcPort) + } else { + cmdStr1 = fmt.Sprintf("iperf3 -c %s -n 1", testFlow1.dstIP) + cmdStr2 = fmt.Sprintf("iperf3 -c %s -n 1", testFlow2.dstIP) + } + } else { - cmdStr1 = fmt.Sprintf("iperf3 -6 -c %s -n 1", testFlow1.dstIP) - cmdStr2 = fmt.Sprintf("iperf3 -6 -c %s -n 1", testFlow2.dstIP) + if useSvcIP { + cmdStr1 = fmt.Sprintf("iperf3 -6 -c %s -p %d -n 1", testFlow1.svcIP, iperfSvcPort) + cmdStr2 = fmt.Sprintf("iperf3 -6 -c %s -p %d -n 1", testFlow2.svcIP, iperfSvcPort) + } else { + cmdStr1 = fmt.Sprintf("iperf3 -6 -c %s -n 1", testFlow1.dstIP) + cmdStr2 = fmt.Sprintf("iperf3 -6 -c %s -n 1", testFlow2.dstIP) + } } _, _, err := data.RunCommandFromPod(data.testNamespace, 
testFlow1.srcPodName, "", []string{"timeout", "2", "bash", "-c", cmdStr1}) assert.Error(t, err) _, _, err = data.RunCommandFromPod(data.testNamespace, testFlow2.srcPodName, "", []string{"timeout", "2", "bash", "-c", cmdStr2}) assert.Error(t, err) - checkRecordsForDenyFlowsCollector(t, data, testFlow1, testFlow2, isIPv6, isIntraNode, isANP) - checkRecordsForDenyFlowsClickHouse(t, data, testFlow1, testFlow2, isIPv6, isIntraNode, isANP) + checkRecordsForDenyFlowsCollector(t, data, testFlow1, testFlow2, isIPv6, isIntraNode, isANP, labelFilter) + checkRecordsForDenyFlowsClickHouse(t, data, testFlow1, testFlow2, isIPv6, isIntraNode, isANP, labelFilter) } -func checkRecordsForDenyFlowsCollector(t *testing.T, data *TestData, testFlow1, testFlow2 testFlow, isIPv6, isIntraNode, isANP bool) { - _, recordSlices1 := getCollectorOutput(t, testFlow1.srcIP, testFlow1.dstIP, "", false, false, isIPv6, data) - _, recordSlices2 := getCollectorOutput(t, testFlow2.srcIP, testFlow2.dstIP, "", false, false, isIPv6, data) +func checkRecordsForDenyFlowsCollector(t *testing.T, data *TestData, testFlow1, testFlow2 testFlow, isIPv6, isIntraNode, isANP bool, labelFilter string) { + _, recordSlices1 := getCollectorOutput(t, testFlow1.srcIP, testFlow1.dstIP, "", false, false, isIPv6, data, labelFilter) + _, recordSlices2 := getCollectorOutput(t, testFlow2.srcIP, testFlow2.dstIP, "", false, false, isIPv6, data, labelFilter) recordSlices := append(recordSlices1, recordSlices2...) src_flow1, dst_flow1 := matchSrcAndDstAddress(testFlow1.srcIP, testFlow1.dstIP, false, isIPv6) src_flow2, dst_flow2 := matchSrcAndDstAddress(testFlow2.srcIP, testFlow2.dstIP, false, isIPv6) // Iterate over recordSlices and build some results to test with expected results for _, record := range recordSlices { var srcPodName, dstPodName string + var checkDstSvc bool if strings.Contains(record, src_flow1) && strings.Contains(record, dst_flow1) { srcPodName = testFlow1.srcPodName dstPodName = testFlow1.dstPodName + checkDstSvc = testFlow1.checkDstSvc } else if strings.Contains(record, src_flow2) && strings.Contains(record, dst_flow2) { srcPodName = testFlow2.srcPodName dstPodName = testFlow2.dstPodName + checkDstSvc = testFlow2.checkDstSvc } if strings.Contains(record, src_flow1) && strings.Contains(record, dst_flow1) || strings.Contains(record, src_flow2) && strings.Contains(record, dst_flow2) { ingressRejectStr := fmt.Sprintf("ingressNetworkPolicyRuleAction: %d", ipfixregistry.NetworkPolicyRuleActionReject) @@ -1036,24 +1259,34 @@ func checkRecordsForDenyFlowsCollector(t *testing.T, data *TestData, testFlow1, assert.Contains(record, fmt.Sprintf("egressNetworkPolicyRuleName: %s", testEgressRuleName), "Record does not have the correct NetworkPolicy RuleName with the egress drop rule") } } - + if checkDstSvc { + destinationServicePortName := data.testNamespace + "/" + dstPodName + assert.Contains(record, fmt.Sprintf("destinationServicePortName: %s", destinationServicePortName), "Record does not have correct destinationServicePortName") + assert.Contains(record, fmt.Sprintf("destinationServicePort: %d", iperfSvcPort), "Record does not have correct destinationServicePort") + } else { + assert.Contains(record, "destinationServicePortName: \n", "Record does not have correct destinationServicePortName") + assert.Contains(record, "destinationServicePort: 0 \n", "Record does not have correct destinationServicePort") + } } } } -func checkRecordsForDenyFlowsClickHouse(t *testing.T, data *TestData, testFlow1, testFlow2 testFlow, isIPv6, isIntraNode, isANP bool) 
{ - clickHouseRecords1 := getClickHouseOutput(t, data, testFlow1.srcIP, testFlow1.dstIP, "", false, false) - clickHouseRecords2 := getClickHouseOutput(t, data, testFlow2.srcIP, testFlow2.dstIP, "", false, false) +func checkRecordsForDenyFlowsClickHouse(t *testing.T, data *TestData, testFlow1, testFlow2 testFlow, isIPv6, isIntraNode, isANP bool, labelFilter string) { + clickHouseRecords1 := getClickHouseOutput(t, data, testFlow1.srcIP, testFlow1.dstIP, "", false, false, labelFilter) + clickHouseRecords2 := getClickHouseOutput(t, data, testFlow2.srcIP, testFlow2.dstIP, "", false, false, labelFilter) recordSlices := append(clickHouseRecords1, clickHouseRecords2...) // Iterate over recordSlices and build some results to test with expected results for _, record := range recordSlices { var srcPodName, dstPodName string - if record.SourceIP == testFlow1.srcIP && record.DestinationIP == testFlow1.dstIP { + var checkDstSvc bool + if record.SourceIP == testFlow1.srcIP && (record.DestinationIP == testFlow1.dstIP || record.DestinationClusterIP == testFlow1.dstIP) { srcPodName = testFlow1.srcPodName dstPodName = testFlow1.dstPodName - } else if record.SourceIP == testFlow2.srcIP && record.DestinationIP == testFlow2.dstIP { + checkDstSvc = testFlow1.checkDstSvc + } else if record.SourceIP == testFlow2.srcIP && (record.DestinationIP == testFlow2.dstIP || record.DestinationClusterIP == testFlow2.dstIP) { srcPodName = testFlow2.srcPodName dstPodName = testFlow2.dstPodName + checkDstSvc = testFlow2.checkDstSvc } if isIntraNode { @@ -1063,6 +1296,14 @@ func checkRecordsForDenyFlowsClickHouse(t *testing.T, data *TestData, testFlow1, checkPodAndNodeDataClickHouse(data, t, record, srcPodName, controlPlaneNodeName(), dstPodName, workerNodeName(1)) checkFlowTypeClickHouse(t, record, ipfixregistry.FlowTypeInterNode) } + if checkDstSvc { + destinationServicePortName := data.testNamespace + "/" + dstPodName + assert.Contains(t, record.DestinationServicePortName, destinationServicePortName) + assert.Equal(t, iperfSvcPort, int(record.DestinationServicePort)) + } else { + assert.Equal(t, "", record.DestinationServicePortName) + assert.Equal(t, 0, int(record.DestinationServicePort)) + } assert := assert.New(t) if !isANP { // K8s Network Policies if (record.IngressNetworkPolicyRuleAction == ipfixregistry.NetworkPolicyRuleActionDrop) && (record.IngressNetworkPolicyName != ingressDropANPName) { @@ -1108,10 +1349,10 @@ func checkPodAndNodeData(t *testing.T, record, srcPod, srcNode, dstPod, dstNode assert.Contains(record, dstPod, "Record with dstIP does not have Pod name: %s", dstPod) assert.Contains(record, fmt.Sprintf("destinationPodNamespace: %s", namespace), "Record does not have correct destinationPodNamespace: %s", namespace) assert.Contains(record, fmt.Sprintf("destinationNodeName: %s", dstNode), "Record does not have correct destinationNodeName: %s", dstNode) - assert.Contains(record, fmt.Sprintf("{\"antrea-e2e\":\"%s\",\"app\":\"iperf\"}", srcPod), "Record does not have correct label for source Pod") - assert.Contains(record, fmt.Sprintf("{\"antrea-e2e\":\"%s\",\"app\":\"iperf\"}", dstPod), "Record does not have correct label for destination Pod") + assert.Contains(record, fmt.Sprintf("\"antrea-e2e\":\"%s\",\"app\":\"iperf\"", srcPod), "Record does not have correct label for source Pod") + assert.Contains(record, fmt.Sprintf("\"antrea-e2e\":\"%s\",\"app\":\"iperf\"", dstPod), "Record does not have correct label for destination Pod") } else { - assert.Contains(record, 
fmt.Sprintf("{\"antrea-e2e\":\"%s\",\"app\":\"busybox\"}", srcPod), "Record does not have correct label for source Pod") + assert.Contains(record, fmt.Sprintf("\"antrea-e2e\":\"%s\",\"app\":\"busybox\"", srcPod), "Record does not have correct label for source Pod") } } @@ -1127,10 +1368,10 @@ func checkPodAndNodeDataClickHouse(data *TestData, t *testing.T, record *ClickHo assert.Equal(record.DestinationPodName, dstPod, "Record with dstIP does not have Pod name: %s", dstPod) assert.Equal(record.DestinationPodNamespace, data.testNamespace, "Record does not have correct destinationPodNamespace: %s", data.testNamespace) assert.Equal(record.DestinationNodeName, dstNode, "Record does not have correct destinationNodeName: %s", dstNode) - assert.Equal(record.SourcePodLabels, fmt.Sprintf("{\"antrea-e2e\":\"%s\",\"app\":\"iperf\"}", srcPod), "Record does not have correct label for source Pod") - assert.Equal(record.DestinationPodLabels, fmt.Sprintf("{\"antrea-e2e\":\"%s\",\"app\":\"iperf\"}", dstPod), "Record does not have correct label for destination Pod") + assert.Contains(record.SourcePodLabels, fmt.Sprintf("\"antrea-e2e\":\"%s\",\"app\":\"iperf\"", srcPod), "Record does not have correct label for source Pod") + assert.Contains(record.DestinationPodLabels, fmt.Sprintf("\"antrea-e2e\":\"%s\",\"app\":\"iperf\"", dstPod), "Record does not have correct label for destination Pod") } else { - assert.Equal(record.SourcePodLabels, fmt.Sprintf("{\"antrea-e2e\":\"%s\",\"app\":\"busybox\"}", srcPod), "Record does not have correct label for source Pod") + assert.Contains(record.SourcePodLabels, fmt.Sprintf("\"antrea-e2e\":\"%s\",\"app\":\"busybox\"", srcPod), "Record does not have correct label for source Pod") } } @@ -1172,7 +1413,7 @@ func getUint64FieldFromRecord(t *testing.T, record string, field string) uint64 // received all the expected records for a given flow with source IP, destination IP // and source port. We send source port to ignore the control flows during the // iperf test. 
-func getCollectorOutput(t *testing.T, srcIP, dstIP, srcPort string, isDstService bool, checkAllRecords bool, isIPv6 bool, data *TestData) (string, []string) { +func getCollectorOutput(t *testing.T, srcIP, dstIP, srcPort string, isDstService bool, checkAllRecords bool, isIPv6 bool, data *TestData, labelFilter string) (string, []string) { var collectorOutput string var recordSlices []string // In the ToExternalFlows test, flow record will arrive 5.5s (exporterActiveFlowExportTimeout+aggregatorActiveFlowRecordTimeout) after executing wget command @@ -1180,31 +1421,36 @@ func getCollectorOutput(t *testing.T, srcIP, dstIP, srcPort string, isDstService err := wait.PollImmediate(500*time.Millisecond, exporterActiveFlowExportTimeout+aggregatorActiveFlowRecordTimeout*2, func() (bool, error) { var rc int var err error - // `pod-running-timeout` option is added to cover scenarios where ipfix flow-collector has crashed after being deployed - rc, collectorOutput, _, err = data.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl logs --pod-running-timeout=%v ipfix-collector -n %s", aggregatorInactiveFlowRecordTimeout.String(), data.testNamespace)) + var cmd string + ipfixCollectorIP, err := testData.podWaitForIPs(defaultTimeout, "ipfix-collector", testData.testNamespace) + if err != nil || len(ipfixCollectorIP.IPStrings) == 0 { + require.NoErrorf(t, err, "Should be able to get IP from IPFIX collector Pod") + } + if !isIPv6 { + cmd = fmt.Sprintf("curl http://%s:8080/records", ipfixCollectorIP.IPv4.String()) + } else { + cmd = fmt.Sprintf("curl http://[%s]:8080/records", ipfixCollectorIP.IPv6.String()) + } + rc, collectorOutput, _, err = data.RunCommandOnNode(controlPlaneNodeName(), cmd) if err != nil || rc != 0 { return false, err } // Checking that all the data records which correspond to the iperf flow are received - recordSlices = getRecordsFromOutput(collectorOutput) src, dst := matchSrcAndDstAddress(srcIP, dstIP, isDstService, isIPv6) + recordSlices = getRecordsFromOutput(t, collectorOutput, labelFilter, src, dst, srcPort) if checkAllRecords { for _, record := range recordSlices { - flowStartTime := int64(getUint64FieldFromRecord(t, record, "flowStartSeconds")) - exportTime := int64(getUint64FieldFromRecord(t, record, "flowEndSeconds")) flowEndReason := int64(getUint64FieldFromRecord(t, record, "flowEndReason")) - if strings.Contains(record, src) && strings.Contains(record, dst) && strings.Contains(record, srcPort) { - // flowEndReason == 3 means the end of flow detected - if exportTime >= flowStartTime+iperfTimeSec || flowEndReason == 3 { - return true, nil - } + // flowEndReason == 3 means the end of flow detected + if flowEndReason == 3 { + return true, nil } } return false, nil } - return strings.Contains(collectorOutput, src) && strings.Contains(collectorOutput, dst) && strings.Contains(collectorOutput, srcPort), nil + return len(recordSlices) != 0, nil }) - require.NoErrorf(t, err, "IPFIX collector did not receive the expected records in collector output: %v iperf source port: %s", collectorOutput, srcPort) + require.NoErrorf(t, err, "IPFIX collector did not receive the expected records in collector, recordSlices ares: %v, output: %v iperf source port: %s", recordSlices, collectorOutput, srcPort) return collectorOutput, recordSlices } @@ -1212,17 +1458,20 @@ func getCollectorOutput(t *testing.T, srcIP, dstIP, srcPort string, isDstService // received all the expected records for a given flow with source IP, destination IP // and source port. 
We send source port to ignore the control flows during the iperf test. // Polling timeout is coded assuming IPFIX output has been checked first. -func getClickHouseOutput(t *testing.T, data *TestData, srcIP, dstIP, srcPort string, isDstService, checkAllRecords bool) []*ClickHouseFullRow { +func getClickHouseOutput(t *testing.T, data *TestData, srcIP, dstIP, srcPort string, isDstService, checkAllRecords bool, labelFilter string) []*ClickHouseFullRow { var flowRecords []*ClickHouseFullRow var queryOutput string - query := fmt.Sprintf("SELECT * FROM flows WHERE (sourceIP = '%s') AND (destinationIP = '%s')", srcIP, dstIP) + query := fmt.Sprintf("SELECT * FROM flows WHERE (sourceIP = '%s') AND (destinationIP = '%s') AND (octetDeltaCount != 0)", srcIP, dstIP) if isDstService { - query = fmt.Sprintf("SELECT * FROM flows WHERE (sourceIP = '%s') AND (destinationClusterIP = '%s')", srcIP, dstIP) + query = fmt.Sprintf("SELECT * FROM flows WHERE (sourceIP = '%s') AND (destinationClusterIP = '%s') AND (octetDeltaCount != 0)", srcIP, dstIP) } if len(srcPort) > 0 { query = fmt.Sprintf("%s AND (sourceTransportPort = %s)", query, srcPort) } + if labelFilter != "" { + query = fmt.Sprintf("%s AND (sourcePodLabels LIKE '%%%s%%')", query, labelFilter) + } cmd := []string{ "clickhouse-client", "--date_time_output_format=iso", @@ -1236,7 +1485,6 @@ func getClickHouseOutput(t *testing.T, data *TestData, srcIP, dstIP, srcPort str if err != nil { return false, err } - rows := strings.Split(queryOutput, "\n") flowRecords = make([]*ClickHouseFullRow, 0, len(rows)) for _, row := range rows { @@ -1254,10 +1502,8 @@ func getClickHouseOutput(t *testing.T, data *TestData, srcIP, dstIP, srcPort str if checkAllRecords { for _, record := range flowRecords { - flowStartTime := record.FlowStartSeconds.Unix() - exportTime := record.FlowEndSeconds.Unix() // flowEndReason == 3 means the end of flow detected - if exportTime >= flowStartTime+iperfTimeSec || record.FlowEndReason == 3 { + if record.FlowEndReason == 3 { return true, nil } } @@ -1269,12 +1515,28 @@ func getClickHouseOutput(t *testing.T, data *TestData, srcIP, dstIP, srcPort str return flowRecords } -func getRecordsFromOutput(output string) []string { - re := regexp.MustCompile("(?m)^.*" + "#" + ".*$[\r\n]+") - output = re.ReplaceAllString(output, "") - output = strings.TrimSpace(output) - recordSlices := strings.Split(output, "IPFIX-HDR:") - return recordSlices +func getRecordsFromOutput(t *testing.T, output, labelFilter, src, dst, srcPort string) []string { + var response IPFIXCollectorResponse + err := json.Unmarshal([]byte(output), &response) + if err != nil { + require.NoErrorf(t, err, "error when unmarshall output from IPFIX collector Pod") + } + recordSlices := response.FlowRecords + records := []string{} + for _, recordSlice := range recordSlices { + // We don't check the last record. + if strings.Contains(recordSlice, "octetDeltaCount: 0") { + continue + } + // We don't check the record that can't match the srcIP, dstIP and srcPort. 
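Both output paths honor the same per-test label filter: getClickHouseOutput above appends a LIKE clause on sourcePodLabels, and getRecordsFromOutput (continuing below) keeps only record strings that contain the label. A small sketch of the query construction, with placeholder Pod IPs:

package main

import "fmt"

// buildFlowQuery mirrors how the ClickHouse query is assembled: fixed filters on
// source/destination IP and non-zero octetDeltaCount, plus an optional LIKE
// filter on the source Pod labels.
func buildFlowQuery(srcIP, dstIP, labelFilter string) string {
	query := fmt.Sprintf("SELECT * FROM flows WHERE (sourceIP = '%s') AND (destinationIP = '%s') AND (octetDeltaCount != 0)", srcIP, dstIP)
	if labelFilter != "" {
		query = fmt.Sprintf("%s AND (sourcePodLabels LIKE '%%%s%%')", query, labelFilter)
	}
	return query
}

func main() {
	// Placeholder Pod IPs; the e2e test passes the real perftest Pod IPs here.
	fmt.Println(buildFlowQuery("10.10.0.5", "10.10.1.7", "LocalServiceAccess"))
}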
+ if !strings.Contains(recordSlice, src) || !strings.Contains(recordSlice, dst) || !strings.Contains(recordSlice, srcPort) { + continue + } + if labelFilter == "" || strings.Contains(recordSlice, labelFilter) { + records = append(records, recordSlice) + } + } + return records } func deployK8sNetworkPolicies(t *testing.T, data *TestData, srcPod, dstPod string) (np1 *networkingv1.NetworkPolicy, np2 *networkingv1.NetworkPolicy) { @@ -1453,76 +1715,49 @@ func deployDenyNetworkPolicies(t *testing.T, data *TestData, pod1, pod2 string, return np1, np2 } -func createPerftestPods(data *TestData) (podAIPs *PodIPs, podBIPs *PodIPs, podCIPs *PodIPs, podDIPs *PodIPs, podEIPs *PodIPs, err error) { +func createPerftestPods(data *TestData) (*PodIPs, *PodIPs, *PodIPs, *PodIPs, *PodIPs, error) { cmd := []string{"iperf3", "-s"} create := func(name string, nodeName string, ports []corev1.ContainerPort) error { return NewPodBuilder(name, data.testNamespace, toolboxImage).WithContainerName("iperf").WithCommand(cmd).OnNode(nodeName).WithPorts(ports).Create(data) } - - if err := create("perftest-a", controlPlaneNodeName(), nil); err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("Error when creating the perftest client Pod: %v", err) - } - podAIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-a", data.testNamespace) - if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("Error when waiting for the perftest client Pod: %v", err) - } - - if err := create("perftest-b", controlPlaneNodeName(), []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}); err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("Error when creating the perftest server Pod: %v", err) - } - podBIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-b", data.testNamespace) - if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("Error when getting the perftest server Pod's IPs: %v", err) - } - - if err := create("perftest-c", workerNodeName(1), []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}); err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("Error when creating the perftest server Pod: %v", err) - } - podCIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-c", data.testNamespace) - if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("Error when getting the perftest server Pod's IPs: %v", err) - } - - if err := create("perftest-d", controlPlaneNodeName(), []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}); err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("Error when creating the perftest server Pod: %v", err) - } - podDIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-d", data.testNamespace) - if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("Error when getting the perftest server Pod's IPs: %v", err) - } - - if err := create("perftest-e", workerNodeName(1), []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}); err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("Error when creating the perftest server Pod: %v", err) - } - podEIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-e", data.testNamespace) - if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("Error when getting the perftest server Pod's IPs: %v", err) + var err error + var podIPsArray [5]*PodIPs + for i, podName := range podNames { + var nodeName string + if slices.Contains([]string{"perftest-a", "perftest-b", "perftest-d"}, podName) { + nodeName = 
controlPlaneNodeName() + } else { + nodeName = workerNodeName(1) + } + if err := create(podName, nodeName, []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}); err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("error when creating the perftest client Pod: %v", err) + } + podIPsArray[i], err = data.podWaitForIPs(defaultTimeout, podName, data.testNamespace) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("error when waiting for the perftest client Pod: %v", err) + } } - - return podAIPs, podBIPs, podCIPs, podDIPs, podEIPs, nil + return podIPsArray[0], podIPsArray[1], podIPsArray[2], podIPsArray[3], podIPsArray[4], nil } -func createPerftestServices(data *TestData, isIPv6 bool) (svcB *corev1.Service, svcC *corev1.Service, err error) { +func createPerftestServices(data *TestData, isIPv6 bool) (*corev1.Service, *corev1.Service, *corev1.Service, *corev1.Service, *corev1.Service, error) { svcIPFamily := corev1.IPv4Protocol if isIPv6 { svcIPFamily = corev1.IPv6Protocol } - - svcB, err = data.CreateService("perftest-b", data.testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-b"}, false, false, corev1.ServiceTypeClusterIP, &svcIPFamily) - if err != nil { - return nil, nil, fmt.Errorf("Error when creating perftest-b Service: %v", err) - } - - svcC, err = data.CreateService("perftest-c", data.testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-c"}, false, false, corev1.ServiceTypeClusterIP, &svcIPFamily) - if err != nil { - return nil, nil, fmt.Errorf("Error when creating perftest-c Service: %v", err) + var err error + var services [5]*corev1.Service + for i, serviceName := range serviceNames { + services[i], err = data.CreateService(serviceName, data.testNamespace, iperfSvcPort, iperfPort, map[string]string{"antrea-e2e": serviceName}, false, false, corev1.ServiceTypeClusterIP, &svcIPFamily) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("error when creating perftest-b Service: %v", err) + } } - - return svcB, svcC, nil + return services[0], services[1], services[2], services[3], services[4], nil } func deletePerftestServices(t *testing.T, data *TestData) { - for _, serviceName := range []string{"perftest-b", "perftest-c"} { + for _, serviceName := range serviceNames { err := data.deleteService(data.testNamespace, serviceName) if err != nil { t.Logf("Error when deleting %s Service: %v", serviceName, err) @@ -1530,6 +1765,27 @@ func deletePerftestServices(t *testing.T, data *TestData) { } } +func addLabelToTestPods(t *testing.T, data *TestData, label string, podNames []string) { + for _, podName := range podNames { + testPod, err := data.clientset.CoreV1().Pods(data.testNamespace).Get(context.TODO(), podName, metav1.GetOptions{}) + require.NoErrorf(t, err, "Error when getting Pod %s in %s", testPod, data.testNamespace) + testPod.Labels["targetLabel"] = label + _, err = data.clientset.CoreV1().Pods(data.testNamespace).Update(context.TODO(), testPod, metav1.UpdateOptions{}) + require.NoErrorf(t, err, "Error when adding label to %s", testPod.Name) + err = wait.Poll(defaultInterval, timeout, func() (bool, error) { + pod, err := data.clientset.CoreV1().Pods(data.testNamespace).Get(context.TODO(), testPod.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return false, nil + } + return false, fmt.Errorf("error when getting Pod '%s': %w", pod.Name, err) + } + return pod.Labels["targetLabel"] == label, nil + }) + require.NoErrorf(t, err, "Error when verifying the 
label on %s", testPod.Name) + } +} + // getBandwidthAndPorts parses iperf commands output and returns bandwidth, // source port and destination port. Bandwidth is returned as a slice containing // two strings (bandwidth value and bandwidth unit). @@ -1581,6 +1837,34 @@ func createToExternalTestServer(t *testing.T, data *TestData) *PodIPs { return serverIPs } +func getAndCheckFlowAggregatorMetrics(t *testing.T, data *TestData) error { + flowAggPod, err := data.getFlowAggregator() + if err != nil { + return fmt.Errorf("error when getting flow-aggregator Pod: %w", err) + } + podName := flowAggPod.Name + command := []string{"antctl", "get", "recordmetrics", "-o", "json"} + if err := wait.Poll(defaultInterval, 2*defaultTimeout, func() (bool, error) { + stdout, _, err := runAntctl(podName, command, data) + if err != nil { + t.Logf("Error when requesting recordmetrics, %v", err) + return false, nil + } + metrics := &recordmetrics.Response{} + if err := json.Unmarshal([]byte(stdout), metrics); err != nil { + return false, fmt.Errorf("error when decoding recordmetrics: %w", err) + } + if metrics.NumConnToCollector != int64(clusterInfo.numNodes) || !metrics.WithClickHouseExporter || !metrics.WithIPFIXExporter || metrics.NumRecordsExported == 0 { + t.Logf("Metrics are not correct. Current metrics: NumConnToCollector=%d, ClickHouseExporter=%v, IPFIXExporter=%v, NumRecordsExported=%d", metrics.NumConnToCollector, metrics.WithClickHouseExporter, metrics.WithIPFIXExporter, metrics.NumRecordsExported) + return false, nil + } + return true, nil + }); err != nil { + return fmt.Errorf("error when checking recordmetrics for Flow Aggregator: %w", err) + } + return nil +} + type ClickHouseFullRow struct { TimeInserted time.Time `json:"timeInserted"` FlowStartSeconds time.Time `json:"flowStartSeconds"` diff --git a/test/e2e/framework.go b/test/e2e/framework.go index 0c8ad225edd..a371360da6c 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -75,42 +75,44 @@ const ( defaultInterval = 1 * time.Second // antreaNamespace is the K8s Namespace in which all Antrea resources are running. 
- antreaNamespace = "kube-system"
- kubeNamespace = "kube-system"
- flowAggregatorNamespace = "flow-aggregator"
- antreaConfigVolume = "antrea-config"
- antreaWindowsConfigVolume = "antrea-windows-config"
- flowAggregatorConfigVolume = "flow-aggregator-config"
- antreaDaemonSet = "antrea-agent"
- antreaWindowsDaemonSet = "antrea-agent-windows"
- antreaDeployment = "antrea-controller"
- flowAggregatorDeployment = "flow-aggregator"
- flowAggregatorCHSecret = "clickhouse-ca"
- antreaDefaultGW = "antrea-gw0"
- testAntreaIPAMNamespace = "antrea-ipam-test"
- testAntreaIPAMNamespace11 = "antrea-ipam-test-11"
- testAntreaIPAMNamespace12 = "antrea-ipam-test-12"
- busyboxContainerName = "busybox"
- mcjoinContainerName = "mcjoin"
- agnhostContainerName = "agnhost"
- toolboxContainerName = "toolbox"
- nginxContainerName = "nginx"
- controllerContainerName = "antrea-controller"
- ovsContainerName = "antrea-ovs"
- agentContainerName = "antrea-agent"
- antreaYML = "antrea.yml"
- antreaIPSecYML = "antrea-ipsec.yml"
- antreaCovYML = "antrea-coverage.yml"
- antreaIPSecCovYML = "antrea-ipsec-coverage.yml"
- flowAggregatorYML = "flow-aggregator.yml"
- flowAggregatorCovYML = "flow-aggregator-coverage.yml"
- flowVisibilityYML = "flow-visibility.yml"
- flowVisibilityTLSYML = "flow-visibility-tls.yml"
- chOperatorYML = "clickhouse-operator-install-bundle.yml"
- flowVisibilityCHPodName = "chi-clickhouse-clickhouse-0-0-0"
- flowVisibilityNamespace = "flow-visibility"
- defaultBridgeName = "br-int"
- monitoringNamespace = "monitoring"
+ antreaNamespace = "kube-system"
+ kubeNamespace = "kube-system"
+ flowAggregatorNamespace = "flow-aggregator"
+ antreaConfigVolume = "antrea-config"
+ antreaWindowsConfigVolume = "antrea-windows-config"
+ flowAggregatorConfigVolume = "flow-aggregator-config"
+ antreaDaemonSet = "antrea-agent"
+ antreaWindowsDaemonSet = "antrea-agent-windows"
+ antreaDeployment = "antrea-controller"
+ flowAggregatorDeployment = "flow-aggregator"
+ flowAggregatorCHSecret = "clickhouse-ca"
+ antreaDefaultGW = "antrea-gw0"
+ testAntreaIPAMNamespace = "antrea-ipam-test"
+ testAntreaIPAMNamespace11 = "antrea-ipam-test-11"
+ testAntreaIPAMNamespace12 = "antrea-ipam-test-12"
+ busyboxContainerName = "busybox"
+ mcjoinContainerName = "mcjoin"
+ agnhostContainerName = "agnhost"
+ toolboxContainerName = "toolbox"
+ nginxContainerName = "nginx"
+ controllerContainerName = "antrea-controller"
+ ovsContainerName = "antrea-ovs"
+ agentContainerName = "antrea-agent"
+ flowAggregatorContainerName = "flow-aggregator"
+
+ antreaYML = "antrea.yml"
+ antreaIPSecYML = "antrea-ipsec.yml"
+ antreaCovYML = "antrea-coverage.yml"
+ antreaIPSecCovYML = "antrea-ipsec-coverage.yml"
+ flowAggregatorYML = "flow-aggregator.yml"
+ flowAggregatorCovYML = "flow-aggregator-coverage.yml"
+ flowVisibilityYML = "flow-visibility.yml"
+ flowVisibilityTLSYML = "flow-visibility-tls.yml"
+ chOperatorYML = "clickhouse-operator-install-bundle.yml"
+ flowVisibilityCHPodName = "chi-clickhouse-clickhouse-0-0-0"
+ flowVisibilityNamespace = "flow-visibility"
+ defaultBridgeName = "br-int"
+ monitoringNamespace = "monitoring"
 antreaControllerCovBinary = "antrea-controller-coverage"
 antreaAgentCovBinary = "antrea-agent-coverage"
@@ -132,7 +134,7 @@ const (
 nginxImage = "projects.registry.vmware.com/antrea/nginx:1.21.6-alpine"
 iisImage = "mcr.microsoft.com/windows/servercore/iis"
 toolboxImage = "projects.registry.vmware.com/antrea/toolbox:1.2-1"
- ipfixCollectorImage = "projects.registry.vmware.com/antrea/ipfix-collector:v0.6.2"
+ ipfixCollectorImage = "projects.registry.vmware.com/antrea/ipfix-collector:v0.8.2"
 ipfixCollectorPort = "4739"
 clickHouseHTTPPort = "8123"
@@ -148,6 +150,8 @@ const (
 statefulSetRestartAnnotationKey = "antrea-e2e/restartedAt"
 defaultCHDatabaseURL = "tcp://clickhouse-clickhouse.flow-visibility.svc:9000"
+ iperfPort = 5201
+ iperfSvcPort = 9999
 )
 type ClusterNode struct {
@@ -213,12 +217,12 @@ type flowVisibilityTestOptions struct {
 var testOptions TestOptions
-// podInfo combines OS info with a Pod name. It is useful when choosing commands and options on Pods of different OS (Windows, Linux).
-type podInfo struct {
- name string
- os string
- nodeName string
- namespace string
+// PodInfo combines OS info with a Pod name. It is useful when choosing commands and options on Pods of different OS (Windows, Linux).
+type PodInfo struct {
+ Name string
+ OS string
+ NodeName string
+ Namespace string
 }
 // TestData stores the state required for each test case.
@@ -236,9 +240,9 @@ type TestData struct {
 var testData *TestData
 type PodIPs struct {
- ipv4 *net.IP
- ipv6 *net.IP
- ipStrings []string
+ IPv4 *net.IP
+ IPv6 *net.IP
+ IPStrings []string
 }
 type deployAntreaOptions int
@@ -276,23 +280,23 @@ var (
 func (p PodIPs) String() string {
 res := ""
- if p.ipv4 != nil {
- res += fmt.Sprintf("IPv4(%s),", p.ipv4.String())
+ if p.IPv4 != nil {
+ res += fmt.Sprintf("IPv4(%s),", p.IPv4.String())
 }
- if p.ipv6 != nil {
- res += fmt.Sprintf("IPv6(%s),", p.ipv6.String())
+ if p.IPv6 != nil {
+ res += fmt.Sprintf("IPv6(%s),", p.IPv6.String())
 }
- return fmt.Sprintf("%sIPstrings(%s)", res, strings.Join(p.ipStrings, ","))
+ return fmt.Sprintf("%sIPstrings(%s)", res, strings.Join(p.IPStrings, ","))
 }
 func (p *PodIPs) hasSameIP(p1 *PodIPs) bool {
- if len(p.ipStrings) == 0 && len(p1.ipStrings) == 0 {
+ if len(p.IPStrings) == 0 && len(p1.IPStrings) == 0 {
 return true
 }
- if p.ipv4 != nil && p1.ipv4 != nil && p.ipv4.Equal(*(p1.ipv4)) {
+ if p.IPv4 != nil && p1.IPv4 != nil && p.IPv4.Equal(*(p1.IPv4)) {
 return true
 }
- if p.ipv6 != nil && p1.ipv6 != nil && p.ipv6.Equal(*(p1.ipv6)) {
+ if p.IPv6 != nil && p1.IPv6 != nil && p.IPv6.Equal(*(p1.IPv6)) {
 return true
 }
 return false
@@ -1626,10 +1630,10 @@ func (data *TestData) podWaitForIPs(timeout time.Duration, name, namespace strin
 }
 if !pod.Spec.HostNetwork {
- if clusterInfo.podV4NetworkCIDR != "" && ips.ipv4 == nil {
+ if clusterInfo.podV4NetworkCIDR != "" && ips.IPv4 == nil {
 return nil, fmt.Errorf("no IPv4 address is assigned while cluster was configured with IPv4 Pod CIDR %s", clusterInfo.podV4NetworkCIDR)
 }
- if clusterInfo.podV6NetworkCIDR != "" && ips.ipv6 == nil {
+ if clusterInfo.podV6NetworkCIDR != "" && ips.IPv6 == nil {
 return nil, fmt.Errorf("no IPv6 address is assigned while cluster was configured with IPv6 Pod CIDR %s", clusterInfo.podV6NetworkCIDR)
 }
 }
@@ -1642,24 +1646,24 @@ func parsePodIPs(podIPStrings sets.Set[string]) (*PodIPs, error) {
 ipStr := sets.List(podIPStrings)[idx]
 ip := net.ParseIP(ipStr)
 if ip.To4() != nil {
- if ips.ipv4 != nil && ipStr != ips.ipv4.String() {
- return nil, fmt.Errorf("Pod is assigned multiple IPv4 addresses: %s and %s", ips.ipv4.String(), ipStr)
+ if ips.IPv4 != nil && ipStr != ips.IPv4.String() {
+ return nil, fmt.Errorf("Pod is assigned multiple IPv4 addresses: %s and %s", ips.IPv4.String(), ipStr)
 }
- if ips.ipv4 == nil {
- ips.ipv4 = &ip
- ips.ipStrings = append(ips.ipStrings, ipStr)
+ if ips.IPv4 == nil {
+ ips.IPv4 = &ip
+ ips.IPStrings = append(ips.IPStrings, ipStr)
 }
 } else {
- if ips.ipv6 != nil && ipStr != ips.ipv6.String() {
- return nil, fmt.Errorf("Pod is assigned multiple IPv6 addresses: %s and %s", ips.ipv6.String(), ipStr)
+ if ips.IPv6 != nil && ipStr != ips.IPv6.String() {
+ return nil, fmt.Errorf("Pod is assigned multiple IPv6 addresses: %s and %s", ips.IPv6.String(), ipStr)
 }
- if ips.ipv6 == nil {
- ips.ipv6 = &ip
- ips.ipStrings = append(ips.ipStrings, ipStr)
+ if ips.IPv6 == nil {
+ ips.IPv6 = &ip
+ ips.IPStrings = append(ips.IPStrings, ipStr)
 }
 }
 }
- if len(ips.ipStrings) == 0 {
+ if len(ips.IPStrings) == 0 {
 return nil, fmt.Errorf("pod is running but has no assigned IP, which should never happen")
 }
 return ips, nil
@@ -2065,6 +2069,10 @@ func (data *TestData) deleteNetworkpolicy(policy *networkingv1.NetworkPolicy) er
 return nil
 }
+func RandName(prefix string) string {
+ return prefix + randSeq(nameSuffixLength)
+}
+
 // A DNS-1123 subdomain must consist of lower case alphanumeric characters
 var lettersAndDigits = []rune("abcdefghijklmnopqrstuvwxyz0123456789")
@@ -2172,19 +2180,19 @@ func parseArpingStdout(out string) (sent uint32, received uint32, loss float32,
 return sent, received, loss, nil
 }
-func (data *TestData) runPingCommandFromTestPod(podInfo podInfo, ns string, targetPodIPs *PodIPs, ctrName string, count int, size int) error {
- if podInfo.os != "windows" && podInfo.os != "linux" {
- return fmt.Errorf("OS of Pod '%s' is not clear", podInfo.name)
+func (data *TestData) RunPingCommandFromTestPod(podInfo PodInfo, ns string, targetPodIPs *PodIPs, ctrName string, count int, size int) error {
+ if podInfo.OS != "windows" && podInfo.OS != "linux" {
+ return fmt.Errorf("OS of Pod '%s' is not clear", podInfo.Name)
 }
- if targetPodIPs.ipv4 != nil {
- cmdV4 := getPingCommand(count, size, podInfo.os, targetPodIPs.ipv4)
- if stdout, stderr, err := data.RunCommandFromPod(ns, podInfo.name, ctrName, cmdV4); err != nil {
+ if targetPodIPs.IPv4 != nil {
+ cmdV4 := getPingCommand(count, size, podInfo.OS, targetPodIPs.IPv4)
+ if stdout, stderr, err := data.RunCommandFromPod(ns, podInfo.Name, ctrName, cmdV4); err != nil {
 return fmt.Errorf("error when running ping command '%s': %v - stdout: %s - stderr: %s", strings.Join(cmdV4, " "), err, stdout, stderr)
 }
 }
- if targetPodIPs.ipv6 != nil {
- cmdV6 := getPingCommand(count, size, podInfo.os, targetPodIPs.ipv6)
- if stdout, stderr, err := data.RunCommandFromPod(ns, podInfo.name, ctrName, cmdV6); err != nil {
+ if targetPodIPs.IPv6 != nil {
+ cmdV6 := getPingCommand(count, size, podInfo.OS, targetPodIPs.IPv6)
+ if stdout, stderr, err := data.RunCommandFromPod(ns, podInfo.Name, ctrName, cmdV6); err != nil {
 return fmt.Errorf("error when running ping command '%s': %v - stdout: %s - stderr: %s", strings.Join(cmdV6, " "), err, stdout, stderr)
 }
 }
diff --git a/test/e2e/ipsec_test.go b/test/e2e/ipsec_test.go
index 268a7976d01..c51cd5375da 100644
--- a/test/e2e/ipsec_test.go
+++ b/test/e2e/ipsec_test.go
@@ -136,11 +136,11 @@ func testIPSecTunnelConnectivity(t *testing.T, data *TestData, certAuth bool) {
 }
 podInfos, deletePods := createPodsOnDifferentNodes(t, data, data.testNamespace, tag)
 defer deletePods()
- t.Logf("Executing ping tests across Nodes: '%s' <-> '%s'", podInfos[0].nodeName, podInfos[1].nodeName)
+ t.Logf("Executing ping tests across Nodes: '%s' <-> '%s'", podInfos[0].NodeName, podInfos[1].NodeName)
 data.runPingMesh(t, podInfos[:2], agnhostContainerName)
 // Check that there is at least one 'up' Security Association on the Node
- nodeName := podInfos[0].nodeName
+ nodeName := podInfos[0].NodeName
 if up, _, isCertAuth, err := data.readSecurityAssociationsStatus(nodeName); err != nil {
 t.Errorf("Error when reading Security Associations: %v", err)
 } else if up == 0 {
diff --git a/test/e2e/l7networkpolicy_test.go b/test/e2e/l7networkpolicy_test.go
index 067a34c7f8b..378834c80f5 100644
--- a/test/e2e/l7networkpolicy_test.go
+++ b/test/e2e/l7networkpolicy_test.go
@@ -206,11 +206,11 @@ func testL7NetworkPolicyHTTP(t *testing.T, data *TestData) {
 }
 require.NoError(t, data.podWaitForRunning(defaultTimeout, serverPodName, data.testNamespace))
 var serverIPs []*net.IP
- if podIPs.ipv4 != nil {
- serverIPs = append(serverIPs, podIPs.ipv4)
+ if podIPs.IPv4 != nil {
+ serverIPs = append(serverIPs, podIPs.IPv4)
 }
- if podIPs.ipv6 != nil {
- serverIPs = append(serverIPs, podIPs.ipv6)
+ if podIPs.IPv6 != nil {
+ serverIPs = append(serverIPs, podIPs.IPv6)
 }
 l7ProtocolAllowsPathHostname := []crdv1beta1.L7Protocol{
diff --git a/test/e2e/networkpolicy_test.go b/test/e2e/networkpolicy_test.go
index 606d6aa9248..dd8eb1d30b9 100644
--- a/test/e2e/networkpolicy_test.go
+++ b/test/e2e/networkpolicy_test.go
@@ -113,11 +113,11 @@ func testNetworkPolicyStats(t *testing.T, data *TestData) {
 // the first IP packet sent on a tunnel is always dropped because of a missing ARP entry.
 // So we need to "warm-up" the tunnel.
 if clusterInfo.podV4NetworkCIDR != "" {
- cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())}
+ cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.IPv4.String())}
 data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd)
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())}
+ cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.IPv6.String())}
 data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd)
 }
@@ -173,11 +173,11 @@ func testNetworkPolicyStats(t *testing.T, data *TestData) {
 wg.Add(1)
 go func() {
 if clusterInfo.podV4NetworkCIDR != "" {
- cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())}
+ cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.IPv4.String())}
 data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd)
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())}
+ cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.IPv6.String())}
 data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd)
 }
 wg.Done()
@@ -276,11 +276,11 @@ func (data *TestData) setupDifferentNamedPorts(t *testing.T) (checkFn func(), cl
 }
 // Precondition check: client is able to access server with the given IP address.
 if clusterInfo.podV4NetworkCIDR != "" {
- preCheckFunc(server0IPs.ipv4.String(), server1IPs.ipv4.String())
+ preCheckFunc(server0IPs.IPv4.String(), server1IPs.IPv4.String())
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- preCheckFunc(server0IPs.ipv6.String(), server1IPs.ipv6.String())
+ preCheckFunc(server0IPs.IPv6.String(), server1IPs.IPv6.String())
 }
 // Create NetworkPolicy rule.
@@ -339,11 +339,11 @@ func (data *TestData) setupDifferentNamedPorts(t *testing.T) (checkFn func(), cl
 checkFn = func() {
 // NetworkPolicy check.
 if clusterInfo.podV4NetworkCIDR != "" {
- npCheck(server0IPs.ipv4.String(), server1IPs.ipv4.String())
+ npCheck(server0IPs.IPv4.String(), server1IPs.IPv4.String())
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- npCheck(server0IPs.ipv6.String(), server1IPs.ipv6.String())
+ npCheck(server0IPs.IPv6.String(), server1IPs.IPv6.String())
 }
 }
 success = true
@@ -399,17 +399,17 @@ func testDefaultDenyIngressPolicy(t *testing.T, data *TestData) {
 // Locally generated traffic can always access the Pods regardless of NetworkPolicy configuration.
 if clusterInfo.podV4NetworkCIDR != "" {
- npCheck(client1Name, serverIPs.ipv4.String(), serverPort, false)
+ npCheck(client1Name, serverIPs.IPv4.String(), serverPort, false)
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- npCheck(client1Name, serverIPs.ipv6.String(), serverPort, false)
+ npCheck(client1Name, serverIPs.IPv6.String(), serverPort, false)
 }
 if clusterInfo.podV4NetworkCIDR != "" {
- npCheck(client2Name, serverIPs.ipv4.String(), serverPort, true)
+ npCheck(client2Name, serverIPs.IPv4.String(), serverPort, true)
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- npCheck(client2Name, serverIPs.ipv6.String(), serverPort, true)
+ npCheck(client2Name, serverIPs.IPv6.String(), serverPort, true)
 }
 npCheck(client2Name, serverNodeIP, service.Spec.Ports[0].NodePort, true)
 }
@@ -428,10 +428,10 @@ func testDefaultDenyEgressPolicy(t *testing.T, data *TestData) {
 }
 }
 if clusterInfo.podV4NetworkCIDR != "" {
- preCheckFunc(serverIPs.ipv4.String())
+ preCheckFunc(serverIPs.IPv4.String())
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- preCheckFunc(serverIPs.ipv6.String())
+ preCheckFunc(serverIPs.IPv6.String())
 }
 spec := &networkingv1.NetworkPolicySpec{
@@ -456,10 +456,10 @@
 }
 if clusterInfo.podV4NetworkCIDR != "" {
- npCheck(serverIPs.ipv4.String())
+ npCheck(serverIPs.IPv4.String())
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- npCheck(serverIPs.ipv6.String())
+ npCheck(serverIPs.IPv6.String())
 }
 }
@@ -479,12 +479,12 @@ func testEgressToServerInCIDRBlock(t *testing.T, data *TestData) {
 defer cleanupFunc()
 var serverCIDR string
 var serverAIP, serverBIP string
- if serverAIPs.ipv6 == nil {
+ if serverAIPs.IPv6 == nil {
 t.Fatal("server IPv6 address is empty")
 }
- serverCIDR = fmt.Sprintf("%s/128", serverAIPs.ipv6.String())
- serverAIP = serverAIPs.ipv6.String()
- serverBIP = serverBIPs.ipv6.String()
+ serverCIDR = fmt.Sprintf("%s/128", serverAIPs.IPv6.String())
+ serverAIP = serverAIPs.IPv6.String()
+ serverBIP = serverBIPs.IPv6.String()
 if err := data.runNetcatCommandFromTestPod(clientA, data.testNamespace, serverAIP, 80); err != nil {
 t.Fatalf("%s should be able to netcat %s", clientA, serverAName)
@@ -545,16 +545,16 @@ func testEgressToServerInCIDRBlockWithException(t *testing.T, data *TestData) {
 var serverAAllowCIDR string
 var serverAExceptList []string
 var serverAIP string
- if serverAIPs.ipv6 == nil {
+ if serverAIPs.IPv6 == nil {
 t.Fatal("server IPv6 address is empty")
 }
- _, serverAAllowSubnet, err := net.ParseCIDR(fmt.Sprintf("%s/%d", serverAIPs.ipv6.String(), 64))
+ _, serverAAllowSubnet, err := net.ParseCIDR(fmt.Sprintf("%s/%d", serverAIPs.IPv6.String(), 64))
 if err != nil {
 t.Fatalf("could not parse allow subnet")
 }
 serverAAllowCIDR = serverAAllowSubnet.String()
- serverAExceptList = []string{fmt.Sprintf("%s/%d", serverAIPs.ipv6.String(), 128)}
- serverAIP = serverAIPs.ipv6.String()
+ serverAExceptList = []string{fmt.Sprintf("%s/%d", serverAIPs.IPv6.String(), 128)}
+ serverAIP = serverAIPs.IPv6.String()
 if err := data.runNetcatCommandFromTestPod(clientA, data.testNamespace, serverAIP, 80); err != nil {
 t.Fatalf("%s should be able to netcat %s", clientA, serverAName)
@@ -644,10 +644,10 @@ func testNetworkPolicyResyncAfterRestart(t *testing.T, data *TestData) {
 }
 }
 if clusterInfo.podV4NetworkCIDR != "" {
- preCheckFunc(server0IPs.ipv4.String(), server1IPs.ipv4.String())
+ preCheckFunc(server0IPs.IPv4.String(), server1IPs.IPv4.String())
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- preCheckFunc(server0IPs.ipv6.String(), server1IPs.ipv6.String())
+ preCheckFunc(server0IPs.IPv6.String(), server1IPs.IPv6.String())
 }
 scaleFunc := func(replicas int32) {
@@ -701,10 +701,10 @@ func testNetworkPolicyResyncAfterRestart(t *testing.T, data *TestData) {
 }
 if clusterInfo.podV4NetworkCIDR != "" {
- npCheck(server0IPs.ipv4.String(), server1IPs.ipv4.String())
+ npCheck(server0IPs.IPv4.String(), server1IPs.IPv4.String())
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- npCheck(server0IPs.ipv6.String(), server1IPs.ipv6.String())
+ npCheck(server0IPs.IPv6.String(), server1IPs.IPv6.String())
 }
 }
@@ -824,10 +824,10 @@ func testIngressPolicyWithoutPortNumber(t *testing.T, data *TestData) {
 }
 if clusterInfo.podV4NetworkCIDR != "" {
- preCheckFunc(serverIPs.ipv4.String())
+ preCheckFunc(serverIPs.IPv4.String())
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- preCheckFunc(serverIPs.ipv6.String())
+ preCheckFunc(serverIPs.IPv6.String())
 }
 protocol := corev1.ProtocolTCP
@@ -874,10 +874,10 @@ func testIngressPolicyWithoutPortNumber(t *testing.T, data *TestData) {
 }
 if clusterInfo.podV4NetworkCIDR != "" {
- npCheck(serverIPs.ipv4.String())
+ npCheck(serverIPs.IPv4.String())
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- npCheck(serverIPs.ipv6.String())
+ npCheck(serverIPs.IPv6.String())
 }
 }
@@ -964,10 +964,10 @@ func testIngressPolicyWithEndPort(t *testing.T, data *TestData) {
 }
 if clusterInfo.podV4NetworkCIDR != "" {
- preCheck(serverIPs.ipv4.String())
+ preCheck(serverIPs.IPv4.String())
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- preCheck(serverIPs.ipv6.String())
+ preCheck(serverIPs.IPv6.String())
 }
 protocol := corev1.ProtocolTCP
@@ -1026,10 +1026,10 @@ func testIngressPolicyWithEndPort(t *testing.T, data *TestData) {
 }
 if clusterInfo.podV4NetworkCIDR != "" {
- npCheck(serverIPs.ipv4.String())
+ npCheck(serverIPs.IPv4.String())
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- npCheck(serverIPs.ipv6.String())
+ npCheck(serverIPs.IPv6.String())
 }
 }
diff --git a/test/e2e/nodeportlocal_test.go b/test/e2e/nodeportlocal_test.go
index 2e76f7ed7b3..fee7792110b 100644
--- a/test/e2e/nodeportlocal_test.go
+++ b/test/e2e/nodeportlocal_test.go
@@ -123,12 +123,12 @@ func getNPLAnnotations(t *testing.T, data *TestData, r *require.Assertions, test
 }
 testPodIP, err = parsePodIPs(podIPStrings)
- if err != nil || testPodIP.ipv4 == nil {
+ if err != nil || testPodIP.IPv4 == nil {
 return false, nil
 }
 ann := pod.GetAnnotations()
- t.Logf("Got annotations %v for Pod with IP %v", ann, testPodIP.ipv4.String())
+ t.Logf("Got annotations %v for Pod with IP %v", ann, testPodIP.IPv4.String())
 nplAnn, found := ann[types.NPLAnnotationKey]
 if !found {
 return false, nil
@@ -150,7 +150,7 @@ func getNPLAnnotations(t *testing.T, data *TestData, r *require.Assertions, test
 time.Sleep(time.Millisecond * 100)
 }
 r.NoError(err, "Poll for Pod check failed")
- return nplAnnotations, testPodIP.ipv4.String()
+ return nplAnnotations, testPodIP.IPv4.String()
 }
 func checkNPLRules(t *testing.T, data *TestData, r *require.Assertions, nplAnnotations []types.NPLAnnotation, antreaPod, podIP string, nodeName string, present bool) {
diff --git a/test/e2e/performance_test.go b/test/e2e/performance_test.go
index ee88b58e1c1..1b73b9ec88e 100644
--- a/test/e2e/performance_test.go
+++ b/test/e2e/performance_test.go
@@ -212,7 +212,7 @@ func httpRequest(requests, policyRules int, data *TestData, b *testing.B) {
 nginxPodIP, _ := setupTestPods(data, b)
 // performance_test only runs in IPv4 cluster, so here only check the IPv4 address of nginx server Pod.
- nginxPodIPStr := nginxPodIP.ipv4.String()
+ nginxPodIPStr := nginxPodIP.IPv4.String()
 err := setupTestPodsConnection(data) // enable Pods connectivity policy first
 if err != nil {
diff --git a/test/e2e/proxy_test.go b/test/e2e/proxy_test.go
index 76e78aee429..ff75a762208 100644
--- a/test/e2e/proxy_test.go
+++ b/test/e2e/proxy_test.go
@@ -166,9 +166,9 @@ func testProxyLoadBalancerService(t *testing.T, isIPv6 bool) {
 podName, ips, _ := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, fmt.Sprintf("busybox-%d-", idx), node, data.testNamespace, false)
 busyboxes = append(busyboxes, podName)
 if !isIPv6 {
- busyboxIPs = append(busyboxIPs, ips.ipv4.String())
+ busyboxIPs = append(busyboxIPs, ips.IPv4.String())
 } else {
- busyboxIPs = append(busyboxIPs, ips.ipv6.String())
+ busyboxIPs = append(busyboxIPs, ips.IPv6.String())
 }
 }
@@ -313,9 +313,9 @@ func testProxyNodePortService(t *testing.T, isIPv6 bool) {
 podName, ips, _ := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, fmt.Sprintf("busybox-%d-", idx), node, data.testNamespace, false)
 busyboxes = append(busyboxes, podName)
 if !isIPv6 {
- busyboxIPs = append(busyboxIPs, ips.ipv4.String())
+ busyboxIPs = append(busyboxIPs, ips.IPv4.String())
 } else {
- busyboxIPs = append(busyboxIPs, ips.ipv6.String())
+ busyboxIPs = append(busyboxIPs, ips.IPv6.String())
 }
 }
@@ -642,14 +642,14 @@ func testProxyServiceSessionAffinity(ipFamily *corev1.IPFamily, ingressIPs []str
 require.NoError(t, err)
 if *ipFamily == corev1.IPv4Protocol {
 require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("nw_dst=%s,tp_dst=80", svc.Spec.ClusterIP))
- require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("load:0x%s->NXM_NX_REG3[]", strings.TrimLeft(hex.EncodeToString(nginxIP.ipv4.To4()), "0")))
+ require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("load:0x%s->NXM_NX_REG3[]", strings.TrimLeft(hex.EncodeToString(nginxIP.IPv4.To4()), "0")))
 for _, ingressIP := range ingressIPs {
 require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("nw_dst=%s,tp_dst=80", ingressIP))
 }
 } else {
 require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("ipv6_dst=%s,tp_dst=80", svc.Spec.ClusterIP))
- require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("load:0x%s->NXM_NX_XXREG3[0..63]", strings.TrimLeft(hex.EncodeToString([]byte(*nginxIP.ipv6)[8:16]), "0")))
- require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("load:0x%s->NXM_NX_XXREG3[64..127]", strings.TrimLeft(hex.EncodeToString([]byte(*nginxIP.ipv6)[0:8]), "0")))
+ require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("load:0x%s->NXM_NX_XXREG3[0..63]", strings.TrimLeft(hex.EncodeToString([]byte(*nginxIP.IPv6)[8:16]), "0")))
+ require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("load:0x%s->NXM_NX_XXREG3[64..127]", strings.TrimLeft(hex.EncodeToString([]byte(*nginxIP.IPv6)[0:8]), "0")))
 for _, ingressIP := range ingressIPs {
 require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("ipv6_dst=%s,tp_dst=80", ingressIP))
 }
@@ -910,9 +910,9 @@ func testProxyEndpointLifeCycle(ipFamily *corev1.IPFamily, data *TestData, t *te
 require.NoError(t, err)
 var nginxIP string
 if *ipFamily == corev1.IPv6Protocol {
- nginxIP = nginxIPs.ipv6.String()
+ nginxIP = nginxIPs.IPv6.String()
 } else {
- nginxIP = nginxIPs.ipv4.String()
+ nginxIP = nginxIPs.IPv4.String()
 }
 keywords := make(map[string]string)
@@ -921,9 +921,9 @@ func testProxyEndpointLifeCycle(ipFamily *corev1.IPFamily, data *TestData, t *te
 var groupKeywords []string
 if *ipFamily == corev1.IPv6Protocol {
 groupKeywords = append(groupKeywords,
- fmt.Sprintf("load:0x%s->NXM_NX_XXREG3[0..63],load:0x%s->NXM_NX_XXREG3[64..127]", strings.TrimLeft(hex.EncodeToString((*nginxIPs.ipv6)[8:16]), "0"), strings.TrimLeft(hex.EncodeToString((*nginxIPs.ipv6)[:8]), "0")))
+ fmt.Sprintf("load:0x%s->NXM_NX_XXREG3[0..63],load:0x%s->NXM_NX_XXREG3[64..127]", strings.TrimLeft(hex.EncodeToString((*nginxIPs.IPv6)[8:16]), "0"), strings.TrimLeft(hex.EncodeToString((*nginxIPs.IPv6)[:8]), "0")))
 } else {
- groupKeywords = append(groupKeywords, fmt.Sprintf("0x%s->NXM_NX_REG3[]", strings.TrimLeft(hex.EncodeToString(nginxIPs.ipv4.To4()), "0")))
+ groupKeywords = append(groupKeywords, fmt.Sprintf("0x%s->NXM_NX_REG3[]", strings.TrimLeft(hex.EncodeToString(nginxIPs.IPv4.To4()), "0")))
 }
 for tableName, keyword := range keywords {
@@ -997,9 +997,9 @@ func testProxyServiceLifeCycle(ipFamily *corev1.IPFamily, ingressIPs []string, d
 require.NoError(t, err)
 var nginxIP string
 if *ipFamily == corev1.IPv6Protocol {
- nginxIP = nginxIPs.ipv6.String()
+ nginxIP = nginxIPs.IPv6.String()
 } else {
- nginxIP = nginxIPs.ipv4.String()
+ nginxIP = nginxIPs.IPv4.String()
 }
 svc, err := data.createNginxClusterIPService(nginx, data.testNamespace, false, ipFamily)
 defer data.deleteServiceAndWait(defaultTimeout, nginx, data.testNamespace)
@@ -1044,11 +1044,11 @@ func testProxyServiceLifeCycle(ipFamily *corev1.IPFamily, ingressIPs []string, d
 var groupKeyword string
 if *ipFamily == corev1.IPv6Protocol {
 groupKeyword = fmt.Sprintf("load:0x%s->NXM_NX_XXREG3[0..63],load:0x%s->NXM_NX_XXREG3[64..127],load:0x%x->NXM_NX_REG4[0..15]",
- strings.TrimLeft(hex.EncodeToString(nginxIPs.ipv6.To16()[8:16]), "0"),
- strings.TrimLeft(hex.EncodeToString(nginxIPs.ipv6.To16()[:8]), "0"),
+ strings.TrimLeft(hex.EncodeToString(nginxIPs.IPv6.To16()[8:16]), "0"),
+ strings.TrimLeft(hex.EncodeToString(nginxIPs.IPv6.To16()[:8]), "0"),
 80)
 } else {
- groupKeyword = fmt.Sprintf("load:0x%s->NXM_NX_REG3[],load:0x%x->NXM_NX_REG4[0..15]", strings.TrimLeft(hex.EncodeToString(nginxIPs.ipv4.To4()), "0"), 80)
+ groupKeyword = fmt.Sprintf("load:0x%s->NXM_NX_REG3[],load:0x%x->NXM_NX_REG4[0..15]", strings.TrimLeft(hex.EncodeToString(nginxIPs.IPv4.To4()), "0"), 80)
 }
 groupOutput, _, err := data.RunCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-groups", defaultBridgeName})
 require.NoError(t, err)
@@ -1137,7 +1137,7 @@ func TestProxyLoadBalancerModeDSR(t *testing.T) {
 ingressNodeIP := controlPlaneNodeIPv4()
 ipProtocol := corev1.IPv4Protocol
 lbIP := "1.1.2.1"
- internalClientIP := internalClientIPs.ipv4.String()
+ internalClientIP := internalClientIPs.IPv4.String()
 externalClientIP := "1.1.1.1"
 externalClientGateway := "1.1.1.254"
 externalIPPrefix := 24
diff --git a/test/e2e/service_test.go b/test/e2e/service_test.go
index 79d7f8bc2cb..4f4830ed5ce 100644
--- a/test/e2e/service_test.go
+++ b/test/e2e/service_test.go
@@ -162,7 +162,7 @@ func (data *TestData) testNodePort(t *testing.T, isWindows bool, clientNamespace
 defer data.DeletePodAndWait(defaultTimeout, clientName, clientNamespace)
 podIPs, err := data.podWaitForIPs(defaultTimeout, clientName, clientNamespace)
 require.NoError(t, err)
- t.Logf("Created client Pod IPs %v", podIPs.ipStrings)
+ t.Logf("Created client Pod IPs %v", podIPs.IPStrings)
 nodeIP := clusterInfo.nodes[0].ip()
 nodePort := int(svc.Spec.Ports[0].NodePort)
@@ -189,8 +189,8 @@ func (data *TestData) createAgnhostServiceAndBackendPods(t *testing.T, name, nam
 }).Create(data))
 podIPs, err := data.podWaitForIPs(defaultTimeout, name, namespace)
 require.NoError(t, err)
- t.Logf("Created service Pod IPs %v", podIPs.ipStrings)
- if podIPs.ipv4 == nil {
+ t.Logf("Created service Pod IPs %v", podIPs.IPStrings)
+ if podIPs.IPv4 == nil {
 // "IPv4" is invalid in IPv6 only cluster with K8s>=1.21
 // error: Service "s1" is invalid: spec.ipFamilies[0]: Invalid value: "IPv4": not configured on this cluster
 ipProtocol = corev1.IPv6Protocol
diff --git a/test/e2e/supportbundle_test.go b/test/e2e/supportbundle_test.go
index 403fe566795..a7a51e07cd0 100644
--- a/test/e2e/supportbundle_test.go
+++ b/test/e2e/supportbundle_test.go
@@ -78,7 +78,7 @@ func testSupportBundle(name string, t *testing.T) {
 podIP, err := data.podWaitForIPs(defaultTimeout, podName, metav1.NamespaceSystem)
 require.NoError(t, err)
- for _, podIPStr := range podIP.ipStrings {
+ for _, podIPStr := range podIP.IPStrings {
 getAndCheckSupportBundle(t, name, podIPStr, podPort, token, podName, data)
 }
 }
diff --git a/test/e2e/traceflow_test.go b/test/e2e/traceflow_test.go
index 028fb648fa5..0248cb1c9f2 100644
--- a/test/e2e/traceflow_test.go
+++ b/test/e2e/traceflow_test.go
@@ -319,17 +319,17 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
 // Containerd configures port asynchronously, which could cause execution time of installing flow longer than docker.
 time.Sleep(time.Second * 1)
 var pod0IPv4Str, pod1IPv4Str, dstPodIPv4Str, dstPodIPv6Str string
- if node1IPs[0].ipv4 != nil {
- pod0IPv4Str = node1IPs[0].ipv4.String()
+ if node1IPs[0].IPv4 != nil {
+ pod0IPv4Str = node1IPs[0].IPv4.String()
 }
- if node1IPs[1].ipv4 != nil {
- pod1IPv4Str = node1IPs[1].ipv4.String()
+ if node1IPs[1].IPv4 != nil {
+ pod1IPv4Str = node1IPs[1].IPv4.String()
 }
- if node1IPs[2].ipv4 != nil {
- dstPodIPv4Str = node1IPs[2].ipv4.String()
+ if node1IPs[2].IPv4 != nil {
+ dstPodIPv4Str = node1IPs[2].IPv4.String()
 }
- if node1IPs[2].ipv6 != nil {
- dstPodIPv6Str = node1IPs[2].ipv6.String()
+ if node1IPs[2].IPv6 != nil {
+ dstPodIPv6Str = node1IPs[2].IPv6.String()
 }
 gwIPv4Str, gwIPv6Str := nodeGatewayIPs(nodeIdx)
@@ -1105,11 +1105,11 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
 defer node1CleanupFn()
 defer node2CleanupFn()
 var dstPodIPv4Str, dstPodIPv6Str string
- if node2IPs[0].ipv4 != nil {
- dstPodIPv4Str = node2IPs[0].ipv4.String()
+ if node2IPs[0].IPv4 != nil {
+ dstPodIPv4Str = node2IPs[0].IPv4.String()
 }
- if node2IPs[0].ipv6 != nil {
- dstPodIPv6Str = node2IPs[0].ipv6.String()
+ if node2IPs[0].IPv6 != nil {
+ dstPodIPv6Str = node2IPs[0].IPv6.String()
 }
 // Create Service backend Pod. The "hairpin" testcases require the Service to have a single backend Pod,
@@ -1120,15 +1120,15 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
 require.NoError(t, err)
 var agnhostIPv4Str, agnhostIPv6Str, svcIPv4Name, svcIPv6Name string
- if agnhostIP.ipv4 != nil {
- agnhostIPv4Str = agnhostIP.ipv4.String()
+ if agnhostIP.IPv4 != nil {
+ agnhostIPv4Str = agnhostIP.IPv4.String()
 ipv4Protocol := corev1.IPv4Protocol
 svcIPv4, err := data.CreateService("agnhost-ipv4", data.testNamespace, 80, 8080, map[string]string{"app": "agnhost-server"}, false, false, corev1.ServiceTypeClusterIP, &ipv4Protocol)
 require.NoError(t, err)
 svcIPv4Name = svcIPv4.Name
 }
- if agnhostIP.ipv6 != nil {
- agnhostIPv6Str = agnhostIP.ipv6.String()
+ if agnhostIP.IPv6 != nil {
+ agnhostIPv6Str = agnhostIP.IPv6.String()
 ipv6Protocol := corev1.IPv6Protocol
 svcIPv6, err := data.CreateService("agnhost-ipv6", data.testNamespace, 80, 8080, map[string]string{"app": "agnhost-server"}, false, false, corev1.ServiceTypeClusterIP, &ipv6Protocol)
 require.NoError(t, err)
@@ -1140,13 +1140,13 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
 // dropped on tunnel because the ARP entry doesn't exist in host cache.
 isWindows := len(clusterInfo.windowsNodes) != 0
 if isWindows {
- podInfos := make([]podInfo, 2)
- podInfos[0].name = node1Pods[0]
- podInfos[0].namespace = data.testNamespace
- podInfos[0].os = "windows"
- podInfos[1].name = node2Pods[2]
- podInfos[1].namespace = data.testNamespace
- podInfos[1].os = "windows"
+ podInfos := make([]PodInfo, 2)
+ podInfos[0].Name = node1Pods[0]
+ podInfos[0].Namespace = data.testNamespace
+ podInfos[0].OS = "windows"
+ podInfos[1].Name = node2Pods[2]
+ podInfos[1].Namespace = data.testNamespace
+ podInfos[1].OS = "windows"
 data.runPingMesh(t, podInfos, agnhostContainerName)
 }
@@ -2458,19 +2458,19 @@ func runTestTraceflow(t *testing.T, data *TestData, tc testcase) {
 if dstIP := tc.tf.Spec.Destination.IP; dstIP != "" {
 ip := net.ParseIP(dstIP)
 if ip.To4() != nil {
- dstPodIPs = &PodIPs{ipv4: &ip}
+ dstPodIPs = &PodIPs{IPv4: &ip}
 } else {
- dstPodIPs = &PodIPs{ipv6: &ip}
+ dstPodIPs = &PodIPs{IPv6: &ip}
 }
 } else {
 dstPod := tc.tf.Spec.Destination.Pod
- podIPs := waitForPodIPs(t, data, []podInfo{{dstPod, osString, "", ""}})
+ podIPs := waitForPodIPs(t, data, []PodInfo{{dstPod, osString, "", ""}})
 dstPodIPs = podIPs[dstPod]
 }
 // Give a little time for Nodes to install OVS flows.
 time.Sleep(time.Second * 2)
 // Send an ICMP echo packet from the source Pod to the destination.
- if err := data.runPingCommandFromTestPod(podInfo{srcPod, osString, "", ""}, data.testNamespace, dstPodIPs, agnhostContainerName, 2, 0); err != nil {
+ if err := data.RunPingCommandFromTestPod(PodInfo{srcPod, osString, "", ""}, data.testNamespace, dstPodIPs, agnhostContainerName, 2, 0); err != nil {
 t.Logf("Ping '%s' -> '%v' failed: ERROR (%v)", srcPod, *dstPodIPs, err)
 }
 }
diff --git a/test/e2e/trafficcontrol_test.go b/test/e2e/trafficcontrol_test.go
index 834464b7162..77a534ffc05 100644
--- a/test/e2e/trafficcontrol_test.go
+++ b/test/e2e/trafficcontrol_test.go
@@ -89,11 +89,11 @@ func createTrafficControlTestPod(t *testing.T, data *TestData, podName string) {
 }
 require.NoError(t, data.podWaitForRunning(defaultTimeout, podName, data.testNamespace))
- if ips.ipv4 != nil {
- tcTestConfig.podIPs[corev1.IPv4Protocol] = ips.ipv4.String()
+ if ips.IPv4 != nil {
+ tcTestConfig.podIPs[corev1.IPv4Protocol] = ips.IPv4.String()
 }
- if ips.ipv6 != nil {
- tcTestConfig.podIPs[corev1.IPv6Protocol] = ips.ipv6.String()
+ if ips.IPv6 != nil {
+ tcTestConfig.podIPs[corev1.IPv6Protocol] = ips.IPv6.String()
 }
 }
@@ -105,11 +105,11 @@ func createTrafficControlPacketsCollectorPod(t *testing.T, data *TestData, podNa
 }
 require.NoError(t, data.podWaitForRunning(defaultTimeout, podName, data.testNamespace))
- if ips.ipv4 != nil {
- tcTestConfig.collectorPodIPs[corev1.IPv4Protocol] = ips.ipv4.String()
+ if ips.IPv4 != nil {
+ tcTestConfig.collectorPodIPs[corev1.IPv4Protocol] = ips.IPv4.String()
 }
- if ips.ipv6 != nil {
- tcTestConfig.collectorPodIPs[corev1.IPv6Protocol] = ips.ipv6.String()
+ if ips.IPv6 != nil {
+ tcTestConfig.collectorPodIPs[corev1.IPv6Protocol] = ips.IPv6.String()
 }
 }
diff --git a/test/e2e/wireguard_test.go b/test/e2e/wireguard_test.go
index 87f9c786074..b746ffe07a7 100644
--- a/test/e2e/wireguard_test.go
+++ b/test/e2e/wireguard_test.go
@@ -89,11 +89,11 @@ func testPodConnectivity(t *testing.T, data *TestData) {
 }
 podIPs := waitForPodIPs(t, data, podInfos)
 for _, pi := range podInfos {
- if pi.os == "linux" && pi.nodeName != nodeName(0) {
- if podIPs[pi.name].ipv4 != nil {
- peerPodIP = podIPs[pi.name].ipv4.String()
+ if pi.OS == "linux" && pi.NodeName != nodeName(0) {
+ if podIPs[pi.Name].IPv4 != nil {
+ peerPodIP = podIPs[pi.Name].IPv4.String()
 } else {
- peerPodIP = podIPs[pi.name].ipv6.String()
+ peerPodIP = podIPs[pi.Name].IPv6.String()
 }
 break
 }