From 0cde7a55a0192b1c9d71c7a07f9ad0b24c992e9a Mon Sep 17 00:00:00 2001
From: Yongming Ding
Date: Tue, 16 Mar 2021 16:16:55 -0700
Subject: [PATCH] Add IPv4/v6 dual stack support in Flow aggregator

Fix the flow aggregator e2e test failure in dual-stack clusters.
---
 test/e2e/fixtures.go            |  6 ------
 test/e2e/flowaggregator_test.go | 15 ++++++++++++---
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/test/e2e/fixtures.go b/test/e2e/fixtures.go
index 9f15fed5992..7c86c24993b 100644
--- a/test/e2e/fixtures.go
+++ b/test/e2e/fixtures.go
@@ -68,12 +68,6 @@ func skipIfNotIPv6Cluster(tb testing.TB) {
 	}
 }
 
-func skipIfDualStackCluster(tb testing.TB) {
-	if clusterInfo.podV6NetworkCIDR != "" && clusterInfo.podV4NetworkCIDR != "" {
-		tb.Skipf("Skipping test as it is not supported in dual stack cluster")
-	}
-}
-
 func skipIfMissingKernelModule(tb testing.TB, nodeName string, requiredModules []string) {
 	for _, module := range requiredModules {
 		// modprobe with "--dry-run" does not require root privileges
diff --git a/test/e2e/flowaggregator_test.go b/test/e2e/flowaggregator_test.go
index 1ea578c1212..ad7b68d2cc7 100644
--- a/test/e2e/flowaggregator_test.go
+++ b/test/e2e/flowaggregator_test.go
@@ -102,7 +102,6 @@ const (
 )
 
 func TestFlowAggregator(t *testing.T) {
-	skipIfDualStackCluster(t)
 	data, isIPv6, err := setupTestWithIPFIXCollector(t)
 	if err != nil {
 		t.Fatalf("Error when setting up test: %v", err)
@@ -170,7 +169,12 @@ func TestFlowAggregator(t *testing.T) {
 		if !isIPv6 {
 			checkRecordsForFlows(t, data, podAIP.ipv4.String(), svcB.Spec.ClusterIP, isIPv6, true, true, false, false)
 		} else {
-			checkRecordsForFlows(t, data, podAIP.ipv6.String(), svcB.Spec.ClusterIP, isIPv6, true, true, false, false)
+			// Perftest service may be assigned an IPv4 ClusterIP in a dual-stack cluster
+			if strings.Count(svcB.Spec.ClusterIP, ":") >= 2 {
+				checkRecordsForFlows(t, data, podAIP.ipv6.String(), svcB.Spec.ClusterIP, isIPv6, true, true, false, false)
+			} else {
+				checkRecordsForFlows(t, data, podAIP.ipv4.String(), svcB.Spec.ClusterIP, false, true, true, false, false)
+			}
 		}
 	})
 
@@ -180,7 +184,11 @@ func TestFlowAggregator(t *testing.T) {
 		if !isIPv6 {
 			checkRecordsForFlows(t, data, podAIP.ipv4.String(), svcC.Spec.ClusterIP, isIPv6, false, true, false, true)
 		} else {
-			checkRecordsForFlows(t, data, podAIP.ipv6.String(), svcC.Spec.ClusterIP, isIPv6, false, true, false, true)
+			if strings.Count(svcC.Spec.ClusterIP, ":") >= 2 {
+				checkRecordsForFlows(t, data, podAIP.ipv6.String(), svcC.Spec.ClusterIP, isIPv6, false, true, false, true)
+			} else {
+				checkRecordsForFlows(t, data, podAIP.ipv4.String(), svcC.Spec.ClusterIP, false, false, true, false, true)
+			}
 		}
 	})
 }
@@ -198,6 +206,7 @@ func checkRecordsForFlows(t *testing.T, data *TestData, srcIP string, dstIP stri
 		t.Errorf("Error when running iperf3 client: %v", err)
 	}
 	bandwidth := strings.TrimSpace(stdout)
+	t.Logf("cmd:%s, srcIP:%s, dstIP:%s, isIPv6:%v, stdout:%s", cmdStr, srcIP, dstIP, isIPv6, stdout)
 	// Polling to make sure all the data records corresponding to the iperf flow
 	// are received.