From 8db2547974a8e8b5c5a7f811a9cf55a75dde8a99 Mon Sep 17 00:00:00 2001
From: Thomas Guettler
Date: Thu, 18 Jan 2024 14:34:05 +0100
Subject: [PATCH] :seedling: Fixed e2e tests.

---
 test/e2e/caphv.go                  | 28 ++++++++++++++++++++++++++--
 test/e2e/config/hivelocity-ci.yaml |  2 +-
 2 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/test/e2e/caphv.go b/test/e2e/caphv.go
index 7e7342765..94f4ffb94 100644
--- a/test/e2e/caphv.go
+++ b/test/e2e/caphv.go
@@ -19,8 +19,10 @@ package e2e
 import (
 	"context"
 	"fmt"
+	"log"
 	"os"
 	"path/filepath"
+	"runtime/trace"
 
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
@@ -60,7 +62,7 @@ func CaphvClusterDeploymentSpec(ctx context.Context, inputGetter func() CaphvClu
 		gomega.Expect(input.E2EConfig).ToNot(gomega.BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
 		gomega.Expect(input.ClusterctlConfigPath).To(gomega.BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
 		gomega.Expect(input.BootstrapClusterProxy).ToNot(gomega.BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
-		gomega.Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(gomega.Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
+		gomega.Expect(os.MkdirAll(input.ArtifactFolder, 0o750)).To(gomega.Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
 		gomega.Expect(input.E2EConfig.Variables).To(gomega.HaveKey(KubernetesVersion))
 		gomega.Expect(input.E2EConfig.Variables).To(gomega.HaveKey(HivelocityRegion))
 		gomega.Expect(input.E2EConfig.Variables).To(HaveValidVersion(input.E2EConfig.GetVariable(KubernetesVersion)))
@@ -74,7 +76,27 @@ func CaphvClusterDeploymentSpec(ctx context.Context, inputGetter func() CaphvClu
 		clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
 	})
 
+	traceFile := filepath.Join(input.ArtifactFolder, "trace.out")
+	f, err := os.Create(traceFile)
+	if err != nil {
+		log.Fatal("could not open file for tracing output: ", err)
+	}
+	if err := trace.Start(f); err != nil {
+		log.Fatal("could not start trace: ", err)
+	}
 	ginkgo.It("Should successfully create a cluster with control planes and worker machines", func() {
+		traceFile := filepath.Join(input.ArtifactFolder, "trace.out")
+		f, err := os.Create(traceFile)
+		if err != nil {
+			log.Fatal("could not open file for tracing output: ", err)
+		}
+		defer f.Close()
+		if err := trace.Start(f); err != nil {
+			log.Fatal("could not start trace: ", err)
+		}
+		defer trace.Stop()
+		fmt.Printf("Created trace file ----------------- %s\n", traceFile)
+
 		ginkgo.By(fmt.Sprintf("Creating a cluster: %d control-planes %d worker", input.ControlPlaneMachineCount, input.WorkerMachineCount))
 
 		clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
@@ -98,7 +120,9 @@ func CaphvClusterDeploymentSpec(ctx context.Context, inputGetter func() CaphvClu
 
 		ginkgo.By("PASSED!")
 	})
-
+	trace.Stop()
+	fmt.Printf("Created trace file ----------------- %s\n", traceFile)
+	f.Close()
 	ginkgo.AfterEach(func() {
 		// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
 		dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
diff --git a/test/e2e/config/hivelocity-ci.yaml b/test/e2e/config/hivelocity-ci.yaml
index d8569f08f..1f6324ab3 100644
--- a/test/e2e/config/hivelocity-ci.yaml
+++ b/test/e2e/config/hivelocity-ci.yaml
@@ -128,4 +128,4 @@ intervals:
   default/wait-cluster: ["5m", "10s"] ## wait until Infrastructure == ready and ControlPlaneEndpoint is valid
   default/wait-control-plane: ["26m", "10s"] ## wait until first control plane is ready.
   default/wait-worker-nodes: ["26m", "10s"] ## wait until all workers are ready from the moment when the control plane is ready
-  default/wait-delete-cluster: ["20m", "10s"] ## wait until cluster is deleted
+  default/wait-delete-cluster: ["5m", "10s"] ## wait until cluster is deleted
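
Note: the runtime/trace boilerplate above is inlined twice (once at spec-construction time and once inside the ginkgo.It body). A minimal sketch of how the same calls could be kept in one place is shown below; the startTrace helper and its placement are illustrative only and are not part of the patch or the repository.

// Sketch only, assuming package e2e and an artifact folder path as input.
package e2e

import (
	"log"
	"os"
	"path/filepath"
	"runtime/trace"
)

// startTrace writes a Go execution trace to <artifactFolder>/trace.out and
// returns a stop function that ends the trace and closes the file.
func startTrace(artifactFolder string) func() {
	traceFile := filepath.Join(artifactFolder, "trace.out")
	f, err := os.Create(traceFile)
	if err != nil {
		log.Fatal("could not open file for tracing output: ", err)
	}
	if err := trace.Start(f); err != nil {
		log.Fatal("could not start trace: ", err)
	}
	return func() {
		trace.Stop()
		if err := f.Close(); err != nil {
			log.Fatal("could not close trace file: ", err)
		}
	}
}

Inside the It body this would be used as "stop := startTrace(input.ArtifactFolder)" followed by "defer stop()", and the resulting trace.out can then be inspected with "go tool trace".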