diff --git a/test/e2e/config/operator.yaml b/test/e2e/config/operator.yaml
index a4b851c7d..ef9a81015 100644
--- a/test/e2e/config/operator.yaml
+++ b/test/e2e/config/operator.yaml
@@ -27,6 +27,7 @@ intervals:
   default/wait-turtles-uninstall: ["10m", "30s"]

 variables:
+  MANAGEMENT_CLUSTER_INFRASTRUCTURE: "isolated-kind" # supported options are eks, isolated-kind, kind
   RANCHER_VERSION: "v2.8.1"
   KUBERNETES_VERSION: "v1.28.6"
   KUBERNETES_MANAGEMENT_VERSION: "v1.27.0"
diff --git a/test/e2e/const.go b/test/e2e/const.go
index 9a723ffd7..e94e4fbdd 100644
--- a/test/e2e/const.go
+++ b/test/e2e/const.go
@@ -95,7 +95,17 @@ const (
 	NginxIngressDeployment = "ingress-nginx-controller"
 )

+type ManagementClusterInfrastuctureType string
+
 const (
+	ManagementClusterInfrastuctureEKS          ManagementClusterInfrastuctureType = "eks"
+	ManagementClusterInfrastuctureIsolatedKind ManagementClusterInfrastuctureType = "isolated-kind"
+	ManagementClusterInfrastuctureKind         ManagementClusterInfrastuctureType = "kind"
+)
+
+const (
+	ManagementClusterInfrastucture = "MANAGEMENT_CLUSTER_INFRASTRUCTURE"
+
 	KubernetesManagementVersionVar = "KUBERNETES_MANAGEMENT_VERSION"
 	KubernetesVersionVar           = "KUBERNETES_VERSION"
diff --git a/test/e2e/flags.go b/test/e2e/flags.go
index 249007b55..3be038b8c 100644
--- a/test/e2e/flags.go
+++ b/test/e2e/flags.go
@@ -30,9 +30,6 @@ type FlagValues struct {
 	// UseExistingCluster instructs the test to use the current cluster instead of creating a new one (default discovery rules apply).
 	UseExistingCluster bool

-	// UseEKS instructs the test to create an EKS cluster instead of using kind.
-	UseEKS bool
-
 	// ArtifactFolder is the folder to store e2e test artifacts.
 	ArtifactFolder string

@@ -48,10 +45,6 @@ type FlagValues struct {
 	// ChartPath is the path to the operator chart.
 	ChartPath string

-	// IsolatedMode instructs the test to run without ngrok and exposing the cluster to the internet. This setup will only work with CAPD
-	// or other providers that run in the same network as the bootstrap cluster.
-	IsolatedMode bool
-
 	// ClusterctlBinaryPath is the path to the clusterctl binary to use.
 	ClusterctlBinaryPath string

@@ -65,11 +58,9 @@ func InitFlags(values *FlagValues) {
 	flag.StringVar(&values.ArtifactFolder, "e2e.artifacts-folder", "_artifacts", "folder where e2e test artifact should be stored")
 	flag.BoolVar(&values.SkipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped")
 	flag.BoolVar(&values.UseExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)")
-	flag.BoolVar(&values.UseEKS, "e2e.use-eks", true, "if true, the test uses EKS for the management cluster")
 	flag.StringVar(&values.HelmBinaryPath, "e2e.helm-binary-path", "helm", "path to the helm binary")
 	flag.StringVar(&values.HelmExtraValuesDir, "e2e.helm-extra-values-path", "/tmp", "path to the extra values file")
 	flag.StringVar(&values.ClusterctlBinaryPath, "e2e.clusterctl-binary-path", "helm", "path to the clusterctl binary")
 	flag.StringVar(&values.ChartPath, "e2e.chart-path", "", "path to the operator chart")
-	flag.BoolVar(&values.IsolatedMode, "e2e.isolated-mode", false, "if true, the test will run without ngrok and exposing the cluster to the internet.
This setup will only work with CAPD or other providers that run in the same network as the bootstrap cluster.") flag.BoolVar(&values.GiteaCustomIngress, "e2e.gitea-custom-ingress", false, "if true, the test will use a custom ingress for Gitea") } diff --git a/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go b/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go index 4e0fa034f..93cf3c4c8 100644 --- a/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go +++ b/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go @@ -31,9 +31,7 @@ import ( . "github.com/onsi/gomega" "github.com/rancher/turtles/test/e2e" "github.com/rancher/turtles/test/framework" - turtlesframework "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" - corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" @@ -84,26 +82,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - dockerUsername := "" - dockerPassword := "" - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") - dockerUsername = os.Getenv("GITHUB_USERNAME") - Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") - dockerPassword = os.Getenv("GITHUB_TOKEN") - Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) @@ -115,16 +94,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -137,32 +115,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - - if flagVals.UseEKS { - By("Getting ingress hostname") - svcRes := &testenv.WaitForServiceIngressHostnameResult{} - testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - ServiceName: "ingress-nginx-controller", - 
ServiceNamespace: "ingress-nginx", - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }, svcRes) - hostName = svcRes.Hostname - - By("Deploying ghcr details") - framework.CreateDockerRegistrySecret(ctx, framework.CreateDockerRegistrySecretInput{ - Name: "regcred", - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - Namespace: "rancher-turtles-system", - DockerServer: "https://ghcr.io", - DockerUsername: dockerUsername, - DockerPassword: dockerPassword, - }) - } - // NOTE: deploy Rancher first with the embedded-cluster-api feature disabled. // and the deploy Rancher Turtles. rancherInput := testenv.DeployRancherInput{ @@ -177,7 +129,6 @@ var _ = BeforeSuite(func() { RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherHost: hostName, RancherNamespace: e2e.RancherNamespace, RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), RancherFeatures: "embedded-cluster-api=false", @@ -186,14 +137,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } - if flagVals.UseEKS { - rancherInput.RancherIngressClassName = "nginx" - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) rtInput := testenv.DeployRancherTurtlesInput{ @@ -201,7 +156,7 @@ var _ = BeforeSuite(func() { HelmBinaryPath: flagVals.HelmBinaryPath, ChartPath: flagVals.ChartPath, CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), Tag: "v0.0.1", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), @@ -210,14 +165,9 @@ var _ = BeforeSuite(func() { "rancherTurtles.features.embedded-capi.disabled": "false", }, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. 
- rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + testenv.DeployRancherTurtles(ctx, rtInput) // NOTE: there are no short or local tests in this suite @@ -233,7 +183,7 @@ var _ = BeforeSuite(func() { }, CAPIProvidersYAML: e2e.FullProviders, TemplateData: map[string]string{ - "AWSEncodedCredentials": e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar), + "AWSEncodedCredentials": awsCreds, }, WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), WaitForDeployments: []testenv.NamespaceName{ @@ -248,21 +198,7 @@ var _ = BeforeSuite(func() { }, }) - giteaValues := map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - } - - giteaServiceType := corev1.ServiceTypeNodePort - if flagVals.UseEKS { - giteaServiceType = corev1.ServiceTypeLoadBalancer - } - - if flagVals.GiteaCustomIngress { - giteaServiceType = corev1.ServiceTypeClusterIP - } - - giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ + giteaInput := testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), @@ -270,16 +206,22 @@ var _ = BeforeSuite(func() { ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), ValuesFilePath: "../../data/gitea/values.yaml", - Values: giteaValues, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - ServiceType: giteaServiceType, - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - }) + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult = testenv.DeployGitea(ctx, giteaInput) }) var _ = AfterSuite(func() { diff --git a/test/e2e/suites/embedded-capi-disabled/suite_test.go b/test/e2e/suites/embedded-capi-disabled/suite_test.go index b29c9c77a..4f38d3a76 100644 --- a/test/e2e/suites/embedded-capi-disabled/suite_test.go +++ b/test/e2e/suites/embedded-capi-disabled/suite_test.go @@ -29,14 +29,12 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" "github.com/rancher/turtles/test/e2e" "github.com/rancher/turtles/test/framework" - turtlesframework "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" ) @@ -85,26 +83,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - dockerUsername := "" - dockerPassword := "" - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") - dockerUsername = os.Getenv("GITHUB_USERNAME") - Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") - dockerPassword = os.Getenv("GITHUB_TOKEN") - Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) @@ -116,16 +95,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -138,32 +116,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - - if flagVals.UseEKS { - By("Getting ingress hostname") - svcRes := &testenv.WaitForServiceIngressHostnameResult{} - testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - ServiceName: "ingress-nginx-controller", - ServiceNamespace: "ingress-nginx", - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }, svcRes) - hostName = svcRes.Hostname - - By("Deploying ghcr details") - framework.CreateDockerRegistrySecret(ctx, framework.CreateDockerRegistrySecretInput{ - Name: "regcred", - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - Namespace: "rancher-turtles-system", - DockerServer: "https://ghcr.io", - DockerUsername: dockerUsername, - DockerPassword: dockerPassword, - }) - } - // NOTE: deploy 
Rancher first with the embedded-cluster-api feature disabled. // and the deploy Rancher Turtles. rancherInput := testenv.DeployRancherInput{ @@ -178,7 +130,6 @@ var _ = BeforeSuite(func() { RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherHost: hostName, RancherNamespace: e2e.RancherNamespace, RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), RancherFeatures: "embedded-cluster-api=false", @@ -187,14 +138,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } - if flagVals.UseEKS { - rancherInput.RancherIngressClassName = "nginx" - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) rtInput := testenv.DeployRancherTurtlesInput{ @@ -202,7 +157,7 @@ var _ = BeforeSuite(func() { HelmBinaryPath: flagVals.HelmBinaryPath, ChartPath: flagVals.ChartPath, CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), Tag: "v0.0.1", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), @@ -212,14 +167,9 @@ var _ = BeforeSuite(func() { "rancherTurtles.features.managementv3-cluster.enabled": "false", }, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. 
- rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + testenv.DeployRancherTurtles(ctx, rtInput) // NOTE: there are no short or local tests in this suite @@ -235,7 +185,7 @@ var _ = BeforeSuite(func() { }, CAPIProvidersYAML: e2e.FullProviders, TemplateData: map[string]string{ - "AWSEncodedCredentials": e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar), + "AWSEncodedCredentials": awsCreds, }, WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), WaitForDeployments: []testenv.NamespaceName{ @@ -250,21 +200,7 @@ var _ = BeforeSuite(func() { }, }) - giteaValues := map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - } - - giteaServiceType := corev1.ServiceTypeNodePort - if flagVals.UseEKS { - giteaServiceType = corev1.ServiceTypeLoadBalancer - } - - if flagVals.GiteaCustomIngress { - giteaServiceType = corev1.ServiceTypeClusterIP - } - - giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ + giteaInput := testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), @@ -272,16 +208,22 @@ var _ = BeforeSuite(func() { ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), ValuesFilePath: "../../data/gitea/values.yaml", - Values: giteaValues, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - ServiceType: giteaServiceType, - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - }) + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult = testenv.DeployGitea(ctx, giteaInput) }) var _ = AfterSuite(func() { diff --git a/test/e2e/suites/import-gitops-v3/suite_test.go b/test/e2e/suites/import-gitops-v3/suite_test.go index 1db18d201..9733b23ea 100644 --- a/test/e2e/suites/import-gitops-v3/suite_test.go +++ b/test/e2e/suites/import-gitops-v3/suite_test.go @@ -30,10 +30,8 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/rancher/turtles/test/e2e" - "github.com/rancher/turtles/test/framework" turtlesframework "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" - corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" @@ -84,26 +82,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - dockerUsername := "" - dockerPassword := "" - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") - dockerUsername = os.Getenv("GITHUB_USERNAME") - Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") - dockerPassword = os.Getenv("GITHUB_TOKEN") - Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) @@ -115,16 +94,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -137,32 +115,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - - if flagVals.UseEKS { - By("Getting ingress hostname") - svcRes := &testenv.WaitForServiceIngressHostnameResult{} - testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - ServiceName: "ingress-nginx-controller", - ServiceNamespace: "ingress-nginx", - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }, svcRes) - hostName = svcRes.Hostname - - By("Deploying ghcr details") - framework.CreateDockerRegistrySecret(ctx, framework.CreateDockerRegistrySecretInput{ - Name: "regcred", - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - Namespace: "rancher-turtles-system", - DockerServer: "https://ghcr.io", - DockerUsername: dockerUsername, - DockerPassword: dockerPassword, - }) - } - rancherInput := 
testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, @@ -175,7 +127,6 @@ var _ = BeforeSuite(func() { RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherHost: hostName, RancherNamespace: e2e.RancherNamespace, RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), RancherPatches: [][]byte{e2e.RancherSettingPatch}, @@ -183,14 +134,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } - if flagVals.UseEKS { - rancherInput.RancherIngressClassName = "nginx" - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) rtInput := testenv.DeployRancherTurtlesInput{ @@ -206,14 +161,9 @@ var _ = BeforeSuite(func() { "rancherTurtles.features.addon-provider-fleet.enabled": "true", }, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. 
- rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + testenv.DeployRancherTurtles(ctx, rtInput) if !shortTestOnly() && !localTestOnly() { @@ -229,7 +179,7 @@ var _ = BeforeSuite(func() { }, CAPIProvidersYAML: e2e.FullProviders, TemplateData: map[string]string{ - "AWSEncodedCredentials": e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar), + "AWSEncodedCredentials": awsCreds, }, WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), WaitForDeployments: []testenv.NamespaceName{ @@ -245,21 +195,7 @@ var _ = BeforeSuite(func() { }) } - giteaValues := map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - } - - giteaServiceType := corev1.ServiceTypeNodePort - if flagVals.UseEKS { - giteaServiceType = corev1.ServiceTypeLoadBalancer - } - - if flagVals.GiteaCustomIngress { - giteaServiceType = corev1.ServiceTypeClusterIP - } - - giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ + giteaInput := testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), @@ -267,16 +203,22 @@ var _ = BeforeSuite(func() { ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), ValuesFilePath: "../../data/gitea/values.yaml", - Values: giteaValues, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - ServiceType: giteaServiceType, - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - }) + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult = testenv.DeployGitea(ctx, giteaInput) }) var _ = AfterSuite(func() { diff --git a/test/e2e/suites/import-gitops/suite_test.go b/test/e2e/suites/import-gitops/suite_test.go index db86bf141..f429bc53f 100644 --- a/test/e2e/suites/import-gitops/suite_test.go +++ b/test/e2e/suites/import-gitops/suite_test.go @@ -30,14 +30,12 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/rancher/turtles/test/e2e" - opframework "github.com/rancher/turtles/test/framework" - turtlesframework "github.com/rancher/turtles/test/framework" + "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" - "sigs.k8s.io/cluster-api/test/framework" + capiframework "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" ) @@ -87,26 +85,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - dockerUsername := "" - dockerPassword := "" - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") - dockerUsername = os.Getenv("GITHUB_USERNAME") - Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") - dockerPassword = os.Getenv("GITHUB_TOKEN") - Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) @@ -118,16 +97,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -140,32 +118,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - - if flagVals.UseEKS { - By("Getting ingress hostname") - svcRes := &testenv.WaitForServiceIngressHostnameResult{} - testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - ServiceName: "ingress-nginx-controller", - ServiceNamespace: "ingress-nginx", - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }, svcRes) - hostName = svcRes.Hostname - - By("Deploying ghcr details") - opframework.CreateDockerRegistrySecret(ctx, opframework.CreateDockerRegistrySecretInput{ - Name: "regcred", 
- BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - Namespace: "rancher-turtles-system", - DockerServer: "https://ghcr.io", - DockerUsername: dockerUsername, - DockerPassword: dockerPassword, - }) - } - rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, @@ -178,7 +130,6 @@ var _ = BeforeSuite(func() { RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherHost: hostName, RancherNamespace: e2e.RancherNamespace, RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), RancherPatches: [][]byte{e2e.RancherSettingPatch}, @@ -186,14 +137,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } - if flagVals.UseEKS { - rancherInput.RancherIngressClassName = "nginx" - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) if shortTestOnly() { @@ -202,7 +157,7 @@ var _ = BeforeSuite(func() { HelmBinaryPath: flagVals.HelmBinaryPath, ChartPath: "https://rancher.github.io/turtles", CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Version: "v0.6.0", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: map[string]string{}, @@ -219,7 +174,7 @@ var _ = BeforeSuite(func() { upgradeInput := testenv.UpgradeRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), Tag: "v0.0.1", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), @@ -227,21 +182,14 @@ var _ = BeforeSuite(func() { PostUpgradeSteps: []func(){}, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. 
- rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) rtInput.AdditionalValues["rancherTurtles.features.addon-provider-fleet.enabled"] = "true" rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller upgradeInput.PostUpgradeSteps = append(upgradeInput.PostUpgradeSteps, func() { By("Waiting for CAAPF deployment to be available") - framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{ + capiframework.WaitForDeploymentsAvailable(ctx, capiframework.WaitForDeploymentsAvailableInput{ Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ Name: "caapf-controller-manager", @@ -257,20 +205,14 @@ var _ = BeforeSuite(func() { HelmBinaryPath: flagVals.HelmBinaryPath, ChartPath: flagVals.ChartPath, CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), Tag: "v0.0.1", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: map[string]string{}, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller testenv.DeployRancherTurtles(ctx, rtInput) @@ -322,21 +264,7 @@ var _ = BeforeSuite(func() { }) } - giteaValues := map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - } - - giteaServiceType := corev1.ServiceTypeNodePort - if flagVals.UseEKS { - giteaServiceType = corev1.ServiceTypeLoadBalancer - } - - if flagVals.GiteaCustomIngress { - giteaServiceType = corev1.ServiceTypeClusterIP - } - - giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ + giteaInput := testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), @@ -344,16 +272,22 @@ var _ = BeforeSuite(func() { ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), ValuesFilePath: "../../data/gitea/values.yaml", - Values: giteaValues, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - ServiceType: giteaServiceType, - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - }) + Values: map[string]string{ + 
"gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult = testenv.DeployGitea(ctx, giteaInput) }) var _ = AfterSuite(func() { @@ -366,7 +300,7 @@ var _ = AfterSuite(func() { testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), }) diff --git a/test/e2e/suites/migrate-gitops/suite_test.go b/test/e2e/suites/migrate-gitops/suite_test.go index c514e1432..2a95077ae 100644 --- a/test/e2e/suites/migrate-gitops/suite_test.go +++ b/test/e2e/suites/migrate-gitops/suite_test.go @@ -30,14 +30,13 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/rancher/turtles/test/e2e" - turtlesframework "github.com/rancher/turtles/test/framework" + "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" - "sigs.k8s.io/cluster-api/test/framework" + capiframework "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" ) @@ -90,19 +89,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ UseExistingCluster: flagVals.UseExistingCluster, @@ -111,16 +98,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: 
filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -133,10 +119,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, @@ -157,11 +139,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) rtInput := testenv.DeployRancherTurtlesInput{ @@ -169,11 +158,14 @@ var _ = BeforeSuite(func() { HelmBinaryPath: flagVals.HelmBinaryPath, ChartPath: "https://rancher.github.io/turtles", CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Version: "v0.6.0", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: map[string]string{}, } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + testenv.DeployRancherTurtles(ctx, rtInput) testenv.DeployChartMuseum(ctx, testenv.DeployChartMuseumInput{ @@ -186,7 +178,7 @@ var _ = BeforeSuite(func() { upgradeInput := testenv.UpgradeRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), Tag: "v0.0.1", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), @@ -195,13 +187,14 @@ var _ = BeforeSuite(func() { // NOTE: this was the default previously in the chart locally and ok as // we where loading the image into kind manually. 
- rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" rtInput.AdditionalValues["rancherTurtles.features.addon-provider-fleet.enabled"] = "true" rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller + testenv.PreRancherTurtlesUpgradelHook(&upgradeInput, e2eConfig) + upgradeInput.PostUpgradeSteps = append(upgradeInput.PostUpgradeSteps, func() { By("Waiting for CAAPF deployment to be available") - framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{ + capiframework.WaitForDeploymentsAvailable(ctx, capiframework.WaitForDeploymentsAvailableInput{ Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ Name: "caapf-controller-manager", @@ -212,21 +205,7 @@ var _ = BeforeSuite(func() { testenv.UpgradeRancherTurtles(ctx, upgradeInput) - giteaValues := map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - } - - giteaServiceType := corev1.ServiceTypeNodePort - if flagVals.UseEKS { - giteaServiceType = corev1.ServiceTypeLoadBalancer - } - - if flagVals.GiteaCustomIngress { - giteaServiceType = corev1.ServiceTypeClusterIP - } - - giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ + giteaInput := testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), @@ -234,16 +213,22 @@ var _ = BeforeSuite(func() { ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), ValuesFilePath: "../../data/gitea/values.yaml", - Values: giteaValues, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - ServiceType: giteaServiceType, - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - }) + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult = testenv.DeployGitea(ctx, giteaInput) }) var _ = AfterSuite(func() { @@ -256,7 +241,7 @@ var _ = AfterSuite(func() { testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, DeleteWaitInterval: 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), }) diff --git a/test/e2e/suites/update-labels/suite_test.go b/test/e2e/suites/update-labels/suite_test.go index 5afa860b9..8384ddf9b 100644 --- a/test/e2e/suites/update-labels/suite_test.go +++ b/test/e2e/suites/update-labels/suite_test.go @@ -35,7 +35,6 @@ import ( "github.com/rancher/turtles/test/e2e" "github.com/rancher/turtles/test/framework" - turtlesframework "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" ) @@ -83,26 +82,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - dockerUsername := "" - dockerPassword := "" - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") - dockerUsername = os.Getenv("GITHUB_USERNAME") - Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") - dockerPassword = os.Getenv("GITHUB_TOKEN") - Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) @@ -114,16 +94,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -136,32 +115,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - - if flagVals.UseEKS { - By("Getting ingress hostname") - svcRes := &testenv.WaitForServiceIngressHostnameResult{} - testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - ServiceName: "ingress-nginx-controller", - ServiceNamespace: "ingress-nginx", - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }, svcRes) - hostName = svcRes.Hostname - - By("Deploying ghcr details") - framework.CreateDockerRegistrySecret(ctx, framework.CreateDockerRegistrySecretInput{ - Name: "regcred", - BootstrapClusterProxy: 
setupClusterResult.BootstrapClusterProxy, - Namespace: "rancher-turtles-system", - DockerServer: "https://ghcr.io", - DockerUsername: dockerUsername, - DockerPassword: dockerPassword, - }) - } - rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, @@ -174,7 +127,6 @@ var _ = BeforeSuite(func() { RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherHost: hostName, RancherNamespace: e2e.RancherNamespace, RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), RancherPatches: [][]byte{e2e.RancherSettingPatch}, @@ -182,14 +134,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } - if flagVals.UseEKS { - rancherInput.RancherIngressClassName = "nginx" - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) rtInput := testenv.DeployRancherTurtlesInput{ @@ -197,7 +153,7 @@ var _ = BeforeSuite(func() { HelmBinaryPath: flagVals.HelmBinaryPath, ChartPath: flagVals.ChartPath, CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), Tag: "v0.0.1", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), @@ -206,14 +162,9 @@ var _ = BeforeSuite(func() { "rancherTurtles.features.rancher-kubeconfigs.label": "true", // force to be true even if the default in teh chart changes }, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. 
- rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + testenv.DeployRancherTurtles(ctx, rtInput) testenv.RestartRancher(ctx, testenv.RestartRancherInput{ diff --git a/test/e2e/suites/v2prov/suite_test.go b/test/e2e/suites/v2prov/suite_test.go index 71f05cd84..e9022a89c 100644 --- a/test/e2e/suites/v2prov/suite_test.go +++ b/test/e2e/suites/v2prov/suite_test.go @@ -34,7 +34,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "github.com/rancher/turtles/test/e2e" - "github.com/rancher/turtles/test/framework" turtlesframework "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" ) @@ -83,26 +82,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - dockerUsername := "" - dockerPassword := "" - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") - dockerUsername = os.Getenv("GITHUB_USERNAME") - Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") - dockerPassword = os.Getenv("GITHUB_TOKEN") - Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) @@ -114,16 +94,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -136,32 +115,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - - if flagVals.UseEKS { - By("Getting ingress hostname") - svcRes := &testenv.WaitForServiceIngressHostnameResult{} - testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - ServiceName: "ingress-nginx-controller", - ServiceNamespace: "ingress-nginx", - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }, svcRes) - hostName = svcRes.Hostname - - By("Deploying ghcr 
details") - framework.CreateDockerRegistrySecret(ctx, framework.CreateDockerRegistrySecretInput{ - Name: "regcred", - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - Namespace: "rancher-turtles-system", - DockerServer: "https://ghcr.io", - DockerUsername: dockerUsername, - DockerPassword: dockerPassword, - }) - } - rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, @@ -175,7 +128,6 @@ var _ = BeforeSuite(func() { RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), Development: true, - RancherHost: hostName, RancherNamespace: e2e.RancherNamespace, RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), RancherFeatures: e2eConfig.GetVariable(e2e.RancherFeaturesVar), @@ -184,14 +136,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } - if flagVals.UseEKS { - rancherInput.RancherIngressClassName = "nginx" - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) rtInput := testenv.DeployRancherTurtlesInput{ @@ -205,14 +161,9 @@ var _ = BeforeSuite(func() { WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: map[string]string{}, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + testenv.DeployRancherTurtles(ctx, rtInput) testenv.RestartRancher(ctx, testenv.RestartRancherInput{ diff --git a/test/testenv/gitea.go b/test/testenv/gitea.go index 263d72804..396ad9b53 100644 --- a/test/testenv/gitea.go +++ b/test/testenv/gitea.go @@ -25,13 +25,14 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/rancher/turtles/test/e2e" + turtlesframework "github.com/rancher/turtles/test/framework" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" opframework "sigs.k8s.io/cluster-api-operator/test/framework" "sigs.k8s.io/cluster-api/test/framework" - - turtlesframework "github.com/rancher/turtles/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) type DeployGiteaInput struct { @@ -236,3 +237,16 @@ func UninstallGitea(ctx context.Context, input UninstallGiteaInput) { _, err := removeChart.Run(nil) Expect(err).ToNot(HaveOccurred()) } + +func PreGiteaInstallHook(giteaInput *DeployGiteaInput, e2eConfig *clusterctl.E2EConfig) { + switch e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture)) { + case e2e.ManagementClusterInfrastuctureEKS: + giteaInput.ServiceType = corev1.ServiceTypeLoadBalancer + case e2e.ManagementClusterInfrastuctureIsolatedKind: + giteaInput.ServiceType = corev1.ServiceTypeNodePort + case e2e.ManagementClusterInfrastuctureKind: + giteaInput.ServiceType = corev1.ServiceTypeClusterIP + default: + Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture))) + } +} diff --git a/test/testenv/rancher.go b/test/testenv/rancher.go index 9c7a19306..10a64bf3b 100644 --- a/test/testenv/rancher.go +++ b/test/testenv/rancher.go @@ -18,6 +18,7 @@ package testenv import ( "context" + "fmt" "io/ioutil" "os" @@ -32,6 +33,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" opframework "sigs.k8s.io/cluster-api-operator/test/framework" "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest/komega" "sigs.k8s.io/yaml" @@ -414,3 +416,62 @@ func deployNgrokIngress(ctx context.Context, input RancherDeployIngressInput) { By("Setting up default ingress class") Expect(input.BootstrapClusterProxy.Apply(ctx, input.DefaultIngressClassPatch, "--server-side")).To(Succeed()) } + +type PreRancherInstallHookInput struct { + Ctx context.Context + RancherInput *DeployRancherInput + PreSetupOutput PreManagementClusterSetupResult + SetupClusterResult *SetupTestClusterResult + E2EConfig *clusterctl.E2EConfig +} + +type PreRancherInstallHookResult struct { + HostName string +} + +// PreRancherInstallHook is a hook that can be used to perform actions before Rancher is installed. 
+func PreRancherInstallHook(input *PreRancherInstallHookInput) PreRancherInstallHookResult {
+	hostName := ""
+
+	switch e2e.ManagementClusterInfrastuctureType(input.E2EConfig.GetVariable(e2e.ManagementClusterInfrastucture)) {
+	case e2e.ManagementClusterInfrastuctureEKS:
+		By("Getting ingress hostname")
+		svcRes := &WaitForServiceIngressHostnameResult{}
+		WaitForServiceIngressHostname(input.Ctx, WaitForServiceIngressHostnameInput{
+			BootstrapClusterProxy: input.SetupClusterResult.BootstrapClusterProxy,
+			ServiceName:           "ingress-nginx-controller",
+			ServiceNamespace:      "ingress-nginx",
+			IngressWaitInterval:   input.E2EConfig.GetIntervals(input.SetupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
+		}, svcRes)
+
+		hostName = svcRes.Hostname
+		input.RancherInput.RancherHost = hostName
+
+		By("Deploying ghcr details")
+		turtlesframework.CreateDockerRegistrySecret(input.Ctx, turtlesframework.CreateDockerRegistrySecretInput{
+			Name:                  "regcred",
+			BootstrapClusterProxy: input.SetupClusterResult.BootstrapClusterProxy,
+			Namespace:             "rancher-turtles-system",
+			DockerServer:          "https://ghcr.io",
+			DockerUsername:        input.PreSetupOutput.DockerUsername,
+			DockerPassword:        input.PreSetupOutput.DockerPassword,
+		})
+
+		input.RancherInput.RancherIngressClassName = "nginx"
+	case e2e.ManagementClusterInfrastuctureIsolatedKind:
+		hostName = input.SetupClusterResult.IsolatedHostName
+		input.RancherInput.RancherHost = hostName
+	case e2e.ManagementClusterInfrastuctureKind:
+		// i.e. we are using ngrok locally
+		input.RancherInput.RancherIngressConfig = e2e.IngressConfig
+		input.RancherInput.RancherServicePatch = e2e.RancherServicePatch
+		hostName = input.E2EConfig.GetVariable(e2e.RancherHostnameVar)
+		input.RancherInput.RancherHost = hostName
+	default:
+		Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", input.E2EConfig.GetVariable(e2e.ManagementClusterInfrastucture)))
+	}
+
+	return PreRancherInstallHookResult{
+		HostName: hostName,
+	}
+}
diff --git a/test/testenv/setupcluster.go b/test/testenv/setupcluster.go
index e1814ecc0..91dc55859 100644
--- a/test/testenv/setupcluster.go
+++ b/test/testenv/setupcluster.go
@@ -25,6 +25,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	"github.com/rancher/turtles/test/e2e"
 
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"sigs.k8s.io/cluster-api/test/framework"
@@ -43,7 +44,6 @@ type SetupTestClusterInput struct {
 	ArtifactFolder string
 	// Hostname string
 	KubernetesVersion string
-	IsolatedMode      bool
 	HelmBinaryPath    string
 	CustomClusterProvider CustomClusterProvider
 }
@@ -145,3 +145,35 @@ func getInternalClusterHostname(ctx context.Context, clusterProxy framework.Clus
 func createClusterName(baseName string) string {
 	return fmt.Sprintf("%s-%s", baseName, util.RandomString(6))
 }
+
+// PreManagementClusterSetupResult is the output of PreManagementClusterSetupHook.
+type PreManagementClusterSetupResult struct {
+	IngressType           IngressType
+	DockerUsername        string
+	DockerPassword        string
+	CustomClusterProvider CustomClusterProvider
+}
+
+// PreManagementClusterSetupHook is a hook that can be used to perform actions before the management cluster is set up.
+func PreManagementClusterSetupHook(e2eConfig *clusterctl.E2EConfig) PreManagementClusterSetupResult {
+	output := PreManagementClusterSetupResult{}
+
+	switch e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture)) {
+	case e2e.ManagementClusterInfrastuctureEKS:
+		output.DockerUsername = os.Getenv("GITHUB_USERNAME")
+		Expect(output.DockerUsername).NotTo(BeEmpty(), "Github username is required")
+		output.DockerPassword = os.Getenv("GITHUB_TOKEN")
+		Expect(output.DockerPassword).NotTo(BeEmpty(), "Github token is required")
+		output.CustomClusterProvider = EKSBootsrapCluster
+		Expect(output.CustomClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required")
+		output.IngressType = EKSNginxIngress
+	case e2e.ManagementClusterInfrastuctureIsolatedKind:
+		output.IngressType = CustomIngress
+	case e2e.ManagementClusterInfrastuctureKind:
+		output.IngressType = NgrokIngress
+	default:
+		Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture)))
+	}
+
+	return output
+}
diff --git a/test/testenv/turtles.go b/test/testenv/turtles.go
index 8a5525893..632aa8c62 100644
--- a/test/testenv/turtles.go
+++ b/test/testenv/turtles.go
@@ -26,12 +26,13 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	"github.com/rancher/turtles/test/e2e"
+	turtlesframework "github.com/rancher/turtles/test/framework"
 
 	appsv1 "k8s.io/api/apps/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	opframework "sigs.k8s.io/cluster-api-operator/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework"
-
-	turtlesframework "github.com/rancher/turtles/test/framework"
+	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 )
 
 type DeployRancherTurtlesInput struct {
@@ -292,3 +293,37 @@ func UninstallRancherTurtles(ctx context.Context, input UninstallRancherTurtlesI
 	_, err := removeChart.Run(nil)
 	Expect(err).ToNot(HaveOccurred())
 }
+
+// PreRancherTurtlesInstallHook is a hook that can be used to perform actions before Rancher Turtles is installed.
+func PreRancherTurtlesInstallHook(rtInput *DeployRancherTurtlesInput, e2eConfig *clusterctl.E2EConfig) {
+	switch e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture)) {
+	case e2e.ManagementClusterInfrastuctureEKS:
+		rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}"
+		rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent"
+	case e2e.ManagementClusterInfrastuctureIsolatedKind:
+		// NOTE: this was the default previously in the chart locally and ok as
+		// we were loading the image into kind manually.
+		rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
+	case e2e.ManagementClusterInfrastuctureKind:
+		rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
+	default:
+		Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture)))
+	}
+}
+
+// PreRancherTurtlesUpgradelHook is a hook that can be used to perform actions before Rancher Turtles is upgraded.
+func PreRancherTurtlesUpgradelHook(rtUpgradeInput *UpgradeRancherTurtlesInput, e2eConfig *clusterctl.E2EConfig) {
+	switch e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture)) {
+	case e2e.ManagementClusterInfrastuctureEKS:
+		rtUpgradeInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}"
+		rtUpgradeInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent"
+	case e2e.ManagementClusterInfrastuctureIsolatedKind:
+		// NOTE: this was the default previously in the chart locally and ok as
+		// we were loading the image into kind manually.
+		rtUpgradeInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
+	case e2e.ManagementClusterInfrastuctureKind:
+		rtUpgradeInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
+	default:
+		Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture)))
+	}
+}
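Note: unlike PreRancherInstallHook and PreRancherTurtlesInstallHook, the new PreGiteaInstallHook has no call site in these hunks. Below is a minimal sketch of how a suite could wire it in, mirroring the other hook call sites above; the DeployGitea call and every DeployGiteaInput field other than ServiceType (which the hook sets) are assumptions for illustration, not part of this change.

	// Sketch only: pick the Gitea Service type from MANAGEMENT_CLUSTER_INFRASTRUCTURE
	// before deploying Gitea, the same way the other Pre*Hook helpers are used above.
	giteaInput := testenv.DeployGiteaInput{
		// Illustrative fields; a real suite would pass whatever it already uses for Gitea.
		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
		HelmBinaryPath:        flagVals.HelmBinaryPath,
	}

	// Sets ServiceType to LoadBalancer (eks), NodePort (isolated-kind) or ClusterIP (kind),
	// and fails the suite on any other MANAGEMENT_CLUSTER_INFRASTRUCTURE value.
	testenv.PreGiteaInstallHook(&giteaInput, e2eConfig)

	testenv.DeployGitea(ctx, giteaInput) // hypothetical deploy call, by analogy with DeployRancherTurtles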