Add different hooks to make the setup easier
Signed-off-by: Alexandr Demicev <alexandr.demicev@suse.com>
alexander-demicev committed Aug 6, 2024
1 parent 99a043d commit f11a7ad
Showing 14 changed files with 388 additions and 599 deletions.
1 change: 1 addition & 0 deletions test/e2e/config/operator.yaml
@@ -27,6 +27,7 @@ intervals:
default/wait-turtles-uninstall: ["10m", "30s"]

variables:
MANAGEMENT_CLUSTER_INFRASTRUCTURE: "isolated-kind" # supported options are eks, isolated-kind, kind
RANCHER_VERSION: "v2.8.1"
KUBERNETES_VERSION: "v1.28.6"
KUBERNETES_MANAGEMENT_VERSION: "v1.27.0"
10 changes: 10 additions & 0 deletions test/e2e/const.go
@@ -95,7 +95,17 @@ const (
NginxIngressDeployment = "ingress-nginx-controller"
)

type ManagementClusterInfrastuctureType string

const (
ManagementClusterInfrastuctureEKS ManagementClusterInfrastuctureType = "eks"
ManagementClusterInfrastuctureIsolatedKind ManagementClusterInfrastuctureType = "isolated-kind"
ManagementClusterInfrastuctureKind ManagementClusterInfrastuctureType = "kind"
)

const (
ManagementClusterInfrastucture = "MANAGEMENT_CLUSTER_INFRASTRUCTURE"

KubernetesManagementVersionVar = "KUBERNETES_MANAGEMENT_VERSION"

KubernetesVersionVar = "KUBERNETES_VERSION"
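The new `ManagementClusterInfrastuctureType` constants give the `MANAGEMENT_CLUSTER_INFRASTRUCTURE` variable from `operator.yaml` a typed representation. The commit does not show where the string is validated; the snippet below is only a hypothetical helper (the function name and error handling are assumptions, not part of this change) illustrating how a suite could map the config value onto these constants.

```go
package e2e

import "fmt"

// parseManagementClusterInfrastructure is a hypothetical helper: it converts the
// MANAGEMENT_CLUSTER_INFRASTRUCTURE string from the e2e config into the typed
// constant introduced by this commit, rejecting anything other than the three
// supported options (eks, isolated-kind, kind).
func parseManagementClusterInfrastructure(value string) (ManagementClusterInfrastuctureType, error) {
	switch infra := ManagementClusterInfrastuctureType(value); infra {
	case ManagementClusterInfrastuctureEKS,
		ManagementClusterInfrastuctureIsolatedKind,
		ManagementClusterInfrastuctureKind:
		return infra, nil
	default:
		return "", fmt.Errorf("unsupported management cluster infrastructure %q", value)
	}
}
```

With a helper like this, an invalid value would fail fast instead of silently falling through to a default setup path.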
9 changes: 0 additions & 9 deletions test/e2e/flags.go
@@ -30,9 +30,6 @@ type FlagValues struct {
// UseExistingCluster instructs the test to use the current cluster instead of creating a new one (default discovery rules apply).
UseExistingCluster bool

// UseEKS instructs the test to create an EKS cluster instead of using kind.
UseEKS bool

// ArtifactFolder is the folder to store e2e test artifacts.
ArtifactFolder string

@@ -48,10 +45,6 @@ type FlagValues struct {
// ChartPath is the path to the operator chart.
ChartPath string

// IsolatedMode instructs the test to run without ngrok and exposing the cluster to the internet. This setup will only work with CAPD
// or other providers that run in the same network as the bootstrap cluster.
IsolatedMode bool

// ClusterctlBinaryPath is the path to the clusterctl binary to use.
ClusterctlBinaryPath string

@@ -65,11 +58,9 @@ func InitFlags(values *FlagValues) {
flag.StringVar(&values.ArtifactFolder, "e2e.artifacts-folder", "_artifacts", "folder where e2e test artifact should be stored")
flag.BoolVar(&values.SkipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped")
flag.BoolVar(&values.UseExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)")
flag.BoolVar(&values.UseEKS, "e2e.use-eks", true, "if true, the test uses EKS for the management cluster")
flag.StringVar(&values.HelmBinaryPath, "e2e.helm-binary-path", "helm", "path to the helm binary")
flag.StringVar(&values.HelmExtraValuesDir, "e2e.helm-extra-values-path", "/tmp", "path to the extra values file")
flag.StringVar(&values.ClusterctlBinaryPath, "e2e.clusterctl-binary-path", "helm", "path to the clusterctl binary")
flag.StringVar(&values.ChartPath, "e2e.chart-path", "", "path to the operator chart")
flag.BoolVar(&values.IsolatedMode, "e2e.isolated-mode", false, "if true, the test will run without ngrok and exposing the cluster to the internet. This setup will only work with CAPD or other providers that run in the same network as the bootstrap cluster.")
flag.BoolVar(&values.GiteaCustomIngress, "e2e.gitea-custom-ingress", false, "if true, the test will use a custom ingress for Gitea")
}
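Dropping `e2e.use-eks` and `e2e.isolated-mode` means the target management cluster is now chosen once through the `MANAGEMENT_CLUSTER_INFRASTRUCTURE` variable rather than per-invocation flags. The commit does not spell out the correspondence; the lookup below is an illustrative guess inferred from the branching removed in the suites, and the package and variable names are invented for the example.

```go
package e2e_test // illustrative placement only

import "github.com/rancher/turtles/test/e2e"

// legacyFlagEquivalents is a hypothetical lookup documenting how the removed
// boolean flags line up with the new config values; the mapping is inferred
// from the suite branching this commit deletes, not stated by the commit itself.
var legacyFlagEquivalents = map[string]e2e.ManagementClusterInfrastuctureType{
	"-e2e.use-eks=true":        e2e.ManagementClusterInfrastuctureEKS,
	"-e2e.isolated-mode=true":  e2e.ManagementClusterInfrastuctureIsolatedKind,
	"neither flag set to true": e2e.ManagementClusterInfrastuctureKind,
}
```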
132 changes: 37 additions & 95 deletions test/e2e/suites/embedded-capi-disabled-v3/suite_test.go
@@ -31,9 +31,7 @@ import (
. "github.com/onsi/gomega"
"github.com/rancher/turtles/test/e2e"
"github.com/rancher/turtles/test/framework"
turtlesframework "github.com/rancher/turtles/test/framework"
"github.com/rancher/turtles/test/testenv"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
ctrl "sigs.k8s.io/controller-runtime"
@@ -84,26 +82,7 @@ var _ = BeforeSuite(func() {
By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)

hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
ingressType := testenv.NgrokIngress
dockerUsername := ""
dockerPassword := ""
var customClusterProvider testenv.CustomClusterProvider

if flagVals.UseEKS {
Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated")
dockerUsername = os.Getenv("GITHUB_USERNAME")
Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required")
dockerPassword = os.Getenv("GITHUB_TOKEN")
Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required")
customClusterProvider = testenv.EKSBootsrapCluster
Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required")
ingressType = testenv.EKSNginxIngress
}

if flagVals.IsolatedMode {
ingressType = testenv.CustomIngress
}
preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig)

By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))
@@ -115,16 +94,15 @@ var _ = BeforeSuite(func() {
Scheme: e2e.InitScheme(),
ArtifactFolder: flagVals.ArtifactFolder,
KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
IsolatedMode: flagVals.IsolatedMode,
HelmBinaryPath: flagVals.HelmBinaryPath,
CustomClusterProvider: customClusterProvider,
CustomClusterProvider: preSetupOutput.CustomClusterProvider,
})

testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
HelmBinaryPath: flagVals.HelmBinaryPath,
HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"),
IngressType: ingressType,
IngressType: preSetupOutput.IngressType,
CustomIngress: e2e.NginxIngress,
CustomIngressNamespace: e2e.NginxIngressNamespace,
CustomIngressDeployment: e2e.NginxIngressDeployment,
@@ -137,32 +115,6 @@ var _ = BeforeSuite(func() {
DefaultIngressClassPatch: e2e.IngressClassPatch,
})

if flagVals.IsolatedMode {
hostName = setupClusterResult.IsolatedHostName
}

if flagVals.UseEKS {
By("Getting ingress hostname")
svcRes := &testenv.WaitForServiceIngressHostnameResult{}
testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
ServiceName: "ingress-nginx-controller",
ServiceNamespace: "ingress-nginx",
IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
}, svcRes)
hostName = svcRes.Hostname

By("Deploying ghcr details")
framework.CreateDockerRegistrySecret(ctx, framework.CreateDockerRegistrySecretInput{
Name: "regcred",
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
Namespace: "rancher-turtles-system",
DockerServer: "https://ghcr.io",
DockerUsername: dockerUsername,
DockerPassword: dockerPassword,
})
}

// NOTE: deploy Rancher first with the embedded-cluster-api feature disabled.
// and then deploy Rancher Turtles.
rancherInput := testenv.DeployRancherInput{
@@ -177,7 +129,6 @@ var _ = BeforeSuite(func() {
RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar),
RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar),
RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar),
RancherHost: hostName,
RancherNamespace: e2e.RancherNamespace,
RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar),
RancherFeatures: "embedded-cluster-api=false",
@@ -186,22 +137,26 @@ var _ = BeforeSuite(func() {
ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
Variables: e2eConfig.Variables,
}
if !flagVals.IsolatedMode && !flagVals.UseEKS {
// i.e. we are using ngrok locally
rancherInput.RancherIngressConfig = e2e.IngressConfig
rancherInput.RancherServicePatch = e2e.RancherServicePatch
}
if flagVals.UseEKS {
rancherInput.RancherIngressClassName = "nginx"
}

rancherHookResult := testenv.PreRancherInstallHook(
&testenv.PreRancherInstallHookInput{
Ctx: ctx,
RancherInput: &rancherInput,
E2EConfig: e2eConfig,
SetupClusterResult: setupClusterResult,
PreSetupOutput: preSetupOutput,
})

hostName = rancherHookResult.HostName

testenv.DeployRancher(ctx, rancherInput)

rtInput := testenv.DeployRancherTurtlesInput{
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
HelmBinaryPath: flagVals.HelmBinaryPath,
ChartPath: flagVals.ChartPath,
CAPIProvidersYAML: e2e.CapiProviders,
Namespace: turtlesframework.DefaultRancherTurtlesNamespace,
Namespace: framework.DefaultRancherTurtlesNamespace,
Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH),
Tag: "v0.0.1",
WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
@@ -210,14 +165,9 @@ var _ = BeforeSuite(func() {
"rancherTurtles.features.embedded-capi.disabled": "false",
},
}
if flagVals.UseEKS {
rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}"
rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent"
} else {
// NOTE: this was the default previously in the chart locally and ok as
// we were loading the image into kind manually.
rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
}

testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig)

testenv.DeployRancherTurtles(ctx, rtInput)

// NOTE: there are no short or local tests in this suite
@@ -233,7 +183,7 @@ var _ = BeforeSuite(func() {
},
CAPIProvidersYAML: e2e.FullProviders,
TemplateData: map[string]string{
"AWSEncodedCredentials": e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar),
"AWSEncodedCredentials": awsCreds,
},
WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
WaitForDeployments: []testenv.NamespaceName{
Expand All @@ -248,38 +198,30 @@ var _ = BeforeSuite(func() {
},
})

giteaValues := map[string]string{
"gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar),
"gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar),
}

giteaServiceType := corev1.ServiceTypeNodePort
if flagVals.UseEKS {
giteaServiceType = corev1.ServiceTypeLoadBalancer
}

if flagVals.GiteaCustomIngress {
giteaServiceType = corev1.ServiceTypeClusterIP
}

giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{
giteaInput := testenv.DeployGiteaInput{
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
HelmBinaryPath: flagVals.HelmBinaryPath,
ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar),
ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar),
ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar),
ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar),
ValuesFilePath: "../../data/gitea/values.yaml",
Values: giteaValues,
RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"),
ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"),
AuthSecretName: e2e.AuthSecretName,
Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar),
Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar),
ServiceType: giteaServiceType,
CustomIngressConfig: e2e.GiteaIngress,
Variables: e2eConfig.Variables,
})
Values: map[string]string{
"gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar),
"gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar),
},
RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"),
ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"),
AuthSecretName: e2e.AuthSecretName,
Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar),
Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar),
CustomIngressConfig: e2e.GiteaIngress,
Variables: e2eConfig.Variables,
}

testenv.PreGiteaInstallHook(&giteaInput, e2eConfig)

giteaResult = testenv.DeployGitea(ctx, giteaInput)
})

var _ = AfterSuite(func() {
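The environment-specific logic removed above now sits behind four `testenv` hooks: `PreManagementClusterSetupHook`, `PreRancherInstallHook`, `PreRancherTurtlesInstallHook`, and `PreGiteaInstallHook`. Their implementations are in files outside the visible hunks; the sketch below reconstructs only the first one from its call site (`preSetupOutput.IngressType` / `preSetupOutput.CustomClusterProvider`) and from the branching it replaces, so the struct name, field types, and return type are assumptions rather than the package's actual definitions.

```go
package testenv // hypothetical sketch; the real hook lives in test/testenv and may differ

import (
	"github.com/rancher/turtles/test/e2e"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
)

// PreManagementClusterSetupResult mirrors the two fields the suites read from
// the hook's return value in this commit. The type name itself is assumed.
type PreManagementClusterSetupResult struct {
	IngressType           IngressType           // assumed type name for values like NgrokIngress
	CustomClusterProvider CustomClusterProvider // nil unless a non-kind bootstrap cluster is needed
}

// PreManagementClusterSetupHook sketches the first hook: a single switch on
// MANAGEMENT_CLUSTER_INFRASTRUCTURE replaces the per-suite UseEKS/IsolatedMode
// branching that this commit removes.
func PreManagementClusterSetupHook(e2eConfig *clusterctl.E2EConfig) PreManagementClusterSetupResult {
	infra := e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture))

	switch infra {
	case e2e.ManagementClusterInfrastuctureEKS:
		// EKS runs expose Rancher through nginx and bootstrap on EKS; the real hook
		// likely also collects the ghcr credentials the suites used to read here.
		return PreManagementClusterSetupResult{IngressType: EKSNginxIngress, CustomClusterProvider: EKSBootsrapCluster}
	case e2e.ManagementClusterInfrastuctureIsolatedKind:
		// Isolated kind skips ngrok and uses the in-cluster custom ingress.
		return PreManagementClusterSetupResult{IngressType: CustomIngress}
	default:
		// Plain kind keeps the previous default: ngrok ingress, no custom provider.
		return PreManagementClusterSetupResult{IngressType: NgrokIngress}
	}
}
```

The remaining hooks appear to follow the same pattern, mutating their inputs in place before deployment (`PreRancherInstallHook` resolving the Rancher host name and ingress configuration, `PreRancherTurtlesInstallHook` setting image pull policy and secrets, `PreGiteaInstallHook` choosing the Gitea service type), all keyed on the same infrastructure choice.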