diff --git a/Makefile b/Makefile index a1f30a9f14..457ef8d6b0 100644 --- a/Makefile +++ b/Makefile @@ -16,8 +16,8 @@ IMG_TAG := latest endif ifeq (${LOCAL_PROVIDER_VERSION},latest) -# Change this versions after release when required here and in e2e config (test/e2e/config/nutanix.yaml) -LOCAL_PROVIDER_VERSION := v1.3.99 +# TODO(release-blocker): Change these versions after release when required here and in e2e config (test/e2e/config/nutanix.yaml) +LOCAL_PROVIDER_VERSION := v1.4.99 endif # PLATFORMS is a list of platforms to build for. @@ -304,11 +304,14 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi ##@ Templates .PHONY: cluster-e2e-templates -cluster-e2e-templates: $(KUSTOMIZE) cluster-e2e-templates-v1beta1 cluster-e2e-templates-v1alpha4 cluster-e2e-templates-v124 ## Generate cluster templates for all versions +cluster-e2e-templates: $(KUSTOMIZE) cluster-e2e-templates-v1beta1 cluster-e2e-templates-v1alpha4 cluster-e2e-templates-v124 cluster-e2e-templates-v130 ## Generate cluster templates for all versions cluster-e2e-templates-v124: $(KUSTOMIZE) ## Generate cluster templates for CAPX v1.2.4 $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1.2.4/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1.2.4/cluster-template.yaml +cluster-e2e-templates-v130: $(KUSTOMIZE) ## Generate cluster templates for CAPX v1.3.0 + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1.3.0/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1.3.0/cluster-template.yaml + cluster-e2e-templates-v1alpha4: $(KUSTOMIZE) ## Generate cluster templates for v1alpha4 $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1alpha4/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1alpha4/cluster-template.yaml diff --git a/metadata.yaml b/metadata.yaml index 2a72859be1..cccc4437e8 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -2,7 +2,8 @@ # the contract version may change between 
minor or major versions, but *not* # between patch versions. # -# update this file only when a new major or minor version is released +# TODO(release-blocker): update this file only when a new major or minor version is released + apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 kind: Metadata releaseSeries: diff --git a/test/e2e/config/nutanix.yaml b/test/e2e/config/nutanix.yaml index 823a60cd1f..01413b3508 100644 --- a/test/e2e/config/nutanix.yaml +++ b/test/e2e/config/nutanix.yaml @@ -244,7 +244,15 @@ providers: - sourcePath: "../../../metadata.yaml" - sourcePath: "../data/infrastructure-nutanix/v1.2.4/cluster-template.yaml" - sourcePath: "../data/infrastructure-nutanix/ccm-update.yaml" - - name: v1.3.99 # next; use manifest from source files + - name: v1.3.0 + type: url + value: https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/releases/download/v1.3.0/infrastructure-components.yaml + contract: v1beta1 + files: + - sourcePath: "../../../metadata.yaml" + - sourcePath: "../data/infrastructure-nutanix/v1.3.0/cluster-template.yaml" + - sourcePath: "../data/infrastructure-nutanix/ccm-update.yaml" + - name: v1.4.99 # TODO(release-blocker): update this when you make a new release and change the version to higher than the latest release type: kustomize value: "../../../config/default" contract: v1beta1 diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/base/ccm-patch.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/ccm-patch.yaml new file mode 100644 index 0000000000..790de17905 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/ccm-patch.yaml @@ -0,0 +1,43 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "${CLUSTER_NAME}-kcp" + namespace: "${NAMESPACE}" +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + initConfiguration: + nodeRegistration: + 
kubeletExtraArgs: + cloud-provider: external + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-kcfg-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + ccm: "nutanix" + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/base/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/cluster-with-kcp.yaml new file mode 100644 index 0000000000..f6ba2dc11b --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/cluster-with-kcp.yaml @@ -0,0 +1,200 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + prismCentral: + address: "${NUTANIX_ENDPOINT}" + port: ${NUTANIX_PORT=9440} + insecure: ${NUTANIX_INSECURE=false} + credentialRef: + name: "${CLUSTER_NAME}" + kind: Secret + additionalTrustBundle: + name: user-ca-bundle + kind: ConfigMap + controlPlaneEndpoint: + host: "${CONTROL_PLANE_ENDPOINT_IP}" + port: ${CONTROL_PLANE_ENDPOINT_PORT=6443} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + clusterNetwork: + services: + cidrBlocks: ["172.19.0.0/16"] + pods: + cidrBlocks: ["172.20.0.0/16"] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "${CLUSTER_NAME}-kcp" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "${CLUSTER_NAME}" + +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 
+kind: KubeadmControlPlane +metadata: + name: "${CLUSTER_NAME}-kcp" + namespace: "${NAMESPACE}" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT=1} + version: ${KUBERNETES_VERSION} + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "${CLUSTER_NAME}-mt-0" + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" + scheduler: + extraArgs: + tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: ghcr.io/kube-vip/kube-vip:v0.6.4 + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "${CONTROL_PLANE_ENDPOINT_IP}" + - name: port + value: "${CONTROL_PLANE_ENDPOINT_PORT=6443}" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: 
vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "${KUBEVIP_SVC_ENABLE=false}" + - name: lb_enable + value: "${KUBEVIP_LB_ENABLE=false}" + - name: enableServicesElection + value: "${KUBEVIP_SVC_ELECTION=false}" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: 
"${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" + users: + - name: capiuser + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - ${NUTANIX_SSH_AUTHORIZED_KEY} + preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + - echo "after kubeadm call" > /var/log/postkubeadm.log + useExperimentalRetryJoin: true + verbosity: 10 + +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-kcfg-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" + users: + - name: capiuser + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - ${NUTANIX_SSH_AUTHORIZED_KEY} + preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + postKubeadmCommands: + - echo "after kubeadm call" > /var/log/postkubeadm.log + verbosity: 10 + 
#useExperimentalRetryJoin: true diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/base/cm.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/cm.yaml new file mode 100644 index 0000000000..ff4d33af76 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/cm.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-ca-bundle + namespace: "${NAMESPACE}" +binaryData: + ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/base/cni-patch.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/cni-patch.yaml new file mode 100644 index 0000000000..9936ca0cf9 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/cni-patch.yaml @@ -0,0 +1,7 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cni: ${CLUSTER_NAME}-crs-cni + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/base/crs.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/crs.yaml new file mode 100644 index 0000000000..608f696def --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/crs.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "cni-${CLUSTER_NAME}-crs-cni" +data: ${CNI_RESOURCES} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-crs-cni" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: "${CLUSTER_NAME}-crs-cni" + resources: + - name: "cni-${CLUSTER_NAME}-crs-cni" + kind: ConfigMap diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/base/md.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/md.yaml new file mode 100644 index 0000000000..b5efd60dab --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/md.yaml @@ -0,0 +1,28 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + 
name: "${CLUSTER_NAME}-wmd" + namespace: "${NAMESPACE}" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "${CLUSTER_NAME}-kcfg-0" + clusterName: "${CLUSTER_NAME}" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "${CLUSTER_NAME}-mt-0" + version: "${KUBERNETES_VERSION}" diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/base/mhc.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/mhc.yaml new file mode 100644 index 0000000000..7c6077e84a --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/mhc.yaml @@ -0,0 +1,31 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + name: "${CLUSTER_NAME}-mhc" + namespace: "${NAMESPACE}" +spec: + clusterName: "${CLUSTER_NAME}" + maxUnhealthy: 40% + nodeStartupTimeout: 10m0s + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + unhealthyConditions: + - type: Ready + status: "False" + timeout: 5m0s + - type: Ready + status: Unknown + timeout: 5m0s + - type: MemoryPressure + status: "True" + timeout: 5m0s + - type: DiskPressure + status: "True" + timeout: 5m0s + - type: PIDPressure + status: "True" + timeout: 5m0s + - type: NetworkUnavailable + status: "True" + timeout: 5m0s diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/base/nmt.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/nmt.yaml new file mode 100644 index 0000000000..90829f2a76 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/nmt.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "${CLUSTER_NAME}-mt-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + 
providerID: "nutanix://${CLUSTER_NAME}-m1" + # Supported options for boot type: legacy and uefi + # Defaults to legacy if not set + bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} + vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} + vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} + memorySize: "${NUTANIX_MACHINE_MEMORY_SIZE=4Gi}" + systemDiskSize: "${NUTANIX_SYSTEMDISK_SIZE=40Gi}" + image: + type: name + name: "${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME}" + cluster: + type: name + name: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}" + subnet: + - type: name + name: "${NUTANIX_SUBNET_NAME}" + # Adds additional categories to the virtual machines. + # Note: Categories must already be present in Prism Central + # additionalCategories: + # - key: AppType + # value: Kubernetes + # Adds the cluster virtual machines to a project defined in Prism Central. + # Replace NUTANIX_PROJECT_NAME with the correct project defined in Prism Central + # Note: Project must already be present in Prism Central. + # project: + # type: name + # name: "NUTANIX_PROJECT_NAME" + # gpus: + # - type: name + # name: "GPU NAME" diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/base/nutanix-ccm-crs.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/nutanix-ccm-crs.yaml new file mode 100644 index 0000000000..188d39347c --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/nutanix-ccm-crs.yaml @@ -0,0 +1,22 @@ +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: nutanix-ccm-crs +spec: + clusterSelector: + matchLabels: + ccm: nutanix + resources: + - kind: ConfigMap + name: nutanix-ccm + - kind: Secret + name: nutanix-ccm-secret + - kind: ConfigMap + name: user-ca-bundle + strategy: ApplyOnce +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nutanix-ccm +data: diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/base/nutanix-ccm-secret.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/nutanix-ccm-secret.yaml new file mode 100644 index 
0000000000..12a8500f39 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/nutanix-ccm-secret.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Secret +metadata: + name: nutanix-ccm-secret +type: addons.cluster.x-k8s.io/resource-set +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: | + [ + { + "type": "basic_auth", + "data": { + "prismCentral":{ + "username": "${NUTANIX_USER}", + "password": "${NUTANIX_PASSWORD}" + }, + "prismElements": null + } + } + ] diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/base/nutanix-ccm.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/nutanix-ccm.yaml new file mode 100644 index 0000000000..2ef443ae5d --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/nutanix-ccm.yaml @@ -0,0 +1,211 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-ca-bundle + namespace: kube-system +binaryData: + ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} +--- +# Source: nutanix-cloud-provider/templates/rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: kube-system +--- +# Source: nutanix-cloud-provider/templates/cm.yaml +kind: ConfigMap +apiVersion: v1 +metadata: + name: nutanix-config + namespace: kube-system +data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "${NUTANIX_ENDPOINT}", + "port": ${NUTANIX_PORT=9440}, + "insecure": ${NUTANIX_INSECURE=false}, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + }, + "additionalTrustBundle": { + "kind": "ConfigMap", + "name": "user-ca-bundle", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": ${CCM_CUSTOM_LABEL=false}, + "topologyDiscovery": { + "type": "Prism" + } + } +--- +# Source: nutanix-cloud-provider/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + 
rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +--- +# Source: nutanix-cloud-provider/templates/rbac.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager +subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system +--- +# Source: nutanix-cloud-provider/templates/cloud-provider-nutanix-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + 
topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "${CCM_REPO=ghcr.io/nutanix-cloud-native/cloud-provider-nutanix/controller}:${CCM_TAG=v0.3.2}" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/base/secret.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/secret.yaml new file mode 100644 index 0000000000..89771a709d --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/base/secret.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +stringData: + credentials: | + [ + { + "type": "basic_auth", + "data": { + "prismCentral":{ + "username": "${NUTANIX_USER}", + "password": "${NUTANIX_PASSWORD}" + } + } + } + ] diff --git a/test/e2e/data/infrastructure-nutanix/v1.3.0/cluster-template/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1.3.0/cluster-template/kustomization.yaml new file mode 100644 index 0000000000..5d7cbe9663 --- 
/dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.3.0/cluster-template/kustomization.yaml @@ -0,0 +1,23 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +configMapGenerator: +- name: nutanix-ccm + behavior: merge + files: + - ../base/nutanix-ccm.yaml + +bases: + - ../base/cluster-with-kcp.yaml + - ../base/secret.yaml + - ../base/cm.yaml + - ../base/nmt.yaml + - ../base/crs.yaml + - ../base/md.yaml + - ../base/mhc.yaml + - ../base/nutanix-ccm-crs.yaml + - ../base/nutanix-ccm-secret.yaml + +patchesStrategicMerge: + - ../base/ccm-patch.yaml + - ../base/cni-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/clusterclass-e2e.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/clusterclass-e2e.yaml index 8241698d8e..d0e7f684af 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/clusterclass-e2e.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/clusterclass-e2e.yaml @@ -197,7 +197,7 @@ data: key: node.kubernetes.io/not-ready operator: Exists containers: - - image: "${CCM_REPO=ghcr.io/nutanix-cloud-native/cloud-provider-nutanix/controller}:${CCM_TAG=v0.3.1}" + - image: "${CCM_REPO=ghcr.io/nutanix-cloud-native/cloud-provider-nutanix/controller}:${CCM_TAG=v0.3.2}" imagePullPolicy: IfNotPresent name: nutanix-cloud-controller-manager env: