# Workflow: E2E Multi-Cluster Fleet
# Runs Fleet's multi-cluster end-to-end suite against three k3d clusters
# (one upstream, one agent-registered downstream, one manager-registered downstream).
name: E2E Multi-Cluster Fleet

on:
  schedule:
    # Run every day at 9:00 AM (UTC)
    - cron: '0 9 * * *'
  pull_request:
  push:
    branches:
      - 'release/*'

# Shared build settings for all Go builds in this workflow.
env:
  GOARCH: amd64
  CGO_ENABLED: 0
  SETUP_K3D_VERSION: 'v5.7.4'
jobs:
  e2e-fleet-mc-test:
    # Self-hosted RunsOn runner spec (8 CPU, 16 GB), keyed to this run.
    runs-on: runs-on,runner=8cpu-linux-x64,mem=16,run-id=${{ github.run_id }}
    steps:
      - name: Check Out Repository
        uses: actions/checkout@v4
        with:
          # Full history is needed so build scripts can derive version info from tags.
          fetch-depth: 0
      - name: Set Up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
          check-latest: true
      - name: Install Ginkgo CLI
        run: go install github.com/onsi/ginkgo/v2/ginkgo
      - name: Install crust-gather CLI
        run: curl -sSfL https://github.com/crust-gather/crust-gather/raw/main/install.sh | sh -s -- --yes
      - name: Build Fleet Binaries
        run: |
          ./.github/scripts/build-fleet-binaries.sh
      - name: Build Docker Images
        run: |
          ./.github/scripts/build-fleet-images.sh
      - name: Install k3d
        run: curl --silent --fail https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG=${{ env.SETUP_K3D_VERSION }} bash
      # All three clusters share the "nw01" Docker network so the downstream
      # agents can reach the upstream API server by its internal IP.
      - name: Provision k3d Cluster
        run: |
          k3d cluster create upstream --wait \
            -p "80:80@agent:0:direct" \
            -p "443:443@agent:0:direct" \
            --api-port 6443 \
            --agents 1 \
            --network "nw01"
      - name: Provision k3d Downstream Cluster for agent-initiated registration
        run: |
          k3d cluster create downstream --wait \
            -p "81:80@agent:0:direct" \
            -p "444:443@agent:0:direct" \
            --api-port 6644 \
            --agents 1 \
            --network "nw01"
      - name: Provision k3d Downstream Cluster for manager-initiated registration
        run: |
          k3d cluster create managed-downstream --wait \
            -p "82:80@agent:0:direct" \
            -p "445:443@agent:0:direct" \
            --api-port 6645 \
            --agents 1 \
            --network "nw01"
      - name: Import Images Into k3d
        run: |
          ./.github/scripts/k3d-import-retry.sh rancher/fleet:dev rancher/fleet-agent:dev -c upstream
          ./.github/scripts/k3d-import-retry.sh rancher/fleet-agent:dev -c downstream
          ./.github/scripts/k3d-import-retry.sh rancher/fleet-agent:dev -c managed-downstream
      - name: Deploy Fleet
        run: |
          kubectl config use-context k3d-upstream
          ./.github/scripts/deploy-fleet.sh
      # Agent-initiated registration: create a registration token upstream,
      # then install fleet-agent on the downstream cluster using that token.
      - name: Deploy and Register Downstream Fleet
        run: |
          kubectl create ns fleet-default
          kubectl apply -f - <<EOF
          apiVersion: "fleet.cattle.io/v1alpha1"
          kind: ClusterRegistrationToken
          metadata:
            name: second-token
            namespace: fleet-default
          spec:
            ttl: 12h
          EOF
          { grep -q -m 1 "second-token"; kill $!; } < <(kubectl get secrets -n fleet-default -l "fleet.cattle.io/managed=true" -w)
          token=$(kubectl get secret -n fleet-default second-token -o go-template='{{index .data "values" | base64decode}}' | yq .token -)
          ca=$(kubectl get secret -n cattle-fleet-system fleet-controller-bootstrap-token -o go-template='{{index .data "ca.crt" | base64decode}}')
          apiServerIP=$(kubectl get node k3d-upstream-server-0 -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
          # agent initiated cluster registration
          helm \
            --kube-context k3d-downstream \
            -n cattle-fleet-system \
            upgrade \
            --install \
            --create-namespace \
            --wait \
            fleet-agent charts/fleet-agent \
            --set-string labels.env=test \
            --set apiServerCA="$ca" \
            --set apiServerURL="https://$apiServerIP:6443" \
            --set clusterNamespace="fleet-default" \
            --set token="$token"
          echo "waiting for downstream cluster to be registered..."
          { grep -q -m 1 "1/1"; kill $!; } < <(kubectl get cluster -n fleet-default -w)
          echo "waiting for cluster to report being ready..."
          while [ $(kubectl -n fleet-default get cluster -o jsonpath='{.items[0].status.summary.ready}') -ne 1 ]; do
            sleep 1
          done
      # Manager-initiated registration: build a kubeconfig for the managed
      # downstream cluster, store it as a secret upstream, and create a
      # Cluster resource referencing that secret.
      - name: Deploy and Register Managed Downstream Fleet
        run: |
          kubectl config use-context k3d-managed-downstream
          host=$(kubectl get node k3d-managed-downstream-server-0 -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
          ca=$( kubectl config view --flatten -o jsonpath='{.clusters[?(@.name == "k3d-managed-downstream")].cluster.certificate-authority-data}' )
          client_cert=$( kubectl config view --flatten -o jsonpath='{.users[?(@.name == "admin@k3d-managed-downstream")].user.client-certificate-data}' )
          token=$( kubectl config view --flatten -o jsonpath='{.users[?(@.name == "admin@k3d-managed-downstream")].user.client-key-data}' )
          server="https://$host:6443"
          kubectl config use-context k3d-upstream
          value=$(cat <<EOF
          apiVersion: v1
          kind: Config
          current-context: default
          clusters:
          - cluster:
              certificate-authority-data: $ca
              server: $server
            name: cluster
          contexts:
          - context:
              cluster: cluster
              user: user
            name: default
          preferences: {}
          users:
          - name: user
            user:
              client-certificate-data: $client_cert
              client-key-data: $token
          EOF
          )
          kubectl create ns fleet-default || true
          kubectl delete secret -n fleet-default kbcfg-second || true
          # Rancher sets a token value in the secret, but our docs don't mention it
          # * https://github.com/rancher/rancher/blob/c24fb8b0869a0b445f55b3307c6ed4582e147747/pkg/provisioningv2/kubeconfig/manager.go#L362
          # * https://fleet.rancher.io/0.5/manager-initiated#kubeconfig-secret-1
          kubectl create secret generic -n fleet-default kbcfg-second --from-literal=token="$token" --from-literal=value="$value"
          kubectl apply -n fleet-default -f - <<EOF
          apiVersion: "fleet.cattle.io/v1alpha1"
          kind: Cluster
          metadata:
            name: second
            namespace: fleet-default
            labels:
              name: second
          spec:
            kubeConfigSecret: kbcfg-second
          EOF
      - name: E2E tests
        env:
          FLEET_E2E_NS: fleet-local
          FLEET_E2E_NS_DOWNSTREAM: fleet-default
        run: |
          # Force use of non-managed downstream cluster for portability
          export CI_REGISTERED_CLUSTER=$(kubectl get clusters.fleet.cattle.io -n $FLEET_E2E_NS_DOWNSTREAM -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}' | grep -v second)
          kubectl config use-context k3d-upstream
          ginkgo --github-output e2e/multi-cluster
      - name: E2E tests with managed downstream agent
        env:
          FLEET_E2E_NS: fleet-local
          FLEET_E2E_NS_DOWNSTREAM: fleet-default
          FLEET_E2E_CLUSTER_DOWNSTREAM: k3d-managed-downstream
        run: |
          kubectl config use-context k3d-upstream
          ginkgo --github-output e2e/multi-cluster/installation
      - name: Acceptance Tests for Examples
        # Examples acceptance only runs on the daily scheduled build.
        if: >
          github.event_name == 'schedule'
        env:
          FLEET_E2E_NS: fleet-local
          FLEET_E2E_NS_DOWNSTREAM: fleet-default
        run: |
          ginkgo --github-output e2e/acceptance/multi-cluster-examples
      - name: Dump Failed Downstream Environment
        if: failure()
        run: |
          kubectl config use-context k3d-downstream
          crust-gather collect --exclude-namespace=kube-system --exclude-kind=Lease --duration=5s -f tmp/downstream
      - name: Upload Logs
        uses: actions/upload-artifact@v4
        if: failure()
        with:
          name: gha-fleet-mc-e2e-logs-${{ github.sha }}-${{ github.run_id }}
          path: |
            tmp/downstream
            tmp/upstream
          retention-days: 2