Skip to content

Commit

Permalink
Add CI to handle auto test and promote to strict and moonray branches (
Browse files Browse the repository at this point in the history
…#476)

* Add CI to handle auto test and promote to strict and moonray branches

* also apply patch when running tests

* fix missing namespace issues with calico

* Disable more tests until features are implemented

* Add --timeout flags to status check commands

* do not fail fast on failing informing tests

* skip cleanup test (not yet implemented)

* relax default timeout

* refactor check network ready

* improve wording of x-wait-for failure commands

* retry on failures of checking DNS and network

* never return false without error

* disable test_network too

* avoid returning false, nil on CheckNetwork cilium

* fixup cilium messages
  • Loading branch information
neoaggelos committed Jun 10, 2024
1 parent 5faf42c commit 6772673
Show file tree
Hide file tree
Showing 9 changed files with 154 additions and 103 deletions.
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: Integration Tests (Strict)
name: Informing Integration Tests

on:
push:
Expand All @@ -13,8 +13,12 @@ permissions:

jobs:
build:
name: Build
name: Build ${{ matrix.patch }}
runs-on: ubuntu-20.04
strategy:
matrix:
patch: ["strict", "moonray"]
fail-fast: false
steps:
- name: Harden Runner
uses: step-security/harden-runner@v2
Expand All @@ -30,25 +34,27 @@ jobs:
- name: Install snapcraft
run: |
sudo snap install snapcraft --classic
- name: Apply strict patch
- name: Apply ${{ matrix.patch }} patch
run: |
./build-scripts/patches/strict/apply
./build-scripts/patches/${{ matrix.patch }}/apply
- name: Build snap
run: |
sg lxd -c 'snapcraft --use-lxd'
mv k8s_*.snap k8s-strict.snap
mv k8s_*.snap k8s-${{ matrix.patch }}.snap
- name: Uploading snap
uses: actions/upload-artifact@v4
with:
name: k8s-strict.snap
path: k8s-strict.snap
name: k8s-${{ matrix.patch }}.snap
path: k8s-${{ matrix.patch }}.snap

test-integration:
needs: [ build ]
name: Test ${{ matrix.os }}
name: Test ${{ matrix.patch }} ${{ matrix.os }}
strategy:
matrix:
os: ["ubuntu:20.04"]
patch: ["strict", "moonray"]
fail-fast: false
runs-on: ubuntu-20.04
steps:
- name: Check out code
Expand All @@ -68,11 +74,14 @@ jobs:
- name: Download snap
uses: actions/download-artifact@v4
with:
name: k8s-strict.snap
name: k8s-${{ matrix.patch }}.snap
path: build
- name: Apply ${{ matrix.patch }} patch
run: |
./build-scripts/patches/${{ matrix.patch }}/apply
- name: Run end to end tests
run: |
export TEST_SNAP="$PWD/build/k8s-strict.snap"
export TEST_SNAP="$PWD/build/k8s-${{ matrix.patch }}.snap"
export TEST_SUBSTRATE=lxd
export TEST_LXD_IMAGE=${{ matrix.os }}
cd tests/integration && sg lxd -c 'tox -e integration'
58 changes: 0 additions & 58 deletions .github/workflows/strict.yaml

This file was deleted.

51 changes: 51 additions & 0 deletions .github/workflows/update-branches.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
name: Auto-update branches

on:
  push:
    branches:
      - main
      - 'release-[0-9]+.[0-9]+'

permissions:
  contents: read

jobs:
  update:
    name: "${{ matrix.patch }}"
    permissions:
      contents: write # for Git to git push
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        patch: ["strict", "moonray"]
    outputs:
      branch: ${{ steps.determine.outputs.branch }}
    steps:
      - name: Harden Runner
        uses: step-security/harden-runner@v2
        with:
          egress-policy: audit
      - name: Determine branch
        id: determine
        env:
          BRANCH: ${{ github.ref }}
        run: |
          BRANCH=${BRANCH#refs/heads/} # strip off refs/heads/ if it exists
          if [[ "${BRANCH}" == "main" ]]; then
            echo "branch=autoupdate/${{ matrix.patch }}" >> "$GITHUB_OUTPUT"
          elif [[ "${BRANCH}" =~ ^release-[0-9]+\.[0-9]+$ ]]; then
            echo "branch=autoupdate/${BRANCH}-${{ matrix.patch }}" >> "$GITHUB_OUTPUT"
          else
            # unexpected ref: fail loudly rather than pushing to a bogus branch
            exit 1
          fi
      # FIX: this workflow has a single `update` job, so the `needs` context is
      # empty and `needs.prepare.outputs.branch` always expanded to "" — which
      # made `git checkout -b` / `git push` below run with no branch argument.
      # The branch name comes from the `determine` step of this same job.
      - name: Sync ${{ github.ref }} to ${{ steps.determine.outputs.branch }}
        uses: actions/checkout@v4
        with:
          ssh-key: ${{ secrets.DEPLOY_KEY_TO_UPDATE_STRICT_BRANCH }}
      - name: Apply ${{ matrix.patch }} patch
        run: |
          git checkout -b ${{ steps.determine.outputs.branch }}
          ./build-scripts/patches/${{ matrix.patch }}/apply
      - name: Push to ${{ steps.determine.outputs.branch }}
        run: |
          git push origin --force ${{ steps.determine.outputs.branch }}
11 changes: 11 additions & 0 deletions build-scripts/patches/moonray/apply
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,17 @@ git config user.name k8s-bot

# Remove unrelated tests
rm "${DIR}/../../../tests/integration/tests/test_cilium_e2e.py"
## TODO: restore when loadbalancer is implemented
rm "${DIR}/../../../tests/integration/tests/test_loadbalancer.py"
## TODO: restore when gateway is implemented
rm "${DIR}/../../../tests/integration/tests/test_gateway.py"
## TODO: restore when ingress is implemented
rm "${DIR}/../../../tests/integration/tests/test_ingress.py"
## TODO: restore when cleanup is implemented
rm "${DIR}/../../../tests/integration/tests/test_cleanup.py"
## TODO: restore when network test is fixed
rm "${DIR}/../../../tests/integration/tests/test_network.py"

git commit -a -m "Remove unrelated tests"

# Apply strict patch
Expand Down
40 changes: 30 additions & 10 deletions src/k8s/cmd/k8s/k8s_x_wait_for.go
Original file line number Diff line number Diff line change
@@ -1,40 +1,60 @@
package k8s

import (
"context"
"time"

cmdutil "github.com/canonical/k8s/cmd/util"
"github.com/canonical/k8s/pkg/k8sd/features"
"github.com/canonical/k8s/pkg/utils/control"
"github.com/spf13/cobra"
)

func newXWaitForCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
var opts struct {
timeout time.Duration
}
waitForDNSCmd := &cobra.Command{
Use: "dns",
Short: "Wait for DNS to be ready",
Run: func(cmd *cobra.Command, args []string) {
err := control.WaitUntilReady(cmd.Context(), func() (bool, error) {
return features.StatusChecks.CheckDNS(cmd.Context(), env.Snap)
})
if err != nil {
cmd.PrintErrf("Error: failed to wait for DNS to be ready: %v\n", err)
ctx, cancel := context.WithTimeout(cmd.Context(), opts.timeout)
defer cancel()
if err := control.WaitUntilReady(ctx, func() (bool, error) {
ok, err := features.StatusChecks.CheckDNS(cmd.Context(), env.Snap)
if ok {
return true, nil
}
cmd.PrintErrf("DNS not ready yet: %v\n", err.Error())
return false, nil
}); err != nil {
cmd.PrintErrf("Error: DNS did not become ready: %v\n", err)
env.Exit(1)
}
},
}
waitForDNSCmd.Flags().DurationVar(&opts.timeout, "timeout", 5*time.Minute, "maximum time to wait")

waitForNetworkCmd := &cobra.Command{
Use: "network",
Short: "Wait for Network to be ready",
Run: func(cmd *cobra.Command, args []string) {
err := control.WaitUntilReady(cmd.Context(), func() (bool, error) {
return features.StatusChecks.CheckNetwork(cmd.Context(), env.Snap)
})
if err != nil {
cmd.PrintErrf("Error: failed to wait for DNS to be ready: %v\n", err)
ctx, cancel := context.WithTimeout(cmd.Context(), opts.timeout)
defer cancel()
if err := control.WaitUntilReady(ctx, func() (bool, error) {
ok, err := features.StatusChecks.CheckNetwork(cmd.Context(), env.Snap)
if ok {
return true, nil
}
cmd.PrintErrf("network not ready yet: %v\n", err.Error())
return false, nil
}); err != nil {
cmd.PrintErrf("Error: network did not become ready: %v\n", err)
env.Exit(1)
}
},
}
waitForNetworkCmd.Flags().DurationVar(&opts.timeout, "timeout", 5*time.Minute, "maximum time to wait")

cmd := &cobra.Command{
Use: "x-wait-for",
Expand Down
1 change: 1 addition & 0 deletions src/k8s/pkg/client/helm/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ func (h *client) Apply(ctx context.Context, c InstallableChart, desired State, v
install := action.NewInstall(cfg)
install.ReleaseName = c.Name
install.Namespace = c.Namespace
install.CreateNamespace = true

chart, err := loader.Load(path.Join(h.manifestsBaseDir, c.ManifestPath))
if err != nil {
Expand Down
62 changes: 38 additions & 24 deletions src/k8s/pkg/k8sd/features/calico/status.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,42 +6,56 @@ import (

"github.com/canonical/k8s/pkg/snap"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// CheckNetwork checks the status of the Calico pods in the Kubernetes cluster.
// It verifies if all the Calico pods in the "tigera-operator" namespace are ready.
// If any pod is not ready, it returns false. Otherwise, it returns true.
func CheckNetwork(ctx context.Context, snap snap.Snap) (bool, error) {
client, err := snap.KubernetesClient("calico-system")
if err != nil {
return false, fmt.Errorf("failed to create kubernetes client: %w", err)
func podIsReady(pod v1.Pod) bool {
if pod.Status.Phase != v1.PodRunning {
return false
}

operatorReady, err := client.IsPodReady(ctx, "kube-system", "tigera-operator", metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to get calico pods: %w", err)
}
if !operatorReady {
return false, nil
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodReady && condition.Status == v1.ConditionTrue {
return true
}
}

calicoPods, err := client.ListPods(ctx, "calico-system", metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to get calico pods: %w", err)
}
calicoApiserverPods, err := client.ListPods(ctx, "calico-apiserver", metav1.ListOptions{})
return false
}

// CheckNetwork checks the status of the Calico pods in the Kubernetes cluster.
// We verify that the tigera-operator and calico-node pods are Ready and in Running state.
func CheckNetwork(ctx context.Context, snap snap.Snap) (bool, error) {
client, err := snap.KubernetesClient("calico-system")
if err != nil {
return false, fmt.Errorf("failed to get calico-apiserver pods: %w", err)
return false, fmt.Errorf("failed to create kubernetes client: %w", err)
}

for _, pod := range append(calicoPods, calicoApiserverPods...) {
isReady, err := client.IsPodReady(ctx, pod.Name, "calico-system", metav1.ListOptions{})
for _, check := range []struct {
name string
namespace string
labels map[string]string
}{
// check that the tigera-operator pods are ready
{name: "tigera-operator", namespace: "tigera-operator", labels: map[string]string{"k8s-app": "tigera-operator"}},
// check that calico-node pods are ready
{name: "calico-node", namespace: "calico-system", labels: map[string]string{"app.kubernetes.io/name": "calico-node"}},
} {
pods, err := client.ListPods(ctx, check.namespace, metav1.ListOptions{
LabelSelector: metav1.FormatLabelSelector(&metav1.LabelSelector{MatchLabels: check.labels}),
})
if err != nil {
return false, fmt.Errorf("failed to check if pod %q is ready: %w", pod.Name, err)
return false, fmt.Errorf("failed to get %v pods: %w", check.name, err)
}
if !isReady {
return false, nil
if len(pods) == 0 {
return false, fmt.Errorf("no %v pods exist on the cluster", check.name)
}

for _, pod := range pods {
if !podIsReady(pod) {
return false, fmt.Errorf("%v pod %q not ready", check.name, pod.Name)
}
}
}

Expand Down
2 changes: 1 addition & 1 deletion src/k8s/pkg/k8sd/features/cilium/status.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ func CheckNetwork(ctx context.Context, snap snap.Snap) (bool, error) {
return false, fmt.Errorf("failed to check if pod %q is ready: %w", ciliumPod, err)
}
if !isReady {
return false, nil
return false, fmt.Errorf("cilium pod %q is not yet ready", ciliumPod)
}
}

Expand Down
3 changes: 3 additions & 0 deletions src/k8s/pkg/k8sd/features/coredns/status.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,9 @@ func CheckDNS(ctx context.Context, snap snap.Snap) (bool, error) {
if err != nil {
return false, fmt.Errorf("failed to wait for CoreDNS pod to be ready: %w", err)
}
if !isReady {
return false, fmt.Errorf("coredns pod not ready yet")
}

return isReady, nil
}

0 comments on commit 6772673

Please sign in to comment.