sc2: fix containerd toml population #349
Workflow file for this run

name: "Integration tests"
on:
push:
branches: [main]
pull_request:
branches: [main]
types: [opened, synchronize, reopened, ready_for_review]
defaults:
run:
shell: bash
# Cancel previous running actions for the same PR
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
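# cancel-in-progress evaluates to false on pushes to main, so runs on main always
# complete; superseded runs for the same PR branch are cancelled.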
jobs:
checks:
if: github.event.pull_request.draft == false
runs-on: ubuntu-24.04
steps:
- name: "Checkout code"
uses: actions/checkout@v4
# Formatting checks
- name: "Python formatting check"
run: ./bin/inv_wrapper.sh format-code --check
# Rust formatting and lint checks
- name: "Run cargo lints"
run: |
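# cargo fmt --check fails on unformatted code; clippy with -D warnings turns
# all warnings into hard errors, so either one failing fails the job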
for dir in "./vm-cache/" "./tools/check-kata-hashes" "./tools/tee-detect" "./tools/purge-containerd"; do
pushd "${dir}" > /dev/null
cargo fmt --all -- --check
cargo clippy -- -D warnings
popd > /dev/null
done
- name: "Check Kata hashes match"
run: cargo run --release
working-directory: ./tools/check-kata-hashes
integration-tests:
if: github.event.pull_request.draft == false
strategy:
fail-fast: false
matrix:
include:
- tee: snp
runtime_classes: "qemu qemu-coco-dev qemu-snp qemu-snp-sc2"
- tee: tdx
runtime_classes: "qemu qemu-coco-dev qemu-tdx qemu-tdx-sc2"
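# Each TEE exercises the stock Kata runtime classes plus the SC2 variant;
# runners are self-hosted and labelled with the TEE they provide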
runs-on: [self-hosted, "${{ matrix.tee }}"]
env:
KUBECONFIG: .config/kubeadm_kubeconfig
steps:
- name: "Checkout code"
uses: actions/checkout@v4
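# Deploy SC2 on the runner before testing; --clean presumably starts from a
# fresh install rather than reusing whatever a previous run left behind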
- name: "Install SC2"
run: ./bin/inv_wrapper.sh sc2.deploy --clean
- name: "Run python hello world"
run: |
for runtime_class in ${{ matrix.runtime_classes }}; do
echo "Running test for ${runtime_class}..."
export SC2_RUNTIME_CLASS=${runtime_class}
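# envsubst fills SC2_RUNTIME_CLASS into the manifest, so the same deployment
# is tested once per runtime class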
envsubst < ./demo-apps/helloworld-py/deployment.yaml | ./bin/kubectl apply -f -
# Wait for pod to be ready
until [ "$(./bin/kubectl get pods -l ${{ env.POD_LABEL }} -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}')" = "True" ]; do echo "Waiting for pod to be ready..."; sleep 2; done
sleep 1
# Get the service's cluster IP
service_ip=$(./bin/kubectl get services -o jsonpath='{.items[?(@.metadata.name=="coco-helloworld-py-node-port")].spec.clusterIP}')
[ "$(curl --retry 3 -X GET ${service_ip}:8080)" = "Hello World!" ]
envsubst < ./demo-apps/helloworld-py/deployment.yaml | ./bin/kubectl delete -f -
# Wait for pod to be deleted
./bin/kubectl wait --for=delete -l ${{ env.POD_LABEL }} pod --timeout=30s
# Extra cautionary sleep
sleep 5
echo "Test for ${runtime_class} successful!"
done
env:
POD_LABEL: apps.sc2.io/name=helloworld-py
- name: "Run Knative hello world"
run: |
for runtime_class in ${{ matrix.runtime_classes }}; do
echo "Running test for ${runtime_class}..."
export SC2_RUNTIME_CLASS=${runtime_class}
envsubst < ./demo-apps/helloworld-knative/service.yaml | ./bin/kubectl apply -f -
sleep 1
# Get the service URL
service_url=$(./bin/kubectl get ksvc helloworld-knative --output=custom-columns=URL:.status.url --no-headers)
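# The first request may have to scale the service up from zero, hence the
# curl retries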
[ "$(curl --retry 3 ${service_url})" = "Hello World!" ]
envsubst < ./demo-apps/helloworld-knative/service.yaml | ./bin/kubectl delete -f -
# Wait for pod to be deleted
./bin/kubectl wait --for=delete -l ${{ env.POD_LABEL }} pod --timeout=60s
# Extra cautionary sleep
sleep 5
echo "Test for ${runtime_class} successful!"
done
env:
POD_LABEL: apps.sc2.io/name=helloworld-py
- name: "Enable default-memory annotation"
run: |
for runtime_class in ${{ matrix.runtime_classes }}; do
./bin/inv_wrapper.sh kata.enable-annotation default_memory --runtime ${runtime_class}
# We rely on the last value of the loop variable being the runtime class we
# want to use for the VM cache below
export SC2_RUNTIME_CLASS=${runtime_class}
done
# After changing the annotation of the SC2 runtime class we need to
# restart the VM cache
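# vm-cache presumably keeps pre-started VMs around, so it must be restarted
# for new pods to pick up the annotation change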
sudo -E ./vm-cache/target/release/vm-cache stop
sudo -E ./vm-cache/target/release/vm-cache background
- name: "Run knative chaining demo"
run: |
for runtime_class in ${{ matrix.runtime_classes }}; do
echo "Running test for ${runtime_class}..."
export SC2_RUNTIME_CLASS=${runtime_class}
envsubst < ./demo-apps/knative-chaining/chaining.yaml | ./bin/kubectl apply -f -
sleep 1
# Curl the channel URL
./demo-apps/knative-chaining/curl_cmd.sh
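# The chain spans three services; the third one logs 'cloudevent(s3): done!'
# once the whole chain has completed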
# Wait for pod 3 to be scaled down
until [ "$(kubectl -n ${{ env.NAMESPACE }} logs -l ${{ env.POD_LABEL_THREE }} | grep 'cloudevent(s3): done!' | wc -l)" = "1" ]; do echo "Waiting for chain to finish..."; sleep 2; done
# Finally, clean up
envsubst < ./demo-apps/knative-chaining/chaining.yaml | ./bin/kubectl delete -f -
# Extra cautionary sleep
sleep 5
echo "Test for ${runtime_class} successful!"
done
env:
NAMESPACE: chaining-test
POD_LABEL_ONE: apps.sc2.io/name=knative-chaining-one
POD_LABEL_THREE: apps.sc2.io/name=knative-chaining-three
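# Always tear down SC2, even if a previous step failed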
- name: "Clean-up"
if: always()
run: ./bin/inv_wrapper.sh sc2.destroy