# --- Pasted GitHub web-UI header (not part of the workflow file) ---
# Skip to content
#
# Run tests by @samos123 #959
#
# Run tests by @samos123
#
# Run tests by @samos123 #959
#
# Workflow file for this run
---
name: Tests
run-name: Run tests by @${{ github.actor }}

# Run on every push to main and on every pull request.
on:
  push:
    branches:
      - main
  pull_request:

jobs:
  # Fast checks: unit + integration tests on a stock runner.
  unit-and-integration:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Run unit tests
        run: make test-unit
      - name: Run integration tests
        run: make test-integration

  # End-to-end tests that need a real GPU cluster, reached via a
  # kubeconfig stored in the TEST_KUBECONFIG secret (base64-encoded).
  e2e-gpu:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        testcase:
          - "engine-vllm-adapters"
    # Timeout to address Github actions stuck scenarios.
    timeout-minutes: 60
    # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
    # This is required to push the image to the container registry.
    permissions:
      contents: read
      packages: write
    env:
      TEST_KUBECONFIG: ${{ secrets.TEST_KUBECONFIG }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Set kubeconfig to point to GPU cluster
        run: |
          base64 -d <<< "$TEST_KUBECONFIG" > /tmp/kubeconfig
          echo "KUBECONFIG=/tmp/kubeconfig" >> "$GITHUB_ENV"
      - name: Create and change namespace
        # A unique per-run namespace isolates concurrent workflow runs
        # on the shared GPU cluster.
        run: |
          NAMESPACE=kubeai-${{ github.run_id }}
          kubectl create namespace "$NAMESPACE"
          kubectl config set-context --current --namespace="$NAMESPACE"
          # FIX: was "$namespace" (lowercase, never set) — the exported
          # value was empty, so the cleanup step below deleted nothing
          # and leaked the namespace on the cluster.
          echo "NAMESPACE=$NAMESPACE" >> "$GITHUB_ENV"
      - name: Run the e2e testcase
        run: make test-e2e-${{ matrix.testcase }}
        env:
          USE_GPU_CLUSTER: "true"
          SKAFFOLD_DEFAULT_REPO: "ghcr.io/substratusai/"
      - name: Delete namespace
        # Clean up the per-run namespace even when the test step fails.
        if: always()
        run: |
          kubectl delete namespace "$NAMESPACE"

  # End-to-end tests that run against a local kind cluster.
  e2e-general:
    runs-on: ubuntu-latest
    # NOTE: Uncomment if we start getting limited on number of concurrent jobs
    # (due to rapid pushes, etc).
    #needs: unit-and-integration # No use in running e2e tests if integration tests fail.
    strategy:
      matrix:
        testcase:
          - "quickstart"
          - "openai-python-client"
          - "autoscaler-restart"
          - "cache-shared-filesystem"
          - "engine-vllm-pvc"
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install kind
        run: |
          curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.24.0/kind-linux-amd64
          chmod +x ./kind
          sudo mv ./kind /usr/local/bin/kind
      - name: Install helm
        run: |
          curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
          chmod 700 get_helm.sh
          ./get_helm.sh
      - name: Start kind cluster
        run: kind create cluster
      - name: Run the e2e testcase
        run: make test-e2e-${{ matrix.testcase }}

  # Per-engine e2e matrix on a local kind cluster, each engine run
  # with and without a cache profile.
  e2e-engines:
    runs-on: ubuntu-latest
    # NOTE: Uncomment if we start getting limited on number of concurrent jobs
    # (due to rapid pushes, etc).
    #needs: unit-and-integration # No use in running e2e tests if integration tests fail.
    strategy:
      matrix:
        engine: ["FasterWhisper"] # "VLLM", "Infinity", "OLlama"
        # Run each test case with and without caching.
        cacheProfile: ["", "e2e-test-kind-pv"]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install kind
        run: |
          curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.24.0/kind-linux-amd64
          chmod +x ./kind
          sudo mv ./kind /usr/local/bin/kind
      - name: Install helm
        run: |
          curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
          chmod 700 get_helm.sh
          ./get_helm.sh
      - name: Start kind cluster
        run: kind create cluster
      - name: Run the e2e testcase
        run: make test-e2e-engine ENGINE=${{ matrix.engine }} CACHE_PROFILE=${{ matrix.cacheProfile }}