➕ 📝 Disable Redoc, replace Swagger with Scalar #3921
Workflow file for this run
name: CICD
on:
  workflow_dispatch:
    inputs:
      run-reset-deployments:
        description: "Reset deployment: Clean start"
        required: false
        default: false
        type: boolean
      run-tests:
        description: "Run tests step"
        required: false
        default: true
        type: boolean
      run-regression-tests:
        description: "Run regression tests step"
        required: false
        default: true
        type: boolean
  push:
    branches:
      - "development"
    tags:
      - "v*"
  pull_request:
    branches:
      - "development"
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review
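# All runs share the single "cicd" concurrency group below, so workflow runs queue up
# one after another instead of cancelling runs that are already in progress.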
concurrency:
  group: cicd
  cancel-in-progress: false
env:
  TAILSCALE_VERSION: 1.74.0
  HELMFILE_VERSION: v0.168.0
  HELM_VERSION: v3.16.1
jobs:
  build:
    if: github.event.pull_request.draft == false
    name: Build GHCR
    permissions:
      id-token: write # This is required for requesting the JWT
      packages: write # To push to GHCR.io
    runs-on: ubuntu-latest
    outputs:
      image_version: ${{ steps.meta.outputs.version }}
    strategy:
      fail-fast: false
      matrix:
        image:
          [
            ledger-nodes,
            ledger-browser,
            tails-server,
            governance-ga-agent,
            governance-trust-registry,
            governance-multitenant-web,
            governance-ga-web,
            governance-tenant-web,
            governance-public-web,
            governance-webhooks-web,
            governance-multitenant-agent,
            governance-endorser,
            pytest,
            waypoint
          ]
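        # Per-image build settings: each entry below pins the Docker build context,
        # Dockerfile and target platforms for one image in the matrix above.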
        include:
          - image: governance-ga-agent
            context: .
            file: dockerfiles/agents/Dockerfile.agent
            platforms: linux/amd64 # Pending BBS linux/arm64
          - image: governance-trust-registry
            context: .
            file: dockerfiles/trustregistry/Dockerfile
            platforms: linux/amd64,linux/arm64
          - image: governance-multitenant-web
            context: .
            file: dockerfiles/fastapi/Dockerfile
            platforms: linux/amd64,linux/arm64
          - image: governance-ga-web
            context: .
            file: dockerfiles/fastapi/Dockerfile
            platforms: linux/amd64,linux/arm64
          - image: governance-tenant-web
            context: .
            file: dockerfiles/fastapi/Dockerfile
            platforms: linux/amd64,linux/arm64
          - image: governance-public-web
            context: .
            file: dockerfiles/fastapi/Dockerfile
            platforms: linux/amd64,linux/arm64
          - image: governance-webhooks-web
            context: .
            file: dockerfiles/webhooks/Dockerfile
            platforms: linux/amd64,linux/arm64
          - image: governance-multitenant-agent
            context: .
            file: dockerfiles/agents/Dockerfile.author.agent
            platforms: linux/amd64 # Pending BBS linux/arm64
          - image: ledger-browser
            context: https://github.com/bcgov/von-network.git#v1.8.0
            file: Dockerfile
            platforms: linux/amd64
          - image: ledger-nodes
            context: https://github.com/bcgov/von-network.git#v1.8.0
            file: Dockerfile
            platforms: linux/amd64
          - image: governance-endorser
            context: .
            file: dockerfiles/endorser/Dockerfile
            platforms: linux/amd64,linux/arm64
          - image: tails-server
            context: https://github.com/bcgov/indy-tails-server.git#v1.1.0
            file: docker/Dockerfile.tails-server
            platforms: linux/amd64,linux/arm64
          - image: pytest
            context: .
            file: dockerfiles/tests/Dockerfile
            platforms: linux/amd64,linux/arm64
          - image: waypoint
            context: .
            file: dockerfiles/waypoint/Dockerfile
            platforms: linux/amd64,linux/arm64
    steps:
      - name: Check out code
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}
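      # Derive image tags: `latest` on the default branch, commit-SHA tags prefixed with
      # the PR number or branch name, and semver tags when a version tag (v*) is pushed.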
      - name: Docker Metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}
          tags: |
            type=raw,value=latest,enable=${{ github.event.repository.default_branch == github.ref_name }}
            type=sha,prefix=pr-${{ github.event.pull_request.number }}-,priority=601,enable=${{ github.event_name == 'pull_request' }}
            type=sha,prefix={{branch}}-,priority=601,enable=${{ github.event_name == 'push' && github.ref_type == 'branch' }}
            type=ref,event=branch,priority=600
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
      - name: Build and push Docker images
        uses: docker/build-push-action@v6
        with:
          context: ${{ matrix.context }}
          file: ${{ matrix.file }}
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: |
            type=gha,scope=build-${{ matrix.image }}
            type=registry,ref=ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}:latest
          cache-to: type=gha,mode=max,scope=build-${{ matrix.image }}
          platforms: ${{ matrix.platforms }}
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Set up Mise
        uses: jdx/mise-action@v2
        with:
          cache: true
          experimental: true # Required for mise tasks
          install: true
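      # Export mise-managed environment variables (everything except PATH) into
      # GITHUB_ENV so they are available to the remaining steps in this job.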
      - name: Load Mise env
        run: |
          mise env -s bash \
          | grep -v 'export PATH=' \
          | cut -d' ' -f2 \
          >> "$GITHUB_ENV"
      - name: Install dependencies with Poetry
        run: mise run poetry:install:all
      - name: Run Pylint
        run: |
          poetry run pylint app/ endorser/ shared/ trustregistry/ waypoint/ webhooks/ --rcfile=.pylintrc -r n --msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" --exit-zero > pylintreport.txt
  test:
    if: github.event.pull_request.draft == false
    name: Test
    needs:
      - build
    runs-on: ubuntu-latest
    outputs:
      test_success: ${{ steps.test.outputs.test_success }}
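    # The pytest suite is split into five shards that run in parallel; each shard's id
    # names its coverage artifact so the combine-coverage job can merge them later.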
    strategy:
      fail-fast: true
      matrix:
        module:
          - { test: "app/tests/ endorser trustregistry waypoint webhooks --ignore=app/tests/e2e/", id: "1" } # fast tests, ignore e2e
          - { test: "app/tests/e2e/issuer/", id: "2" } # then some individual, slower e2e tests
          - { test: "app/tests/e2e/verifier/", id: "3" }
          - { test: "app/tests/e2e/test_definitions.py app/tests/e2e/test_revocation.py", id: "4" }
          - { test: "app/tests/e2e/ \
              --ignore=app/tests/e2e/issuer/ \
              --ignore=app/tests/e2e/verifier/ \
              --ignore=app/tests/e2e/test_definitions.py \
              --ignore=app/tests/e2e/test_revocation.py", id: "5" } # all other e2e tests
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Set up Mise
        uses: jdx/mise-action@v2
        with:
          cache: true
          experimental: true # Required for mise tasks
          install: true
      - name: Load Mise env
        run: |
          mise env -s bash \
          | grep -v 'export PATH=' \
          | cut -d' ' -f2 \
          >> "$GITHUB_ENV"
      - name: Install dependencies with Poetry
        run: mise run poetry:install:all
      - uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}
      - name: Authenticate GitHub CLI
        run: echo "${{ secrets.PAT }}" | gh auth login --with-token
      - name: Set branch name
        id: set_branch_name
        run: |
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            echo "BRANCH_NAME=${{ github.head_ref }}" >> $GITHUB_ENV
          else
            echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | cut -d/ -f2-)" >> $GITHUB_ENV
          fi
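      # Use the branch of didx-xyz/charts matching this branch name if it exists;
      # otherwise fall back to the charts default branch (master).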
      - name: Check if branch exists
        id: check_branch
        run: |
          DEFAULT_BRANCH=master
          SANITIZED_BRANCH_NAME=$(echo "$BRANCH_NAME" | sed 's/\//%2F/g')
          if gh api "repos/didx-xyz/charts/git/ref/heads/$SANITIZED_BRANCH_NAME" &> /dev/null; then
            echo "branch_exists=true" >> $GITHUB_ENV
            echo "branch_name=$BRANCH_NAME" >> $GITHUB_ENV
          else
            echo "branch_exists=false" >> $GITHUB_ENV
            echo "branch_name=$DEFAULT_BRANCH" >> $GITHUB_ENV
          fi
      - name: Checkout Charts
        uses: actions/checkout@v4
        with:
          repository: didx-xyz/charts
          token: ${{ secrets.PAT }}
          path: tilt/.charts
          ref: ${{ env.branch_name }}
      - name: Start Test Harness
        run: mise run tilt:ci
        shell: bash
        env:
          REGISTRY: ghcr.io/${{ github.repository_owner }}
          IMAGE_TAG: ${{ needs.build.outputs.image_version }}
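      # Run this shard's tests. A single "ERROR at teardown" with no test failures is
      # tolerated; the real outcome is exposed as test_success for the status-check job.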
      - name: Test with pytest
        id: test
        run: |
          source .venv/bin/activate
          set +e
          cp .env.example .env
          source .env
          poetry run pytest --durations=0 ${{ matrix.module.test }} --cov | tee test_output.txt
          EXIT_CODE=${PIPESTATUS[0]}
          set -e
          echo "Exit code: $EXIT_CODE"
          mkdir -p coverage-files
          mv .coverage coverage-files/.coverage.${{ matrix.module.id }}
          # very hacky way to get around the fact that teardown fails even if tests pass
          TEARDOWN_ERROR=false
          SINGLE_ERROR=false
          TEST_FAILURES=0
          if grep -q "ERROR at teardown" test_output.txt; then
            echo "ERROR at teardown"
            TEARDOWN_ERROR=true
          fi
          if grep -q ", 1 error in" test_output.txt; then
            echo "Only 1 error total"
            SINGLE_ERROR=true
          fi
          # Count the number of test failures
          TEST_FAILURES=$(grep -c "^FAILED" test_output.txt || true)
          echo "Number of test failures: $TEST_FAILURES"
          if [ "$TEARDOWN_ERROR" = true ] && [ "$SINGLE_ERROR" = true ] && [ "$TEST_FAILURES" -eq 0 ]; then
            echo "Tests passed with teardown error"
            exit 0
          else
            if [ "$EXIT_CODE" -ne 0 ]; then
              echo "test_success=false" >> $GITHUB_OUTPUT
            else
              echo "test_success=true" >> $GITHUB_OUTPUT
            fi
            exit $EXIT_CODE
          fi
          pwd
      - name: Upload .coverage files as artifact
        uses: actions/upload-artifact@v4
        with:
          name: coverage-files-${{ matrix.module.id }}
          path: coverage-files/.coverage.${{ matrix.module.id }}
          include-hidden-files: true
      - name: Get Docker Containers
        if: always()
        run: docker ps -a
      - name: Get Pods
        if: always()
        run: kubectl get pods --all-namespaces
      ## Benthos generates too many logs.
      ## Uncomment if needed. (be careful)
      # - name: Benthos Logs
      #   if: always()
      #   run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=benthos --tail 10000
      - name: Docker Cache Logs
        if: always()
        run: docker logs cache-docker
      - name: Endorser Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-endorser --tail 10000
      - name: Governance Agent Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-ga-agent --tail 10000
      - name: Governance Web Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-ga-web --tail 10000
      - name: Ingress Nginx Logs
        if: always()
        run: kubectl logs -n ingress-system -l app.kubernetes.io/instance=ingress-nginx --tail 10000
      - name: Ledger Browser Logs (kind)
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=ledger-browser --tail 10000
      - name: Ledger Browser Logs (docker)
        if: always()
        run: docker logs ledger-browser
      - name: Ledger Nodes Logs (kind)
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=ledger-nodes --tail 10000
      - name: Ledger Nodes Logs (docker)
        if: always()
        run: docker logs ledger-nodes
      - name: Mediator Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=mediator --tail 10000
      - name: Multitenant Agent Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-multitenant-agent --tail 10000
      - name: Multitenant Web Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-multitenant-web --tail 10000
      - name: NATS Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=nats --tail 10000
      - name: PGPool Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=postgres,app.kubernetes.io/component=pgpool --tail 10000
      - name: PostgreSQL Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=postgres,app.kubernetes.io/component=postgresql --tail 10000
      - name: Public Web Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-trust-registry-web --tail 10000
      - name: Redis Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=redis --tail 10000
      - name: Tails Server Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=tails-server --tail 10000
      - name: Tenant Web Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-tenant-web --tail 10000
      - name: Trust Registry Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-trust-registry --tail 10000
      - name: Waypoint Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=waypoint --tail 10000
      - name: Webhooks Web Logs
        if: always()
        run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-webhooks-web --tail 10000
      - name: Tilt Down Destroy
        if: always()
        run: mise run tilt:down:destroy
  status-check:
    name: Status Check
    runs-on: ubuntu-latest
    needs: test
    if: always()
    steps:
      - name: Check if any test failed
        run: exit 1
        if: needs.test.outputs.test_success == 'false'
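  # Aggregates the test matrix into one job, presumably so a single required status check
  # can gate merges; it fails whenever any shard reported test_success=false.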
  combine-coverage:
    if: github.event.pull_request.draft == false
    name: Coverage
    runs-on: ubuntu-latest
    needs: status-check
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Set up Mise
        uses: jdx/mise-action@v2
        with:
          cache: true
          experimental: true # Required for mise tasks
          install: true
      - name: Load Mise env
        run: |
          mise env -s bash \
          | grep -v 'export PATH=' \
          | cut -d' ' -f2 \
          >> "$GITHUB_ENV"
      - name: Install dependencies
        run: |
          source .venv/bin/activate
          pip install coverage
      - name: Download all .coverage artifacts
        uses: actions/download-artifact@v4
        with:
          path: coverage-files
          pattern: "coverage-*"
      - name: Move coverage files to top-level directory
        run: |
          for dir in coverage-files/coverage-files-*; do
            mv "$dir"/.coverage.* .
          done
      - name: Combine coverage files
        run: |
          source .venv/bin/activate
          coverage combine
          coverage report
      - name: Generate XML coverage report
        run: |
          source .venv/bin/activate
          coverage xml
      - name: Upload coverage to Codacy
        run: bash <(curl -Ls https://coverage.codacy.com/get.sh) report -r coverage.xml
        env:
          CODACY_PROJECT_TOKEN: ${{ secrets.CODACY_PROJECT_TOKEN }}
  deploy:
    if: github.actor != 'dependabot[bot]' && github.event.pull_request.draft == false
    name: Deploy to EKS
    environment:
      name: dev
    needs:
      - build
    permissions:
      id-token: write # Required to authenticate with AWS
      contents: read # Required to clone this repository
      checks: write # Required for action-junit-report
      pull-requests: write # Required to comment on PRs for Pytest coverage comment
    runs-on: ubuntu-latest
    timeout-minutes: 30
    outputs:
      output: ${{ steps.updated_deployments.outputs.success }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Mise
        uses: jdx/mise-action@v2
        with:
          cache: true
          experimental: true # Required for mise tasks
          install: true
      - name: Load Mise env
        run: |
          mise env -s bash \
          | grep -v 'export PATH=' \
          | cut -d' ' -f2 \
          >> "$GITHUB_ENV"
      - name: Authenticate GitHub CLI
        run: echo "${{ secrets.PAT }}" | gh auth login --with-token
      - name: Set branch name
        id: set_branch_name
        run: |
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            echo "BRANCH_NAME=${{ github.head_ref }}" >> $GITHUB_ENV
          else
            echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | cut -d/ -f2-)" >> $GITHUB_ENV
          fi
      - name: Check if branch exists
        id: check_branch
        run: |
          DEFAULT_BRANCH=master
          SANITIZED_BRANCH_NAME=$(echo "$BRANCH_NAME" | sed 's/\//%2F/g')
          if gh api "repos/didx-xyz/charts/git/ref/heads/$SANITIZED_BRANCH_NAME" &> /dev/null; then
            echo "branch_exists=true" >> $GITHUB_ENV
            echo "branch_name=$BRANCH_NAME" >> $GITHUB_ENV
          else
            echo "branch_exists=false" >> $GITHUB_ENV
            echo "branch_name=$DEFAULT_BRANCH" >> $GITHUB_ENV
          fi
      - name: Checkout Charts
        uses: actions/checkout@v4
        with:
          repository: didx-xyz/charts
          token: ${{ secrets.PAT }}
          path: charts
          ref: ${{ env.branch_name }}
      - name: Install dependencies
        run: sudo apt-get install -y postgresql-client redis-tools
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: af-south-1
          role-to-assume: arn:aws:iam::402177810328:role/cicd
          role-session-name: github-cicd
      - name: Update Kubeconfig
        run: aws eks update-kubeconfig --name cloudapi-dev
      - uses: tailscale/github-action@main
        with:
          authkey: ${{ secrets.TAILSCALE_AUTHKEY }}
          version: ${{ env.TAILSCALE_VERSION }}
      - name: Helmfile Destroy
        id: destroy_deployments
        if: github.event.inputs.run-reset-deployments == 'true'
        uses: helmfile/helmfile-action@v1.9.2
        with:
          helmfile-args: |
            destroy \
              --environment ${{ vars.ENVIRONMENT }} \
              -f ./charts/helmfiles/aries-cloudapi-python.yaml.gotmpl
          helm-plugins: |
            https://github.com/databus23/helm-diff
          helmfile-version: ${{ env.HELMFILE_VERSION }}
          helm-version: ${{ env.HELM_VERSION }}
        env:
          IMAGE_TAG: ${{ needs.build.outputs.image_version }}
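      # The reset steps below only run on a manual dispatch with run-reset-deployments:
      # drop the wallet and trust-registry databases, flush ElastiCache Redis, and
      # recreate the NATS key-value buckets and event stream from scratch.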
      - name: Drop DBs
        id: drop_dbs
        if: github.event.inputs.run-reset-deployments == 'true'
        env:
          DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
          DB_HOST: ${{ secrets.DB_HOST }}
          DB_USER: ${{ secrets.DB_USER }}
          DB_PORT: ${{ secrets.DB_PORT }}
          DB_EXCLUDE: ${{ secrets.DB_EXCLUDE }}
          GA_ACAPY_WALLET_NAME: ${{ secrets.GA_ACAPY_WALLET_NAME }}
          MT_ACAPY_WALLET_NAME: ${{ secrets.MT_ACAPY_WALLET_NAME }}
          TRUST_REGISTRY_DB_OWNER: ${{ secrets.TRUST_REGISTRY_DB_OWNER }}
        run: |
          bash ./scripts/aurora-delete.sh -o $GA_ACAPY_WALLET_NAME -d
          bash ./scripts/aurora-delete.sh -o $MT_ACAPY_WALLET_NAME -d
          bash ./scripts/aurora-delete.sh -o $TRUST_REGISTRY_DB_OWNER -d
          bash ./scripts/aurora-delete.sh -o $TRUST_REGISTRY_DB_OWNER -c
      - name: List Elasticache Redis
        if: github.event.inputs.run-reset-deployments == 'true'
        env:
          REDIS_HOST: ${{ secrets.REDIS_HOST }}
          REDIS_PORT: ${{ secrets.REDIS_PORT }}
          REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD }}
        run: |
          redis-cli \
            -h $REDIS_HOST \
            -p $REDIS_PORT \
            --tls --pass $REDIS_PASSWORD \
            --scan --pattern '*'
      - name: Clean Elasticache Redis
        if: github.event.inputs.run-reset-deployments == 'true'
        env:
          REDIS_HOST: ${{ secrets.REDIS_HOST }}
          REDIS_PORT: ${{ secrets.REDIS_PORT }}
          REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD }}
        run: |
          redis-cli \
            --tls --pass $REDIS_PASSWORD \
            --cluster call --cluster-only-masters \
            $REDIS_HOST:$REDIS_PORT \
            FLUSHALL ASYNC
      - name: Reset NATS
        if: github.event.inputs.run-reset-deployments == 'true'
        env:
          NAMESPACE: dev-cloudapi
        run: |
          kubectl get secret \
            -n ${NAMESPACE} \
            ${{ secrets.NATS_SECRET_NAME }} \
            -o jsonpath='{.data.cloudapi-nats-admin\.creds}' \
            | base64 -d > nats.creds
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            kv rm cloudapi_aries_sync_locks -f
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            kv rm cloudapi_aries_sync_tracking -f
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            kv rm cloudapi_aries_unique_events -f
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            stream rm cloudapi_aries_events -f
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            kv add cloudapi_aries_sync_locks --ttl 10s --replicas 3 --storage memory
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            kv add cloudapi_aries_sync_tracking --replicas 3
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            kv add cloudapi_aries_unique_events --replicas 3
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            stream add cloudapi_aries_events --subjects "cloudapi.aries.events.*.*" \
            --defaults \
            --storage file \
            --replicas 3 \
            --compression s2
          rm -f ./nats.creds
      - name: Helmfile Apply # Apply default helmfile (without RDS proxy) when resetting deployments.
        if: github.event.inputs.run-reset-deployments == 'true'
        uses: helmfile/helmfile-action@v1.9.2
        with:
          helmfile-args: |
            apply \
              --environment ${{ vars.ENVIRONMENT }} \
              -f ./charts/helmfiles/aries-cloudapi-python.yaml.gotmpl \
              --state-values-set image.tag=${{ env.IMAGE_TAG }} \
              --state-values-set image.registry=ghcr.io/${{ github.repository_owner }}
          helm-plugins: |
            https://github.com/databus23/helm-diff
          helmfile-version: ${{ env.HELMFILE_VERSION }}
          helm-version: ${{ env.HELM_VERSION }}
        env:
          IMAGE_TAG: ${{ needs.build.outputs.image_version }}
      - name: Helmfile Apply (RDS Proxy)
        if: github.event.inputs.run-reset-deployments != 'true'
        uses: helmfile/helmfile-action@v1.9.2
        with:
          helmfile-args: |
            apply \
              --environment ${{ vars.ENVIRONMENT }} \
              -f ./charts/helmfiles/aries-cloudapi-python.yaml.gotmpl \
              --state-values-set image.tag=${{ env.IMAGE_TAG }} \
              --state-values-set image.registry=ghcr.io/${{ github.repository_owner }} \
              --state-values-set rdsProxyEnabled=true
          helm-plugins: |
            https://github.com/databus23/helm-diff
          helmfile-version: ${{ env.HELMFILE_VERSION }}
          helm-version: ${{ env.HELM_VERSION }}
        env:
          IMAGE_TAG: ${{ needs.build.outputs.image_version }}
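  # Runs the pytest suite (and, when enabled, the regression suite) as Kubernetes Jobs
  # against the freshly deployed dev environment, then collects JUnit and coverage output.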
  test-eks:
    if: github.actor != 'dependabot[bot]' && github.event.pull_request.draft == false
    name: Run Pytest on EKS
    environment:
      name: dev
    needs:
      - build
      - deploy
    permissions:
      id-token: write # Required to authenticate with AWS
      contents: read # Required to clone this repository
      checks: write # Required for action-junit-report
      pull-requests: write # Required to comment on PRs for Pytest coverage comment
    runs-on: ubuntu-latest
    timeout-minutes: 20
    env:
      OUTPUT_FILE: test_output.xml
      COVERAGE_FILE: test_coverage.txt
      PYTEST_COMPLETIONS: 1
    outputs:
      output: ${{ steps.updated_deployments.outputs.success }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install dependencies
        run: sudo apt-get install -y redis-tools
      - name: Set up Mise
        uses: jdx/mise-action@v2
        with:
          cache: true
          experimental: true # Required for mise tasks
          install: true
      - name: Authenticate GitHub CLI
        run: echo "${{ secrets.PAT }}" | gh auth login --with-token
      - name: Set branch name
        id: set_branch_name
        run: |
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            echo "BRANCH_NAME=${{ github.head_ref }}" >> $GITHUB_ENV
          else
            echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | cut -d/ -f2-)" >> $GITHUB_ENV
          fi
      - name: Check if branch exists
        id: check_branch
        run: |
          DEFAULT_BRANCH=master
          if gh api repos/didx-xyz/charts/git/ref/heads/$BRANCH_NAME; then
            echo "branch_exists=true" >> $GITHUB_ENV
            echo "branch_name=$BRANCH_NAME" >> $GITHUB_ENV
          else
            echo "branch_exists=false" >> $GITHUB_ENV
            echo "branch_name=$DEFAULT_BRANCH" >> $GITHUB_ENV
          fi
      - name: Checkout Charts
        uses: actions/checkout@v4
        with:
          repository: didx-xyz/charts
          token: ${{ secrets.PAT }}
          path: charts
          ref: ${{ env.branch_name }}
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: af-south-1
          role-to-assume: arn:aws:iam::402177810328:role/cicd
          role-session-name: github-cicd
      - name: Update Kubeconfig
        run: aws eks update-kubeconfig --name cloudapi-dev
      - uses: tailscale/github-action@main
        with:
          authkey: ${{ secrets.TAILSCALE_AUTHKEY }}
          version: ${{ env.TAILSCALE_VERSION }}
      - name: Helmfile init regression pytest
        if: github.event.inputs.run-reset-deployments == 'true' && github.event.inputs.run-regression-tests == 'true'
        id: pytest-init-regression
        uses: helmfile/helmfile-action@v1.9.2
        with:
          helmfile-args: |
            apply \
              --environment ${{ vars.ENVIRONMENT }} \
              -f ./charts/helmfiles/aries-capi-test.yaml \
              --set image.tag=${{ env.IMAGE_TAG }} \
              --set image.registry=ghcr.io/${{ github.repository_owner }} \
              --set completions=${{ env.PYTEST_COMPLETIONS }} \
              --state-values-set release=cloudapi-pytest-regression \
              --set fullnameOverride=cloudapi-pytest-regression \
              --set env.RUN_REGRESSION_TESTS="true" \
              --state-values-set regressionEnabled=true
          helm-plugins: |
            https://github.com/databus23/helm-diff
          helmfile-version: ${{ env.HELMFILE_VERSION }}
          helm-version: ${{ env.HELM_VERSION }}
        env:
          IMAGE_TAG: ${{ needs.build.outputs.image_version }}
      - name: Helmfile run regression pytest
        if: ${{ github.event_name != 'workflow_dispatch' || (github.event_name == 'workflow_dispatch' && github.event.inputs.run-reset-deployments == 'false' && github.event.inputs.run-regression-tests == 'true') }}
        id: pytest-run-regression
        uses: helmfile/helmfile-action@v1.9.2
        with:
          helmfile-args: |
            apply \
              --environment ${{ vars.ENVIRONMENT }} \
              -f ./charts/helmfiles/aries-capi-test.yaml \
              --set image.tag=${{ env.IMAGE_TAG }} \
              --set image.registry=ghcr.io/${{ github.repository_owner }} \
              --set completions=${{ env.PYTEST_COMPLETIONS }} \
              --state-values-set release=cloudapi-pytest-regression \
              --set fullnameOverride=cloudapi-pytest-regression \
              --set env.RUN_REGRESSION_TESTS="true" \
              --set env.FAIL_ON_RECREATING_FIXTURES="true" \
              --state-values-set regressionEnabled=true
          helm-plugins: |
            https://github.com/databus23/helm-diff
          helmfile-version: ${{ env.HELMFILE_VERSION }}
          helm-version: ${{ env.HELM_VERSION }}
        env:
          IMAGE_TAG: ${{ needs.build.outputs.image_version }}
      - name: Helmfile run pytest
        if: ${{ github.event_name != 'workflow_dispatch' || (github.event_name == 'workflow_dispatch' && github.event.inputs.run-tests != 'false') }}
        id: pytest
        uses: helmfile/helmfile-action@v1.9.2
        with:
          helmfile-args: |
            apply \
              --environment ${{ vars.ENVIRONMENT }} \
              -f ./charts/helmfiles/aries-capi-test.yaml \
              --set image.tag=${{ env.IMAGE_TAG }} \
              --set image.registry=ghcr.io/${{ github.repository_owner }} \
              --set completions=${{ env.PYTEST_COMPLETIONS }} \
              --state-values-set release=cloudapi-pytest \
              --set fullnameOverride=cloudapi-pytest
          helm-plugins: |
            https://github.com/databus23/helm-diff
          helmfile-version: ${{ env.HELMFILE_VERSION }}
          helm-version: ${{ env.HELM_VERSION }}
        env:
          IMAGE_TAG: ${{ needs.build.outputs.image_version }}
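      # Poll the pytest Job until it either reaches the expected number of completions
      # or records a failure, then dump the logs of every pod the Job created.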
      - name: Wait for pytest and print logs
        if: steps.pytest.outcome == 'success'
        run: |
          while true; do
            # Check if the job is complete or failed
            COMPLETION_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.succeeded}')
            FAILURE_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.failed}')
            if [ "$COMPLETION_STATUS" == "${{ env.PYTEST_COMPLETIONS }}" ] || [ "$FAILURE_STATUS" == "1" ]; then
              echo "Job $JOB_NAME has completed."
              break
            else
              echo "Waiting for job to complete..."
              sleep 10
            fi
          done
          # Get all pods for the job
          pods=$(kubectl get pods -n $NAMESPACE --selector=job-name=$JOB_NAME -o jsonpath='{.items[*].metadata.name}')
          # Loop through the pods and get logs
          for pod in $pods
          do
            echo "Logs for Pod: $pod"
            kubectl logs -n $NAMESPACE $pod
          done
        env:
          JOB_NAME: cloudapi-pytest
          NAMESPACE: dev-cloudapi
      - name: Wait for pytest regression and print logs
        if: steps.pytest-init-regression.outcome == 'success' || steps.pytest-run-regression.outcome == 'success'
        run: |
          while true; do
            # Check if the job is complete or failed
            COMPLETION_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.succeeded}')
            FAILURE_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.failed}')
            if [ "$COMPLETION_STATUS" == "${{ env.PYTEST_COMPLETIONS }}" ] || [ "$FAILURE_STATUS" == "1" ]; then
              echo "Job $JOB_NAME has completed."
              break
            else
              echo "Waiting for job to complete..."
              sleep 10
            fi
          done
          # Get all pods for the job
          pods=$(kubectl get pods -n $NAMESPACE --selector=job-name=$JOB_NAME -o jsonpath='{.items[*].metadata.name}')
          # Loop through the pods and get logs
          for pod in $pods
          do
            echo "Logs for Pod: $pod"
            kubectl logs -n $NAMESPACE $pod
          done
        env:
          JOB_NAME: cloudapi-pytest-regression
          NAMESPACE: dev-cloudapi
      - name: Expire Elasticache Redis Keys
        env:
          REDIS_HOST: ${{ secrets.REDIS_HOST }}
          REDIS_PORT: ${{ secrets.REDIS_PORT }}
          REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD }}
        run: |
          chmod +x ./scripts/redis-expire.sh
          DEBUG=true ./scripts/redis-expire.sh
      - name: Scale down benthos
        run: |
          kubectl scale deployment benthos -n dev-cloudapi --replicas=0
      - name: Reset NATS
        env:
          NAMESPACE: dev-cloudapi
        run: |
          kubectl get secret \
            -n ${NAMESPACE} \
            ${{ secrets.NATS_SECRET_NAME }} \
            -o jsonpath='{.data.cloudapi-nats-admin\.creds}' \
            | base64 -d > nats.creds
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            kv rm cloudapi_aries_sync_locks -f
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            kv rm cloudapi_aries_sync_tracking -f
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            kv rm cloudapi_aries_unique_events -f
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            stream rm cloudapi_aries_events -f
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            kv add cloudapi_aries_sync_locks --ttl 10s --replicas 3 --storage memory
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            kv add cloudapi_aries_sync_tracking --replicas 3
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            kv add cloudapi_aries_unique_events --replicas 3
          nats --creds ./nats.creds \
            --server ${{ secrets.NATS_URL }} \
            stream add cloudapi_aries_events --subjects "cloudapi.aries.events.*.*" \
            --defaults \
            --storage file \
            --replicas 3 \
            --compression s2
          rm -f ./nats.creds
      - name: Scale benthos back up
        run: |
          kubectl scale deployment benthos -n dev-cloudapi --replicas=1
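      # Spin up a temporary busybox pod that mounts the pytest and pytest-regression
      # PVCs, copy the JUnit and coverage files to the runner, then delete the pod.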
      - name: Copy k8s pytest results
        if: steps.pytest.outcome == 'success' || steps.pytest-init-regression.outcome == 'success' || steps.pytest-run-regression.outcome == 'success'
        run: |
          echo "apiVersion: v1
          kind: Pod
          metadata:
            name: $POD_NAME
            namespace: $NAMESPACE
            labels:
              sidecar.istio.io/inject: \"false\"
          spec:
            containers:
              - name: $POD_NAME
                image: $CONTAINER_IMAGE
                command: [\"sleep\", \"3600\"]
                volumeMounts:
                  - name: pytest-volume
                    mountPath: $MOUNT_PATH/pytest
                  - name: pytest-regression-volume
                    mountPath: $MOUNT_PATH/pytest-regression
            volumes:
              - name: pytest-volume
                persistentVolumeClaim:
                  claimName: $PVC_NAME
              - name: pytest-regression-volume
                persistentVolumeClaim:
                  claimName: $PVC_NAME_REGRESSION
            restartPolicy: Never" > pytest-results-pod.yaml
          kubectl apply -f pytest-results-pod.yaml
          # Wait for the pod to be ready
          echo "Waiting for pod to be ready..."
          kubectl -n $NAMESPACE wait --for=condition=ready pod/$POD_NAME --timeout=60s
          # Copy the files from the pod to your local system
          echo "Copying files from pod..."
          mkdir -p $LOCAL_PATH $LOCAL_PATH_REGRESSION
          kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/pytest/$OUTPUT_FILE $LOCAL_PATH/$OUTPUT_FILE
          kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/pytest/$COVERAGE_FILE $LOCAL_PATH/$COVERAGE_FILE
          kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/pytest-regression/$OUTPUT_FILE $LOCAL_PATH_REGRESSION/$OUTPUT_FILE
          kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/pytest-regression/$COVERAGE_FILE $LOCAL_PATH_REGRESSION/$COVERAGE_FILE
          # Clean up: delete the temporary pod
          echo "Cleaning up..."
          kubectl -n $NAMESPACE delete pod $POD_NAME
          echo "Done!"
        env:
          PVC_NAME: cloudapi-pytest
          PVC_NAME_REGRESSION: cloudapi-pytest-regression
          POD_NAME: pytest-results-pod
          CONTAINER_IMAGE: busybox
          MOUNT_PATH: /mnt
          LOCAL_PATH: ./pytest
          LOCAL_PATH_REGRESSION: ./pytest-regression
          NAMESPACE: dev-cloudapi
          OUTPUT_FILE: test_output.xml
          COVERAGE_FILE: test_coverage.txt
      - name: Publish Pytest Report
        uses: mikepenz/action-junit-report@v4
        if: steps.pytest.outcome == 'success'
        with:
          check_name: JUnit Test Report
          report_paths: "./pytest/test_output.xml"
          fail_on_failure: true
          detailed_summary: true
          require_passed_tests: true
      - name: Publish Pytest Regression Report
        uses: mikepenz/action-junit-report@v4
        if: steps.pytest-init-regression.outcome == 'success' || steps.pytest-run-regression.outcome == 'success'
        with:
          check_name: JUnit Test Report Regression
          report_paths: "./pytest-regression/test_output.xml"
          fail_on_failure: true
          detailed_summary: true
          require_passed_tests: true
      - name: Pytest coverage comment
        if: steps.pytest.outcome == 'success'
        uses: MishaKav/pytest-coverage-comment@v1.1.52
        with:
          pytest-coverage-path: ./pytest/test_coverage.txt
          junitxml-path: ./pytest/test_output.xml
          create-new-comment: true
          title: "K8s Test Coverage"
          # Resolves `Warning: Your comment is too long (maximum is 65536 characters), coverage report will not be added.`
          hide-report: ${{ github.event_name != 'pull_request' }}
          hide-comment: ${{ github.event_name != 'pull_request' }}
      - name: Pytest regression coverage comment
        if: steps.pytest-init-regression.outcome == 'success' || steps.pytest-run-regression.outcome == 'success'
        uses: MishaKav/pytest-coverage-comment@v1.1.52
        with:
          pytest-coverage-path: ./pytest-regression/test_coverage.txt
          junitxml-path: ./pytest-regression/test_output.xml
          create-new-comment: true
          title: "K8s Regression Test Coverage"
          # Resolves `Warning: Your comment is too long (maximum is 65536 characters), coverage report will not be added.`
          hide-report: ${{ github.event_name != 'pull_request' }}
          hide-comment: ${{ github.event_name != 'pull_request' }}
      - name: Helmfile destroy pytest
        uses: helmfile/helmfile-action@v1.9.2
        if: always()
        with:
          helmfile-args: |
            destroy \
              --environment ${{ vars.ENVIRONMENT }} \
              -f ./charts/helmfiles/aries-capi-test.yaml \
              --state-values-set release=cloudapi-pytest
          helm-plugins: |
            https://github.com/databus23/helm-diff
          helmfile-version: ${{ env.HELMFILE_VERSION }}
          helm-version: ${{ env.HELM_VERSION }}
        env:
          IMAGE_TAG: ${{ needs.build.outputs.image_version }}
      - name: Helmfile destroy pytest regression
        uses: helmfile/helmfile-action@v1.9.2
        if: always()
        with:
          helmfile-args: |
            destroy \
              --environment ${{ vars.ENVIRONMENT }} \
              -f ./charts/helmfiles/aries-capi-test.yaml \
              --state-values-set release=cloudapi-pytest-regression
          helm-plugins: |
            https://github.com/databus23/helm-diff
          helmfile-version: ${{ env.HELMFILE_VERSION }}
          helm-version: ${{ env.HELM_VERSION }}
        env:
          IMAGE_TAG: ${{ needs.build.outputs.image_version }}
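  # Build the custom xk6 image and run the k6 test scripts in scripts/k6 against the
  # deployed dev environment, passing the configured CloudAPI URL and OAuth credentials.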
  k6:
    if: github.actor != 'dependabot[bot]' && github.event.pull_request.draft == false
    name: K6
    environment:
      name: dev
    needs:
      - build
      - deploy
    permissions:
      id-token: write
      packages: write
    runs-on: ubuntu-latest
    timeout-minutes: 10
    outputs:
      image_version: ${{ steps.meta.outputs.version }}
    steps:
      - name: Check out code
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}
      - name: Docker Metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository_owner }}/xk6
          tags: |
            type=raw,value=latest,enable=${{ github.event.repository.default_branch == github.ref_name }}
            type=sha,prefix=pr-${{ github.event.pull_request.number }}-,priority=601,enable=${{ github.event_name == 'pull_request' }}
            type=sha,prefix={{branch}}-,priority=601,enable=${{ github.event_name == 'push' && github.ref_type == 'branch' }}
            type=ref,event=branch,priority=600
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
      - name: Build and push Docker images
        id: build_image
        uses: docker/build-push-action@v6
        with:
          context: .
          file: ./scripts/k6/Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: |
            type=gha,scope=build-xk6
            type=registry,ref=ghcr.io/${{ github.repository_owner }}/xk6:latest
          cache-to: type=gha,mode=max,scope=build-xk6
      - name: Prepare output directory
        run: mkdir -p ${{ github.workspace }}/scripts/k6/output && chmod 777 ${{ github.workspace }}/scripts/k6/output
      - name: Run k6 tests
        run: |
          docker run --rm \
            -v ${{ github.workspace }}/scripts/k6:/scripts \
            -e CLIENT_ID=${{ secrets.CLIENT_ID }} \
            -e GOVERNANCE_CLIENT_ID=${{ secrets.GOVERNANCE_CLIENT_ID }} \
            -e CLIENT_SECRET=${{ secrets.CLIENT_SECRET }} \
            -e GOVERNANCE_CLIENT_SECRET=${{ secrets.GOVERNANCE_CLIENT_SECRET }} \
            -e CLOUDAPI_URL=${{ secrets.CLOUDAPI_URL }} \
            -e OAUTH_ENDPOINT=${{ secrets.OAUTH_ENDPOINT }} \
            -e GOVERNANCE_OAUTH_ENDPOINT=${{ secrets.GOVERNANCE_OAUTH_ENDPOINT }} \
            --workdir /scripts \
            --entrypoint /bin/sh \
            ghcr.io/${{ github.repository_owner }}/xk6:${{ steps.meta.outputs.version }} \
            /scripts/run_tests.sh
        shell: bash