regression testing #828

Workflow file for this run
name: regression testing
env:
# AWS credentials
AWS_EKS_NAME: tip-wlan-main
AWS_DEFAULT_OUTPUT: json
AWS_DEFAULT_REGION: ap-south-1
AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CLIENT_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CLIENT_KEY }}
# Cloud SDK certs
CACERT: ${{ secrets.CACERT }}
CAKEY: ${{ secrets.CAKEY }}
ALLURE_CLI_VERSION: 2.25.0
on:
workflow_dispatch:
inputs:
openwifi_revision:
required: true
default: "main"
description: "revision of the Open Wifi Helm chart"
ap_models:
required: true
default: "cig_wf186h,edgecore_eap104,cig_wf196,udaya_a6-id2,yuncore_ax820,edgecore_eap102,yuncore_fap655,cig_wf189,edgecore_eap105,sercomm_ap72tip,edgecore_eap101,edgecore_eap111"
description: "the AP models to test"
ap_version:
required: true
default: "next-latest"
description: "revision of firmware to flash on AP, <branch>-<commit>"
marker_expressions:
required: true
default: "dfs_tests,multi_psk_tests,rate_limiting_tests,rate_limiting_with_radius_tests,dynamic_vlan_tests,multi_vlan_tests,strict_forwarding_tests,advanced_captive_portal_tests,firmware_upgrade_downgrade,asb_tests"
description: "Marker expressions to select tests to execute"
additional_args:
default: ""
description: "additional arguments that will be passed to the pytest execution"
required: false
existing_controller:
required: true
default: "qa01"
description: "Use an existing cloud controller, e.g. like qa01 instead of dynamic one"
tests_release:
required: false
default: ""
description: "Tests release branch to use (i.e. 'release/v2.8.0' or 'master'). If left empty, latest release branch is used"
schedule:
- cron: "0 12 * * 0,5,6"
jobs:
# Set vars
vars:
runs-on: ubuntu-latest
outputs:
openwifi_revision: ${{ steps.vars.outputs.openwifi }}
ap_models: ${{ steps.vars.outputs.ap_models }}
ap_version: ${{ steps.vars.outputs.ap_version }}
marker_expressions: ${{ steps.vars.outputs.marker_expressions }}
additional_arguments: ${{ steps.vars.outputs.additional_arguments }}
existing_controller: ${{ steps.vars.outputs.existing_controller }}
tests_release: ${{ steps.vars.outputs.tests_release }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: set variables
id: vars
run: |
echo "openwifi=$(echo ${{ github.event.inputs.openwifi_revision || 'main' }})" >> $GITHUB_OUTPUT
echo "ap_version=$(echo ${{ github.event.inputs.ap_version || 'next-latest' }})" >> $GITHUB_OUTPUT
echo "existing_controller=$(echo ${{ github.event.inputs.existing_controller || 'qa01' }})" >> $GITHUB_OUTPUT
MARKER_EXPRESSIONS="${{ github.event.inputs.marker_expressions || 'dfs_tests,multi_psk_tests,rate_limiting_tests,rate_limiting_with_radius_tests,dynamic_vlan_tests,multi_vlan_tests,strict_forwarding_tests,advanced_captive_portal_tests,firmware_upgrade_downgrade,asb_tests' }}"
MARKER_EXPRESSIONS=$(echo $MARKER_EXPRESSIONS | sed "s/,/\",\"/g" | sed 's/^/[\"/g' | sed 's/$/\"]/g')
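# the sed pipeline turns the comma-separated list into a JSON array,
# e.g. dfs_tests,asb_tests -> ["dfs_tests","asb_tests"], so jobs can check it with contains(fromJSON(...))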
echo "additional_arguments=$(echo ${{ github.event.inputs.additional_arguments || ''}})" >> $GITHUB_OUTPUT
cat >> $GITHUB_OUTPUT << EOF
marker_expressions=${MARKER_EXPRESSIONS}
EOF
# Scheduled runs are split into two slots; slot 1 starts at 11:00 UTC
DOW=$(date +%u)
TOD=$(date +%H)
SLOT=$(( 10#$TOD % 24)) # 11 - slot 1
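# the 10# prefix forces base-10 arithmetic so hours like 08/09 are not parsed as invalid octal numbers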
echo "Current date is $(date --iso-8601=s)"
echo "Current date of week is $DOW which is $(( 10#$DOW % 7 )) in cron notation"
echo "Current time is $TOD hours, which is $(( 10#$TOD % 24)) in cron notation"
echo "Current slot is $SLOT"
# choose AP models based on the day of the week for scheduled runs; otherwise use the provided input
if [[ "$DOW" -eq "5" ]]; then
AP_MODELS="${{ github.event.inputs.ap_models || 'cig_wf186h,cig_wf196,cig_wf189,edgecore_eap105' }}"
elif [[ "$DOW" -eq "6" ]]; then
AP_MODELS="${{ github.event.inputs.ap_models || 'edgecore_eap104,yuncore_ax820,sercomm_ap72tip,edgecore_eap111' }}"
elif [[ "$DOW" -eq "7" ]]; then
AP_MODELS="${{ github.event.inputs.ap_models || 'udaya_a6-id2,edgecore_eap102,yuncore_fap655,edgecore_eap101' }}"
else
AP_MODELS="${{ github.event.inputs.ap_models || 'cig_wf186h,cig_wf196,yuncore_ax820,yuncore_fap655,sercomm_ap72tip,edgecore_eap101' }}"
fi
AP_MODELS=$(echo $AP_MODELS | sed "s/,/\",\"/g" | sed 's/^/[\"/g' | sed 's/$/\"]/g')
cat >> $GITHUB_OUTPUT << EOF
ap_models=${AP_MODELS}
EOF
LATEST_TESTS_RELEASE=$(git branch -r | grep 'release/v' | sed 's!\s*origin/!!' | tail -1)
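# assumes release/v* branches sort in order in the `git branch -r` listing, so `tail -1` picks the newest one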
if [[ -z "${{ github.event.inputs.tests_release }}" ]]; then
echo "Tests release was not passed, using branch $LATEST_TESTS_RELEASE"
echo "tests_release=$LATEST_TESTS_RELEASE" >> $GITHUB_OUTPUT
else
echo "Tests release was passed - ${{ github.event.inputs.tests_release }}"
echo "tests_release=${{ github.event.inputs.tests_release }}" >> $GITHUB_OUTPUT
fi
# Build test image
build:
runs-on: ubuntu-latest
needs: ["vars"]
steps:
- uses: actions/checkout@v3
with:
ref: ${{ needs.vars.outputs.tests_release }}
- name: build and push Docker image
uses: ./.github/actions/build-and-push-docker
with:
registry: tip-tip-wlan-cloud-docker-repo.jfrog.io
registry_user: ${{ secrets.DOCKER_USER_NAME }}
registry_password: ${{ secrets.DOCKER_USER_PASSWORD }}
# Run tests on APs
test-edgecore-eap102:
needs: ["vars", "build", "test-yuncore-ax820"]
runs-on: [self-hosted, small]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'edgecore_eap102')"
env:
AP_MODEL: edgecore_eap102
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
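# update-kubeconfig writes an entry for the EKS cluster into the runner's kubeconfig so the kubectl steps below can reach it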
- name: prepare namespace name
id: namespace
run: |
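# namespace names must be lowercase RFC 1123 labels, hence the tr to lowercase and '_' -> '-'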
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
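# lab_info.json is rendered from the LAB_INFO_JSON secret and passed to the run-tests action as configuration_file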
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-3a
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
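# locate the pod created by the "testing" Job in the test namespace and dump its logs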
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-3a
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-3a
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-3a
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-3a
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-3a
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-3a
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-3a
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fwd
testbed: basic-3a
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fwd --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fwd $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fwd
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-3a
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
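# the controller log steps below target the openwifi-qa01 namespace, matching the default existing_controller value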
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
test-cig-wf186h:
needs: [ "vars", "build"]
runs-on: [ self-hosted, small ]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'cig_wf186h')"
env:
AP_MODEL: cig_wf186h
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-1
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-1
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-1
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-1
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-1
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-1
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-1
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-1
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fwd
testbed: basic-1
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fwd --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fwd $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fwd
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-1
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
test-edgecore-eap101:
needs: ["vars", "build"]
runs-on: [self-hosted, small]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'edgecore_eap101')"
env:
AP_MODEL: edgecore_eap101
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-6
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-6
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-6
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-6
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-6
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-6
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-6
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-6
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fwd
testbed: basic-6
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fwd --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fwd $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fwd
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-6
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
test-yuncore-fap655:
needs: ["vars", "build"]
runs-on: [self-hosted, small]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'yuncore_fap655')"
env:
AP_MODEL: yuncore_fap655
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-4
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
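      # dns_records_to_resolve lists the sec/gw/fms hostnames of the selected
      # existing controller (qa01 by default), presumably so the testing pod
      # can resolve them from inside the cluster.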
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
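      # Every suite in this job repeats the same trio: run-tests in a
      # suite-specific namespace (<base>-dfs, <base>-multipsk, ...), a log dump
      # that finds the pod via the job-name=testing label (the Job presumably
      # created by the run-tests action), and a namespace deletion with
      # --ignore-not-found so cleanup never fails the workflow.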
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-4
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-4
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-4
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-4
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-4
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-4
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-4
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fwd
testbed: basic-4
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fwd --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fwd $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fwd
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-4
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
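  # test-cig-wf189 lists test-yuncore-fap655 in its needs (presumably because
  # the two models share lab resources); since its condition uses the
  # !cancelled() status function, it still runs when that upstream job fails
  # and is only skipped on cancellation or when the model is absent from
  # ap_models.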
test-cig-wf189:
needs: ["vars", "build", "test-yuncore-fap655"]
runs-on: [self-hosted, small]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'cig_wf189')"
env:
AP_MODEL: cig_wf189
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-4a
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-4a
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-4a
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-4a
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-4a
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-4a
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-4a
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-4a
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fwd
testbed: basic-4a
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fwd --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fwd $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fwd
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-4a
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
test-edgecore-eap104:
needs: ["vars", "build", "test-cig-wf186h"]
runs-on: [self-hosted, small]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'edgecore_eap104')"
env:
AP_MODEL: edgecore_eap104
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-1a
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-1a
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-1a
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-1a
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-1a
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-1a
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-1a
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-1a
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fwd
testbed: basic-1a
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fwd --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fwd $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fwd
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-1a
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
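  # Only the testbed changes across these model jobs: yuncore_fap655 runs on
  # basic-4, cig_wf189 on basic-4a, edgecore_eap104 on basic-1a and
  # yuncore_ax820 on basic-3, while the suites, markers and cleanup steps stay
  # identical.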
test-yuncore-ax820:
needs: ["vars", "build"]
runs-on: [self-hosted, small]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'yuncore_ax820')"
env:
AP_MODEL: yuncore_ax820
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-3
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-3
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-3
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-3
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-3
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-3
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-3
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-3
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fwd
testbed: basic-3
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fwd --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fwd $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fwd
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-3
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
test-cig-wf196:
needs: [ "vars", "build" ]
runs-on: [ self-hosted, small ]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'cig_wf196')"
env:
AP_MODEL: cig_wf196
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
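# lab_info.json is rendered from the LAB_INFO_JSON secret and consumed by the run-tests action below via configuration_file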
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-2
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-2
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-2
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-2
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-2
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-2
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-2
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-2
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fwd
testbed: basic-2
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fwd --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fwd $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fwd
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-2
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
test-edgecore-eap111:
needs: [ "vars", "build", "test-edgecore-eap101" ]
runs-on: [ self-hosted, small ]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'edgecore_eap111')"
env:
AP_MODEL: edgecore_eap111
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-6a
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-6a
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-6a
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-6a
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-6a
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-6a
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-6a
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-6a
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fwd
testbed: basic-6a
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fwd --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fwd $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fwd
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-6a
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
test-udaya-a6-id2:
needs: [ "vars", "build", "test-cig-wf196" ]
runs-on: [ self-hosted, small ]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'udaya_a6-id2')"
env:
AP_MODEL: udaya_a6-id2
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-2a
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-2a
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-2a
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-2a
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-2a
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-2a
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-2a
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-2a
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fwd
testbed: basic-2a
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fwd --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fwd $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fwd
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-2a
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
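# Regression tests for the edgecore_eap105 AP model on testbed basic-5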
test-edgecore-eap105:
needs: [ "vars", "build"]
runs-on: [ self-hosted, small ]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'edgecore_eap105')"
env:
AP_MODEL: edgecore_eap105
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
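# derive a per-run namespace prefix from the run id and the AP model (lowercased, underscores replaced with dashes)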
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
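# write the lab configuration from the LAB_INFO_JSON secret into the file passed as configuration_file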
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-5
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
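# look up the pod created by the testing job in the test namespace and print its logs with timestamps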
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-5
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-5
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-5
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-5
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-5
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-5
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-5
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fwd
testbed: basic-5
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fwd --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fwd $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fwd
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-5
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
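# Regression tests for the sercomm_ap72tip AP model on testbed basic-5a; runs after test-edgecore-eap105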
test-sercomm-ap72tip:
needs: [ "vars", "build", "test-edgecore-eap105"]
runs-on: [ self-hosted, small ]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'sercomm_ap72tip')"
env:
AP_MODEL: sercomm_ap72tip
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-5a
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-5a
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-5a
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-5a
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-5a
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-5a
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-5a
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-5a
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fwd
testbed: basic-5a
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fwd --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fwd $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fwd
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-5a
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: "-o firmware=${{ needs.vars.outputs.ap_version }} ${{ needs.vars.outputs.additional_arguments }}"
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
# Save reports
report:
if: "!cancelled()"
runs-on: ubuntu-latest
needs: [vars, test-cig-wf186h, test-edgecore-eap104, test-cig-wf196, test-udaya-a6-id2, test-yuncore-ax820, test-edgecore-eap102, test-yuncore-fap655, test-cig-wf189, test-edgecore-eap105, test-sercomm-ap72tip, test-edgecore-eap101, test-edgecore-eap111]
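# one report is generated per combination of AP model and marker expression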
strategy:
fail-fast: false
matrix:
model: ${{ fromJson( needs.vars.outputs.ap_models ) }}
marker: ${{ fromJson( needs.vars.outputs.marker_expressions ) }}
steps:
- name: checkout testing repo
uses: actions/checkout@v3
- uses: actions/download-artifact@v3
with:
name: allure-results-${{ matrix.model }}-${{ matrix.marker }}
path: allure-results
- name: download history of previous run
continue-on-error: true
run: |
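# read the run id of the latest published report for this marker/model and download its history so it can be carried over into the new report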
LAST_RUN_ID=$(aws s3api head-object --bucket openwifi-allure-reports --key regression/${{ matrix.marker }}/${{ matrix.model }}/latest/index.html | jq -r .Metadata.latest)
aws s3 cp --recursive s3://openwifi-allure-reports/regression/${{ matrix.marker }}/${{ matrix.model }}/$LAST_RUN_ID/history history
- name: generate Allure report
uses: ./.github/actions/generate-allure-report
with:
results_path: ./allure-results
history_path: ./history
additional_metadata: |
Ap.Model=${{ matrix.model }}
- name: upload Allure report as artifact
uses: actions/upload-artifact@v3
with:
name: allure-report-${{ matrix.model }}-${{ matrix.marker }}
path: allure-report
# copy the raw results into the report so that multiple reports can be aggregated together later on
- name: copy results into report
run: cp -r allure-results allure-report/results
- name: upload to S3
if: github.ref == 'refs/heads/master'
uses: ./.github/actions/allure-report-to-s3
with:
test_type: regression/${{ matrix.marker }}
testbed: ${{ matrix.model }}
report_path: allure-report
s3_access_key_id: ${{ secrets.ALLURE_S3_ACCESS_KEY_ID }}
s3_access_key_secret: ${{ secrets.ALLURE_S3_ACCESS_KEY_SECRET }}
# Cleanup
cleanup:
needs: [test-cig-wf186h, test-edgecore-eap104, test-cig-wf196, test-udaya-a6-id2, test-yuncore-ax820, test-edgecore-eap102, test-yuncore-fap655, test-cig-wf189, test-edgecore-eap105, test-sercomm-ap72tip, test-edgecore-eap101, test-edgecore-eap111]
runs-on: ubuntu-latest
if: always()
steps:
- uses: actions/checkout@v3
- name: cleanup Docker image
uses: ./.github/actions/cleanup-docker
with:
registry_user: ${{ secrets.DOCKER_USER_NAME }}
registry_password: ${{ secrets.DOCKER_USER_PASSWORD }}