add test coverage #5313

name: Run Tests
on:
  push:
    # This should disable running the workflow on tags, according to the
    # on.<push|pull_request>.<branches|tags> GitHub Actions docs.
    branches:
      - "*"
  pull_request:
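# Note on the group key below: for pull requests, github.head_ref is the PR
# branch, so a new push to the same PR cancels the in-flight run; on plain
# pushes head_ref is empty and the unique run_id is used instead, so runs on
# main are never cancelled mid-flight.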
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
jobs:
  tests:
    name: Run Tests
    env:
      PYTHON: ${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    defaults:
      run:
        # Adding -l {0} helps ensure conda can be found properly in windows.
        shell: bash -l {0}
    strategy:
      fail-fast: false
      matrix:
        os: [macos-latest, windows-latest]
        python-version: ["3.10", "3.11", "3.12"]
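        # The first two include entries below only attach an env_name to the
        # matching generated os/python cells; the macos-13 entry matches no
        # existing cell, so it adds one extra Intel-mac job pinned to 3.12,
        # i.e. {os: macos-13, python-version: "3.12", env_name: pyuvdata_tests}.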
        include:
          - env_name: pyuvdata_tests_windows
            os: windows-latest
          - env_name: pyuvdata_tests_mac_arm
            os: macos-latest
          - env_name: pyuvdata_tests
            os: macos-13
            python-version: "3.12"
    steps:
      - uses: actions/checkout@main
        with:
          fetch-depth: 0

      - name: Setup Miniforge
        uses: conda-incubator/setup-miniconda@v3
        with:
          miniforge-version: latest
          python-version: ${{ matrix.python-version }}
          environment-file: ci/${{ matrix.env_name }}.yml
          activate-environment: ${{ matrix.env_name }}
          run-post: false
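      # Sanity check: fail fast if the interpreter conda actually solved does
      # not match matrix.python-version (exported as $PYTHON in the job env).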
      - name: Conda Info
        run: |
          conda info -a
          conda list
          PYVER=$(python -c "import sys; print('{:d}.{:d}'.format(sys.version_info.major, sys.version_info.minor))")
          if [[ "$PYVER" != "$PYTHON" ]]; then
            exit 1
          fi
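      # CYTHON_TRACE/CYTHON_TRACE_NOGIL build the Cython extensions with line
      # tracing enabled so coverage can include the .pyx modules; this assumes
      # .coveragerc enables the corresponding Cython coverage plugin.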
      - name: Install
        run: |
          CFLAGS="-DCYTHON_TRACE=1 -DCYTHON_TRACE_NOGIL=1" pip install .
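      # -n auto gives one pytest-xdist worker per CPU; --dist=loadfile keeps
      # every test from a given file on the same worker, which is the safe
      # choice when tests share module-level fixtures or data files.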
      - name: Run Tests
        run: |
          python -m pytest -n auto --dist=loadfile --cov=pyuvdata --cov-config=.coveragerc --cov-report xml:./coverage.xml
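      # Upload the XML report to Codecov. The token comes from a repository
      # secret; codecov-action v4 expects a token for reliable uploads even
      # on public repos.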
      - uses: codecov/codecov-action@v4
        if: success()
        with:
          token: ${{ secrets.CODECOV_TOKEN }}  # required
          file: ./coverage.xml  # optional
  benchmark:
    name: Performance Benchmark
    needs: tests
    env:
      PYTHON: ${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    defaults:
      run:
        # Adding -l {0} helps ensure conda can be found properly in windows.
        shell: bash -l {0}
    strategy:
      fail-fast: false
      matrix:
        os: [macos-latest, windows-latest, ubuntu-latest]
        python-version: ["3.12"]
        include:
          - env_name: pyuvdata_tests_windows
            os: windows-latest
          - env_name: pyuvdata_tests_mac_arm
            os: macos-latest
          - env_name: pyuvdata_tests
            os: macos-13
            python-version: "3.12"
          - env_name: pyuvdata_tests
            os: ubuntu-latest
    steps:
      - uses: actions/checkout@main
        with:
          fetch-depth: 0

      - name: Setup Mambaforge
        uses: conda-incubator/setup-miniconda@v3
        with:
          miniforge-variant: Mambaforge
          miniforge-version: latest
          use-mamba: true
          python-version: ${{ matrix.python-version }}
          environment-file: ci/${{ matrix.env_name }}.yml
          activate-environment: ${{ matrix.env_name }}
          run-post: false
      - name: Conda Info
        run: |
          conda info -a
          conda list
          PYVER=$(python -c "import sys; print('{:d}.{:d}'.format(sys.version_info.major, sys.version_info.minor))")
          if [[ "$PYVER" != "$PYTHON" ]]; then
            exit 1
          fi
      # also install the benchmark utility
      - name: Install
        run: |
          pip install pytest-benchmark
          pip install .
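      # --benchmark-only deselects everything except tests that use the
      # pytest-benchmark `benchmark` fixture; --benchmark-json writes the
      # machine-readable results that github-action-benchmark consumes below.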
      - name: Run benchmark
        run: |
          pytest --benchmark-only --benchmark-json output.json
      # Download the previous benchmark result from the cache (if it exists)
      - name: Download previous benchmark data
        uses: actions/cache/restore@v4
        with:
          path: ./cache
          key: ${{ matrix.os }}-benchmark
      # Run the `github-action-benchmark` action.
      # This step also EDITS the ./cache/benchmark-data.json file.
      # We do not need to add output.json to the cache directory.
      - name: Compare benchmarks
        uses: benchmark-action/github-action-benchmark@v1
        with:
          # What benchmark tool output.json came from
          tool: 'pytest'
          # Where the output from the benchmark tool is stored
          output-file-path: output.json
          # Where the previous data file is stored
          external-data-json-path: ./cache/benchmark-data.json
          # Workflow will fail when an alert happens
          fail-on-alert: true
          # Comment on the PR if the branch is not a fork
          comment-on-alert: true
          # Enable Job Summary for PRs
          summary-always: true
          github-token: ${{ secrets.GITHUB_TOKEN }}
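      # Restore and save are deliberately split: PR runs only read the cached
      # baseline, while the save below refreshes it from main. Since
      # alert-threshold is not set above, github-action-benchmark should alert
      # at its default threshold (200%, i.e. a 2x slowdown).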
      - name: Store benchmark results
        uses: actions/cache/save@v4
        # only store the cache if being run on main
        if: github.ref == 'refs/heads/main'
        with:
          path: ./cache
          key: ${{ matrix.os }}-benchmark