Weekly #45

Workflow file for this run

# The name is short because we mostly care how it appears in the pull request
# "checks" dialogue box - it looks like
#     Weekly / ubuntu-latest, defaults
# or similar.
name: Weekly
on:
  schedule:
    - cron: "0 12 * * 0"  # Every Sunday at noon UTC
  workflow_dispatch:
    inputs:
      open_issue:
        description: 'Open issue on failure'
        default: "False"
defaults:
  run:
    # The slightly odd shell call is to force bash to read .bashrc, which is
    # necessary for having conda behave sensibly. We use bash as the shell even
    # on Windows, since we don't run anything much complicated, and it makes
    # things much simpler.
    shell: bash -l {0}
jobs:
  cases:
    if: github.repository == 'qutip/qutip-jax'
    name: ${{ matrix.os }}, ${{ matrix.case-name }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        python-version: [3.11]
        case-name: [defaults]

    steps:
      - uses: actions/checkout@v4

      - uses: conda-incubator/setup-miniconda@v2
        with:
          auto-update-conda: true
          python-version: ${{ matrix.python-version }}
      - name: Install QuTiP from GitHub
        run: |
          python -m pip install git+https://github.com/qutip/qutip.git@master
          python -c 'import qutip; qutip.about()'
      - name: Install qutip-jax and dependencies
        # Install in editable mode so Coveralls detects the tree structure
        # relative to the git repository root as well.
        run: |
          python -m pip install jax jax[cpu]
          python -m pip install -e .[full]

      - name: Package information
        run: |
          conda list
          python -c 'import qutip_jax; print(qutip_jax.__version__)'
      - name: Run tests
        # If our tests are running for longer than an hour, _something_ is wrong
        # somewhere. The GitHub default is 6 hours, which is a bit long to wait
        # to see if something hung.
        timeout-minutes: 60
        run: |
          pytest --durations=0 --durations-min=1.0 --verbosity=1 --color=yes
        # Above flags are:
        #   --durations=0 --durations-min=1.0
        #     at the end, show a list of all the tests that took longer than a
        #     second to run
        #   --verbosity=1
        #     turn the verbosity up so pytest prints the names of the tests
        #     it's currently working on
        #   --color=yes
        #     force coloured output in the terminal
        # These flags are added to those in pyproject.toml.

  finalise:
    needs: cases
    if: failure()
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Open Issue on Failure
        env:
          GITHUB_TOKEN: ${{ github.token }}
        run: |
          if [[ -z "${{ inputs.open_issue }}" ]] || [[ "${{ inputs.open_issue }}" != "False" ]]; then
            pip install requests
            python tools/report_failing_tests.py $GITHUB_TOKEN
          fi
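
Note on the final step: on a scheduled run the open_issue input is empty, so the condition above is true and the reporting script runs; on a manual dispatch left at the default "False" the step is skipped. The actual tools/report_failing_tests.py lives in the qutip-jax repository and is not shown on this page; the following is only a minimal sketch, assuming such a script takes the GitHub token as its first command-line argument (as the workflow passes it) and opens an issue through the GitHub REST API with requests. The repository name, issue title, and body text here are illustrative assumptions, not the real script's behaviour.

# Hypothetical sketch only - not the repository's actual tools/report_failing_tests.py.
import sys

import requests

REPO = "qutip/qutip-jax"  # assumed target repository
API_URL = f"https://api.github.com/repos/{REPO}/issues"


def open_issue(token, title, body):
    """Create a GitHub issue via the REST API and return its URL."""
    headers = {
        "Authorization": f"token {token}",
        "Accept": "application/vnd.github+json",
    }
    response = requests.post(API_URL, json={"title": title, "body": body}, headers=headers)
    response.raise_for_status()
    return response.json()["html_url"]


if __name__ == "__main__":
    # The workflow invokes the script as: python tools/report_failing_tests.py $GITHUB_TOKEN
    token = sys.argv[1]
    url = open_issue(
        token,
        title="Weekly scheduled tests failed",
        body="The weekly run against QuTiP master failed; see the Actions logs for details.",
    )
    print(f"Opened issue: {url}")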