Wild Time Benchmarks #1321

Workflow file for this run

name: linting-and-tests
on: [pull_request]
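# Per GitHub's docs, explicitly setting `shell: bash` (below) runs each step with
# fail-fast options (bash -eo pipefail), so multi-command steps abort on the
# first failing command.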
defaults:
  run:
    shell: bash

jobs:
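  # Each job runs on a fresh runner, so checkout and the Conda composite action
  # (./.github/actions/conda) are repeated per job; every tool prints its
  # version first so the log records exactly which release ran.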
  flake8:
    timeout-minutes: 60
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Setup Conda
        uses: ./.github/actions/conda
      - name: Flake8
        run: |
          conda run -n modyn flake8 --version
          conda run -n modyn flake8 modyn --statistics
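  # mypy is invoked without config flags, so it presumably reads its settings
  # from a config file in the repo (e.g. mypy.ini, setup.cfg, or pyproject.toml).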
  mypy-typechecking:
    timeout-minutes: 60
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Setup Conda
        uses: ./.github/actions/conda
      - name: Mypy
        run: |
          conda run -n modyn mypy --version
          conda run -n modyn mypy modyn
  pylint:
    timeout-minutes: 60
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Setup Conda
        uses: ./.github/actions/conda
      - name: Pylint
        run: |
          conda run -n modyn pylint --version
          conda run -n modyn pylint modyn
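  # --check --diff only reports mis-sorted imports (with a diff) and fails the
  # job; it never rewrites files on the runner.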
  isort:
    timeout-minutes: 60
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Setup Conda
        uses: ./.github/actions/conda
      - name: Isort
        run: |
          conda run -n modyn isort --version
          conda run -n modyn isort modyn --check --diff
          conda run -n modyn isort integrationtests --check --diff
          conda run -n modyn isort benchmark --check --diff
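  # Black reads its settings from the repo's black.toml (passed via --config)
  # instead of the default pyproject.toml lookup.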
  black:
    timeout-minutes: 60
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Setup Conda
        uses: ./.github/actions/conda
      - name: Black
        run: |
          conda run -n modyn black --version
          conda run -n modyn black --check modyn --verbose --config black.toml
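  # The first pytest run enforces the coverage gate (--cov-fail-under=90 fails
  # the job if total coverage drops below 90%); the second run captures plain
  # output in pytest-coverage.txt, which the commentator action below is
  # assumed to read when posting its PR comment.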
  unittests:
    timeout-minutes: 60
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Setup Conda
        uses: ./.github/actions/conda
      - name: Pytest
        run: |
          conda run -n modyn pytest modyn --cov-reset --cache-clear --cov-fail-under=90
          conda run -n modyn pytest > pytest-coverage.txt
      - name: Comment coverage
        uses: coroo/pytest-coverage-commentator@v1.0.2
  ### Integration Tests ###
  # These live in the same workflow because GitHub currently has no simple way
  # to express "run workflow B only if workflow A completed successfully".
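  # (A workflow_run trigger can chain workflows, but it only fires for workflow
  # files on the default branch, so it cannot gate pull-request checks here.)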
  # Checks whether the base container works correctly.
  dockerized-unittests:
    timeout-minutes: 60
    runs-on: ubuntu-latest
    needs:
      - flake8
      - mypy-typechecking
      - pylint
      - unittests
      - isort
      - black
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Setup base container
        uses: ./.github/actions/base
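      # Assumption: the base composite action above builds and tags the
      # `modynbase` image used here; installing dev-requirements at test time
      # keeps them out of the base image.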
      - name: Setup dev-requirements and run pytest within container
        run: docker run modynbase conda run -n modyn bash -c "pip install -r dev-requirements.txt && echo Running pytest && pytest"
  # Tests whether docker-compose up starts all components successfully and the
  # integration tests run through. Kept as a single job to reduce GitHub CI usage.
  integrationtests:
    timeout-minutes: 60
    runs-on: ubuntu-latest
    needs:
      - flake8
      - mypy-typechecking
      - pylint
      - unittests
      - isort
      - black
      - dockerized-unittests
    steps:
      - name: Check out code
        uses: actions/checkout@v3
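      # run_integrationtests.sh is assumed to bring the compose stack up, run
      # the tests, and propagate a non-zero exit code on failure so this job
      # fails with it.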
      - name: Start docker compose and exit when tests run through
        run: bash run_integrationtests.sh