Upgrade dev dependencies. (#321) #165

Workflow file for this run

name: benchmark
"on":
  push:
    branches:
      - main
      - dev
      - deps
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      ref:
        description: (optional) ref to benchmark
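# FORCE_COLOR enables colored tool output in the CI logs; PYTHONHASHSEED is pinned so
# that hash-dependent ordering is identical across runs; BASELINE_URL points to the
# previously uploaded baseline benchmark results that the final step compares against.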
env:
  FORCE_COLOR: "1"
  PYTHONHASHSEED: "42"
  BASELINE_URL: https://github.com/jab/bidict/releases/download/microbenchmarks/GHA-linux-cachegrind-x86_64-CPython-3.12.2-baseline.json
jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
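      # valgrind provides the cachegrind tool that ./cachegrind.py (used in the
      # benchmark step below) runs the benchmark suite under.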
      - name: install valgrind
        uses: awalsh128/cache-apt-pkgs-action@a6c3917cc929dd0345bfb2d3feaf9101823370ad
        with:
          packages: valgrind
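      # fetch-depth: 0 fetches full history so that any ref passed via workflow_dispatch,
      # not just the triggering commit, can be checked out and benchmarked below.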
      - name: check out source
        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332
        with:
          fetch-depth: 0
      - name: set up Python
        uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
        with:
          # Pin to micro-release for better reproducibility.
          # When upgrading to a new Python version, remember to upload new associated baseline
          # benchmark results here: https://github.com/jab/bidict/releases/edit/microbenchmarks
          python-version: '3.12.2'
      - name: set up cached uv
        uses: hynek/setup-cached-uv@4b4bfa932036976749a9653b0fa4fa10b1a7092b
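      # Each run step gets a fresh shell, so activating the venv here would not persist;
      # writing the updated PATH (with .venv/bin first) to GITHUB_ENV makes subsequent
      # steps use the virtualenv's python and pytest as well.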
      - name: create and activate virtualenv
        run: |
          uv venv
          source .venv/bin/activate
          echo PATH="$PATH" >> "$GITHUB_ENV"
      - name: uv pip install
        run: uv pip install -r dev-deps/test.txt
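      # Benchmark either the ref supplied via workflow_dispatch or, by default, the
      # commit that triggered this run.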
      - name: install the version of bidict to benchmark
        run: |
          git checkout ${{ github.event.inputs.ref || github.sha }}
          uv pip install --no-deps .
          # restore the current revision so we use its version of tests/microbenchmarks.py
          git checkout ${{ github.sha }}
          # move aside the 'bidict' subdirectory to make sure we always import the installed version
          mv -v bidict src
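      # Sanity check: the baseline file should be JSON whose first line is "{";
      # anything else (e.g. an HTML error page) fails the step immediately.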
      - name: download baseline benchmark results
        run: |
          curl -L -s -o baseline.json "$BASELINE_URL"
          line1=$(head -n1 baseline.json)
          [ "$line1" = "{" ]
      - name: benchmark and compare to baseline
        run: |
          ./cachegrind.py pytest -n0 \
            --benchmark-autosave \
            --benchmark-columns=min,rounds,iterations \
            --benchmark-disable-gc \
            --benchmark-group-by=name \
            --benchmark-compare=baseline.json \
            tests/microbenchmarks.py
      - name: archive benchmark results
        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808
        with:
          name: microbenchmark results
          path: .benchmarks
          if-no-files-found: error
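# This workflow only needs read access to repository contents.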
permissions:
  contents: read