use setup-cached-uv in CI #157

Workflow file for this run

name: benchmark
"on":
  push:
    branches:
      - main
      - dev
      - deps
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      ref:
        description: (optional) ref to benchmark
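# FORCE_COLOR enables colored output in the CI logs; PYTHONHASHSEED is pinned so hash
# randomization doesn't vary between runs (presumably for reproducible benchmark results);
# BASELINE_URL is the saved baseline that the comparison step below downloads.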
env:
  FORCE_COLOR: "1"
  PYTHONHASHSEED: "42"
  BASELINE_URL: https://github.com/jab/bidict/releases/download/microbenchmarks/GHA-linux-cachegrind-x86_64-CPython-3.12.2-baseline.json
jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
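      # valgrind provides cachegrind, which the ./cachegrind.py wrapper used below relies on
      # for instruction-count-based measurements rather than noisy wall-clock timings.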
      - name: install valgrind
        uses: awalsh128/cache-apt-pkgs-action@a6c3917cc929dd0345bfb2d3feaf9101823370ad
        with:
          packages: valgrind
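      # fetch-depth: 0 clones the full history so that an arbitrary ref passed via
      # workflow_dispatch can be checked out further below.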
      - name: check out source
        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332
        with:
          fetch-depth: 0
      - name: set up Python
        uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
        with:
          # Pin to micro-release for better reproducibility.
          # When upgrading to a new Python version, remember to upload new associated baseline
          # benchmark results here: https://github.com/jab/bidict/releases/edit/microbenchmarks
          python-version: '3.12.2'
          cache: pip
          cache-dependency-path: dev-deps/test.txt
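      # Install uv with its cache persisted across runs (the change introduced in this PR).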
      - uses: hynek/setup-cached-uv@4b4bfa932036976749a9653b0fa4fa10b1a7092b
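      # Create a virtualenv with uv and install the pinned test dependencies into it.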
      - name: install PyPI dependencies
        run: uv venv && uv pip install -r dev-deps/test.txt
      - name: install the version of bidict to benchmark
        run: |
          git checkout ${{ github.event.inputs.ref || github.sha }}
          uv pip install --no-deps -e .
          # restore the current revision so we use its version of tests/microbenchmarks.py
          git checkout ${{ github.sha }}
          # move aside the 'bidict' subdirectory to make sure we always import the installed version
          mv -v bidict src
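      # Fetch the saved baseline results and fail fast if the file doesn't start with
      # '{', i.e. doesn't look like the expected JSON.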
      - name: download baseline benchmark results
        run: |
          curl -L -s -o baseline.json "$BASELINE_URL"
          line1=$(head -n1 baseline.json)
          [ "$line1" = "{" ]
      - name: benchmark and compare to baseline
        run: |
          ./cachegrind.py uv run pytest -n0 \
            --benchmark-autosave \
            --benchmark-columns=min,rounds,iterations \
            --benchmark-disable-gc \
            --benchmark-group-by=name \
            --benchmark-compare=baseline.json \
            tests/microbenchmarks.py
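      # --benchmark-autosave writes results under .benchmarks/, which is uploaded here.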
      - name: archive benchmark results
        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808
        with:
          name: microbenchmark results
          path: .benchmarks
          if-no-files-found: error
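    # Restrict the job's GITHUB_TOKEN to read-only access to repository contents.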
    permissions:
      contents: read