diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
new file mode 100644
index 000000000..aa8353b68
--- /dev/null
+++ b/.github/workflows/benchmark.yml
@@ -0,0 +1,62 @@
+name: Benchmark
+env:
+  PYTHONHASHSEED: "0"
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
+      - epic/*
+  workflow_dispatch:
+    inputs:
+      specific-pip-packages:
+        description: Run benchmarks with specific pip packages
+        required: false
+        type: string
+
+jobs:
+  benchmark:
+    name: Performance regression
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ComPWA/actions/pip-install@v1
+        with:
+          editable: "yes"
+          extras: test,all
+          python-version: "3.9"
+          specific-packages: ${{ inputs.specific-pip-packages }}
+      - name: Run pytest-benchmark
+        run: |
+          pytest \
+            -k benchmark \
+            --benchmark-json output.json \
+            --durations=0
+        working-directory: benchmarks
+      - name: Store result
+        if: github.event_name == 'push'
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: AmpForm benchmark results
+          tool: pytest
+          output-file-path: benchmarks/output.json
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          gh-pages-branch: benchmark-results
+          benchmark-data-dir-path: ""
+          auto-push: true
+      - name: Warn on performance decrease
+        if: github.event_name == 'pull_request'
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: AmpForm benchmark results
+          tool: pytest
+          output-file-path: benchmarks/output.json
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          gh-pages-branch: benchmark-results
+          benchmark-data-dir-path: ""
+          auto-push: false
+          comment-on-alert: true
+          fail-on-alert: true
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2ed597629..06f6d6958 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -89,6 +89,10 @@ repos:
       - id: name-tests-test
         name: Tests should start with test_
         args: ["--django"]
+        exclude: >
+          (?x)^(
+            benchmarks/.*
+          )$
       - id: trailing-whitespace
 
   - repo: https://github.com/pre-commit/mirrors-prettier
diff --git a/.vscode/settings.json b/.vscode/settings.json
index b15050939..8b3295360 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -73,6 +73,7 @@
   "ruff.importStrategy": "fromEnvironment",
   "ruff.organizeImports": true,
   "search.exclude": {
+    "**/benchmarks/**/__init__.py": true,
     "**/tests/**/__init__.py": true,
     ".constraints/*.txt": true,
     "typings/**": true
diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/benchmarks/conftest.py b/benchmarks/conftest.py
new file mode 100644
index 000000000..60f4a55b0
--- /dev/null
+++ b/benchmarks/conftest.py
@@ -0,0 +1,6 @@
+from _pytest.config import Config
+
+
+def pytest_configure(config: Config):
+    # cspell:ignore addinivalue
+    config.addinivalue_line("python_files", "*.py")
diff --git a/benchmarks/doit_speed.py b/benchmarks/doit_speed.py
new file mode 100644
index 000000000..7043ab77f
--- /dev/null
+++ b/benchmarks/doit_speed.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import pytest
+import qrules
+
+import ampform
+from ampform.dynamics.builder import create_relativistic_breit_wigner_with_ff
+
+if TYPE_CHECKING:
+    import sympy as sp
+    from pytest_benchmark.fixture import BenchmarkFixture
+
+
+@pytest.mark.benchmark(group="doit", min_rounds=1)
+def test_doit_speed(benchmark: BenchmarkFixture):
+    reaction = qrules.generate_transitions(
+        initial_state=("psi(4160)", [-1, +1]),
+        final_state=["D-", "D0", "pi+"],
+        allowed_intermediate_particles=["D*(2007)0"],
+        formalism="canonical-helicity",
+    )
+    builder = ampform.get_builder(reaction)
+    for particle in reaction.get_intermediate_particles():
+        builder.dynamics.assign(particle.name, create_relativistic_breit_wigner_with_ff)
+    model = builder.formulate()
+
+    intensity_expr = benchmark(_perform_doit, model.expression)
+    undefined_symbols = intensity_expr.free_symbols
+    undefined_symbols -= set(model.parameter_defaults)
+    undefined_symbols -= set(model.kinematic_variables)
+    assert not undefined_symbols
+
+
+def _perform_doit(expr: sp.Expr):
+    return expr.doit()
diff --git a/docs/index.md b/docs/index.md
index cc4ffa7ab..e98546682 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -77,6 +77,7 @@ hidden:
 maxdepth: 2
 ---
 API
+Continuous benchmarks
 Changelog
 Upcoming features
 Help developing
diff --git a/pyproject.toml b/pyproject.toml
index 247afa9a9..3315918e0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -111,6 +111,7 @@ test = [
     "nbmake",
     "numpy",
     "pytest",
+    "pytest-benchmark",
     "pytest-cov",
     "pytest-profiling",
     "pytest-xdist",
@@ -120,6 +121,7 @@ types = [
     "ipywidgets",
     "numpy",
     "pytest",
+    "pytest-benchmark",
     "sphinx-api-relink >=0.0.3",
 ]
 viz = ["graphviz"]
@@ -164,17 +166,21 @@ module = ["graphviz.*"]
 
 [[tool.mypy.overrides]]
 ignore_missing_imports = true
-module = ["scipy.*"]
+module = ["pytest_benchmark.*"]
 
 [[tool.mypy.overrides]]
 ignore_missing_imports = true
 module = ["ipywidgets.*"]
 
+[[tool.mypy.overrides]]
+ignore_missing_imports = true
+module = ["scipy.*"]
+
 [[tool.mypy.overrides]]
 check_untyped_defs = true
 disallow_incomplete_defs = false
 disallow_untyped_defs = false
-module = ["tests.*"]
+module = ["benchmarks.*", "tests.*"]
 
 [[tool.mypy.overrides]]
 ignore_errors = true
@@ -253,6 +259,7 @@ norecursedirs = [
     "_build",
 ]
 testpaths = [
+    "benchmarks",
     "src",
     "tests",
 ]
@@ -359,6 +366,10 @@ split-on-trailing-comma = false
     "T20",
     "TCH00",
 ]
+"benchmarks/*" = [
+    "D",
+    "S101",
+]
 "docs/*" = [
     "E402",
     "INP001",
diff --git a/tox.ini b/tox.ini
index b90ce7ccb..6eb9f1824 100644
--- a/tox.ini
+++ b/tox.ini
@@ -17,6 +17,17 @@ description =
 setenv =
     PYTHONHASHSEED = 0
 
+[testenv:bench]
+allowlist_externals =
+    pytest
+commands =
+    pytest {posargs:benchmarks} \
+        --durations=0 \
+        --benchmark-autosave \
+        -k benchmark
+description =
+    Run benchmark tests and autosave the results
+
 [testenv:cov]
 allowlist_externals =
     pytest
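Note on the setup above: the "benchmark" argument used in benchmarks/doit_speed.py is the pytest-benchmark fixture. Calling benchmark(fn, *args) runs fn repeatedly, records the timing statistics, and returns the result of one call, so the test can still assert on the output afterwards. A minimal self-contained sketch of that pattern (the test name, group, and numbers here are illustrative, not part of this diff):

    import pytest
    from pytest_benchmark.fixture import BenchmarkFixture


    @pytest.mark.benchmark(group="example", min_rounds=1)
    def test_sum_speed(benchmark: BenchmarkFixture):
        # benchmark() calls sum(range(1_000)) repeatedly and records the timings;
        # it returns the result of one call, which can be asserted on as usual.
        result = benchmark(sum, range(1_000))
        assert result == 499_500

With the tox environment added above, the suite can be run locally with "tox -e bench". The --benchmark-autosave flag stores each run as JSON under .benchmarks/, so successive runs can be compared afterwards, for example with pytest-benchmark's "compare" subcommand.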