diff --git a/.github/workflows/benchmark.yaml b/.github/workflows/benchmark.yaml
index e7243f2..6631e1b 100644
--- a/.github/workflows/benchmark.yaml
+++ b/.github/workflows/benchmark.yaml
@@ -6,14 +6,14 @@ on:
   pull_request:
     branches:
       - main
+  # `workflow_dispatch` allows CodSpeed to trigger backtest
+  # performance analysis in order to generate initial data.
+  workflow_dispatch:
 
 jobs:
   benchmark:
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        django-version: ["4.2", "5.0"]
-    name: Benchmark (Django ${{ matrix.django-version }})
+    name: Benchmark
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
@@ -21,4 +21,8 @@ jobs:
           python-version: 3.12
           cache: "pip"
       - run: make ci
-      - run: make benchmark >> $GITHUB_STEP_SUMMARY 2>&1
+      - name: Run benchmarks
+        uses: CodSpeedHQ/action@v3
+        with:
+          token: ${{ secrets.CODSPEED_TOKEN }}
+          run: pytest tests/ --codspeed
diff --git a/.gitignore b/.gitignore
index 25e4845..6cc1dc5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -162,3 +162,4 @@ cython_debug/
 #.idea/
 
 .ruff_cache/
+.codspeed/
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 261d78f..ea29b8b 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,7 @@ test:
 	pytest -s --tb=native --random-order -m "not benchmark" $(ARGS)
 
 benchmark:
-	@pytest -s --quiet -m benchmark $(ARGS) > /dev/null
+	pytest -s $(ARGS) --codspeed
 
 format-check:
 	ruff format --check && ruff check
diff --git a/pyproject.toml b/pyproject.toml
index f9483de..91ada12 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -35,7 +35,4 @@ DJANGO_SETTINGS_MODULE = "djangoproject.settings"
 pythonpath = ["src", "tests"]
 testpaths = ["tests"]
 addopts = "--nomigrations"
-markers = [
-    "nozeal: disable the auto-setup of zeal in a test",
-    "benchmark: benchmarks the performance of zeal, not correctness",
-]
+markers = ["nozeal: disable the auto-setup of zeal in a test"]
diff --git a/requirements-dev.in b/requirements-dev.in
index 4a4689e..7f4c675 100644
--- a/requirements-dev.in
+++ b/requirements-dev.in
@@ -9,3 +9,4 @@ build
 twine
 pytest-random-order
 pytest-mock
+pytest-codspeed
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 35fb2ee..ec04c97 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -4,10 +4,14 @@ asgiref==3.8.1
     # via
     #   django
     #   django-stubs
+backports-tarfile==1.2.0
+    # via jaraco-context
 build==1.2.1
     # via -r requirements-dev.in
 certifi==2024.6.2
     # via requests
+cffi==1.17.1
+    # via pytest-codspeed
 charset-normalizer==3.3.2
     # via requests
 django==4.2.13
@@ -21,14 +25,22 @@ django-stubs-ext==5.0.2
     # via django-stubs
 docutils==0.21.2
     # via readme-renderer
+exceptiongroup==1.2.2
+    # via pytest
 factory-boy==3.3.0
     # via -r requirements-dev.in
 faker==26.0.0
     # via factory-boy
+filelock==3.16.1
+    # via pytest-codspeed
 idna==3.7
     # via requests
-importlib-metadata==8.0.0
-    # via twine
+importlib-metadata==8.5.0
+    # via
+    #   build
+    #   keyring
+    #   pytest-codspeed
+    #   twine
 iniconfig==2.0.0
     # via pytest
 jaraco-classes==3.4.0
@@ -59,6 +71,8 @@ pkginfo==1.10.0
     # via twine
 pluggy==1.5.0
     # via pytest
+pycparser==2.22
+    # via cffi
 pygments==2.18.0
     # via
     #   readme-renderer
@@ -70,9 +84,12 @@ pyright==1.1.369
 pytest==8.2.2
     # via
     #   -r requirements-dev.in
+    #   pytest-codspeed
     #   pytest-django
     #   pytest-mock
     #   pytest-random-order
+pytest-codspeed==3.0.0
+    # via -r requirements-dev.in
 pytest-django==4.8.0
     # via -r requirements-dev.in
 pytest-mock==3.14.0
@@ -91,25 +108,34 @@ requests-toolbelt==1.0.0
     # via twine
 rfc3986==2.0.0
     # via twine
-rich==13.7.1
-    # via twine
+rich==13.9.4
+    # via
+    #   pytest-codspeed
+    #   twine
 ruff==0.5.0
     # via -r requirements-dev.in
 six==1.16.0
     # via python-dateutil
 sqlparse==0.5.0
     # via django
+tomli==2.1.0
+    # via
+    #   build
+    #   django-stubs
+    #   pytest
 twine==5.1.1
     # via -r requirements-dev.in
 types-pyyaml==6.0.12.20240311
     # via django-stubs
 typing-extensions==4.12.2
     # via
+    #   asgiref
     #   django-stubs
     #   django-stubs-ext
+    #   rich
 urllib3==2.2.2
     # via
     #   requests
     #   twine
-zipp==3.19.2
+zipp==3.21.0
     # via importlib-metadata
diff --git a/tests/test_performance.py b/tests/test_performance.py
index b10d7a7..71ea6a1 100644
--- a/tests/test_performance.py
+++ b/tests/test_performance.py
@@ -1,45 +1,14 @@
-import sys
-import time
-
 import pytest
-from django.db import connection
-from django.test.utils import CaptureQueriesContext
 
 from djangoproject.social.models import Post, Profile, User
 from zeal import zeal_context, zeal_ignore
 
 from .factories import PostFactory, ProfileFactory, UserFactory
 
-pytestmark = [pytest.mark.benchmark, pytest.mark.nozeal, pytest.mark.django_db]
-
-
-def _run_benchmark():
-    # Test forward & reverse many-to-one relationships (Post -> User, User -> Posts)
-    posts = Post.objects.all()
-    for post in posts:
-        _ = post.author.username  # forward many-to-one
-        _ = list(post.author.posts.all())  # reverse many-to-one
-
-    # Test forward & reverse one-to-one relationships (Profile -> User, User -> Profile)
-    profiles = Profile.objects.all()
-    for profile in profiles:
-        _ = profile.user.username  # forward one-to-one
-        _ = profile.user.profile.display_name  # reverse one-to-one
-
-    # Test forward & reverse many-to-many relationships
-    users = User.objects.all()
-    for user in users:
-        _ = list(user.following.all())  # forward many-to-many
-        _ = list(user.followers.all())  # reverse many-to-many
-        _ = list(user.blocked.all())  # many-to-many without related_name
+pytestmark = [pytest.mark.nozeal, pytest.mark.django_db]
 
-        # Test chained relationships
-        for follower in user.followers.all():
-            _ = follower.profile.display_name
-            _ = list(follower.posts.all())
 
-def test_performance():
-    users = UserFactory.create_batch(50)
+def test_performance(benchmark):
+    users = UserFactory.create_batch(10)
 
     # everyone follows everyone
     user_following_relations = []
@@ -62,39 +31,34 @@
     for user in users:
         PostFactory.create_batch(10, author=user)
 
-    sys.stderr.write("# Benchmark\n")
-    sys.stderr.flush()
-
-    with CaptureQueriesContext(connection) as ctx:
-        start_time = time.monotonic()
-        _run_benchmark()
-        duration_no_zeal = time.monotonic() - start_time
-        num_queries_no_zeal = len(ctx.captured_queries)
-    # write to stderr so we can suppress pytest's output
-    sys.stderr.write(
-        f"Without zeal: executed {num_queries_no_zeal} queries in {duration_no_zeal:.2f} seconds\n"
-    )
-    sys.stderr.flush()
-
-    connection.queries_log.clear()
-    with (
-        zeal_context(),
-        zeal_ignore(),
-        CaptureQueriesContext(connection) as ctx,
-    ):
-        start_time = time.monotonic()
-        _run_benchmark()
-        duration_with_zeal = time.monotonic() - start_time
-        num_queries_with_zeal = len(ctx.captured_queries)
-    sys.stderr.write(
-        f"With zeal: executed {num_queries_with_zeal} queries in {duration_with_zeal:.2f} seconds\n"
-    )
-    sys.stderr.flush()
-
-    # if the number of queries is different, the benchmark is invalid
-    assert num_queries_no_zeal == num_queries_with_zeal
-
-    sys.stderr.write(
-        f"**Zeal made the code {duration_with_zeal / duration_no_zeal:.2f} times slower**\n"
-    )
-    sys.stderr.flush()
+    @benchmark
+    def _run_benchmark():
+        with (
+            zeal_context(),
+            zeal_ignore(),
+        ):
+            # Test forward & reverse many-to-one relationships (Post -> User, User -> Posts)
+            posts = Post.objects.all()
+            for post in posts:
+                _ = post.author.username  # forward many-to-one
+                _ = list(post.author.posts.all())  # reverse many-to-one
+
+            # Test forward & reverse one-to-one relationships (Profile -> User, User -> Profile)
+            profiles = Profile.objects.all()
+            for profile in profiles:
+                _ = profile.user.username  # forward one-to-one
+                _ = profile.user.profile.display_name  # reverse one-to-one
+
+            # Test forward & reverse many-to-many relationships
+            users = User.objects.all()
+            for user in users:
+                _ = list(user.following.all())  # forward many-to-many
+                _ = list(user.followers.all())  # reverse many-to-many
+                _ = list(
+                    user.blocked.all()
+                )  # many-to-many without related_name
+
+                # Test chained relationships
+                for follower in user.followers.all():
+                    _ = follower.profile.display_name
+                    _ = list(follower.posts.all())