From 547fad12df93b8ca62880e7ac9f5c4822a8493d0 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Fri, 7 Feb 2025 11:54:33 +0100 Subject: [PATCH 01/20] test: add simple python benches --- tests/benchmarks/test_bench_misc.py | 72 +++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 tests/benchmarks/test_bench_misc.py diff --git a/tests/benchmarks/test_bench_misc.py b/tests/benchmarks/test_bench_misc.py new file mode 100644 index 0000000..f9fb2e4 --- /dev/null +++ b/tests/benchmarks/test_bench_misc.py @@ -0,0 +1,72 @@ +import pytest + + +def count_even_fast(arr): + """Count the number of even numbers in an array.""" + even = 0 + for x in arr: + if x % 2 == 0: + even += 1 + return even + + +def count_even_slow(arr): + """Count the number of even numbers in an array.""" + return sum(1 for x in arr if x % 2 == 0) + + +@pytest.mark.parametrize( + "func", + [ + count_even_fast, + count_even_slow, + ], +) +def test_count_even(func, benchmark): + assert benchmark(func, range(10_000)) == 5000 + + +def sum_of_squares_for_loop_product(arr) -> int: + total = 0 + for x in arr: + total += x * x + return total + + +def sum_of_squares_for_loop_power(arr) -> int: + total = 0 + for x in arr: + total += x**2 + return total + + +def sum_of_squares_sum_labmda_product(arr) -> int: + return sum(map(lambda x: x * x, arr)) # noqa: C417 + + +def sum_of_squares_sum_labmda_power(arr) -> int: + return sum(map(lambda x: x**2, arr)) # noqa: C417 + + +def sum_of_squares_sum_comprehension_product(arr) -> int: + return sum(x * x for x in arr) + + +def sum_of_squares_sum_comprehension_power(arr) -> int: + return sum(x**2 for x in arr) + + +@pytest.mark.parametrize( + "func", + [ + sum_of_squares_for_loop_product, + sum_of_squares_for_loop_power, + sum_of_squares_sum_labmda_product, + sum_of_squares_sum_labmda_power, + sum_of_squares_sum_comprehension_product, + sum_of_squares_sum_comprehension_power, + ], +) +@pytest.mark.benchmark +def test_sum_of_squares(func): + assert 
func(range(1000)) == 332833500 From 4f60a18e5426ae32c330b57b06895e44ac9a0998 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Fri, 7 Feb 2025 11:54:53 +0100 Subject: [PATCH 02/20] test: add benches from the documentation's getting started --- tests/benchmarks/test_bench_doc.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 tests/benchmarks/test_bench_doc.py diff --git a/tests/benchmarks/test_bench_doc.py b/tests/benchmarks/test_bench_doc.py new file mode 100644 index 0000000..13da753 --- /dev/null +++ b/tests/benchmarks/test_bench_doc.py @@ -0,0 +1,24 @@ +"""Benches from the CodSpeed Getting Started Documentation.""" + +import pytest + + +def sum_of_squares_fast(arr) -> int: + total = 0 + for x in arr: + total += x * x + return total + + +def sum_of_squares_slow(arr) -> int: + return sum(map(lambda x: x**2, arr)) # noqa: C417 + + +@pytest.mark.benchmark +def test_sum_squares_fast(): + assert sum_of_squares_fast(range(1000)) == 332833500 + + +@pytest.mark.benchmark +def test_sum_squares_slow(): + assert sum_of_squares_slow(range(1000)) == 332833500 From c92304cdf2aa40e51b57942b18438f6c52f88ee3 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Tue, 11 Feb 2025 15:12:57 +0100 Subject: [PATCH 03/20] ci: update apt before installing packages --- .github/workflows/ci.yml | 4 +++- .github/workflows/codspeed.yml | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a2ec619..98d5da0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -53,7 +53,9 @@ jobs: python-version: "${{ matrix.python-version }}" - if: matrix.config == 'valgrind' || matrix.config == 'pytest-benchmark' name: Install valgrind - run: sudo apt-get install valgrind -y + run: | + sudo apt-get update + sudo apt-get install valgrind -y - name: Install dependencies with pytest${{ matrix.pytest-version }} run: | if [ "${{ matrix.config }}" == "valgrind" ]; then diff --git 
a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml index 3519bc9..67fa10d 100644 --- a/.github/workflows/codspeed.yml +++ b/.github/workflows/codspeed.yml @@ -30,6 +30,7 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Install local version of pytest-codspeed run: | + sudo apt-get update sudo apt-get install valgrind -y pip install . sudo apt-get remove valgrind -y From c6f2831b3becb96cc35426c9e752b1c4e627ed79 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Tue, 13 May 2025 10:49:46 +0200 Subject: [PATCH 04/20] feat: add instrument-hooks native module --- .github/workflows/ci.yml | 4 + .gitmodules | 3 + setup.py | 8 +- .../instruments/hooks/__init__.py | 84 +++++++++++++++++++ .../instruments/hooks/build.py | 26 ++++++ .../instruments/hooks/instrument-hooks | 1 + 6 files changed, 121 insertions(+), 5 deletions(-) create mode 100644 src/pytest_codspeed/instruments/hooks/__init__.py create mode 100644 src/pytest_codspeed/instruments/hooks/build.py create mode 160000 src/pytest_codspeed/instruments/hooks/instrument-hooks diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 98d5da0..5b778b6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,6 +15,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + with: + submodules: true - name: Set up Python 3.11 uses: actions/setup-python@v5 with: @@ -44,6 +46,8 @@ jobs: steps: - uses: actions/checkout@v4 + with: + submodules: true - uses: astral-sh/setup-uv@v4 with: version: "0.5.20" diff --git a/.gitmodules b/.gitmodules index c7c81dd..ad23fac 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "tests/benchmarks/TheAlgorithms"] path = tests/benchmarks/TheAlgorithms url = git@github.com:TheAlgorithms/Python.git +[submodule "src/pytest_codspeed/instruments/hooks/instrument-hooks"] + path = src/pytest_codspeed/instruments/hooks/instrument-hooks + url = https://github.com/CodSpeedHQ/instrument-hooks diff --git a/setup.py 
b/setup.py index 033692b..e0657bb 100644 --- a/setup.py +++ b/setup.py @@ -5,9 +5,7 @@ from setuptools import setup -build_path = ( - Path(__file__).parent / "src/pytest_codspeed/instruments/valgrind/_wrapper/build.py" -) +build_path = Path(__file__).parent / "src/pytest_codspeed/instruments/hooks/build.py" spec = importlib.util.spec_from_file_location("build", build_path) assert spec is not None, "The spec should be initialized" @@ -52,8 +50,8 @@ setup( package_data={ "pytest_codspeed": [ - "instruments/valgrind/_wrapper/*.h", - "instruments/valgrind/_wrapper/*.c", + "instruments/hooks/instrument-hooks/includes/*.h", + "instruments/hooks/instrument-hooks/dist/*.c", ] }, ext_modules=( diff --git a/src/pytest_codspeed/instruments/hooks/__init__.py b/src/pytest_codspeed/instruments/hooks/__init__.py new file mode 100644 index 0000000..65e7fc4 --- /dev/null +++ b/src/pytest_codspeed/instruments/hooks/__init__.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +import os +import sys +import warnings +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from .dist_instrument_hooks import lib as LibType + +SUPPORTS_PERF_TRAMPOLINE = sys.version_info >= (3, 12) + + +class InstrumentHooks: + """Zig library wrapper class providing benchmark measurement functionality.""" + + lib: LibType + instance: int + + def __init__(self) -> None: + if os.environ.get("CODSPEED_ENV") is None: + raise RuntimeError( + "Can't run benchmarks outside of CodSpeed environment." + "Please set the CODSPEED_ENV environment variable." 
+ ) + + try: + from .dist_instrument_hooks import lib # type: ignore + except ImportError as e: + raise RuntimeError(f"Failed to load instrument hooks library: {e}") from e + + instance = lib.instrument_hooks_init() + if instance == 0: + raise RuntimeError("Failed to initialize CodSpeed instrumentation library.") + + if SUPPORTS_PERF_TRAMPOLINE: + sys.activate_stack_trampoline("perf") # type: ignore + + self.lib = lib + self.instance = instance + + def __del__(self): + if hasattr(self, "lib") and hasattr(self, "instance"): + self.lib.instrument_hooks_deinit(self.instance) + + def start_benchmark(self) -> None: + """Start a new benchmark measurement.""" + ret = self.lib.instrument_hooks_start_benchmark(self.instance) + if ret != 0: + warnings.warn("Failed to start benchmark measurement", RuntimeWarning) + + def stop_benchmark(self) -> None: + """Stop the current benchmark measurement.""" + ret = self.lib.instrument_hooks_stop_benchmark(self.instance) + if ret != 0: + warnings.warn("Failed to stop benchmark measurement", RuntimeWarning) + + def set_executed_benchmark(self, uri: str, pid: int | None = None) -> None: + """Set the executed benchmark URI and process ID. 
+ + Args: + uri: The benchmark URI string identifier + pid: Optional process ID (defaults to current process) + """ + if pid is None: + pid = os.getpid() + + ret = self.lib.instrument_hooks_executed_benchmark( + self.instance, pid, uri.encode("ascii") + ) + if ret != 0: + warnings.warn("Failed to set executed benchmark", RuntimeWarning) + + def set_integration(self, name: str, version: str) -> None: + """Set the integration name and version.""" + ret = self.lib.instrument_hooks_set_integration( + self.instance, name.encode("ascii"), version.encode("ascii") + ) + if ret != 0: + warnings.warn("Failed to set integration name and version", RuntimeWarning) + + def is_instrumented(self) -> bool: + """Check if instrumentation is active.""" + return self.lib.instrument_hooks_is_instrumented(self.instance) diff --git a/src/pytest_codspeed/instruments/hooks/build.py b/src/pytest_codspeed/instruments/hooks/build.py new file mode 100644 index 0000000..99071d1 --- /dev/null +++ b/src/pytest_codspeed/instruments/hooks/build.py @@ -0,0 +1,26 @@ +from pathlib import Path + +from cffi import FFI # type: ignore + +ffibuilder = FFI() + +includes_dir = Path(__file__).parent.joinpath("instrument-hooks/includes") +header_text = (includes_dir / "core.h").read_text() +filtered_header = "\n".join( + line for line in header_text.splitlines() if not line.strip().startswith("#") +) +ffibuilder.cdef(filtered_header) + +ffibuilder.set_source( + "pytest_codspeed.instruments.hooks.dist_instrument_hooks", + """ + #include "core.h" + """, + sources=[ + "src/pytest_codspeed/instruments/hooks/instrument-hooks/dist/core.c", + ], + include_dirs=[str(includes_dir)], +) + +if __name__ == "__main__": + ffibuilder.compile(verbose=True) diff --git a/src/pytest_codspeed/instruments/hooks/instrument-hooks b/src/pytest_codspeed/instruments/hooks/instrument-hooks new file mode 160000 index 0000000..b003e50 --- /dev/null +++ b/src/pytest_codspeed/instruments/hooks/instrument-hooks @@ -0,0 +1 @@ +Subproject 
commit b003e5024d61cfb784d6ac6f3ffd7d61bf7b9ec9 From f887f93a93a4968c327a5fff7f5a9023ab31760b Mon Sep 17 00:00:00 2001 From: not-matthias Date: Tue, 13 May 2025 10:49:48 +0200 Subject: [PATCH 05/20] chore: remove valgrind wrapper --- .../{valgrind/__init__.py => valgrind.py} | 0 .../instruments/valgrind/_wrapper/__init__.py | 15 ----------- .../instruments/valgrind/_wrapper/build.py | 19 -------------- .../instruments/valgrind/_wrapper/wrapper.c | 25 ------------------- .../instruments/valgrind/_wrapper/wrapper.h | 6 ----- .../instruments/valgrind/_wrapper/wrapper.pyi | 13 ---------- 6 files changed, 78 deletions(-) rename src/pytest_codspeed/instruments/{valgrind/__init__.py => valgrind.py} (100%) delete mode 100644 src/pytest_codspeed/instruments/valgrind/_wrapper/__init__.py delete mode 100644 src/pytest_codspeed/instruments/valgrind/_wrapper/build.py delete mode 100644 src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.c delete mode 100644 src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.h delete mode 100644 src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.pyi diff --git a/src/pytest_codspeed/instruments/valgrind/__init__.py b/src/pytest_codspeed/instruments/valgrind.py similarity index 100% rename from src/pytest_codspeed/instruments/valgrind/__init__.py rename to src/pytest_codspeed/instruments/valgrind.py diff --git a/src/pytest_codspeed/instruments/valgrind/_wrapper/__init__.py b/src/pytest_codspeed/instruments/valgrind/_wrapper/__init__.py deleted file mode 100644 index 1e71391..0000000 --- a/src/pytest_codspeed/instruments/valgrind/_wrapper/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from .wrapper import lib as LibType - - -def get_lib() -> LibType: - try: - from .dist_callgrind_wrapper import lib # type: ignore - - return lib - except Exception as e: - raise Exception("Failed to get a compiled wrapper") from e diff --git 
a/src/pytest_codspeed/instruments/valgrind/_wrapper/build.py b/src/pytest_codspeed/instruments/valgrind/_wrapper/build.py deleted file mode 100644 index dbfabb8..0000000 --- a/src/pytest_codspeed/instruments/valgrind/_wrapper/build.py +++ /dev/null @@ -1,19 +0,0 @@ -from pathlib import Path - -from cffi import FFI # type: ignore - -wrapper_dir = Path(__file__).parent - -ffibuilder = FFI() - -ffibuilder.cdef((wrapper_dir / "wrapper.h").read_text()) - -ffibuilder.set_source( - "pytest_codspeed.instruments.valgrind._wrapper.dist_callgrind_wrapper", - '#include "wrapper.h"', - sources=["src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.c"], - include_dirs=[str(wrapper_dir)], -) - -if __name__ == "__main__": - ffibuilder.compile(verbose=True) diff --git a/src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.c b/src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.c deleted file mode 100644 index 1d9b4ad..0000000 --- a/src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.c +++ /dev/null @@ -1,25 +0,0 @@ -#include - -void start_instrumentation() { - CALLGRIND_START_INSTRUMENTATION; -} - -void stop_instrumentation() { - CALLGRIND_STOP_INSTRUMENTATION; -} - -void dump_stats() { - CALLGRIND_DUMP_STATS; -} - -void dump_stats_at(char *s) { - CALLGRIND_DUMP_STATS_AT(s); -} - -void zero_stats() { - CALLGRIND_ZERO_STATS; -} - -void toggle_collect() { - CALLGRIND_TOGGLE_COLLECT; -} diff --git a/src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.h b/src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.h deleted file mode 100644 index 8568142..0000000 --- a/src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.h +++ /dev/null @@ -1,6 +0,0 @@ -void start_instrumentation(); -void stop_instrumentation(); -void dump_stats(); -void dump_stats_at(char *s); -void zero_stats(); -void toggle_collect(); diff --git a/src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.pyi b/src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.pyi deleted file 
mode 100644 index f2ab78a..0000000 --- a/src/pytest_codspeed/instruments/valgrind/_wrapper/wrapper.pyi +++ /dev/null @@ -1,13 +0,0 @@ -class lib: - @staticmethod - def start_instrumentation() -> None: ... - @staticmethod - def stop_instrumentation() -> None: ... - @staticmethod - def dump_stats() -> None: ... - @staticmethod - def dump_stats_at(trigger: bytes) -> None: ... - @staticmethod - def zero_stats() -> None: ... - @staticmethod - def toggle_collect() -> None: ... From 26a11e8e75cb11349e54185f2038e55de58e960f Mon Sep 17 00:00:00 2001 From: not-matthias Date: Tue, 13 May 2025 10:49:50 +0200 Subject: [PATCH 06/20] feat: use instrument hooks --- src/pytest_codspeed/instruments/valgrind.py | 35 +++++++++---------- src/pytest_codspeed/instruments/walltime.py | 38 ++++++++++++++++++--- 2 files changed, 50 insertions(+), 23 deletions(-) diff --git a/src/pytest_codspeed/instruments/valgrind.py b/src/pytest_codspeed/instruments/valgrind.py index 9b80092..bed7a56 100644 --- a/src/pytest_codspeed/instruments/valgrind.py +++ b/src/pytest_codspeed/instruments/valgrind.py @@ -1,12 +1,11 @@ from __future__ import annotations -import os import sys from typing import TYPE_CHECKING from pytest_codspeed import __semver_version__ from pytest_codspeed.instruments import Instrument -from pytest_codspeed.instruments.valgrind._wrapper import get_lib +from pytest_codspeed.instruments.hooks import InstrumentHooks if TYPE_CHECKING: from typing import Any, Callable @@ -14,7 +13,6 @@ from pytest import Session from pytest_codspeed.instruments import P, T - from pytest_codspeed.instruments.valgrind._wrapper import LibType from pytest_codspeed.plugin import CodSpeedConfig SUPPORTS_PERF_TRAMPOLINE = sys.version_info >= (3, 12) @@ -22,20 +20,17 @@ class ValgrindInstrument(Instrument): instrument = "valgrind" - lib: LibType | None + instrument_hooks: InstrumentHooks | None def __init__(self, config: CodSpeedConfig) -> None: self.benchmark_count = 0 - self.should_measure = 
os.environ.get("CODSPEED_ENV") is not None - if self.should_measure: - self.lib = get_lib() - self.lib.dump_stats_at( - f"Metadata: pytest-codspeed {__semver_version__}".encode("ascii") - ) - if SUPPORTS_PERF_TRAMPOLINE: - sys.activate_stack_trampoline("perf") # type: ignore - else: - self.lib = None + try: + self.instrument_hooks = InstrumentHooks() + self.instrument_hooks.set_integration("pytest-codspeed", __semver_version__) + except RuntimeError: + self.instrument_hooks = None + + self.should_measure = self.instrument_hooks is not None def get_instrument_config_str_and_warns(self) -> tuple[str, list[str]]: config = ( @@ -61,7 +56,8 @@ def measure( **kwargs: P.kwargs, ) -> T: self.benchmark_count += 1 - if self.lib is None: # Thus should_measure is False + + if not self.instrument_hooks: return fn(*args, **kwargs) def __codspeed_root_frame__() -> T: @@ -71,14 +67,15 @@ def __codspeed_root_frame__() -> T: # Warmup CPython performance map cache __codspeed_root_frame__() - self.lib.zero_stats() - self.lib.start_instrumentation() + # Manually call the library function to avoid an extra stack frame. Also + # call the callgrind markers directly to avoid extra overhead. 
+ self.instrument_hooks.lib.callgrind_start_instrumentation() try: return __codspeed_root_frame__() finally: # Ensure instrumentation is stopped even if the test failed - self.lib.stop_instrumentation() - self.lib.dump_stats_at(uri.encode("ascii")) + self.instrument_hooks.lib.callgrind_stop_instrumentation() + self.instrument_hooks.set_executed_benchmark(uri) def report(self, session: Session) -> None: reporter = session.config.pluginmanager.get_plugin("terminalreporter") diff --git a/src/pytest_codspeed/instruments/walltime.py b/src/pytest_codspeed/instruments/walltime.py index dcbe25b..ab7a0cc 100644 --- a/src/pytest_codspeed/instruments/walltime.py +++ b/src/pytest_codspeed/instruments/walltime.py @@ -1,5 +1,7 @@ from __future__ import annotations +import os +import warnings from dataclasses import asdict, dataclass from math import ceil from statistics import mean, quantiles, stdev @@ -11,7 +13,9 @@ from rich.table import Table from rich.text import Text +from pytest_codspeed import __semver_version__ from pytest_codspeed.instruments import Instrument +from pytest_codspeed.instruments.hooks import InstrumentHooks if TYPE_CHECKING: from typing import Any, Callable @@ -131,17 +135,26 @@ class Benchmark: def run_benchmark( - name: str, uri: str, fn: Callable[P, T], args, kwargs, config: BenchmarkConfig + instrument_hooks: InstrumentHooks | None, + name: str, + uri: str, + fn: Callable[P, T], + args, + kwargs, + config: BenchmarkConfig, ) -> tuple[Benchmark, T]: + def __codspeed_root_frame__() -> T: + return fn(*args, **kwargs) + # Compute the actual result of the function - out = fn(*args, **kwargs) + out = __codspeed_root_frame__() # Warmup times_per_round_ns: list[float] = [] warmup_start = start = perf_counter_ns() while True: start = perf_counter_ns() - fn(*args, **kwargs) + __codspeed_root_frame__() end = perf_counter_ns() times_per_round_ns.append(end - start) if end - warmup_start > config.warmup_time_ns: @@ -166,16 +179,21 @@ def run_benchmark( # Benchmark 
iter_range = range(iter_per_round) run_start = perf_counter_ns() + if instrument_hooks: + instrument_hooks.start_benchmark() for _ in range(rounds): start = perf_counter_ns() for _ in iter_range: - fn(*args, **kwargs) + __codspeed_root_frame__() end = perf_counter_ns() times_per_round_ns.append(end - start) if end - run_start > config.max_time_ns: # TODO: log something break + if instrument_hooks: + instrument_hooks.stop_benchmark() + instrument_hooks.set_executed_benchmark(uri) benchmark_end = perf_counter_ns() total_time = (benchmark_end - run_start) / 1e9 @@ -192,8 +210,19 @@ def run_benchmark( class WallTimeInstrument(Instrument): instrument = "walltime" + instrument_hooks: InstrumentHooks | None def __init__(self, config: CodSpeedConfig) -> None: + try: + self.instrument_hooks = InstrumentHooks() + self.instrument_hooks.set_integration("pytest-codspeed", __semver_version__) + except RuntimeError as e: + if os.environ.get("CODSPEED_ENV") is not None: + warnings.warn( + f"Failed to initialize instrument hooks: {e}", RuntimeWarning + ) + self.instrument_hooks = None + self.config = config self.benchmarks: list[Benchmark] = [] @@ -209,6 +238,7 @@ def measure( **kwargs: P.kwargs, ) -> T: bench, out = run_benchmark( + instrument_hooks=self.instrument_hooks, name=name, uri=uri, fn=fn, From 7583000354a0295f1a67e8af74aa6f34c7e80b59 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Tue, 27 May 2025 10:44:57 +0200 Subject: [PATCH 07/20] chore: update release workflow to include submodules --- .github/workflows/release.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a404af1..7b59a15 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -23,6 +23,8 @@ jobs: runs-on: ${{ matrix.platform.runs-on }} steps: - uses: actions/checkout@v4 + with: + submodules: true - name: Build wheels uses: pypa/cibuildwheel@v2.22.0 env: @@ -39,6 +41,8 @@ jobs: runs-on: ubuntu-24.04 steps: 
- uses: actions/checkout@v4 + with: + submodules: true - uses: astral-sh/setup-uv@v4 with: version: "0.5.20" @@ -59,6 +63,8 @@ jobs: runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 + with: + submodules: true - uses: astral-sh/setup-uv@v4 with: version: "0.5.20" From 72a935f12d145a812108160fe6b57fcb6327c695 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Tue, 3 Jun 2025 10:51:53 +0200 Subject: [PATCH 08/20] chore: bump ruff --- .github/workflows/ci.yml | 2 +- .pre-commit-config.yaml | 4 +- pyproject.toml | 2 +- .../test_pytest_plugin_cpu_instrumentation.py | 12 +++--- uv.lock | 40 +++++++++---------- 5 files changed, 30 insertions(+), 30 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5b778b6..1b97c53 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,7 +21,7 @@ jobs: uses: actions/setup-python@v5 with: python-version: "3.11" - - uses: pre-commit/action@v3.0.0 + - uses: pre-commit/action@v3.0.1 with: extra_args: --all-files diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3705ab9..85050ad 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,8 +13,8 @@ repos: hooks: - id: mypy - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.5 + rev: v0.11.12 hooks: - - id: ruff + - id: ruff-check args: [--fix] - id: ruff-format diff --git a/pyproject.toml b/pyproject.toml index 601bff5..4df558b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,7 +37,7 @@ dependencies = [ ] [project.optional-dependencies] -lint = ["mypy ~= 1.11.2", "ruff ~= 0.6.5"] +lint = ["mypy ~= 1.11.2", "ruff ~= 0.11.12"] compat = [ "pytest-benchmark ~= 5.0.0", "pytest-xdist ~= 3.6.1", diff --git a/tests/test_pytest_plugin_cpu_instrumentation.py b/tests/test_pytest_plugin_cpu_instrumentation.py index e911c52..d72ca4a 100644 --- a/tests/test_pytest_plugin_cpu_instrumentation.py +++ b/tests/test_pytest_plugin_cpu_instrumentation.py @@ -79,12 +79,12 @@ def fixtured_child(): 
"py::ValgrindInstrument.measure..__codspeed_root_frame__" in line for line in lines ), "No root frame found in perf map" - assert any( - "py::test_some_addition_marked" in line for line in lines - ), "No marked test frame found in perf map" - assert any( - "py::test_some_addition_fixtured" in line for line in lines - ), "No fixtured test frame found in perf map" + assert any("py::test_some_addition_marked" in line for line in lines), ( + "No marked test frame found in perf map" + ) + assert any("py::test_some_addition_fixtured" in line for line in lines), ( + "No fixtured test frame found in perf map" + ) assert any( "py::test_some_addition_fixtured..fixtured_child" in line for line in lines diff --git a/uv.lock b/uv.lock index 0b40df2..0a56fed 100644 --- a/uv.lock +++ b/uv.lock @@ -371,7 +371,7 @@ requires-dist = [ { name = "pytest-cov", marker = "extra == 'test'", specifier = "~=4.0.0" }, { name = "pytest-xdist", marker = "extra == 'compat'", specifier = "~=3.6.1" }, { name = "rich", specifier = ">=13.8.1" }, - { name = "ruff", marker = "extra == 'lint'", specifier = "~=0.6.5" }, + { name = "ruff", marker = "extra == 'lint'", specifier = "~=0.11.12" }, ] [package.metadata.requires-dev] @@ -419,27 +419,27 @@ wheels = [ [[package]] name = "ruff" -version = "0.6.9" +version = "0.11.12" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/26/0d/6148a48dab5662ca1d5a93b7c0d13c03abd3cc7e2f35db08410e47cef15d/ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2", size = 3095355 } +sdist = { url = "https://files.pythonhosted.org/packages/15/0a/92416b159ec00cdf11e5882a9d80d29bf84bba3dbebc51c4898bfbca1da6/ruff-0.11.12.tar.gz", hash = "sha256:43cf7f69c7d7c7d7513b9d59c5d8cafd704e05944f978614aa9faff6ac202603", size = 4202289 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/6e/8f/f7a0a0ef1818662efb32ed6df16078c95da7a0a3248d64c2410c1e27799f/ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd", size = 10440526 }, - { url = "https://files.pythonhosted.org/packages/8b/69/b179a5faf936a9e2ab45bb412a668e4661eded964ccfa19d533f29463ef6/ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec", size = 10034612 }, - { url = "https://files.pythonhosted.org/packages/c7/ef/fd1b4be979c579d191eeac37b5cfc0ec906de72c8bcd8595e2c81bb700c1/ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c", size = 9706197 }, - { url = "https://files.pythonhosted.org/packages/29/61/b376d775deb5851cb48d893c568b511a6d3625ef2c129ad5698b64fb523c/ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e", size = 10751855 }, - { url = "https://files.pythonhosted.org/packages/13/d7/def9e5f446d75b9a9c19b24231a3a658c075d79163b08582e56fa5dcfa38/ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577", size = 10200889 }, - { url = "https://files.pythonhosted.org/packages/6c/d6/7f34160818bcb6e84ce293a5966cba368d9112ff0289b273fbb689046047/ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829", size = 11038678 }, - { url = "https://files.pythonhosted.org/packages/13/34/a40ff8ae62fb1b26fb8e6fa7e64bc0e0a834b47317880de22edd6bfb54fb/ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5", size = 11808682 }, - { url = 
"https://files.pythonhosted.org/packages/2e/6d/25a4386ae4009fc798bd10ba48c942d1b0b3e459b5403028f1214b6dd161/ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7", size = 11330446 }, - { url = "https://files.pythonhosted.org/packages/f7/f6/bdf891a9200d692c94ebcd06ae5a2fa5894e522f2c66c2a12dd5d8cb2654/ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f", size = 12483048 }, - { url = "https://files.pythonhosted.org/packages/a7/86/96f4252f41840e325b3fa6c48297e661abb9f564bd7dcc0572398c8daa42/ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa", size = 10936855 }, - { url = "https://files.pythonhosted.org/packages/45/87/801a52d26c8dbf73424238e9908b9ceac430d903c8ef35eab1b44fcfa2bd/ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb", size = 10713007 }, - { url = "https://files.pythonhosted.org/packages/be/27/6f7161d90320a389695e32b6ebdbfbedde28ccbf52451e4b723d7ce744ad/ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0", size = 10274594 }, - { url = "https://files.pythonhosted.org/packages/00/52/dc311775e7b5f5b19831563cb1572ecce63e62681bccc609867711fae317/ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625", size = 10608024 }, - { url = "https://files.pythonhosted.org/packages/98/b6/be0a1ddcbac65a30c985cf7224c4fce786ba2c51e7efeb5178fe410ed3cf/ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039", size = 10982085 }, - { url = 
"https://files.pythonhosted.org/packages/bb/a4/c84bc13d0b573cf7bb7d17b16d6d29f84267c92d79b2f478d4ce322e8e72/ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d", size = 8522088 }, - { url = "https://files.pythonhosted.org/packages/74/be/fc352bd8ca40daae8740b54c1c3e905a7efe470d420a268cd62150248c91/ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117", size = 9359275 }, - { url = "https://files.pythonhosted.org/packages/3e/14/fd026bc74ded05e2351681545a5f626e78ef831f8edce064d61acd2e6ec7/ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93", size = 8679879 }, + { url = "https://files.pythonhosted.org/packages/60/cc/53eb79f012d15e136d40a8e8fc519ba8f55a057f60b29c2df34efd47c6e3/ruff-0.11.12-py3-none-linux_armv6l.whl", hash = "sha256:c7680aa2f0d4c4f43353d1e72123955c7a2159b8646cd43402de6d4a3a25d7cc", size = 10285597 }, + { url = "https://files.pythonhosted.org/packages/e7/d7/73386e9fb0232b015a23f62fea7503f96e29c29e6c45461d4a73bac74df9/ruff-0.11.12-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:2cad64843da9f134565c20bcc430642de897b8ea02e2e79e6e02a76b8dcad7c3", size = 11053154 }, + { url = "https://files.pythonhosted.org/packages/4e/eb/3eae144c5114e92deb65a0cb2c72326c8469e14991e9bc3ec0349da1331c/ruff-0.11.12-py3-none-macosx_11_0_arm64.whl", hash = "sha256:9b6886b524a1c659cee1758140138455d3c029783d1b9e643f3624a5ee0cb0aa", size = 10403048 }, + { url = "https://files.pythonhosted.org/packages/29/64/20c54b20e58b1058db6689e94731f2a22e9f7abab74e1a758dfba058b6ca/ruff-0.11.12-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cc3a3690aad6e86c1958d3ec3c38c4594b6ecec75c1f531e84160bd827b2012", size = 10597062 }, + { url = 
"https://files.pythonhosted.org/packages/29/3a/79fa6a9a39422a400564ca7233a689a151f1039110f0bbbabcb38106883a/ruff-0.11.12-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f97fdbc2549f456c65b3b0048560d44ddd540db1f27c778a938371424b49fe4a", size = 10155152 }, + { url = "https://files.pythonhosted.org/packages/e5/a4/22c2c97b2340aa968af3a39bc38045e78d36abd4ed3fa2bde91c31e712e3/ruff-0.11.12-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74adf84960236961090e2d1348c1a67d940fd12e811a33fb3d107df61eef8fc7", size = 11723067 }, + { url = "https://files.pythonhosted.org/packages/bc/cf/3e452fbd9597bcd8058856ecd42b22751749d07935793a1856d988154151/ruff-0.11.12-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b56697e5b8bcf1d61293ccfe63873aba08fdbcbbba839fc046ec5926bdb25a3a", size = 12460807 }, + { url = "https://files.pythonhosted.org/packages/2f/ec/8f170381a15e1eb7d93cb4feef8d17334d5a1eb33fee273aee5d1f8241a3/ruff-0.11.12-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d47afa45e7b0eaf5e5969c6b39cbd108be83910b5c74626247e366fd7a36a13", size = 12063261 }, + { url = "https://files.pythonhosted.org/packages/0d/bf/57208f8c0a8153a14652a85f4116c0002148e83770d7a41f2e90b52d2b4e/ruff-0.11.12-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bf9603fe1bf949de8b09a2da896f05c01ed7a187f4a386cdba6760e7f61be", size = 11329601 }, + { url = "https://files.pythonhosted.org/packages/c3/56/edf942f7fdac5888094d9ffa303f12096f1a93eb46570bcf5f14c0c70880/ruff-0.11.12-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08033320e979df3b20dba567c62f69c45e01df708b0f9c83912d7abd3e0801cd", size = 11522186 }, + { url = "https://files.pythonhosted.org/packages/ed/63/79ffef65246911ed7e2290aeece48739d9603b3a35f9529fec0fc6c26400/ruff-0.11.12-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:929b7706584f5bfd61d67d5070f399057d07c70585fa8c4491d78ada452d3bef", size = 10449032 }, + { 
url = "https://files.pythonhosted.org/packages/88/19/8c9d4d8a1c2a3f5a1ea45a64b42593d50e28b8e038f1aafd65d6b43647f3/ruff-0.11.12-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7de4a73205dc5756b8e09ee3ed67c38312dce1aa28972b93150f5751199981b5", size = 10129370 }, + { url = "https://files.pythonhosted.org/packages/bc/0f/2d15533eaa18f460530a857e1778900cd867ded67f16c85723569d54e410/ruff-0.11.12-py3-none-musllinux_1_2_i686.whl", hash = "sha256:2635c2a90ac1b8ca9e93b70af59dfd1dd2026a40e2d6eebaa3efb0465dd9cf02", size = 11123529 }, + { url = "https://files.pythonhosted.org/packages/4f/e2/4c2ac669534bdded835356813f48ea33cfb3a947dc47f270038364587088/ruff-0.11.12-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d05d6a78a89166f03f03a198ecc9d18779076ad0eec476819467acb401028c0c", size = 11577642 }, + { url = "https://files.pythonhosted.org/packages/a7/9b/c9ddf7f924d5617a1c94a93ba595f4b24cb5bc50e98b94433ab3f7ad27e5/ruff-0.11.12-py3-none-win32.whl", hash = "sha256:f5a07f49767c4be4772d161bfc049c1f242db0cfe1bd976e0f0886732a4765d6", size = 10475511 }, + { url = "https://files.pythonhosted.org/packages/fd/d6/74fb6d3470c1aada019ffff33c0f9210af746cca0a4de19a1f10ce54968a/ruff-0.11.12-py3-none-win_amd64.whl", hash = "sha256:5a4d9f8030d8c3a45df201d7fb3ed38d0219bccd7955268e863ee4a115fa0832", size = 11523573 }, + { url = "https://files.pythonhosted.org/packages/44/42/d58086ec20f52d2b0140752ae54b355ea2be2ed46f914231136dd1effcc7/ruff-0.11.12-py3-none-win_arm64.whl", hash = "sha256:65194e37853158d368e333ba282217941029a28ea90913c67e558c611d04daa5", size = 10697770 }, ] [[package]] From 7723404ebed7c65b15b68ddc732203b0df648635 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Fri, 6 Jun 2025 11:16:56 +0200 Subject: [PATCH 09/20] fix: fix native library typing --- .../instruments/hooks/__init__.py | 12 ++++----- .../hooks/dist_instrument_hooks.pyi | 27 +++++++++++++++++++ 2 files changed, 32 insertions(+), 7 deletions(-) create mode 100644 
src/pytest_codspeed/instruments/hooks/dist_instrument_hooks.pyi diff --git a/src/pytest_codspeed/instruments/hooks/__init__.py b/src/pytest_codspeed/instruments/hooks/__init__.py index 65e7fc4..a69489c 100644 --- a/src/pytest_codspeed/instruments/hooks/__init__.py +++ b/src/pytest_codspeed/instruments/hooks/__init__.py @@ -6,7 +6,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from .dist_instrument_hooks import lib as LibType + from .dist_instrument_hooks import InstrumentHooksPointer, LibType SUPPORTS_PERF_TRAMPOLINE = sys.version_info >= (3, 12) @@ -15,7 +15,7 @@ class InstrumentHooks: """Zig library wrapper class providing benchmark measurement functionality.""" lib: LibType - instance: int + instance: InstrumentHooksPointer def __init__(self) -> None: if os.environ.get("CODSPEED_ENV") is None: @@ -28,17 +28,15 @@ def __init__(self) -> None: from .dist_instrument_hooks import lib # type: ignore except ImportError as e: raise RuntimeError(f"Failed to load instrument hooks library: {e}") from e + self.lib = lib - instance = lib.instrument_hooks_init() - if instance == 0: + self.instance = self.lib.instrument_hooks_init() + if self.instance == 0: raise RuntimeError("Failed to initialize CodSpeed instrumentation library.") if SUPPORTS_PERF_TRAMPOLINE: sys.activate_stack_trampoline("perf") # type: ignore - self.lib = lib - self.instance = instance - def __del__(self): if hasattr(self, "lib") and hasattr(self, "instance"): self.lib.instrument_hooks_deinit(self.instance) diff --git a/src/pytest_codspeed/instruments/hooks/dist_instrument_hooks.pyi b/src/pytest_codspeed/instruments/hooks/dist_instrument_hooks.pyi new file mode 100644 index 0000000..3ae9ec5 --- /dev/null +++ b/src/pytest_codspeed/instruments/hooks/dist_instrument_hooks.pyi @@ -0,0 +1,27 @@ +InstrumentHooksPointer = object + +class lib: + @staticmethod + def instrument_hooks_init() -> InstrumentHooksPointer: ... 
+ @staticmethod + def instrument_hooks_deinit(hooks: InstrumentHooksPointer) -> None: ... + @staticmethod + def instrument_hooks_is_instrumented(hooks: InstrumentHooksPointer) -> bool: ... + @staticmethod + def instrument_hooks_start_benchmark(hooks: InstrumentHooksPointer) -> int: ... + @staticmethod + def instrument_hooks_stop_benchmark(hooks: InstrumentHooksPointer) -> int: ... + @staticmethod + def instrument_hooks_executed_benchmark( + hooks: InstrumentHooksPointer, pid: int, uri: bytes + ) -> int: ... + @staticmethod + def instrument_hooks_set_integration( + hooks: InstrumentHooksPointer, name: bytes, version: bytes + ) -> int: ... + @staticmethod + def callgrind_start_instrumentation() -> int: ... + @staticmethod + def callgrind_stop_instrumentation() -> int: ... + +LibType = type[lib] From 22d4d33a75579982140f393fc7bd7f2df3370454 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Sun, 1 Jun 2025 14:39:06 +0200 Subject: [PATCH 10/20] feat: support marker attributes to customize the walltime execution --- src/pytest_codspeed/config.py | 80 +++++++++++++++++++++ src/pytest_codspeed/instruments/__init__.py | 2 + src/pytest_codspeed/instruments/valgrind.py | 5 +- src/pytest_codspeed/instruments/walltime.py | 40 ++++++++--- src/pytest_codspeed/plugin.py | 42 +++-------- tests/test_pytest_plugin.py | 40 +++++++++++ 6 files changed, 166 insertions(+), 43 deletions(-) create mode 100644 src/pytest_codspeed/config.py diff --git a/src/pytest_codspeed/config.py b/src/pytest_codspeed/config.py new file mode 100644 index 0000000..0d2d881 --- /dev/null +++ b/src/pytest_codspeed/config.py @@ -0,0 +1,80 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import pytest + + +@dataclass(frozen=True) +class CodSpeedConfig: + """ + The configuration for the codspeed plugin. + Usually created from the command line arguments. 
+ """ + + warmup_time_ns: int | None = None + max_time_ns: int | None = None + max_rounds: int | None = None + + @classmethod + def from_pytest_config(cls, config: pytest.Config) -> CodSpeedConfig: + warmup_time = config.getoption("--codspeed-warmup-time", None) + warmup_time_ns = ( + int(warmup_time * 1_000_000_000) if warmup_time is not None else None + ) + max_time = config.getoption("--codspeed-max-time", None) + max_time_ns = int(max_time * 1_000_000_000) if max_time is not None else None + return cls( + warmup_time_ns=warmup_time_ns, + max_rounds=config.getoption("--codspeed-max-rounds", None), + max_time_ns=max_time_ns, + ) + + +@dataclass(frozen=True) +class BenchmarkMarkerOptions: + group: str | None = None + """The group name to use for the benchmark.""" + min_time: int | None = None + """ + The minimum time of a round (in seconds). + Only available in walltime mode. + """ + max_time: int | None = None + """ + The maximum time to run the benchmark for (in seconds). + Only available in walltime mode. + """ + max_rounds: int | None = None + """ + The maximum number of rounds to run the benchmark for. + Takes precedence over max_time. Only available in walltime mode. 
+ """ + + @classmethod + def from_pytest_item(cls, item: pytest.Item) -> BenchmarkMarkerOptions: + marker = item.get_closest_marker( + "codspeed_benchmark" + ) or item.get_closest_marker("benchmark") + if marker is None: + return cls() + if len(marker.args) > 0: + raise ValueError( + "Positional arguments are not allowed in the benchmark marker" + ) + + options = cls( + group=marker.kwargs.pop("group", None), + min_time=marker.kwargs.pop("min_time", None), + max_time=marker.kwargs.pop("max_time", None), + max_rounds=marker.kwargs.pop("max_rounds", None), + ) + + if len(marker.kwargs) > 0: + raise ValueError( + "Unknown kwargs passed to benchmark marker: " + + ", ".join(marker.kwargs.keys()) + ) + return options diff --git a/src/pytest_codspeed/instruments/__init__.py b/src/pytest_codspeed/instruments/__init__.py index edd2849..d163783 100644 --- a/src/pytest_codspeed/instruments/__init__.py +++ b/src/pytest_codspeed/instruments/__init__.py @@ -9,6 +9,7 @@ import pytest + from pytest_codspeed.config import BenchmarkMarkerOptions from pytest_codspeed.plugin import CodSpeedConfig T = TypeVar("T") @@ -27,6 +28,7 @@ def get_instrument_config_str_and_warns(self) -> tuple[str, list[str]]: ... 
@abstractmethod def measure( self, + marker_options: BenchmarkMarkerOptions, name: str, uri: str, fn: Callable[P, T], diff --git a/src/pytest_codspeed/instruments/valgrind.py b/src/pytest_codspeed/instruments/valgrind.py index bed7a56..4476a19 100644 --- a/src/pytest_codspeed/instruments/valgrind.py +++ b/src/pytest_codspeed/instruments/valgrind.py @@ -13,7 +13,7 @@ from pytest import Session from pytest_codspeed.instruments import P, T - from pytest_codspeed.plugin import CodSpeedConfig + from pytest_codspeed.plugin import BenchmarkMarkerOptions, CodSpeedConfig SUPPORTS_PERF_TRAMPOLINE = sys.version_info >= (3, 12) @@ -35,7 +35,7 @@ def __init__(self, config: CodSpeedConfig) -> None: def get_instrument_config_str_and_warns(self) -> tuple[str, list[str]]: config = ( f"mode: instrumentation, " - f"callgraph: {'enabled' if SUPPORTS_PERF_TRAMPOLINE else 'not supported'}" + f"callgraph: {'enabled' if SUPPORTS_PERF_TRAMPOLINE else 'not supported'}" ) warnings = [] if not self.should_measure: @@ -49,6 +49,7 @@ def get_instrument_config_str_and_warns(self) -> tuple[str, list[str]]: def measure( self, + marker_options: BenchmarkMarkerOptions, name: str, uri: str, fn: Callable[P, T], diff --git a/src/pytest_codspeed/instruments/walltime.py b/src/pytest_codspeed/instruments/walltime.py index ab7a0cc..28f2412 100644 --- a/src/pytest_codspeed/instruments/walltime.py +++ b/src/pytest_codspeed/instruments/walltime.py @@ -23,12 +23,12 @@ from pytest import Session from pytest_codspeed.instruments import P, T - from pytest_codspeed.plugin import CodSpeedConfig + from pytest_codspeed.plugin import BenchmarkMarkerOptions, CodSpeedConfig DEFAULT_WARMUP_TIME_NS = 1_000_000_000 DEFAULT_MAX_TIME_NS = 3_000_000_000 TIMER_RESOLUTION_NS = get_clock_info("perf_counter").resolution * 1e9 -DEFAULT_MIN_ROUND_TIME_NS = TIMER_RESOLUTION_NS * 1_000_000 +DEFAULT_MIN_ROUND_TIME_NS = int(TIMER_RESOLUTION_NS * 1_000_000) IQR_OUTLIER_FACTOR = 1.5 STDEV_OUTLIER_FACTOR = 3 @@ -42,16 +42,35 @@ class 
BenchmarkConfig: max_rounds: int | None @classmethod - def from_codspeed_config(cls, config: CodSpeedConfig) -> BenchmarkConfig: + def from_codspeed_config_and_marker_data( + cls, config: CodSpeedConfig, marker_data: BenchmarkMarkerOptions + ) -> BenchmarkConfig: + if marker_data.max_time is not None: + max_time_ns = int(marker_data.max_time * 1e9) + elif config.max_time_ns is not None: + max_time_ns = config.max_time_ns + else: + max_time_ns = DEFAULT_MAX_TIME_NS + + if marker_data.max_rounds is not None: + max_rounds = marker_data.max_rounds + elif config.max_rounds is not None: + max_rounds = config.max_rounds + else: + max_rounds = None + + if marker_data.min_time is not None: + min_round_time_ns = int(marker_data.min_time * 1e9) + else: + min_round_time_ns = DEFAULT_MIN_ROUND_TIME_NS + return cls( warmup_time_ns=config.warmup_time_ns if config.warmup_time_ns is not None else DEFAULT_WARMUP_TIME_NS, - min_round_time_ns=DEFAULT_MIN_ROUND_TIME_NS, - max_time_ns=config.max_time_ns - if config.max_time_ns is not None - else DEFAULT_MAX_TIME_NS, - max_rounds=config.max_rounds, + min_round_time_ns=min_round_time_ns, + max_time_ns=max_time_ns, + max_rounds=max_rounds, ) @@ -231,6 +250,7 @@ def get_instrument_config_str_and_warns(self) -> tuple[str, list[str]]: def measure( self, + marker_options: BenchmarkMarkerOptions, name: str, uri: str, fn: Callable[P, T], @@ -244,7 +264,9 @@ def measure( fn=fn, args=args, kwargs=kwargs, - config=BenchmarkConfig.from_codspeed_config(self.config), + config=BenchmarkConfig.from_codspeed_config_and_marker_data( + self.config, marker_options + ), ) self.benchmarks.append(bench) return out diff --git a/src/pytest_codspeed/plugin.py b/src/pytest_codspeed/plugin.py index 17374f2..f3dfdc2 100644 --- a/src/pytest_codspeed/plugin.py +++ b/src/pytest_codspeed/plugin.py @@ -14,6 +14,7 @@ import pytest from _pytest.fixtures import FixtureManager +from pytest_codspeed.config import BenchmarkMarkerOptions, CodSpeedConfig from 
pytest_codspeed.instruments import ( MeasurementMode, get_instrument_from_mode, @@ -58,8 +59,7 @@ def pytest_addoption(parser: pytest.Parser): action="store", type=float, help=( - "The time to warm up the benchmark for (in seconds), " - "only for walltime mode" + "The time to warm up the benchmark for (in seconds), only for walltime mode" ), ) group.addoption( @@ -82,27 +82,6 @@ def pytest_addoption(parser: pytest.Parser): ) -@dataclass(frozen=True) -class CodSpeedConfig: - warmup_time_ns: int | None = None - max_time_ns: int | None = None - max_rounds: int | None = None - - @classmethod - def from_pytest_config(cls, config: pytest.Config) -> CodSpeedConfig: - warmup_time = config.getoption("--codspeed-warmup-time", None) - warmup_time_ns = ( - int(warmup_time * 1_000_000_000) if warmup_time is not None else None - ) - max_time = config.getoption("--codspeed-max-time", None) - max_time_ns = int(max_time * 1_000_000_000) if max_time is not None else None - return cls( - warmup_time_ns=warmup_time_ns, - max_rounds=config.getoption("--codspeed-max-rounds", None), - max_time_ns=max_time_ns, - ) - - @dataclass(unsafe_hash=True) class CodSpeedPlugin: is_codspeed_enabled: bool @@ -254,20 +233,21 @@ def pytest_collection_modifyitems( def _measure( plugin: CodSpeedPlugin, - nodeid: str, + node: pytest.Item, config: pytest.Config, fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs, ) -> T: + marker_options = BenchmarkMarkerOptions.from_pytest_item(node) random.seed(0) is_gc_enabled = gc.isenabled() if is_gc_enabled: gc.collect() gc.disable() try: - uri, name = get_git_relative_uri_and_name(nodeid, config.rootpath) - return plugin.instrument.measure(name, uri, fn, *args, **kwargs) + uri, name = get_git_relative_uri_and_name(node.nodeid, config.rootpath) + return plugin.instrument.measure(marker_options, name, uri, fn, *args, **kwargs) finally: # Ensure GC is re-enabled even if the test failed if is_gc_enabled: @@ -276,13 +256,13 @@ def _measure( def wrap_runtest( plugin: 
CodSpeedPlugin, - nodeid: str, + node: pytest.Item, config: pytest.Config, fn: Callable[P, T], ) -> Callable[P, T]: @functools.wraps(fn) def wrapped(*args: P.args, **kwargs: P.kwargs) -> T: - return _measure(plugin, nodeid, config, fn, *args, **kwargs) + return _measure(plugin, node, config, fn, *args, **kwargs) return wrapped @@ -299,7 +279,7 @@ def pytest_runtest_protocol(item: pytest.Item, nextitem: pytest.Item | None): return None # Wrap runtest and defer to default protocol - item.runtest = wrap_runtest(plugin, item.nodeid, item.config, item.runtest) + item.runtest = wrap_runtest(plugin, item, item.config, item.runtest) return None @@ -343,9 +323,7 @@ def __call__(self, func: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> T config = self._request.config plugin = get_plugin(config) if plugin.is_codspeed_enabled: - return _measure( - plugin, self._request.node.nodeid, config, func, *args, **kwargs - ) + return _measure(plugin, self._request.node, config, func, *args, **kwargs) else: return func(*args, **kwargs) diff --git a/tests/test_pytest_plugin.py b/tests/test_pytest_plugin.py index 1a6155e..fa19362 100644 --- a/tests/test_pytest_plugin.py +++ b/tests/test_pytest_plugin.py @@ -220,6 +220,46 @@ def _(): ) +def test_codspeed_marker_unexpected_args(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + @pytest.mark.codspeed_benchmark( + "positional_arg" + ) + def test_bench(): + pass + """ + ) + result = pytester.runpytest("--codspeed") + assert result.ret == 1 + result.stdout.fnmatch_lines_random( + ["*ValueError: Positional arguments are not allowed in the benchmark marker*"], + ) + + +def test_codspeed_marker_unexpected_kwargs(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + @pytest.mark.codspeed_benchmark( + not_allowed=True + ) + def test_bench(): + pass + """ + ) + result = pytester.runpytest("--codspeed") + assert result.ret == 1 + result.stdout.fnmatch_lines_random( + [ + 
"*ValueError: Unknown kwargs passed to benchmark marker: not_allowed*", + ], + ) + + def test_pytest_benchmark_extra_info(pytester: pytest.Pytester) -> None: """https://pytest-benchmark.readthedocs.io/en/latest/usage.html#extra-info""" pytester.makepyfile( From c4adb9b483bc3b5d5f9cd6c0d440d3e5b819002b Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Tue, 3 Jun 2025 17:18:38 +0200 Subject: [PATCH 11/20] feat: make sure the benchmark fixture can only be called once per bench --- src/pytest_codspeed/plugin.py | 20 +++++++++++++------- tests/test_pytest_plugin.py | 22 ++++++++++++++++++++++ 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/src/pytest_codspeed/plugin.py b/src/pytest_codspeed/plugin.py index f3dfdc2..3a20675 100644 --- a/src/pytest_codspeed/plugin.py +++ b/src/pytest_codspeed/plugin.py @@ -318,14 +318,20 @@ def __init__(self, request: pytest.FixtureRequest): self.extra_info: dict = {} self._request = request - - def __call__(self, func: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> T: - config = self._request.config - plugin = get_plugin(config) - if plugin.is_codspeed_enabled: - return _measure(plugin, self._request.node, config, func, *args, **kwargs) + self._config = self._request.config + self._plugin = get_plugin(self._config) + self._called = False + + def __call__(self, target: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> T: + if self._called: + raise RuntimeError("The benchmark fixture can only be used once per test") + self._called = True + if self._plugin.is_codspeed_enabled: + return _measure( + self._plugin, self._request.node, self._config, target, *args, **kwargs + ) else: - return func(*args, **kwargs) + return target(*args, **kwargs) @pytest.fixture(scope="function") diff --git a/tests/test_pytest_plugin.py b/tests/test_pytest_plugin.py index fa19362..8e7b776 100644 --- a/tests/test_pytest_plugin.py +++ b/tests/test_pytest_plugin.py @@ -338,3 +338,25 @@ def test_capsys(capsys): 
result.assert_outcomes(passed=1) result.stdout.no_fnmatch_line("*print to stdout*") result.stderr.no_fnmatch_line("*print to stderr*") + +@pytest.mark.parametrize("mode", [*MeasurementMode]) +def test_benchmark_fixture_used_twice( + pytester: pytest.Pytester, mode: MeasurementMode +) -> None: + """Test that using the benchmark fixture twice in a test raises an error.""" + pytester.makepyfile( + """ + def test_benchmark_used_twice(benchmark): + def foo(): + pass + + benchmark(foo) + benchmark(foo) + """ + ) + result = run_pytest_codspeed_with_mode(pytester, mode) + assert result.ret == 1, "the run should have failed" + result.stdout.fnmatch_lines( + ["*RuntimeError: The benchmark fixture can only be used once per test*"] + ) + From 96fe457ff3bba6c22194a5b7f94a683ef31bd727 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Thu, 5 Jun 2025 22:22:12 +0200 Subject: [PATCH 12/20] feat: support pytest-benchmark's pedantic API --- src/pytest_codspeed/config.py | 63 ++++- src/pytest_codspeed/instruments/__init__.py | 20 +- src/pytest_codspeed/instruments/valgrind.py | 56 ++++- src/pytest_codspeed/instruments/walltime.py | 216 ++++++++++-------- src/pytest_codspeed/plugin.py | 92 ++++++-- tests/test_pytest_plugin.py | 78 +++++++ .../test_pytest_plugin_cpu_instrumentation.py | 81 +++++++ tests/test_pytest_plugin_walltime.py | 51 +++++ 8 files changed, 530 insertions(+), 127 deletions(-) diff --git a/src/pytest_codspeed/config.py b/src/pytest_codspeed/config.py index 0d2d881..1932a8e 100644 --- a/src/pytest_codspeed/config.py +++ b/src/pytest_codspeed/config.py @@ -1,9 +1,14 @@ from __future__ import annotations -from dataclasses import dataclass -from typing import TYPE_CHECKING +import dataclasses +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Generic, TypeVar + +T = TypeVar("T") if TYPE_CHECKING: + from typing import Any, Callable + import pytest @@ -64,17 +69,51 @@ def from_pytest_item(cls, item: pytest.Item) -> BenchmarkMarkerOptions: raise 
ValueError( "Positional arguments are not allowed in the benchmark marker" ) + kwargs = marker.kwargs - options = cls( - group=marker.kwargs.pop("group", None), - min_time=marker.kwargs.pop("min_time", None), - max_time=marker.kwargs.pop("max_time", None), - max_rounds=marker.kwargs.pop("max_rounds", None), - ) - - if len(marker.kwargs) > 0: + unknown_kwargs = set(kwargs.keys()) - { + field.name for field in dataclasses.fields(cls) + } + if unknown_kwargs: raise ValueError( "Unknown kwargs passed to benchmark marker: " - + ", ".join(marker.kwargs.keys()) + + ", ".join(sorted(unknown_kwargs)) ) - return options + + return cls(**kwargs) + + +@dataclass(frozen=True) +class PedanticOptions(Generic[T]): + """Parameters for running a benchmark using the pedantic fixture API.""" + + target: Callable[..., T] + setup: Callable[[], Any | None] | None + teardown: Callable[..., Any | None] | None + rounds: int + warmup_rounds: int + iterations: int + args: tuple[Any, ...] = field(default_factory=tuple) + kwargs: dict[str, Any] = field(default_factory=dict) + + def __post_init__(self) -> None: + if self.rounds < 0: + raise ValueError("rounds must be positive") + if self.warmup_rounds < 0: + raise ValueError("warmup_rounds must be non-negative") + if self.iterations <= 0: + raise ValueError("iterations must be positive") + if self.iterations > 1 and self.setup is not None: + raise ValueError( + "setup cannot be used with multiple iterations, use multiple rounds" + ) + + def setup_and_get_args_kwargs(self) -> tuple[tuple[Any, ...], dict[str, Any]]: + if self.setup is None: + return self.args, self.kwargs + maybe_result = self.setup(*self.args, **self.kwargs) + if maybe_result is not None: + if len(self.args) > 0 or len(self.kwargs) > 0: + raise ValueError("setup cannot return a value when args are provided") + return maybe_result + return self.args, self.kwargs diff --git a/src/pytest_codspeed/instruments/__init__.py b/src/pytest_codspeed/instruments/__init__.py index 
d163783..fb264f1 100644 --- a/src/pytest_codspeed/instruments/__init__.py +++ b/src/pytest_codspeed/instruments/__init__.py @@ -5,15 +5,14 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Any, Callable, ClassVar, ParamSpec, TypeVar + from typing import Any, Callable, ClassVar, TypeVar import pytest - from pytest_codspeed.config import BenchmarkMarkerOptions + from pytest_codspeed.config import BenchmarkMarkerOptions, PedanticOptions from pytest_codspeed.plugin import CodSpeedConfig T = TypeVar("T") - P = ParamSpec("P") class Instrument(metaclass=ABCMeta): @@ -31,9 +30,18 @@ def measure( marker_options: BenchmarkMarkerOptions, name: str, uri: str, - fn: Callable[P, T], - *args: P.args, - **kwargs: P.kwargs, + fn: Callable[..., T], + *args: tuple, + **kwargs: dict[str, Any], + ) -> T: ... + + @abstractmethod + def measure_pedantic( + self, + marker_options: BenchmarkMarkerOptions, + pedantic_options: PedanticOptions[T], + name: str, + uri: str, ) -> T: ... @abstractmethod diff --git a/src/pytest_codspeed/instruments/valgrind.py b/src/pytest_codspeed/instruments/valgrind.py index 4476a19..9f135f4 100644 --- a/src/pytest_codspeed/instruments/valgrind.py +++ b/src/pytest_codspeed/instruments/valgrind.py @@ -1,6 +1,7 @@ from __future__ import annotations import sys +import warnings from typing import TYPE_CHECKING from pytest_codspeed import __semver_version__ @@ -12,7 +13,8 @@ from pytest import Session - from pytest_codspeed.instruments import P, T + from pytest_codspeed.config import PedanticOptions + from pytest_codspeed.instruments import T from pytest_codspeed.plugin import BenchmarkMarkerOptions, CodSpeedConfig SUPPORTS_PERF_TRAMPOLINE = sys.version_info >= (3, 12) @@ -52,9 +54,9 @@ def measure( marker_options: BenchmarkMarkerOptions, name: str, uri: str, - fn: Callable[P, T], - *args: P.args, - **kwargs: P.kwargs, + fn: Callable[..., T], + *args: tuple, + **kwargs: dict[str, Any], ) -> T: self.benchmark_count += 1 @@ -78,8 +80,54 @@ def 
__codspeed_root_frame__() -> T: self.instrument_hooks.lib.callgrind_stop_instrumentation() self.instrument_hooks.set_executed_benchmark(uri) + def measure_pedantic( + self, + marker_options: BenchmarkMarkerOptions, + pedantic_options: PedanticOptions[T], + name: str, + uri: str, + ) -> T: + if pedantic_options.rounds != 1 or pedantic_options.iterations != 1: + warnings.warn( + "Valgrind instrument ignores rounds and iterations settings " + "in pedantic mode" + ) + if not self.instrument_hooks: + args, kwargs = pedantic_options.setup_and_get_args_kwargs() + out = pedantic_options.target(*args, **kwargs) + if pedantic_options.teardown is not None: + pedantic_options.teardown(*args, **kwargs) + return out + + def __codspeed_root_frame__(*args, **kwargs) -> T: + return pedantic_options.target(*args, **kwargs) + + # Warmup + warmup_rounds = max( + pedantic_options.warmup_rounds, 1 if SUPPORTS_PERF_TRAMPOLINE else 0 + ) + for _ in range(warmup_rounds): + args, kwargs = pedantic_options.setup_and_get_args_kwargs() + __codspeed_root_frame__(*args, **kwargs) + if pedantic_options.teardown is not None: + pedantic_options.teardown(*args, **kwargs) + + # Compute the actual result of the function + args, kwargs = pedantic_options.setup_and_get_args_kwargs() + self.instrument_hooks.lib.callgrind_start_instrumentation() + try: + out = __codspeed_root_frame__(*args, **kwargs) + finally: + self.instrument_hooks.lib.callgrind_stop_instrumentation() + self.instrument_hooks.set_executed_benchmark(uri) + if pedantic_options.teardown is not None: + pedantic_options.teardown(*args, **kwargs) + + return out + def report(self, session: Session) -> None: reporter = session.config.pluginmanager.get_plugin("terminalreporter") + assert reporter is not None, "terminalreporter not found" count_suffix = "benchmarked" if self.should_measure else "benchmark tested" reporter.write_sep( "=", diff --git a/src/pytest_codspeed/instruments/walltime.py b/src/pytest_codspeed/instruments/walltime.py index 
28f2412..a992477 100644 --- a/src/pytest_codspeed/instruments/walltime.py +++ b/src/pytest_codspeed/instruments/walltime.py @@ -22,7 +22,8 @@ from pytest import Session - from pytest_codspeed.instruments import P, T + from pytest_codspeed.config import PedanticOptions + from pytest_codspeed.instruments import T from pytest_codspeed.plugin import BenchmarkMarkerOptions, CodSpeedConfig DEFAULT_WARMUP_TIME_NS = 1_000_000_000 @@ -153,80 +154,6 @@ class Benchmark: stats: BenchmarkStats -def run_benchmark( - instrument_hooks: InstrumentHooks | None, - name: str, - uri: str, - fn: Callable[P, T], - args, - kwargs, - config: BenchmarkConfig, -) -> tuple[Benchmark, T]: - def __codspeed_root_frame__() -> T: - return fn(*args, **kwargs) - - # Compute the actual result of the function - out = __codspeed_root_frame__() - - # Warmup - times_per_round_ns: list[float] = [] - warmup_start = start = perf_counter_ns() - while True: - start = perf_counter_ns() - __codspeed_root_frame__() - end = perf_counter_ns() - times_per_round_ns.append(end - start) - if end - warmup_start > config.warmup_time_ns: - break - - # Round sizing - warmup_mean_ns = mean(times_per_round_ns) - warmup_iters = len(times_per_round_ns) - times_per_round_ns.clear() - iter_per_round = ( - int(ceil(config.min_round_time_ns / warmup_mean_ns)) - if warmup_mean_ns <= config.min_round_time_ns - else 1 - ) - if config.max_rounds is None: - round_time_ns = warmup_mean_ns * iter_per_round - rounds = int(config.max_time_ns / round_time_ns) - else: - rounds = config.max_rounds - rounds = max(1, rounds) - - # Benchmark - iter_range = range(iter_per_round) - run_start = perf_counter_ns() - if instrument_hooks: - instrument_hooks.start_benchmark() - for _ in range(rounds): - start = perf_counter_ns() - for _ in iter_range: - __codspeed_root_frame__() - end = perf_counter_ns() - times_per_round_ns.append(end - start) - - if end - run_start > config.max_time_ns: - # TODO: log something - break - if instrument_hooks: - 
instrument_hooks.stop_benchmark() - instrument_hooks.set_executed_benchmark(uri) - benchmark_end = perf_counter_ns() - total_time = (benchmark_end - run_start) / 1e9 - - stats = BenchmarkStats.from_list( - times_per_round_ns, - rounds=rounds, - total_time=total_time, - iter_per_round=iter_per_round, - warmup_iters=warmup_iters, - ) - - return Benchmark(name=name, uri=uri, config=config, stats=stats), out - - class WallTimeInstrument(Instrument): instrument = "walltime" instrument_hooks: InstrumentHooks | None @@ -253,26 +180,137 @@ def measure( marker_options: BenchmarkMarkerOptions, name: str, uri: str, - fn: Callable[P, T], - *args: P.args, - **kwargs: P.kwargs, + fn: Callable[..., T], + *args: tuple, + **kwargs: dict[str, Any], ) -> T: - bench, out = run_benchmark( - instrument_hooks=self.instrument_hooks, - name=name, - uri=uri, - fn=fn, - args=args, - kwargs=kwargs, - config=BenchmarkConfig.from_codspeed_config_and_marker_data( - self.config, marker_options - ), + benchmark_config = BenchmarkConfig.from_codspeed_config_and_marker_data( + self.config, marker_options + ) + + def __codspeed_root_frame__() -> T: + return fn(*args, **kwargs) + + # Compute the actual result of the function + out = __codspeed_root_frame__() + + # Warmup + times_per_round_ns: list[float] = [] + warmup_start = start = perf_counter_ns() + while True: + start = perf_counter_ns() + __codspeed_root_frame__() + end = perf_counter_ns() + times_per_round_ns.append(end - start) + if end - warmup_start > benchmark_config.warmup_time_ns: + break + + # Round sizing + warmup_mean_ns = mean(times_per_round_ns) + warmup_iters = len(times_per_round_ns) + times_per_round_ns.clear() + iter_per_round = ( + int(ceil(benchmark_config.min_round_time_ns / warmup_mean_ns)) + if warmup_mean_ns <= benchmark_config.min_round_time_ns + else 1 + ) + if benchmark_config.max_rounds is None: + round_time_ns = warmup_mean_ns * iter_per_round + rounds = int(benchmark_config.max_time_ns / round_time_ns) + else: + 
rounds = benchmark_config.max_rounds + rounds = max(1, rounds) + + # Benchmark + iter_range = range(iter_per_round) + run_start = perf_counter_ns() + for _ in range(rounds): + start = perf_counter_ns() + for _ in iter_range: + __codspeed_root_frame__() + end = perf_counter_ns() + times_per_round_ns.append(end - start) + + if end - run_start > benchmark_config.max_time_ns: + # TODO: log something + break + benchmark_end = perf_counter_ns() + total_time = (benchmark_end - run_start) / 1e9 + + stats = BenchmarkStats.from_list( + times_per_round_ns, + rounds=rounds, + total_time=total_time, + iter_per_round=iter_per_round, + warmup_iters=warmup_iters, + ) + + self.benchmarks.append( + Benchmark(name=name, uri=uri, config=benchmark_config, stats=stats) + ) + return out + + def measure_pedantic( + self, + marker_options: BenchmarkMarkerOptions, + pedantic_options: PedanticOptions[T], + name: str, + uri: str, + ) -> T: + benchmark_config = BenchmarkConfig.from_codspeed_config_and_marker_data( + self.config, marker_options + ) + + def __codspeed_root_frame__(*args, **kwargs) -> T: + return pedantic_options.target(*args, **kwargs) + + iter_range = range(pedantic_options.iterations) + + # Warmup + for _ in range(pedantic_options.warmup_rounds): + args, kwargs = pedantic_options.setup_and_get_args_kwargs() + for _ in iter_range: + __codspeed_root_frame__(*args, **kwargs) + if pedantic_options.teardown is not None: + pedantic_options.teardown(*args, **kwargs) + + # Benchmark + times_per_round_ns: list[float] = [] + benchmark_start = perf_counter_ns() + for _ in range(pedantic_options.rounds): + start = perf_counter_ns() + args, kwargs = pedantic_options.setup_and_get_args_kwargs() + for _ in iter_range: + __codspeed_root_frame__(*args, **kwargs) + end = perf_counter_ns() + times_per_round_ns.append(end - start) + if pedantic_options.teardown is not None: + pedantic_options.teardown(*args, **kwargs) + + benchmark_end = perf_counter_ns() + total_time = (benchmark_end - 
benchmark_start) / 1e9 + stats = BenchmarkStats.from_list( + times_per_round_ns, + rounds=pedantic_options.rounds, + total_time=total_time, + iter_per_round=pedantic_options.iterations, + warmup_iters=pedantic_options.warmup_rounds, + ) + + # Compute the actual result of the function + args, kwargs = pedantic_options.setup_and_get_args_kwargs() + out = __codspeed_root_frame__(*args, **kwargs) + if pedantic_options.teardown is not None: + pedantic_options.teardown(*args, **kwargs) + + self.benchmarks.append( + Benchmark(name=name, uri=uri, config=benchmark_config, stats=stats) ) - self.benchmarks.append(bench) return out def report(self, session: Session) -> None: reporter = session.config.pluginmanager.get_plugin("terminalreporter") + assert reporter is not None, "terminalreporter not found" if len(self.benchmarks) == 0: reporter.write_sep( diff --git a/src/pytest_codspeed/plugin.py b/src/pytest_codspeed/plugin.py index 3a20675..16f0f06 100644 --- a/src/pytest_codspeed/plugin.py +++ b/src/pytest_codspeed/plugin.py @@ -14,7 +14,11 @@ import pytest from _pytest.fixtures import FixtureManager -from pytest_codspeed.config import BenchmarkMarkerOptions, CodSpeedConfig +from pytest_codspeed.config import ( + BenchmarkMarkerOptions, + CodSpeedConfig, + PedanticOptions, +) from pytest_codspeed.instruments import ( MeasurementMode, get_instrument_from_mode, @@ -27,12 +31,11 @@ from . 
import __version__ if TYPE_CHECKING: - from typing import Callable, ParamSpec, TypeVar + from typing import Any, Callable, TypeVar from pytest_codspeed.instruments import Instrument T = TypeVar("T") - P = ParamSpec("P") IS_PYTEST_BENCHMARK_INSTALLED = importlib.util.find_spec("pytest_benchmark") is not None IS_PYTEST_SPEED_INSTALLED = importlib.util.find_spec("pytest_speed") is not None @@ -137,14 +140,14 @@ def pytest_configure(config: pytest.Config): profile_folder = os.environ.get("CODSPEED_PROFILE_FOLDER") - codspeedconfig = CodSpeedConfig.from_pytest_config(config) + codspeed_config = CodSpeedConfig.from_pytest_config(config) plugin = CodSpeedPlugin( disabled_plugins=tuple(disabled_plugins), is_codspeed_enabled=is_codspeed_enabled, mode=mode, - instrument=instrument(codspeedconfig), - config=codspeedconfig, + instrument=instrument(codspeed_config), + config=codspeed_config, profile_folder=Path(profile_folder) if profile_folder else None, ) config.pluginmanager.register(plugin, PLUGIN_NAME) @@ -235,9 +238,10 @@ def _measure( plugin: CodSpeedPlugin, node: pytest.Item, config: pytest.Config, - fn: Callable[P, T], - *args: P.args, - **kwargs: P.kwargs, + pedantic_options: PedanticOptions | None, + fn: Callable[..., T], + args: tuple[Any, ...], + kwargs: dict[str, Any], ) -> T: marker_options = BenchmarkMarkerOptions.from_pytest_item(node) random.seed(0) @@ -247,7 +251,14 @@ def _measure( gc.disable() try: uri, name = get_git_relative_uri_and_name(node.nodeid, config.rootpath) - return plugin.instrument.measure(marker_options, name, uri, fn, *args, **kwargs) + if pedantic_options is None: + return plugin.instrument.measure( + marker_options, name, uri, fn, *args, **kwargs + ) + else: + return plugin.instrument.measure_pedantic( + marker_options, pedantic_options, name, uri + ) finally: # Ensure GC is re-enabled even if the test failed if is_gc_enabled: @@ -258,11 +269,11 @@ def wrap_runtest( plugin: CodSpeedPlugin, node: pytest.Item, config: pytest.Config, - fn: 
Callable[P, T], -) -> Callable[P, T]: + fn: Callable[..., T], +) -> Callable[..., T]: @functools.wraps(fn) - def wrapped(*args: P.args, **kwargs: P.kwargs) -> T: - return _measure(plugin, node, config, fn, *args, **kwargs) + def wrapped(*args: tuple, **kwargs: dict[str, Any]) -> T: + return _measure(plugin, node, config, None, fn, args, kwargs) return wrapped @@ -322,17 +333,66 @@ def __init__(self, request: pytest.FixtureRequest): self._plugin = get_plugin(self._config) self._called = False - def __call__(self, target: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> T: + def __call__( + self, target: Callable[..., T], *args: tuple, **kwargs: dict[str, Any] + ) -> T: if self._called: raise RuntimeError("The benchmark fixture can only be used once per test") self._called = True if self._plugin.is_codspeed_enabled: return _measure( - self._plugin, self._request.node, self._config, target, *args, **kwargs + self._plugin, + self._request.node, + self._config, + None, + target, + args, + kwargs, ) else: return target(*args, **kwargs) + def pedantic( + self, + target: Callable[..., T], + args: tuple[Any, ...] 
= (), + kwargs: dict[str, Any] = {}, + setup: Callable | None = None, + teardown: Callable | None = None, + rounds: int = 1, + warmup_rounds: int = 0, + iterations: int = 1, + ): + if self._called: + raise RuntimeError("The benchmark fixture can only be used once per test") + self._called = True + pedantic_options = PedanticOptions( + target=target, + args=args, + kwargs=kwargs, + setup=setup, + teardown=teardown, + rounds=rounds, + warmup_rounds=warmup_rounds, + iterations=iterations, + ) + if self._plugin.is_codspeed_enabled: + return _measure( + self._plugin, + self._request.node, + self._config, + pedantic_options, + target, + args, + kwargs, + ) + else: + args, kwargs = pedantic_options.setup_and_get_args_kwargs() + result = target(*args, **kwargs) + if pedantic_options.teardown is not None: + pedantic_options.teardown(*args, **kwargs) + return result + @pytest.fixture(scope="function") def codspeed_benchmark(request: pytest.FixtureRequest) -> Callable: diff --git a/tests/test_pytest_plugin.py b/tests/test_pytest_plugin.py index 8e7b776..89cd7b1 100644 --- a/tests/test_pytest_plugin.py +++ b/tests/test_pytest_plugin.py @@ -339,6 +339,63 @@ def test_capsys(capsys): result.stdout.no_fnmatch_line("*print to stdout*") result.stderr.no_fnmatch_line("*print to stderr*") + +@pytest.mark.xfail(reason="not supported by pytest-benchmark, see #78") +@pytest.mark.parametrize("mode", [*MeasurementMode]) +def test_stateful_warmup_fixture( + pytester: pytest.Pytester, mode: MeasurementMode +) -> None: + """Test that the stateful warmup works correctly.""" + pytester.makepyfile( + """ + import pytest + + def test_stateful_warmup(benchmark): + has_run = False + + def b(): + nonlocal has_run + assert not has_run, "Benchmark ran multiple times without setup" + has_run = True + + benchmark(b) + """ + ) + result = run_pytest_codspeed_with_mode(pytester, mode) + assert result.ret == 0, "the run should have succeeded" + result.assert_outcomes(passed=1) + + 
+@pytest.mark.xfail(reason="not supported by pytest-benchmark, see #78") +@pytest.mark.parametrize("mode", [*MeasurementMode]) +def test_stateful_warmup_marker( + pytester: pytest.Pytester, mode: MeasurementMode +) -> None: + """Test that the stateful warmup marker works correctly.""" + pytester.makepyfile( + """ + import pytest + + has_run = False + + @pytest.fixture(autouse=True) + def fixture(): + global has_run + has_run = False + + + @pytest.mark.benchmark + def test_stateful_warmup_marker(): + global has_run + assert not has_run, "Benchmark ran multiple times without setup" + has_run = True + """ + ) + result = run_pytest_codspeed_with_mode(pytester, mode) + assert result.ret == 0, "the run should have succeeded" + result.assert_outcomes(passed=1) + + @pytest.mark.parametrize("mode", [*MeasurementMode]) def test_benchmark_fixture_used_twice( pytester: pytest.Pytester, mode: MeasurementMode @@ -360,3 +417,24 @@ def foo(): ["*RuntimeError: The benchmark fixture can only be used once per test*"] ) + +@pytest.mark.parametrize("mode", [*MeasurementMode]) +def test_benchmark_fixture_used_normal_pedantic( + pytester: pytest.Pytester, mode: MeasurementMode +) -> None: + """Test that using the benchmark fixture twice in a test raises an error.""" + pytester.makepyfile( + """ + def test_benchmark_used_twice(benchmark): + def foo(): + pass + + benchmark(foo) + benchmark.pedantic(foo) + """ + ) + result = run_pytest_codspeed_with_mode(pytester, mode) + assert result.ret == 1, "the run should have failed" + result.stdout.fnmatch_lines( + ["*RuntimeError: The benchmark fixture can only be used once per test*"] + ) diff --git a/tests/test_pytest_plugin_cpu_instrumentation.py b/tests/test_pytest_plugin_cpu_instrumentation.py index d72ca4a..4856ae6 100644 --- a/tests/test_pytest_plugin_cpu_instrumentation.py +++ b/tests/test_pytest_plugin_cpu_instrumentation.py @@ -116,3 +116,84 @@ def test_my_stuff(benchmark, i): result = pytester.runpytest("--codspeed", "-n", "128") assert 
result.ret == 0, "the run should have succeeded" result.stdout.fnmatch_lines(["*256 passed*"]) + + +def test_valgrind_pedantic_warning(pytester: pytest.Pytester) -> None: + """ + Test that using pedantic mode with Valgrind instrumentation shows a warning about + ignoring rounds and iterations. + """ + pytester.makepyfile( + """ + def test_benchmark_pedantic(benchmark): + def foo(): + return 1 + 1 + + benchmark.pedantic(foo, rounds=10, iterations=100) + """ + ) + result = run_pytest_codspeed_with_mode(pytester, MeasurementMode.Instrumentation) + result.stdout.fnmatch_lines( + [ + "*UserWarning: Valgrind instrument ignores rounds and iterations settings " + "in pedantic mode*" + ] + ) + result.assert_outcomes(passed=1) + + +@skip_without_valgrind +@skip_without_perf_trampoline +def test_benchmark_pedantic_instrumentation( + pytester: pytest.Pytester, codspeed_env +) -> None: + """Test that pedantic mode works with instrumentation mode.""" + pytester.makepyfile( + """ + def test_pedantic_full_features(benchmark): + setup_calls = 0 + teardown_calls = 0 + target_calls = 0 + + def setup(): + nonlocal setup_calls + setup_calls += 1 + return (1, 2), {"c": 3} + + def teardown(a, b, c): + nonlocal teardown_calls + teardown_calls += 1 + assert a == 1 + assert b == 2 + assert c == 3 + + def target(a, b, c): + nonlocal target_calls + target_calls += 1 + assert a == 1 + assert b == 2 + assert c == 3 + return a + b + c + + result = benchmark.pedantic( + target, + setup=setup, + teardown=teardown, + rounds=3, + warmup_rounds=3 + ) + + # Verify the results + # Instrumentation ignores rounds but is called during warmup + assert result == 6 # 1 + 2 + 3 + assert setup_calls == 1 + 3 + assert teardown_calls == 1 + 3 + assert target_calls == 1 + 3 + """ + ) + with codspeed_env(): + result = run_pytest_codspeed_with_mode( + pytester, MeasurementMode.Instrumentation + ) + assert result.ret == 0, "the run should have succeeded" + result.assert_outcomes(passed=1) diff --git 
a/tests/test_pytest_plugin_walltime.py b/tests/test_pytest_plugin_walltime.py index 7e86ca4..510ab30 100644 --- a/tests/test_pytest_plugin_walltime.py +++ b/tests/test_pytest_plugin_walltime.py @@ -35,3 +35,54 @@ def test_my_stuff(benchmark, inp): "*3 benchmarked*", ] ) + + +def test_benchmark_pedantic_walltime( + pytester: pytest.Pytester, +) -> None: + """Test that pedantic mode works with walltime mode.""" + pytester.makepyfile( + """ + def test_pedantic_full_features(benchmark): + setup_calls = 0 + teardown_calls = 0 + target_calls = 0 + + def setup(): + nonlocal setup_calls + setup_calls += 1 + return (1, 2), {"c": 3} + + def teardown(a, b, c): + nonlocal teardown_calls + teardown_calls += 1 + assert a == 1 + assert b == 2 + assert c == 3 + + def target(a, b, c): + nonlocal target_calls + target_calls += 1 + assert a == 1 + assert b == 2 + assert c == 3 + return a + b + c + + result = benchmark.pedantic( + target, + setup=setup, + teardown=teardown, + rounds=3, + warmup_rounds=1 + ) + + # Verify the results + assert result == 6 # 1 + 2 + 3 + assert setup_calls == 5 # 3 rounds + 1 warmup + 1 calibration + assert teardown_calls == 5 + assert target_calls == 5 + """ + ) + result = run_pytest_codspeed_with_mode(pytester, MeasurementMode.WallTime) + assert result.ret == 0, "the run should have succeeded" + result.assert_outcomes(passed=1) From 7b8c2c9ded80703bf1d1e61944e1e007c03ceda4 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Fri, 6 Jun 2025 14:26:41 +0200 Subject: [PATCH 13/20] =?UTF-8?q?Release=20v4.0.0-beta=20=F0=9F=9A=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 2 +- src/pytest_codspeed/__init__.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4df558b..a818476 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,7 +66,7 @@ version = { attr = "pytest_codspeed.__version__" } [tool.bumpver] -current_version = "3.2.0" 
+current_version = "4.0.0-beta" version_pattern = "MAJOR.MINOR.PATCH[-TAG[NUM]]" commit_message = "Release v{new_version} πŸš€" tag_message = "Release v{new_version} πŸš€" diff --git a/src/pytest_codspeed/__init__.py b/src/pytest_codspeed/__init__.py index 5ce02d7..05e6928 100644 --- a/src/pytest_codspeed/__init__.py +++ b/src/pytest_codspeed/__init__.py @@ -1,6 +1,6 @@ -__version__ = "3.2.0" +__version__ = "4.0.0b0" # We also have the semver version since __version__ is not semver compliant -__semver_version__ = "3.2.0" +__semver_version__ = "4.0.0-beta" from .plugin import BenchmarkFixture From 0eba0c5ce136fb9e1545fb9dd49b88cf29f8508b Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Tue, 10 Jun 2025 15:42:17 +0200 Subject: [PATCH 14/20] fix: reenable walltime instrument hooks --- src/pytest_codspeed/instruments/walltime.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/pytest_codspeed/instruments/walltime.py b/src/pytest_codspeed/instruments/walltime.py index a992477..2b01241 100644 --- a/src/pytest_codspeed/instruments/walltime.py +++ b/src/pytest_codspeed/instruments/walltime.py @@ -224,6 +224,8 @@ def __codspeed_root_frame__() -> T: # Benchmark iter_range = range(iter_per_round) run_start = perf_counter_ns() + if self.instrument_hooks: + self.instrument_hooks.start_benchmark() for _ in range(rounds): start = perf_counter_ns() for _ in iter_range: @@ -234,6 +236,9 @@ def __codspeed_root_frame__() -> T: if end - run_start > benchmark_config.max_time_ns: # TODO: log something break + if self.instrument_hooks: + self.instrument_hooks.stop_benchmark() + self.instrument_hooks.set_executed_benchmark(uri) benchmark_end = perf_counter_ns() total_time = (benchmark_end - run_start) / 1e9 @@ -250,7 +255,7 @@ def __codspeed_root_frame__() -> T: ) return out - def measure_pedantic( + def measure_pedantic( # noqa: C901 self, marker_options: BenchmarkMarkerOptions, pedantic_options: PedanticOptions[T], @@ -277,6 +282,8 @@ def 
__codspeed_root_frame__(*args, **kwargs) -> T: # Benchmark times_per_round_ns: list[float] = [] benchmark_start = perf_counter_ns() + if self.instrument_hooks: + self.instrument_hooks.start_benchmark() for _ in range(pedantic_options.rounds): start = perf_counter_ns() args, kwargs = pedantic_options.setup_and_get_args_kwargs() @@ -286,7 +293,9 @@ def __codspeed_root_frame__(*args, **kwargs) -> T: times_per_round_ns.append(end - start) if pedantic_options.teardown is not None: pedantic_options.teardown(*args, **kwargs) - + if self.instrument_hooks: + self.instrument_hooks.stop_benchmark() + self.instrument_hooks.set_executed_benchmark(uri) benchmark_end = perf_counter_ns() total_time = (benchmark_end - benchmark_start) / 1e9 stats = BenchmarkStats.from_list( From e49de52abe56e5757a84e42c624e799e63d6b82a Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Tue, 10 Jun 2025 17:02:40 +0200 Subject: [PATCH 15/20] =?UTF-8?q?Release=20v4.0.0-beta1=20=F0=9F=9A=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 2 +- src/pytest_codspeed/__init__.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a818476..69616c5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,7 +66,7 @@ version = { attr = "pytest_codspeed.__version__" } [tool.bumpver] -current_version = "4.0.0-beta" +current_version = "4.0.0-beta1" version_pattern = "MAJOR.MINOR.PATCH[-TAG[NUM]]" commit_message = "Release v{new_version} πŸš€" tag_message = "Release v{new_version} πŸš€" diff --git a/src/pytest_codspeed/__init__.py b/src/pytest_codspeed/__init__.py index 05e6928..69b97a8 100644 --- a/src/pytest_codspeed/__init__.py +++ b/src/pytest_codspeed/__init__.py @@ -1,6 +1,6 @@ -__version__ = "4.0.0b0" +__version__ = "4.0.0b1" # We also have the semver version since __version__ is not semver compliant -__semver_version__ = "4.0.0-beta" +__semver_version__ = "4.0.0-beta1" from .plugin import 
BenchmarkFixture From 3181f6d4ab9ca1ad2b4524b49d90b759ea397292 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Tue, 10 Jun 2025 21:51:38 +0200 Subject: [PATCH 16/20] chore: improve reliability of perf trampoline compatibility checks --- src/pytest_codspeed/instruments/hooks/__init__.py | 4 ++-- src/pytest_codspeed/instruments/valgrind.py | 4 +--- src/pytest_codspeed/instruments/walltime.py | 9 ++++++++- src/pytest_codspeed/plugin.py | 13 ++++--------- src/pytest_codspeed/utils.py | 9 +++++++++ tests/conftest.py | 2 +- 6 files changed, 25 insertions(+), 16 deletions(-) diff --git a/src/pytest_codspeed/instruments/hooks/__init__.py b/src/pytest_codspeed/instruments/hooks/__init__.py index a69489c..98e32fa 100644 --- a/src/pytest_codspeed/instruments/hooks/__init__.py +++ b/src/pytest_codspeed/instruments/hooks/__init__.py @@ -5,11 +5,11 @@ import warnings from typing import TYPE_CHECKING +from pytest_codspeed.utils import SUPPORTS_PERF_TRAMPOLINE + if TYPE_CHECKING: from .dist_instrument_hooks import InstrumentHooksPointer, LibType -SUPPORTS_PERF_TRAMPOLINE = sys.version_info >= (3, 12) - class InstrumentHooks: """Zig library wrapper class providing benchmark measurement functionality.""" diff --git a/src/pytest_codspeed/instruments/valgrind.py b/src/pytest_codspeed/instruments/valgrind.py index 9f135f4..71b1542 100644 --- a/src/pytest_codspeed/instruments/valgrind.py +++ b/src/pytest_codspeed/instruments/valgrind.py @@ -1,12 +1,12 @@ from __future__ import annotations -import sys import warnings from typing import TYPE_CHECKING from pytest_codspeed import __semver_version__ from pytest_codspeed.instruments import Instrument from pytest_codspeed.instruments.hooks import InstrumentHooks +from pytest_codspeed.utils import SUPPORTS_PERF_TRAMPOLINE if TYPE_CHECKING: from typing import Any, Callable @@ -17,8 +17,6 @@ from pytest_codspeed.instruments import T from pytest_codspeed.plugin import BenchmarkMarkerOptions, CodSpeedConfig -SUPPORTS_PERF_TRAMPOLINE = 
sys.version_info >= (3, 12) - class ValgrindInstrument(Instrument): instrument = "valgrind" diff --git a/src/pytest_codspeed/instruments/walltime.py b/src/pytest_codspeed/instruments/walltime.py index 2b01241..2423240 100644 --- a/src/pytest_codspeed/instruments/walltime.py +++ b/src/pytest_codspeed/instruments/walltime.py @@ -16,6 +16,7 @@ from pytest_codspeed import __semver_version__ from pytest_codspeed.instruments import Instrument from pytest_codspeed.instruments.hooks import InstrumentHooks +from pytest_codspeed.utils import SUPPORTS_PERF_TRAMPOLINE if TYPE_CHECKING: from typing import Any, Callable @@ -173,7 +174,13 @@ def __init__(self, config: CodSpeedConfig) -> None: self.benchmarks: list[Benchmark] = [] def get_instrument_config_str_and_warns(self) -> tuple[str, list[str]]: - return f"mode: walltime, timer_resolution: {TIMER_RESOLUTION_NS:.1f}ns", [] + config_str = ( + f"mode: walltime, " + f"callgraph: " + f"{'enabled' if SUPPORTS_PERF_TRAMPOLINE else 'not supported'}, " + f"timer_resolution: {TIMER_RESOLUTION_NS:.1f}ns" + ) + return config_str, [] def measure( self, diff --git a/src/pytest_codspeed/plugin.py b/src/pytest_codspeed/plugin.py index 16f0f06..24e0401 100644 --- a/src/pytest_codspeed/plugin.py +++ b/src/pytest_codspeed/plugin.py @@ -2,7 +2,6 @@ import functools import gc -import importlib.util import json import os import random @@ -19,11 +18,11 @@ CodSpeedConfig, PedanticOptions, ) -from pytest_codspeed.instruments import ( - MeasurementMode, - get_instrument_from_mode, -) +from pytest_codspeed.instruments import MeasurementMode, get_instrument_from_mode from pytest_codspeed.utils import ( + BEFORE_PYTEST_8_1_1, + IS_PYTEST_BENCHMARK_INSTALLED, + IS_PYTEST_SPEED_INSTALLED, get_environment_metadata, get_git_relative_uri_and_name, ) @@ -37,10 +36,6 @@ T = TypeVar("T") -IS_PYTEST_BENCHMARK_INSTALLED = importlib.util.find_spec("pytest_benchmark") is not None -IS_PYTEST_SPEED_INSTALLED = importlib.util.find_spec("pytest_speed") is not None 
-BEFORE_PYTEST_8_1_1 = pytest.version_tuple < (8, 1, 1) - @pytest.hookimpl(trylast=True) def pytest_addoption(parser: pytest.Parser): diff --git a/src/pytest_codspeed/utils.py b/src/pytest_codspeed/utils.py index 221505b..6c24143 100644 --- a/src/pytest_codspeed/utils.py +++ b/src/pytest_codspeed/utils.py @@ -1,10 +1,13 @@ from __future__ import annotations +import importlib.util import os import sys import sysconfig from pathlib import Path +import pytest + from pytest_codspeed import __semver_version__ if sys.version_info < (3, 10): @@ -13,6 +16,12 @@ import importlib.metadata as importlib_metadata +IS_PYTEST_BENCHMARK_INSTALLED = importlib.util.find_spec("pytest_benchmark") is not None +IS_PYTEST_SPEED_INSTALLED = importlib.util.find_spec("pytest_speed") is not None +BEFORE_PYTEST_8_1_1 = pytest.version_tuple < (8, 1, 1) +SUPPORTS_PERF_TRAMPOLINE = sysconfig.get_config_var("PY_HAVE_PERF_TRAMPOLINE") == 1 + + def get_git_relative_path(abs_path: Path) -> Path: """Get the path relative to the git root directory. If the path is not inside a git repository, the original path itself is returned. 
diff --git a/tests/conftest.py b/tests/conftest.py index aa2d077..e8a5625 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,13 +10,13 @@ import pytest from pytest_codspeed.instruments import MeasurementMode +from pytest_codspeed.utils import IS_PYTEST_BENCHMARK_INSTALLED if TYPE_CHECKING: from _pytest.pytester import RunResult pytest_plugins = ["pytester"] -IS_PYTEST_BENCHMARK_INSTALLED = importlib.util.find_spec("pytest_benchmark") is not None skip_without_pytest_benchmark = pytest.mark.skipif( not IS_PYTEST_BENCHMARK_INSTALLED, reason="pytest_benchmark not installed" ) From 8b1fb2f7d0e0e02041019e73ae966011497bdc46 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Tue, 17 Jun 2025 20:55:22 +0200 Subject: [PATCH 17/20] chore: link to the documentation --- README.md | 6 +++--- pyproject.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 2aa5264..939c10e 100644 --- a/README.md +++ b/README.md @@ -11,11 +11,11 @@ Pytest plugin to create CodSpeed benchmarks -## Requirements +--- -**Python**: 3.9 and later +**Documentation**: https://codspeed.io/docs/reference/pytest-codspeed -**pytest**: any recent version +--- ## Installation diff --git a/pyproject.toml b/pyproject.toml index 69616c5..a7abd3d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project.urls] Homepage = "https://codspeed.io/" -Documentation = "https://docs.codspeed.io/" +Documentation = "https://codspeed.io/docs/reference/pytest-codspeed" Source = "https://github.com/CodSpeedHQ/pytest-codspeed" [project] From a4e5901701a572f4c4745e807fdd45df75a2853e Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Tue, 17 Jun 2025 21:07:38 +0200 Subject: [PATCH 18/20] feat: update readme --- README.md | 73 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 38 insertions(+), 35 deletions(-) diff --git a/README.md b/README.md index 939c10e..d16e445 100644 --- a/README.md +++ b/README.md @@ -27,9 +27,9 @@ pip install 
pytest-codspeed ### Creating benchmarks -Creating benchmarks with `pytest-codspeed` is compatible with the standard `pytest-benchmark` API. So if you already have benchmarks written with it, you can start using `pytest-codspeed` right away. +In a nutshell, `pytest-codspeed` offers two approaches to create performance benchmarks that integrate seamlessly with your existing test suite. -#### Marking a whole test function as a benchmark with `pytest.mark.benchmark` +Use `@pytest.mark.benchmark` to measure entire test functions automatically: ```python import pytest @@ -37,55 +37,53 @@ from statistics import median @pytest.mark.benchmark def test_median_performance(): - return median([1, 2, 3, 4, 5]) + input = [1, 2, 3, 4, 5] + output = sum(i**2 for i in input) + assert output == 55 ``` -#### Benchmarking selected lines of a test function with the `benchmark` fixture +Since this measures the entire function, you might want to use the `benchmark` fixture for precise control over what code gets measured: ```python -import pytest -from statistics import mean - def test_mean_performance(benchmark): - # Precompute some data useful for the benchmark but that should not be - # included in the benchmark time data = [1, 2, 3, 4, 5] + # Only the function call is measured + result = benchmark(lambda: sum(i**2 for i in data)) + assert result == 55 +``` - # Benchmark the execution of the function - benchmark(lambda: mean(data)) - +Check out the [full documentation](https://codspeed.io/docs/reference/pytest-codspeed) for more details.
-def test_mean_and_median_performance(benchmark): - # Precompute some data useful for the benchmark but that should not be - # included in the benchmark time - data = [1, 2, 3, 4, 5] +### Testing the benchmarks locally - # Benchmark the execution of the function: - # The `@benchmark` decorator will automatically call the function and - # measure its execution - @benchmark - def bench(): - mean(data) - median(data) -``` +If you want to run the benchmarks tests locally, you can use the `--codspeed` pytest flag: -### Running benchmarks +```sh +$ pytest tests/ --codspeed +============================= test session starts ==================== +platform darwin -- Python 3.13.0, pytest-7.4.4, pluggy-1.5.0 +codspeed: 3.0.0 (enabled, mode: walltime, timer_resolution: 41.7ns) +rootdir: /home/user/codspeed-test, configfile: pytest.ini +plugins: codspeed-3.0.0 +collected 1 items -#### Testing the benchmarks locally +tests/test_sum_squares.py . [ 100%] -If you want to run only the benchmarks tests locally, you can use the `--codspeed` pytest flag: - -```shell -pytest tests/ --codspeed + Benchmark Results +┏━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━┓ +┃ Benchmark ┃ Time (best) ┃ Rel. StdDev ┃ Run time ┃ Iters ┃ +┣━━━━━━━━━━━━━━━━╋━━━━━━━━━━━━━╋━━━━━━━━━━━━━╋━━━━━━━━━━╋━━━━━━━━┫ +┃test_sum_squares┃ 1,873ns ┃ 4.8% ┃ 3.00s ┃ 66,930 ┃ +┗━━━━━━━━━━━━━━━━┻━━━━━━━━━━━━━┻━━━━━━━━━━━━━┻━━━━━━━━━━┻━━━━━━━━┛ +=============================== 1 benchmarked ======================== +=============================== 1 passed in 4.12s ==================== ``` -> **Note:** Running `pytest-codspeed` locally will not produce any performance reporting. It's only useful for making sure that your benchmarks are working as expected. If you want to get performance reporting, you should run the benchmarks in your CI. 
- -#### In your CI +### Running the benchmarks in your CI You can use the [CodSpeedHQ/action](https://github.com/CodSpeedHQ/action) to run the benchmarks in Github Actions and upload the results to CodSpeed. -Example workflow: +Here is an example of a GitHub Actions workflow that runs the benchmarks and reports the results to CodSpeed on every push to the `main` branch and every pull request: ```yaml name: CodSpeed @@ -95,6 +93,9 @@ on: branches: - "main" # or "master" pull_request: + # `workflow_dispatch` allows CodSpeed to trigger backtest + # performance analysis in order to generate initial data. + workflow_dispatch: jobs: benchmarks: @@ -104,9 +105,11 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: "3.12" + python-version: "3.13" + - name: Install dependencies run: pip install -r requirements.txt + - name: Run benchmarks uses: CodSpeedHQ/action@v3 with: From f97b02d43591aa4cff2bfe8408beaf92bad6d426 Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Thu, 10 Jul 2025 10:32:40 +0200 Subject: [PATCH 19/20] chore: remove pre-releases from git-cliff changelog --- cliff.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cliff.toml b/cliff.toml index 287a5c8..5e9bba6 100644 --- a/cliff.toml +++ b/cliff.toml @@ -72,6 +72,8 @@ postprocessors = [ # output = "test.md" [git] +# ignore pre-release tags +ignore_tags = "rc|alpha|beta" # parse the commits based on https://www.conventionalcommits.org conventional_commits = true # filter out the commits that are not conventional From f3b85bc79e9659936f549f06d468ebc7276bbd1a Mon Sep 17 00:00:00 2001 From: Arthur Pastel Date: Thu, 10 Jul 2025 10:32:52 +0200 Subject: [PATCH 20/20] =?UTF-8?q?Release=20v4.0.0=20=F0=9F=9A=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CHANGELOG.md | 306 +++++++++----------------------- pyproject.toml | 2 +- src/pytest_codspeed/__init__.py | 4 +- 3 files changed, 85 insertions(+), 227 deletions(-) 
diff --git a/CHANGELOG.md b/CHANGELOG.md index 50208fc..188f08e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,125 +5,26 @@ -## [3.2.0] - 2025-01-31 +## [4.0.0] - 2025-07-10 ### πŸš€ Features +- Update readme by @art049 +- Support pytest-benchmark's pedantic API by @art049 in [#81](https://github.com/CodSpeedHQ/pytest-codspeed/pull/81) +- Make sure the benchmark fixture can only be called once per bench by @art049 +- Support marker attributes to customize the walltime execution by @art049 in [#80](https://github.com/CodSpeedHQ/pytest-codspeed/pull/80) +- Use instrument hooks by @not-matthias +- Add instrument-hooks native module by @not-matthias - Increase the min round time to a bigger value (+/- 1ms) by @art049 - Add benchmarks-walltime job to run additional performance benchmarks by @art049 in [#65](https://github.com/CodSpeedHQ/pytest-codspeed/pull/65) - Fix the random seed while measuring with instruments by @art049 in [#48](https://github.com/CodSpeedHQ/pytest-codspeed/pull/48) - -### πŸ› Bug Fixes -- Use time per iteration instead of total round time in stats by @art049 - -### πŸ—οΈ Refactor -- Replace hardcoded outlier factor for improved readability by @art049 in [#67](https://github.com/CodSpeedHQ/pytest-codspeed/pull/67) - -### βš™οΈ Internals -- Fix self-dependency by @adriencaccia in [#66](https://github.com/CodSpeedHQ/pytest-codspeed/pull/66) -- Fix uv version in CI by @adriencaccia - - -## [3.1.2] - 2025-01-09 - -### πŸ› Bug Fixes -- Update package_data to include header and source files for valgrind wrapper by @art049 in [#64](https://github.com/CodSpeedHQ/pytest-codspeed/pull/64) - - -## [3.1.1] - 2025-01-07 - -### βš™οΈ Internals -- Fix tag num with bumpver by @art049 in [#61](https://github.com/CodSpeedHQ/pytest-codspeed/pull/61) -- Update uv lock before release by @art049 -- Add a py3-none-any fallback wheel by @art049 - - -## [3.1.0] - 2024-12-09 - -### πŸ—οΈ Refactor -- Remove the scripted semver generation by @art049 - -### βš™οΈ 
Internals -- Fix typo in cibuildwheel config by @art049 in [#57](https://github.com/CodSpeedHQ/pytest-codspeed/pull/57) - - -## [3.1.0-beta] - 2024-12-06 - -### πŸš€ Features - Check buildability and fallback when build doesn't work by @art049 - Compile the callgrind wrapper at build time by @art049 - -### πŸ› Bug Fixes -- Allow build on arm64 by @art049 - -### βš™οΈ Internals -- Build wheels with cibuildwheel by @art049 -- Allow forcing integrated tests by @art049 -- Fix release script by @art049 -- Use bumpver to manage versions by @art049 -- Add a changelog by @art049 -- Force native extension build in CI by @art049 -- Updated matrix release workflow by @art049 -- Use a common python version in the codspeed job by @art049 -- Fix the codspeed workflow by @art049 -- Use uv in CI by @art049 -- Commit uv lock file by @art049 - - -## [3.0.0] - 2024-10-29 - -### πŸ› Bug Fixes -- Fix compatibility with pytest-benchmark 5.0.0 by @art049 in [#54](https://github.com/CodSpeedHQ/pytest-codspeed/pull/54) - -### βš™οΈ Internals -- Drop support for python3.8 by @art049 -- Expose type information (#53) by @Dreamsorcerer in [#53](https://github.com/CodSpeedHQ/pytest-codspeed/pull/53) -- Run the CI with ubuntu 24.04 by @art049 -- Improve naming in workflow examples by @art049 -- Bump actions/checkout to v4 (#47) by @fargito in [#47](https://github.com/CodSpeedHQ/pytest-codspeed/pull/47) - - -## [3.0.0b4] - 2024-09-27 - -### πŸš€ Features - Send more outlier data by @art049 - -### πŸ› Bug Fixes -- Fix display of parametrized tests by @art049 -- Reenable gc logic by @art049 - -### πŸ§ͺ Testing -- Add benches for various syscalls by @art049 - - -## [3.0.0b3] - 2024-09-26 - -### πŸš€ Features - Also save the lower and upper fences in the json data by @art049 in [#46](https://github.com/CodSpeedHQ/pytest-codspeed/pull/46) - -### πŸ§ͺ Testing -- Refactor the algorithm benches using parametrization and add benches on bit_manipulation by @art049 - - -## [3.0.0b2] - 2024-09-24 - -### 
πŸš€ Features - Also save the q1 and q3 in the json data by @art049 in [#45](https://github.com/CodSpeedHQ/pytest-codspeed/pull/45) - Add the --codspeed-max-time flag by @art049 - - -## [3.0.0b1] - 2024-09-20 - -### πŸš€ Features - Send the semver version to cospeed instead of the PEP440 one by @art049 in [#44](https://github.com/CodSpeedHQ/pytest-codspeed/pull/44) - Also store the semver version by @art049 - -### πŸ§ͺ Testing -- Add benches for TheAlgorithms/backtracking by @art049 in [#43](https://github.com/CodSpeedHQ/pytest-codspeed/pull/43) - - -## [3.0.0b0] - 2024-09-18 - -### πŸš€ Features - Improve table style when displaying results by @art049 in [#41](https://github.com/CodSpeedHQ/pytest-codspeed/pull/41) - Add the total bench time to the collected stats by @art049 - Add configuration and split tests between instruments by @art049 @@ -131,95 +32,110 @@ - Implement the walltime instrument by @art049 - Add bench of various python noop by @art049 - Avoid overriding pytest's default protocol (#32) by @kenodegard in [#32](https://github.com/CodSpeedHQ/pytest-codspeed/pull/32) +- Support pytest 8.1.1 by @art049 +- Avoid concurrent wrapper builds by @art049 +- Add a test for pytest-xdist compatibility by @art049 +- Release the package from the CI with trusted provider by @art049 +- Add a return type to the benchmark fixture by @art049 in [#13](https://github.com/CodSpeedHQ/pytest-codspeed/pull/13) +- Add support for returning values (#12) by @patrick91 in [#12](https://github.com/CodSpeedHQ/pytest-codspeed/pull/12) +- Warmup performance map generation by @art049 +- Add some details about the callgraph generation status in the header by @art049 +- Test that perf maps are generated by @art049 +- Add a local test matrix with hatch by @art049 +- Test that benchmark selection with -k works by @art049 +- Add support for CPython3.12 and perf trampoline by @art049 +- Add introspection benchmarks by @art049 in [#9](https://github.com/CodSpeedHQ/pytest-codspeed/pull/9) +- 
Add library metadata in the profile output by @art049 in [#5](https://github.com/CodSpeedHQ/pytest-codspeed/pull/5) +- Allow running along with pytest-benchmarks by @art049 ### πŸ› Bug Fixes +- Reenable walltime instrument hooks by @art049 in [#82](https://github.com/CodSpeedHQ/pytest-codspeed/pull/82) +- Fix native library typing by @art049 +- Use time per iteration instead of total round time in stats by @art049 +- Update package_data to include header and source files for valgrind wrapper by @art049 in [#64](https://github.com/CodSpeedHQ/pytest-codspeed/pull/64) +- Allow build on arm64 by @art049 +- Fix compatibility with pytest-benchmark 5.0.0 by @art049 in [#54](https://github.com/CodSpeedHQ/pytest-codspeed/pull/54) +- Fix display of parametrized tests by @art049 +- Reenable gc logic by @art049 - Use importlib_metadata to keep backward compatibility by @art049 - Properly decide the mode depending on our env variable spec by @art049 - Disable pytest-speed when installed and codspeed is enabled by @art049 +- Loosen runtime requirements (#21) by @edgarrmondragon in [#21](https://github.com/CodSpeedHQ/pytest-codspeed/pull/21) +- Fix xdist test output assertion by @art049 +- Fix relative git path when using working-directory by @art049 in [#15](https://github.com/CodSpeedHQ/pytest-codspeed/pull/15) +- Fix typo in release.yml (#14) by @art049 in [#14](https://github.com/CodSpeedHQ/pytest-codspeed/pull/14) +- Fix setuptools installation with python3.12 by @art049 +- Support benchmark.extra_info parameters on the fixture by @art049 in [#10](https://github.com/CodSpeedHQ/pytest-codspeed/pull/10) +- Filter out pytest-benchmark warnings in the tests by @art049 +- Support kwargs with the benchmark fixture by @art049 in [#4](https://github.com/CodSpeedHQ/pytest-codspeed/pull/4) +- Avoid wrapping the callable to maintain existing results by @art049 +- Disable automatic garbage collection to increase stability by @art049 in 
[#2](https://github.com/CodSpeedHQ/pytest-codspeed/pull/2) +- Update readme by @art049 +- Fix the release script by @art049 +- Make the release script executable by @art049 +- Match the test output in any order by @art049 ### πŸ—οΈ Refactor +- Replace hardcoded outlier factor for improved readability by @art049 in [#67](https://github.com/CodSpeedHQ/pytest-codspeed/pull/67) +- Remove the scripted semver generation by @art049 - Differentiate the mode from the underlying instrument by @art049 - Move the instrumentation wrapper directly in the instrument by @art049 - Change Instrumentation to CPUInstrumentation by @art049 - Create an abstraction for each instrument by @art049 +- Use the pytest_run_protocol hook for better exec control by @art049 +- Manage compatibility env in the conftest by @art049 ### πŸ“š Documentation - Update action version in the CI workflow configuration (#39) by @frgfm in [#39](https://github.com/CodSpeedHQ/pytest-codspeed/pull/39) - Bump action versions in README by @adriencaccia ### πŸ§ͺ Testing +- Add benches from the documentation's getting started by @art049 in [#71](https://github.com/CodSpeedHQ/pytest-codspeed/pull/71) +- Add simple python benches by @art049 +- Add benches for various syscalls by @art049 +- Refactor the algorithm benches using parametrization and add benches on bit_manipulation by @art049 +- Add benches for TheAlgorithms/backtracking by @art049 in [#43](https://github.com/CodSpeedHQ/pytest-codspeed/pull/43) - Add benches for TheAlgorithms/audio_filters by @art049 in [#42](https://github.com/CodSpeedHQ/pytest-codspeed/pull/42) ### βš™οΈ Internals +- Remove pre-releases from git-cliff changelog +- Link to the documentation by @art049 +- Improve reliability of perf trampoline compatibility checks by @art049 +- Bump ruff by @art049 +- Update release workflow to include submodules by @art049 in [#79](https://github.com/CodSpeedHQ/pytest-codspeed/pull/79) +- Remove valgrind wrapper by @not-matthias +- Update apt before 
installing packages by @art049 +- Fix self-dependency by @adriencaccia in [#66](https://github.com/CodSpeedHQ/pytest-codspeed/pull/66) +- Fix uv version in CI by @adriencaccia +- Fix tag num with bumpver by @art049 in [#61](https://github.com/CodSpeedHQ/pytest-codspeed/pull/61) +- Update uv lock before release by @art049 +- Add a py3-none-any fallback wheel by @art049 +- Fix typo in cibuildwheel config by @art049 in [#57](https://github.com/CodSpeedHQ/pytest-codspeed/pull/57) +- Build wheels with cibuildwheel by @art049 +- Allow forcing integrated tests by @art049 +- Fix release script by @art049 +- Use bumpver to manage versions by @art049 +- Add a changelog by @art049 +- Force native extension build in CI by @art049 +- Updated matrix release workflow by @art049 +- Use a common python version in the codspeed job by @art049 +- Fix the codspeed workflow by @art049 +- Use uv in CI by @art049 +- Commit uv lock file by @art049 +- Drop support for python3.8 by @art049 +- Expose type information (#53) by @Dreamsorcerer in [#53](https://github.com/CodSpeedHQ/pytest-codspeed/pull/53) +- Run the CI with ubuntu 24.04 by @art049 +- Improve naming in workflow examples by @art049 +- Bump actions/checkout to v4 (#47) by @fargito in [#47](https://github.com/CodSpeedHQ/pytest-codspeed/pull/47) - Add a test on the walltime instrument by @art049 - Fix utils test using a fake git repo by @art049 - Update readme by @art049 - Support python 3.13 and drop 3.7 by @art049 in [#40](https://github.com/CodSpeedHQ/pytest-codspeed/pull/40) - Add TCH, FA, and UP to ruff lints (#29) by @kenodegard in [#29](https://github.com/CodSpeedHQ/pytest-codspeed/pull/29) - - -## [2.2.1] - 2024-03-19 - -### πŸš€ Features -- Support pytest 8.1.1 by @art049 - -### πŸ› Bug Fixes -- Loosen runtime requirements (#21) by @edgarrmondragon in [#21](https://github.com/CodSpeedHQ/pytest-codspeed/pull/21) - -### βš™οΈ Internals - Add all-checks job to CI workflow by @art049 in 
[#28](https://github.com/CodSpeedHQ/pytest-codspeed/pull/28) - Switch from black to ruff format by @art049 - Update action version in README.md by @adriencaccia - Add codspeed badge to the readme by @art049 - - -## [2.2.0] - 2023-09-01 - -### πŸš€ Features -- Avoid concurrent wrapper builds by @art049 -- Add a test for pytest-xdist compatibility by @art049 - -### πŸ› Bug Fixes -- Fix xdist test output assertion by @art049 - - -## [2.1.0] - 2023-07-27 - -### πŸ› Bug Fixes -- Fix relative git path when using working-directory by @art049 in [#15](https://github.com/CodSpeedHQ/pytest-codspeed/pull/15) -- Fix typo in release.yml (#14) by @art049 in [#14](https://github.com/CodSpeedHQ/pytest-codspeed/pull/14) - - -## [2.0.1] - 2023-07-22 - -### πŸš€ Features -- Release the package from the CI with trusted provider by @art049 -- Add a return type to the benchmark fixture by @art049 in [#13](https://github.com/CodSpeedHQ/pytest-codspeed/pull/13) -- Add support for returning values (#12) by @patrick91 in [#12](https://github.com/CodSpeedHQ/pytest-codspeed/pull/12) - -### πŸ› Bug Fixes -- Fix setuptools installation with python3.12 by @art049 - - -## [2.0.0] - 2023-07-04 - -### πŸš€ Features -- Warmup performance map generation by @art049 -- Add some details about the callgraph generation status in the header by @art049 -- Test that perf maps are generated by @art049 -- Add a local test matrix with hatch by @art049 -- Test that benchmark selection with -k works by @art049 -- Add support for CPython3.12 and perf trampoline by @art049 -- Add introspection benchmarks by @art049 in [#9](https://github.com/CodSpeedHQ/pytest-codspeed/pull/9) - -### πŸ› Bug Fixes -- Support benchmark.extra_info parameters on the fixture by @art049 in [#10](https://github.com/CodSpeedHQ/pytest-codspeed/pull/10) -- Filter out pytest-benchmark warnings in the tests by @art049 - -### πŸ—οΈ Refactor -- Use the pytest_run_protocol hook for better exec control by @art049 - -### βš™οΈ Internals - 
Separate the benchmark workflow by @art049 in [#8](https://github.com/CodSpeedHQ/pytest-codspeed/pull/8) - Bump version to 1.3.0 to trigger the callgraph generation by @art049 - Reuse same test code in the tests by @art049 @@ -229,70 +145,12 @@ - Restructure dev dependencies by @art049 - Replace isort by ruff by @art049 in [#11](https://github.com/CodSpeedHQ/pytest-codspeed/pull/11) - Add discord badge in the readme by @art049 - - -## [1.2.2] - 2022-12-02 - -### πŸš€ Features -- Add library metadata in the profile output by @art049 in [#5](https://github.com/CodSpeedHQ/pytest-codspeed/pull/5) - - -## [1.2.1] - 2022-11-28 - -### πŸ› Bug Fixes -- Support kwargs with the benchmark fixture by @art049 in [#4](https://github.com/CodSpeedHQ/pytest-codspeed/pull/4) - - -## [1.2.0] - 2022-11-22 - -### πŸ› Bug Fixes -- Avoid wrapping the callable to maintain existing results by @art049 -- Disable automatic garbage collection to increase stability by @art049 in [#2](https://github.com/CodSpeedHQ/pytest-codspeed/pull/2) -- Update readme by @art049 - -### βš™οΈ Internals - Update readme by @art049 - - -## [1.1.0] - 2022-11-10 - -### πŸš€ Features -- Allow running along with pytest-benchmarks by @art049 - -### πŸ› Bug Fixes -- Fix the release script by @art049 -- Make the release script executable by @art049 -- Match the test output in any order by @art049 - -### πŸ—οΈ Refactor -- Manage compatibility env in the conftest by @art049 - -### βš™οΈ Internals - Add a pytest-benchmark compatibility test by @art049 in [#1](https://github.com/CodSpeedHQ/pytest-codspeed/pull/1) - Add more details on the pytest run by @art049 - Continue running on matrix item error by @art049 - Add a CI configuration with pytest-benchmark installed by @art049 -[3.2.0]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v3.1.2..v3.2.0 -[3.1.2]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v3.1.1..v3.1.2 -[3.1.1]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v3.1.0..v3.1.1 
-[3.1.0]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v3.1.0-beta..v3.1.0 -[3.1.0-beta]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v3.0.0..v3.1.0-beta -[3.0.0]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v3.0.0b4..v3.0.0 -[3.0.0b4]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v3.0.0b3..v3.0.0b4 -[3.0.0b3]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v3.0.0b2..v3.0.0b3 -[3.0.0b2]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v3.0.0b1..v3.0.0b2 -[3.0.0b1]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v3.0.0b0..v3.0.0b1 -[3.0.0b0]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v2.2.1..v3.0.0b0 -[2.2.1]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v2.2.0..v2.2.1 -[2.2.0]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v2.1.0..v2.2.0 -[2.1.0]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v2.0.1..v2.1.0 -[2.0.1]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v2.0.0..v2.0.1 -[2.0.0]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v1.2.2..v2.0.0 -[1.2.2]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v1.2.1..v1.2.2 -[1.2.1]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v1.2.0..v1.2.1 -[1.2.0]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v1.1.0..v1.2.0 -[1.1.0]: https://github.com/CodSpeedHQ/pytest-codspeed/compare/v1.0.4..v1.1.0 diff --git a/pyproject.toml b/pyproject.toml index a7abd3d..7d1ed88 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,7 +66,7 @@ version = { attr = "pytest_codspeed.__version__" } [tool.bumpver] -current_version = "4.0.0-beta1" +current_version = "4.0.0" version_pattern = "MAJOR.MINOR.PATCH[-TAG[NUM]]" commit_message = "Release v{new_version} πŸš€" tag_message = "Release v{new_version} πŸš€" diff --git a/src/pytest_codspeed/__init__.py b/src/pytest_codspeed/__init__.py index 69b97a8..27e4f87 100644 --- a/src/pytest_codspeed/__init__.py +++ b/src/pytest_codspeed/__init__.py @@ -1,6 +1,6 @@ 
-__version__ = "4.0.0b1" +__version__ = "4.0.0" # We also have the semver version since __version__ is not semver compliant -__semver_version__ = "4.0.0-beta1" +__semver_version__ = "4.0.0" from .plugin import BenchmarkFixture