diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e4ba8e8..90e27ff 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -14,11 +14,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- - name: Set up Python 3.9
+ - name: Set up Python 3.11
uses: actions/setup-python@v2
with:
- python-version: "3.9"
- - uses: pre-commit/action@v2.0.0
+ python-version: "3.11"
+ - uses: pre-commit/action@v3.0.0
with:
extra_args: --all-files
@@ -37,6 +37,7 @@ jobs:
- "3.9"
- "3.10"
- "3.11"
+ - "3.12.0-beta.1"
steps:
- uses: actions/checkout@v2
@@ -51,6 +52,6 @@ jobs:
run: pip install .[dev]
- if: matrix.config == 'pytest-benchmark'
name: Install pytest-benchmark to test compatibility
- run: pip install pytest-benchmark~=4.0.0 py
+ run: pip install pytest-benchmark~=4.0.0
- name: Run tests
run: pytest -vs
diff --git a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml
new file mode 100644
index 0000000..a633d95
--- /dev/null
+++ b/.github/workflows/codspeed.yml
@@ -0,0 +1,23 @@
+name: benchmarks
+on:
+ push:
+ branches: [master]
+ pull_request:
+ branches: [master]
+ workflow_dispatch:
+
+jobs:
+ benchmarks:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: Set up Python 3.12
+ uses: actions/setup-python@v2
+ with:
+ python-version: "3.12.0-beta.3"
+ - name: Install local version of pytest-codspeed
+ run: pip install .
+ - name: Run benchmarks
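+        # CodSpeedHQ/action runs the given command under instrumentation and reports the results to CodSpeed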
+ uses: CodSpeedHQ/action@main
+ with:
+ run: pytest tests/benchmarks/ --codspeed
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 6081540..0f71438 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -12,15 +12,11 @@ repos:
rev: 22.3.0
hooks:
- id: black
- - repo: https://github.com/timothycrosley/isort.git
- rev: "5.8.0"
- hooks:
- - id: isort
- repo: https://github.com/pre-commit/mirrors-mypy
- rev: v0.961
+ rev: v1.3.0
hooks:
- id: mypy
- repo: https://github.com/charliermarsh/ruff-pre-commit
- rev: v0.0.100
+ rev: v0.0.275
hooks:
- id: ruff
diff --git a/README.md b/README.md
index 40cb55f..be3d3b3 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,8 @@
-![…](…)
+![…](…)
+[![Discord](…)](https://discord.com/invite/MxpaCfKSqF)
Pytest plugin to create CodSpeed benchmarks
diff --git a/pyproject.toml b/pyproject.toml
index 5b729dd..4bd4f4e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,37 +28,49 @@ classifiers = [
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
"Topic :: Software Development :: Testing",
"Topic :: System :: Benchmark",
"Topic :: Utilities",
"Typing :: Typed",
]
-dependencies = ["cffi ~= 1.15.1", "pytest>=3.8"]
+dependencies = [
+ "cffi ~= 1.15.1",
+ "pytest>=3.8",
+    "setuptools ~= 67.8.0; python_version >= '3.12'",  # FIXME: remove when cffi directly supports Python 3.12
+]
[project.optional-dependencies]
-dev = [
- "hatchling ~= 1.11.1",
- "black ~= 22.3.0",
- "isort ~=5.8.0",
- "flake8 ~= 5.0.4",
- "mypy ~= 0.961",
- "pytest ~= 7.0",
- "pytest-cov ~= 4.0.0",
- "ruff ~= 0.0.100",
-]
-compatibility = ["pytest-benchmarks ~= 3.4.1"]
+lint = ["black ~= 23.3.0", "isort ~=5.12.0", "mypy ~= 1.3.0", "ruff ~= 0.0.275"]
+compat = ["pytest-benchmark ~= 4.0.0"]
+test = ["pytest ~= 7.0", "pytest-cov ~= 4.0.0"]
[project.entry-points]
pytest11 = { codspeed = "pytest_codspeed.plugin" }
+[tool.hatch.envs.default]
+python = "3.12"
+features = ["lint", "test", "compat"]
+
+[tool.hatch.envs.test]
+features = ["test"]
+
+[[tool.hatch.envs.test.matrix]]
+python = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
+features = ["compat", "test"]
+
[tool.hatch.version]
path = "src/pytest_codspeed/__init__.py"
[tool.hatch.build.targets.sdist]
include = ["/src"]
+[tool.mypy]
+python_version = "3.12"
+
[tool.ruff]
line-length = 88
+select = ["E", "F", "I", "C"]
[tool.isort]
line_length = 88
@@ -68,6 +80,10 @@ use_parentheses = true
force_grid_wrap = 0
float_to_top = true
+[tool.pytest.ini_options]
+addopts = "--ignore=tests/benchmarks --ignore=tests/examples"
+filterwarnings = ["ignore::DeprecationWarning:pytest_benchmark.utils.*:"]
+
[tool.coverage.run]
branch = true
[tool.coverage.report]
diff --git a/src/pytest_codspeed/__init__.py b/src/pytest_codspeed/__init__.py
index bc86c94..8c0d5d5 100644
--- a/src/pytest_codspeed/__init__.py
+++ b/src/pytest_codspeed/__init__.py
@@ -1 +1 @@
-__version__ = "1.2.2"
+__version__ = "2.0.0"
diff --git a/src/pytest_codspeed/plugin.py b/src/pytest_codspeed/plugin.py
index 26bb4ea..e6040a0 100644
--- a/src/pytest_codspeed/plugin.py
+++ b/src/pytest_codspeed/plugin.py
@@ -1,8 +1,9 @@
import gc
import os
import pkgutil
+import sys
from dataclasses import dataclass, field
-from typing import TYPE_CHECKING, Any, Callable, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import pytest
from _pytest.fixtures import FixtureManager
@@ -14,6 +15,7 @@
from ._wrapper import LibType
IS_PYTEST_BENCHMARK_INSTALLED = pkgutil.find_loader("pytest_benchmark") is not None
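+# CPython 3.12 adds the perf trampoline (sys.activate_stack_trampoline), which exposes Python frames to perf-based profilers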
+SUPPORTS_PERF_TRAMPOLINE = sys.version_info >= (3, 12)
@pytest.hookimpl(trylast=True)
@@ -94,7 +96,10 @@ def pytest_plugin_registered(plugin, manager: "pytest.PytestPluginManager"):
@pytest.hookimpl(trylast=True)
def pytest_report_header(config: "pytest.Config"):
- out = [f"codspeed: {__version__}"]
+ out = [
+ f"codspeed: {__version__} "
+ f"(callgraph: {'enabled' if SUPPORTS_PERF_TRAMPOLINE else 'not supported'})"
+ ]
plugin = get_plugin(config)
if plugin.is_codspeed_enabled and not plugin.should_measure:
out.append(
@@ -111,19 +116,29 @@ def pytest_report_header(config: "pytest.Config"):
return "\n".join(out)
-def should_benchmark_item(item: "pytest.Item") -> bool:
+def has_benchmark_fixture(item: "pytest.Item") -> bool:
+ item_fixtures = getattr(item, "fixturenames", [])
+ return "benchmark" in item_fixtures or "codspeed_benchmark" in item_fixtures
+
+
+def has_benchmark_marker(item: "pytest.Item") -> bool:
return (
item.get_closest_marker("codspeed_benchmark") is not None
or item.get_closest_marker("benchmark") is not None
- or "benchmark" in getattr(item, "fixturenames", [])
)
+def should_benchmark_item(item: "pytest.Item") -> bool:
+ return has_benchmark_fixture(item) or has_benchmark_marker(item)
+
+
@pytest.hookimpl()
def pytest_sessionstart(session: "pytest.Session"):
plugin = get_plugin(session.config)
if plugin.is_codspeed_enabled:
plugin.benchmark_count = 0
+ if plugin.should_measure and SUPPORTS_PERF_TRAMPOLINE:
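+            # Write Python symbols for this process to /tmp/perf-<pid>.map so profilers can resolve frames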
+ sys.activate_stack_trampoline("perf") # type: ignore
@pytest.hookimpl(trylast=True)
@@ -150,59 +165,92 @@ def _run_with_instrumentation(
if is_gc_enabled:
gc.collect()
gc.disable()
+
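+    # Run the benchmarked callable inside a dedicated wrapper frame so it shows up as the callgraph root in the perf map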
+ def __codspeed_root_frame__():
+ fn(*args, **kwargs)
+
+ if SUPPORTS_PERF_TRAMPOLINE:
+        # Warm up the CPython perf map cache so the trampoline entries exist before measurement starts
+ __codspeed_root_frame__()
+
lib.zero_stats()
lib.start_instrumentation()
- fn(*args, **kwargs)
+ __codspeed_root_frame__()
lib.stop_instrumentation()
lib.dump_stats_at(f"{nodeId}".encode("ascii"))
if is_gc_enabled:
gc.enable()
-@pytest.hookimpl(trylast=True)
-def pytest_runtest_call(item: "pytest.Item"):
+@pytest.hookimpl(tryfirst=True)
+def pytest_runtest_protocol(item: "pytest.Item", nextitem: Union["pytest.Item", None]):
plugin = get_plugin(item.config)
-
if not plugin.is_codspeed_enabled or not should_benchmark_item(item):
- return # Avoid running the test multiple times when codspeed is disabled
- else:
- plugin.benchmark_count += 1
- if "benchmark" in getattr(item, "fixturenames", []):
- # This is a benchmark fixture, so the measurement is done by the fixture
- item.runtest()
- elif not plugin.should_measure:
- item.runtest()
- else:
- assert plugin.lib is not None
- _run_with_instrumentation(plugin.lib, item.nodeid, item.runtest)
+ return (
+ None # Defer to the default test protocol since no benchmarking is needed
+ )
+ if has_benchmark_fixture(item):
+ return None # Instrumentation is handled by the fixture
-@pytest.hookimpl()
-def pytest_sessionfinish(session: "pytest.Session", exitstatus):
- plugin = get_plugin(session.config)
- if plugin.is_codspeed_enabled:
- reporter = session.config.pluginmanager.get_plugin("terminalreporter")
- count_suffix = "benchmarked" if plugin.should_measure else "benchmark tested"
- reporter.write_sep(
- "=",
- f"{plugin.benchmark_count} {count_suffix}",
+ plugin.benchmark_count += 1
+ if not plugin.should_measure:
+ return None # Benchmark counted but will be run in the default protocol
+
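+    # Run the whole runtest protocol by hand: setup and teardown report as usual, while the call phase is wrapped with instrumentation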
+ # Setup phase
+ reports = []
+ ihook = item.ihook
+ ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
+ setup_call = pytest.CallInfo.from_call(
+ lambda: ihook.pytest_runtest_setup(item=item, nextitem=nextitem), "setup"
+ )
+ setup_report = ihook.pytest_runtest_makereport(item=item, call=setup_call)
+ ihook.pytest_runtest_logreport(report=setup_report)
+ reports.append(setup_report)
+ # Run phase
+ if setup_report.passed and not item.config.getoption("setuponly"):
+ assert plugin.lib is not None
+ runtest_call = pytest.CallInfo.from_call(
+ lambda: _run_with_instrumentation(plugin.lib, item.nodeid, item.runtest),
+ "call",
)
+ runtest_report = ihook.pytest_runtest_makereport(item=item, call=runtest_call)
+ ihook.pytest_runtest_logreport(report=runtest_report)
+ reports.append(runtest_report)
+
+ # Teardown phase
+ teardown_call = pytest.CallInfo.from_call(
+ lambda: ihook.pytest_runtest_teardown(item=item, nextitem=nextitem), "teardown"
+ )
+ teardown_report = ihook.pytest_runtest_makereport(item=item, call=teardown_call)
+ ihook.pytest_runtest_logreport(report=teardown_report)
+ reports.append(teardown_report)
+ ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
+    return reports  # Prevent any other runtest protocol hooks from running
-@pytest.fixture(scope="function")
-def codspeed_benchmark(request: "pytest.FixtureRequest") -> Callable:
- plugin = get_plugin(request.config)
- def run(func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
+class BenchmarkFixture:
+ def __init__(self, request: "pytest.FixtureRequest"):
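+        # Accepts pytest-benchmark style extra_info assignments for compatibility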
+ self.extra_info: Dict = {}
+
+ self._request = request
+
+ def __call__(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
+ plugin = get_plugin(self._request.config)
+ plugin.benchmark_count += 1
if plugin.is_codspeed_enabled and plugin.should_measure:
assert plugin.lib is not None
_run_with_instrumentation(
- plugin.lib, request.node.nodeid, func, *args, **kwargs
+ plugin.lib, self._request.node.nodeid, func, *args, **kwargs
)
else:
func(*args, **kwargs)
- return run
+
+@pytest.fixture(scope="function")
+def codspeed_benchmark(request: "pytest.FixtureRequest") -> Callable:
+ return BenchmarkFixture(request)
if not IS_PYTEST_BENCHMARK_INSTALLED:
@@ -213,3 +261,15 @@ def benchmark(codspeed_benchmark, request: "pytest.FixtureRequest"):
Compatibility with pytest-benchmark
"""
return codspeed_benchmark
+
+
+@pytest.hookimpl()
+def pytest_sessionfinish(session: "pytest.Session", exitstatus):
+ plugin = get_plugin(session.config)
+ if plugin.is_codspeed_enabled:
+ reporter = session.config.pluginmanager.get_plugin("terminalreporter")
+ count_suffix = "benchmarked" if plugin.should_measure else "benchmark tested"
+ reporter.write_sep(
+ "=",
+ f"{plugin.benchmark_count} {count_suffix}",
+ )
diff --git a/tests/benchmarks/test_bench_fibo.py b/tests/benchmarks/test_bench_fibo.py
new file mode 100644
index 0000000..eb2dc00
--- /dev/null
+++ b/tests/benchmarks/test_bench_fibo.py
@@ -0,0 +1,53 @@
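+# Fibonacci implementations with different algorithmic costs, used as benchmark workloads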
+def recursive_fibonacci(n: int) -> int:
+ if n in [0, 1]:
+ return n
+ return recursive_fibonacci(n - 1) + recursive_fibonacci(n - 2)
+
+
+def recursive_cached_fibonacci(n: int) -> int:
+ cache = {0: 0, 1: 1}
+
+    def fibo(n: int) -> int:
+ if n in cache:
+ return cache[n]
+ cache[n] = fibo(n - 1) + fibo(n - 2)
+ return cache[n]
+
+ return fibo(n)
+
+
+def iterative_fibonacci(n: int) -> int:
+ a, b = 0, 1
+ for _ in range(n):
+ a, b = b, a + b
+ return a
+
+
+def test_iterative_fibo_10(benchmark):
+ @benchmark
+ def _():
+ iterative_fibonacci(10)
+
+
+def test_recursive_fibo_10(benchmark):
+ @benchmark
+ def _():
+ recursive_fibonacci(10)
+
+
+def test_recursive_fibo_20(benchmark):
+ @benchmark
+ def _():
+ recursive_fibonacci(20)
+
+
+def test_recursive_cached_fibo_10(benchmark):
+ @benchmark
+ def _():
+ recursive_cached_fibonacci(10)
+
+
+def test_recursive_cached_fibo_100(benchmark):
+ @benchmark
+ def _():
+ recursive_cached_fibonacci(100)
diff --git a/tests/conftest.py b/tests/conftest.py
index b481e87..f58a240 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,4 +1,4 @@
-import pkgutil
+import importlib.util
import shutil
import sys
@@ -6,7 +6,7 @@
pytest_plugins = ["pytester"]
-IS_PYTEST_BENCHMARK_INSTALLED = pkgutil.find_loader("pytest_benchmark") is not None
+IS_PYTEST_BENCHMARK_INSTALLED = importlib.util.find_spec("pytest_benchmark") is not None
skip_without_pytest_benchmark = pytest.mark.skipif(
not IS_PYTEST_BENCHMARK_INSTALLED, reason="pytest_benchmark not installed"
)
@@ -25,3 +25,12 @@
if IS_VALGRIND_INSTALLED:
print("NOTICE: Testing with valgrind compatibility", file=sys.stderr, flush=True)
+
+IS_PERF_TRAMPOLINE_SUPPORTED = sys.version_info >= (3, 12)
+skip_without_perf_trampoline = pytest.mark.skipif(
+ not IS_PERF_TRAMPOLINE_SUPPORTED, reason="perf trampoline is not supported"
+)
+
+skip_with_perf_trampoline = pytest.mark.skipif(
+ IS_PERF_TRAMPOLINE_SUPPORTED, reason="perf trampoline is supported"
+)
diff --git a/tests/examples/test_addition_fixture.py b/tests/examples/test_addition_fixture.py
new file mode 100644
index 0000000..6f73f82
--- /dev/null
+++ b/tests/examples/test_addition_fixture.py
@@ -0,0 +1,4 @@
+def test_some_addition_performance(benchmark):
+ @benchmark
+ def _():
+ return 1 + 1
diff --git a/tests/test_pytest_plugin.py b/tests/test_pytest_plugin.py
index 8510ef3..c6d8644 100644
--- a/tests/test_pytest_plugin.py
+++ b/tests/test_pytest_plugin.py
@@ -1,7 +1,14 @@
+import os
from contextlib import contextmanager
import pytest
-from conftest import skip_without_pytest_benchmark, skip_without_valgrind
+from conftest import (
+ IS_PERF_TRAMPOLINE_SUPPORTED,
+ skip_with_perf_trampoline,
+ skip_without_perf_trampoline,
+ skip_without_pytest_benchmark,
+ skip_without_valgrind,
+)
@pytest.fixture(scope="function")
@@ -52,16 +59,31 @@ def fn(arg, kwarg=None):
result.stdout.fnmatch_lines(["*1 benchmark tested*"])
+@skip_without_valgrind
+@skip_without_perf_trampoline
+def test_bench_enabled_header_with_perf(
+ pytester: pytest.Pytester, codspeed_env
+) -> None:
+ pytester.copy_example("tests/examples/test_addition_fixture.py")
+ with codspeed_env():
+ result = pytester.runpytest()
+ result.stdout.fnmatch_lines(["codspeed: * (callgraph: enabled)"])
+
+
+@skip_without_valgrind
+@skip_with_perf_trampoline
+def test_bench_enabled_header_without_perf(
+ pytester: pytest.Pytester, codspeed_env
+) -> None:
+ pytester.copy_example("tests/examples/test_addition_fixture.py")
+ with codspeed_env():
+ result = pytester.runpytest()
+ result.stdout.fnmatch_lines(["codspeed: * (callgraph: not supported)"])
+
+
@skip_without_valgrind
def test_plugin_enabled_by_env(pytester: pytest.Pytester, codspeed_env) -> None:
- pytester.makepyfile(
- """
- def test_some_addition_performance(benchmark):
- @benchmark
- def _():
- return 1 + 1
- """
- )
+ pytester.copy_example("tests/examples/test_addition_fixture.py")
with codspeed_env():
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 benchmarked*", "*1 passed*"])
@@ -69,28 +91,78 @@ def _():
@skip_without_valgrind
def test_plugin_enabled_and_env(pytester: pytest.Pytester, codspeed_env) -> None:
+ pytester.copy_example("tests/examples/test_addition_fixture.py")
+ with codspeed_env():
+ result = pytester.runpytest("--codspeed")
+ result.stdout.fnmatch_lines(["*1 benchmarked*", "*1 passed*"])
+
+
+@skip_without_valgrind
+def test_plugin_enabled_and_env_bench_run_once(
+ pytester: pytest.Pytester, codspeed_env
+) -> None:
pytester.makepyfile(
"""
- def test_some_addition_performance(benchmark):
+ import pytest
+
+ @pytest.mark.benchmark
+ def test_noisy_bench_marked():
+ print() # make sure noise is on its own line
+ print("I'm noisy marked!!!")
+ print()
+
+ def test_noisy_bench_fxt(benchmark):
@benchmark
def _():
- return 1 + 1
+ print() # make sure noise is on its own line
+ print("I'm noisy fixtured!!!")
+ print()
"""
)
+ EXPECTED_OUTPUT_COUNT = 2 if IS_PERF_TRAMPOLINE_SUPPORTED else 1
with codspeed_env():
- result = pytester.runpytest("--codspeed")
- result.stdout.fnmatch_lines(["*1 benchmarked*", "*1 passed*"])
+ run_result = pytester.runpytest("--codspeed", "-s")
+ print(run_result.stdout.str())
+ assert run_result.outlines.count("I'm noisy marked!!!") == EXPECTED_OUTPUT_COUNT
+ assert (
+ run_result.outlines.count("I'm noisy fixtured!!!") == EXPECTED_OUTPUT_COUNT
+ )
-def test_plugin_disabled(pytester: pytest.Pytester) -> None:
+@skip_without_valgrind
+def test_plugin_enabled_and_env_bench_hierarchy_called(
+ pytester: pytest.Pytester, codspeed_env
+) -> None:
pytester.makepyfile(
"""
- def test_some_addition_performance(benchmark):
- @benchmark
- def _():
- return 1 + 1
+ import pytest
+
+ class TestGroup:
+ def setup_method(self):
+ print(); print("Setup called")
+
+        def teardown_method(self):
+ print(); print("Teardown called")
+
+ @pytest.mark.benchmark
+ def test_child(self):
+ print(); print("Test called")
+
"""
)
+ with codspeed_env():
+ result = pytester.runpytest("--codspeed", "-s")
+ result.stdout.fnmatch_lines(
+ [
+ "Setup called",
+ "Test called",
+ "Teardown called",
+ ]
+ )
+
+
+def test_plugin_disabled(pytester: pytest.Pytester) -> None:
+ pytester.copy_example("tests/examples/test_addition_fixture.py")
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
@@ -141,6 +213,15 @@ def test_another_useless_thing():
"*3/4 tests collected (1 deselected)*",
],
)
+ collection_result = pytester.runpytest(
+ "--codspeed", "--collect-only", "-k", "test_some_wrapped_benchmark"
+ )
+ collection_result.stdout.fnmatch_lines_random(
+ [
+            "*<Function test_some_wrapped_benchmark>*",
+ "*1/4 tests collected (3 deselected)*",
+ ],
+ )
@skip_without_pytest_benchmark
@@ -166,3 +247,60 @@ def _():
"*1 passed*",
]
)
+
+
+def test_pytest_benchmark_extra_info(pytester: pytest.Pytester) -> None:
+ """https://pytest-benchmark.readthedocs.io/en/latest/usage.html#extra-info"""
+ pytester.makepyfile(
+ """
+ import time
+
+ def test_my_stuff(benchmark):
+ benchmark.extra_info['foo'] = 'bar'
+ benchmark(time.sleep, 0.02)
+ """
+ )
+ result = pytester.runpytest("--codspeed")
+ assert result.ret == 0, "the run should have succeeded"
+
+
+@skip_without_valgrind
+@skip_without_perf_trampoline
+def test_perf_maps_generation(pytester: pytest.Pytester, codspeed_env) -> None:
+ pytester.makepyfile(
+ """
+ import pytest
+
+ @pytest.mark.benchmark
+ def test_some_addition_marked():
+ return 1 + 1
+
+ def test_some_addition_fixtured(benchmark):
+ @benchmark
+ def fixtured_child():
+ return 1 + 1
+ """
+ )
+ with codspeed_env():
+ result = pytester.runpytest("--codspeed")
+ result.stdout.fnmatch_lines(["*2 benchmarked*", "*2 passed*"])
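+        # The perf trampoline writes this process's symbol map to /tmp/perf-<pid>.map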
+ current_pid = os.getpid()
+ perf_filepath = f"/tmp/perf-{current_pid}.map"
+ print(perf_filepath)
+
+ with open(perf_filepath, "r") as perf_file:
+ lines = perf_file.readlines()
+ assert any(
+            "py::_run_with_instrumentation.<locals>.__codspeed_root_frame__" in line
+ for line in lines
+ ), "No root frame found in perf map"
+ assert any(
+ "py::test_some_addition_marked" in line for line in lines
+ ), "No marked test frame found in perf map"
+ assert any(
+ "py::test_some_addition_fixtured" in line for line in lines
+ ), "No fixtured test frame found in perf map"
+ assert any(
+            "py::test_some_addition_fixtured.<locals>.fixtured_child" in line
+ for line in lines
+ ), "No fixtured child test frame found in perf map"