Skip to content

Commit c8672e3

Browse files
committed
Initial benchmark utility
1 parent 7cfc042 commit c8672e3

13 files changed

Lines changed: 740 additions & 0 deletions

File tree

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
/target
2+
output/
23

34
# Byte-compiled / optimized / DLL files
45
__pycache__/

benchmark/__init__.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
from pathlib import Path


# Absolute, resolved path of the `benchmark` package directory.
PACKAGE_ROOT = Path(__file__).parent.resolve()
# Repository root: the directory that contains the `benchmark` package.
PROJECT_ROOT = PACKAGE_ROOT.parent
# All benchmark output (session data, graphs) is written below this directory.
OUT_DIR = PROJECT_ROOT / "output"

benchmark/__main__.py

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
import argparse
2+
import sys
3+
from datetime import datetime
4+
5+
from benchmark.manager import TestManager
6+
7+
8+
def unload_arcade():
    """Remove every cached ``arcade.*`` submodule from ``sys.modules``.

    This forces a fresh import of arcade's submodules on the next import,
    so consecutive benchmark runs do not share arcade module-level state.
    """
    # BUG FIX: the original called mod.startsWith(...) (JavaScript naming);
    # Python strings only have str.startswith, so this raised AttributeError.
    # Collect names first: deleting from sys.modules while iterating it
    # would raise RuntimeError.
    to_uncache = [mod for mod in sys.modules if mod.startswith("arcade.")]
    for mod in to_uncache:
        del sys.modules[mod]
16+
17+
18+
def main():
    """Command-line entry point: discover, instantiate and run the tests."""
    options = parse_args(sys.argv[1:])
    print(f"Session Name: '{options.session}'")
    runner = TestManager(options.session, debug=True)
    runner.find_test_classes(options.type, options.name)
    runner.create_test_instances()
    runner.run()
25+
26+
27+
def parse_args(args):
    """Parse the benchmark runner's command-line arguments.

    :param args: Argument list, normally ``sys.argv[1:]``.
    :return: An ``argparse.Namespace`` with ``session``, ``type`` and
        ``name`` attributes.
    """
    # The default session name is a timestamp computed at call time.
    default_session = datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--session", help="Session Name", type=str, default=default_session
    )
    parser.add_argument("-t", "--type", help="Test Type", type=str)
    parser.add_argument("-n", "--name", help="Test Name", type=str)
    return parser.parse_args(args)
39+
40+
41+
if __name__ == "__main__":
42+
main()

benchmark/graph.py

Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
import csv
2+
from pathlib import Path
3+
4+
import matplotlib.pyplot as plt
5+
import seaborn as sns
6+
7+
sns.set_style("whitegrid")
8+
9+
# Column indices in the benchmark CSV output files.
FPS = 1
SPRITE_COUNT = 2
DRAWING_TIME = 3
PROCESSING_TIME = 4


class DataSeries:
    """One plottable line of benchmark data loaded from a CSV file.

    :param name: Label used for this series in the graph legend.
    :param path: Path of the CSV file to load.
    """

    def __init__(self, name: str, path: Path) -> None:
        self.name = name
        self.path = path
        # Parallel lists: index i of each list describes one sample row.
        self.count = []
        self.processing_time = []
        self.draw_time = []
        self.fps = []
        # Process data
        self._process_data()

    def _process_data(self):
        """Populate the per-column lists from the CSV rows."""
        rows = self._read_file(self.path)
        for row in rows:
            self.count.append(row[SPRITE_COUNT])
            self.fps.append(row[FPS])
            self.processing_time.append(row[PROCESSING_TIME])
            self.draw_time.append(row[DRAWING_TIME])

    def _read_file(self, path: Path):
        """Read a benchmark CSV file, skipping the header row.

        :param path: File to read.
        :return: List of rows, each row a list of floats.
        """
        with open(path) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=",")
            # Idiom fix: skip the header with next() instead of a
            # first-row boolean flag; an empty file simply yields no rows.
            next(csv_reader, None)
            return [[float(cell) for cell in row] for row in csv_reader]
47+
48+
49+
class PerfGraph:
    """Renders a line graph comparing one or more :class:`DataSeries`.

    :param title: Graph title.
    :param label_x: X axis label.
    :param label_y: Y axis label.
    :param series: Optional initial list of :class:`DataSeries` to plot.
        (Backward-compatible addition: ``TestManager.create_graph`` passes
        the series list directly to the constructor.)
    """

    def __init__(self, title: str, label_x: str, label_y: str, series=None) -> None:
        self.title = title
        self.label_x = label_x
        self.label_y = label_y
        # Copy the caller's list so later mutation of it does not affect us.
        self.series = list(series) if series is not None else []

    def add_series(self, series: DataSeries):
        """Add one data series to be plotted."""
        self.series.append(series)

    def create(self, output_path: Path):
        """Plot every series (sprite count vs processing time) and save it.

        :param output_path: File the figure is written to
            (image format inferred from the suffix).
        """
        plt.title(self.title)

        for series in self.series:
            plt.plot(series.count, series.processing_time, label=series.name)

        plt.legend(loc="upper left", shadow=True, fontsize="large")
        plt.xlabel(self.label_x)
        plt.ylabel(self.label_y)

        plt.savefig(output_path)
        # Reset the current figure so the next graph starts from a clean state.
        plt.clf()
71+
72+
73+
if __name__ == "__main__":
    from benchmark import OUT_DIR

    # Demo/manual invocation: graph the collision data of the "test" session.
    graphs_dir = OUT_DIR / "test" / "graphs"
    graphs_dir.mkdir(parents=True, exist_ok=True)
    data_dir = OUT_DIR / "test" / "data"

    graph = PerfGraph(
        "Time To Detect Collisions", label_x="Sprite Count", label_y="Time"
    )
    for n in range(4):
        graph.add_series(
            DataSeries(f"Arcade {n}", data_dir / f"arcade_collision-{n}.csv")
        )
    graph.create(graphs_dir / "arcade_collision.png")

benchmark/manager.py

Lines changed: 160 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,160 @@
1+
import importlib
2+
import pkgutil
3+
from typing import List, Optional, Type
4+
5+
from benchmark import OUT_DIR
6+
from benchmark.graph import DataSeries, PerfGraph
7+
from benchmark.tests.base import PerfTest
8+
9+
10+
def find_test_classes(path: str) -> List[Type[PerfTest]]:
    """Collect the ``Test`` class from every module of a test sub-package.

    :param path: Sub-package name below ``benchmark.tests``
        (e.g. ``"arcade"``).
    :return: All discovered test classes.
    """
    package = importlib.import_module(f"benchmark.tests.{path}")

    classes = []
    for module_info in pkgutil.iter_modules(package.__path__):
        module = importlib.import_module(
            f"benchmark.tests.{path}.{module_info.name}"
        )
        if not hasattr(module, "Test"):
            # Modules without a Test class are skipped with a warning.
            print(
                (
                    "WARNING: "
                    f"Module '{module.__name__}' does not have a Test class. "
                    "Please add a test class or rename the class to 'Test'."
                )
            )
            continue
        classes.append(module.Test)

    return classes
29+
30+
31+
class TestManager:
    """
    Finds and executes tests

    :param str session: The session name.
    :param bool debug: If True, print debug messages.
    """

    def __init__(self, session: str, debug: bool = True):
        self.debug = debug
        self.session = session
        # Each session gets its own output directory tree under OUT_DIR.
        self.session_dir = OUT_DIR / session
        self.session_dir.mkdir(parents=True, exist_ok=True)
        self.data_dir = self.session_dir / "data"

        self.test_classes: List[Type[PerfTest]] = []
        self.test_instances: List[PerfTest] = []

    @property
    def num_test_classes(self) -> int:
        """Number of test classes found so far."""
        return len(self.test_classes)

    @property
    def num_test_instances(self) -> int:
        """Number of test instances created so far."""
        return len(self.test_instances)

    def find_test_classes(
        self,
        type: Optional[str] = None,  # shadows builtin; kept for caller compat
        name: Optional[str] = None,
    ):
        """
        Find test classes based on type and name.

        :param str type: The type of test to run.
        :param str name: The name of the test to run.
        :return: The number of test classes found.
        """
        # NOTE: this method shadows the module-level find_test_classes();
        # the bare calls below resolve to the module-level helper.
        all_classes = find_test_classes("arcade")
        all_classes += find_test_classes("arcade_accelerate")

        for cls in all_classes:
            # Apply the optional type/name filters.
            if type is not None and cls.type != type:
                continue
            if name is not None and cls.name != name:
                continue
            self.test_classes.append(cls)

        if self.debug:
            num_classes = len(self.test_classes)
            print(f"Found {num_classes} test classes")
            for cls in self.test_classes:
                print(f" -> {cls.type}.{cls.name}")

    def create_test_instances(self):
        """
        Create test instances based on each test's instances attribute.
        """
        for cls in self.test_classes:
            # If a test has multiple instances, create one instance for each
            if cls.instances:
                for params, _ in cls.instances:
                    self.add_test_instance(cls(**params))
            else:
                self.add_test_instance(cls())

        if self.debug:
            num_instances = len(self.test_instances)
            print(f"Created {num_instances} test instances")
            for instance in self.test_instances:
                print(f" -> {instance.type}.{instance.name}")

    def add_test_instance(self, instance: PerfTest):
        """Validate and register a test instance.

        :raises ValueError: If the instance still has the placeholder name.
        """
        if instance.name == "default":
            raise ValueError(
                (
                    "Test name cannot be 'default'."
                    "Please add a class attribute 'name' to your test class."
                    f"Class: {instance}"
                )
            )
        self.test_instances.append(instance)

    def get_test_instance(self, name: str) -> Optional[PerfTest]:
        """Return the test instance whose ``instance_name`` matches, else None."""
        for instance in self.test_instances:
            if instance.instance_name == name:
                return instance
        return None

    def run(self):
        """Run all tests"""
        for instance in self.test_instances:
            instance.run(self.session_dir)

    def create_graph(
        self,
        file_name: str,
        title: str,
        x_label: str,
        y_label: str,
        series_names=None,
    ):
        """Create a graph using matplotlib.

        :param str file_name: Output file name without extension.
        :param str title: Graph title.
        :param str x_label: X axis label.
        :param str y_label: Y axis label.
        :param series_names: Test instance names whose CSV data to plot.
        """
        # BUG FIX: the default was a shared mutable list ([]); use a None
        # sentinel instead.
        if series_names is None:
            series_names = []
        # BUG FIX: the original print was missing its f-prefix and had
        # mismatched braces, so it printed the literal placeholder text.
        print(f"Creating graph : {title} [{x_label}, {y_label}]")
        series = []
        for series_name in series_names:
            # Check if we have a test instance with this name
            instance = self.get_test_instance(series_name)
            if instance is None:
                print(f" -> No test instance found for series '{series_name}'")
                # BUG FIX: skip only this series. The original used a `skip`
                # flag that was never reset, so one missing series silently
                # dropped every series after it as well.
                continue

            path = self.data_dir / f"{series_name}.csv"
            if not path.exists():
                print(
                    f"No data found for series '{series_name}' in session '{self.session}'"
                )
                continue

            series.append(DataSeries(instance.name, path))

        out_path = self.session_dir / "graphs"
        out_path.mkdir(parents=True, exist_ok=True)
        out_path = out_path / f"{file_name}.png"
        # BUG FIX: PerfGraph.__init__ does not take a series argument;
        # register each series through add_series() instead.
        graph = PerfGraph(title, x_label, y_label)
        for data_series in series:
            graph.add_series(data_series)
        graph.create(out_path)

benchmark/tests/__init__.py

Whitespace-only changes.

benchmark/tests/arcade/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
from . import collision

0 commit comments

Comments
 (0)