Files
sideline/engine/benchmark.py
David Gwilliam 05d261273e feat: Add gallery presets, MultiDisplay support, and viewport tests
- Add ~20 gallery presets covering sources, effects, cameras, displays
- Add MultiDisplay support with --display multi:terminal,pygame syntax
- Fix ViewportFilterStage to recompute layout on viewport_width change
- Add benchmark.py module for hook-based performance testing
- Add viewport resize tests to test_viewport_filter_performance.py
2026-03-17 01:24:15 -07:00

74 lines
1.8 KiB
Python

"""
Benchmark module for performance testing.
Usage:
python -m engine.benchmark # Run all benchmarks
python -m engine.benchmark --hook # Run benchmarks in hook mode (for CI)
python -m engine.benchmark --displays null --iterations 20
"""
import argparse
import os
import subprocess
import sys
def main():
    """Parse CLI options and run the benchmark test suite via pytest.

    Builds a pytest command line selecting tests tagged with the
    ``benchmark`` marker, optionally adds regression-failure flags in
    hook mode, and exits with pytest's return code so CI hooks can
    detect failures.
    """
    parser = argparse.ArgumentParser(description="Run performance benchmarks")
    parser.add_argument(
        "--hook",
        action="store_true",
        help="Run in hook mode (fail on regression)",
    )
    parser.add_argument(
        "--displays",
        default="null",
        help="Comma-separated list of displays to benchmark",
    )
    parser.add_argument(
        "--iterations",
        type=int,
        default=100,
        help="Number of iterations per benchmark",
    )
    args = parser.parse_args()

    # Select only tests tagged with the "benchmark" marker.
    pytest_args = [
        "-v",
        "-m",
        "benchmark",
    ]

    if args.hook:
        # Hook mode: stricter settings — compare against the saved
        # baseline and fail the run on regression.
        pytest_args.extend(
            [
                "--benchmark-only",
                "--benchmark-compare",
                "--benchmark-compare-fail=min:5%",  # Fail if >5% slower
            ]
        )

    if args.displays:
        # pytest's -k option takes a keyword *expression*, not a
        # comma-separated list: "-k null,pygame" matches the literal
        # string "null,pygame" and selects nothing. Join the entries
        # with "or" so each display name matches independently.
        keyword_expr = " or ".join(
            name.strip() for name in args.displays.split(",") if name.strip()
        )
        if keyword_expr:
            pytest_args.extend(["-k", keyword_expr])

    if args.iterations:
        # Benchmark tests read the iteration count from the environment.
        os.environ["BENCHMARK_ITERATIONS"] = str(args.iterations)

    # Run pytest in the current directory and propagate its exit status.
    result = subprocess.run(
        [sys.executable, "-m", "pytest", "tests/test_benchmark.py"] + pytest_args,
    )
    sys.exit(result.returncode)


if __name__ == "__main__":
    main()