forked from genewildish/Mainline

- Add comparison_presets.toml with 20+ preset configurations
- Add comparison_capture.py for frame capture and comparison
- Add run_comparison.py for running comparisons
- Add test_comparison_framework.py with comprehensive tests
- Add capture_upstream_comparison.py for upstream frame capture
- Add tomli to dev dependencies for TOML parsing

The framework supports:
- Multiple preset configurations (basic, effects, camera, source, viewport)
- Frame-by-frame comparison with detailed diff analysis
- Performance metrics comparison
- HTML report generation
- Integration with sideline branch for regression testing
342 lines
12 KiB
Python
"""Comparison framework tests for upstream vs sideline pipeline.
|
|
|
|
These tests verify that the comparison framework works correctly
|
|
and can be used for regression testing.
|
|
"""
|
|
|
|
import json
|
|
import tempfile
|
|
from pathlib import Path
|
|
|
|
import pytest
|
|
|
|
from tests.comparison_capture import capture_frames, compare_captured_outputs
|
|
|
|
|
|
class TestComparisonCapture:
    """Tests for frame capture functionality."""

    def test_capture_basic_preset(self):
        """Test capturing frames from a basic preset."""
        with tempfile.TemporaryDirectory() as workdir:
            capture = capture_frames(
                preset_name="comparison-basic",
                frame_count=10,
                output_dir=Path(workdir),
            )

            # The capture result must expose every top-level section.
            for section in ("preset", "config", "frames", "capture_stats"):
                assert section in capture

            # Exactly the requested number of frames was captured.
            assert len(capture["frames"]) == 10

            # Each frame record carries its content and geometry metadata.
            first_frame = capture["frames"][0]
            for field in ("frame_number", "buffer", "width", "height"):
                assert field in first_frame

    def test_capture_with_message_overlay(self):
        """Test capturing frames with message overlay enabled."""
        with tempfile.TemporaryDirectory() as workdir:
            capture = capture_frames(
                preset_name="comparison-with-message-overlay",
                frame_count=5,
                output_dir=Path(workdir),
            )

            # The overlay flag from the preset must survive into the config.
            assert capture["config"]["enable_message_overlay"] is True

    def test_capture_multiple_presets(self):
        """Test capturing frames from multiple presets."""
        with tempfile.TemporaryDirectory() as workdir:
            target = Path(workdir)
            for preset in ("comparison-basic", "comparison-single-effect"):
                capture = capture_frames(
                    preset_name=preset,
                    frame_count=5,
                    output_dir=target,
                )
                # Each capture must be tagged with its own preset name.
                assert capture["preset"] == preset
|
|
|
|
class TestComparisonAnalysis:
    """Tests for comparison analysis functionality."""

    @staticmethod
    def _capture_payload(buffer, render_time_ms, fps):
        """Build a minimal single-frame capture dict matching the capture schema.

        The same 80x24 viewport and frame layout is used by every test in
        this class; only the frame buffer and the timing figures vary, so
        they are the only parameters.
        """
        return {
            "preset": "test",
            "config": {"viewport_width": 80, "viewport_height": 24},
            "frames": [
                {
                    "frame_number": 0,
                    "buffer": buffer,
                    "width": 80,
                    "height": 24,
                    "render_time_ms": render_time_ms,
                }
            ],
            "capture_stats": {
                "frame_count": 1,
                "total_time_ms": render_time_ms,
                "avg_frame_time_ms": render_time_ms,
                "fps": fps,
            },
        }

    @staticmethod
    def _write_json(path, data):
        """Serialize *data* as JSON to *path*."""
        with open(path, "w") as f:
            json.dump(data, f)

    def test_compare_identical_outputs(self):
        """Test comparing identical outputs shows 100% match."""
        with tempfile.TemporaryDirectory() as tmpdir:
            output_dir = Path(tmpdir)

            sideline_file = output_dir / "test_sideline.json"
            upstream_file = output_dir / "test_upstream.json"

            # Write the same capture to both sides.
            test_data = self._capture_payload(
                ["Line 1", "Line 2", "Line 3"], 10.0, 100.0
            )
            self._write_json(sideline_file, test_data)
            self._write_json(upstream_file, test_data)

            result = compare_captured_outputs(
                sideline_file=sideline_file,
                upstream_file=upstream_file,
            )

            # Identical inputs must report a perfect match and no diffs.
            assert result["stats"]["match_percentage"] == 100.0
            assert result["stats"]["identical_frames"] == 1
            assert result["stats"]["total_differences"] == 0

    def test_compare_different_outputs(self):
        """Test comparing different outputs detects differences."""
        with tempfile.TemporaryDirectory() as tmpdir:
            output_dir = Path(tmpdir)

            sideline_file = output_dir / "test_sideline.json"
            upstream_file = output_dir / "test_upstream.json"

            # The first buffer line differs between the two captures.
            self._write_json(
                sideline_file,
                self._capture_payload(["Sideline Line 1", "Line 2"], 10.0, 100.0),
            )
            self._write_json(
                upstream_file,
                self._capture_payload(["Upstream Line 1", "Line 2"], 12.0, 83.33),
            )

            result = compare_captured_outputs(
                sideline_file=sideline_file,
                upstream_file=upstream_file,
            )

            # The mismatched line must show up in the stats and line diffs.
            assert result["stats"]["match_percentage"] < 100.0
            assert result["stats"]["total_differences"] > 0
            assert len(result["frame_comparisons"][0]["line_diffs"]) > 0

    def test_performance_comparison(self):
        """Test that performance metrics are compared correctly."""
        with tempfile.TemporaryDirectory() as tmpdir:
            output_dir = Path(tmpdir)

            sideline_file = output_dir / "test_sideline.json"
            upstream_file = output_dir / "test_upstream.json"

            # Empty buffers: only the timing figures differ.
            self._write_json(
                sideline_file, self._capture_payload([], 10.0, 100.0)
            )
            self._write_json(
                upstream_file, self._capture_payload([], 12.0, 83.33)
            )

            result = compare_captured_outputs(
                sideline_file=sideline_file,
                upstream_file=upstream_file,
            )

            # Verify the performance comparison section is populated.
            perf = result["performance_comparison"]
            assert "sideline" in perf
            assert "upstream" in perf
            assert "diff" in perf
            assert (
                perf["sideline"]["fps"] > perf["upstream"]["fps"]
            )  # Sideline is faster in this example
|
|
|
class TestComparisonPresets:
    """Tests for comparison preset configuration."""

    # Single source of truth for the preset file location.
    PRESETS_PATH = Path("tests/comparison_presets.toml")

    # Fields every preset must declare.
    REQUIRED_FIELDS = (
        "source",
        "display",
        "camera",
        "viewport_width",
        "viewport_height",
        "frame_count",
    )

    @classmethod
    def _load_presets(cls):
        """Parse the preset TOML file and return its ``presets`` table."""
        # Prefer the stdlib parser (Python 3.11+); fall back to the
        # API-compatible third-party ``tomli`` on older interpreters.
        try:
            import tomllib
        except ModuleNotFoundError:
            import tomli as tomllib
        with open(cls.PRESETS_PATH, "rb") as f:
            config = tomllib.load(f)
        return config.get("presets", {})

    def test_comparison_presets_exist(self):
        """Test that comparison presets file exists and is valid."""
        assert self.PRESETS_PATH.exists(), "Comparison presets file should exist"

    def test_preset_structure(self):
        """Test that presets have required fields."""
        presets = self._load_presets()
        assert len(presets) > 0, "Should have at least one preset"

        for preset_name, preset_config in presets.items():
            # Each preset should have every required field.
            for field in self.REQUIRED_FIELDS:
                assert field in preset_config, (
                    f"{preset_name} should have '{field}'"
                )

    def test_preset_variety(self):
        """Test that presets cover different scenarios."""
        presets = self._load_presets()

        # Category -> keywords that place a preset in that category.
        # Order matters: a preset counts toward the FIRST category whose
        # keywords match (mirrors the original elif chain).
        keyword_table = (
            ("basic", ("basic",)),
            ("effect", ("effect", "border", "tint")),
            ("camera", ("camera",)),
            ("source", ("source",)),
            ("viewport", ("viewport", "small", "large")),
            ("comprehensive", ("comprehensive",)),
            ("regression", ("regression",)),
        )
        categories = {name: 0 for name, _ in keyword_table}

        for preset_name in presets:
            name_lower = preset_name.lower()
            for category, keywords in keyword_table:
                if any(keyword in name_lower for keyword in keywords):
                    categories[category] += 1
                    break

        # Verify we have variety
        assert categories["basic"] > 0, "Should have at least one basic preset"
        assert categories["effect"] > 0, "Should have at least one effect preset"
        assert categories["camera"] > 0, "Should have at least one camera preset"
        assert categories["source"] > 0, "Should have at least one source preset"