forked from genewildish/Mainline
- Add comparison_presets.toml with 20+ preset configurations - Add comparison_capture.py for frame capture and comparison - Add run_comparison.py for running comparisons - Add test_comparison_framework.py with comprehensive tests - Add capture_upstream_comparison.py for upstream frame capture - Add tomli to dev dependencies for TOML parsing The framework supports: - Multiple preset configurations (basic, effects, camera, source, viewport) - Frame-by-frame comparison with detailed diff analysis - Performance metrics comparison - HTML report generation - Integration with sideline branch for regression testing
503 lines
18 KiB
Python
503 lines
18 KiB
Python
"""Frame capture utilities for upstream vs sideline comparison.
|
|
|
|
This module provides functions to capture frames from both upstream and sideline
|
|
implementations for visual comparison and performance analysis.
|
|
"""
|
|
|
|
import json
|
|
import time
|
|
from pathlib import Path
|
|
from typing import Any, Dict, List, Tuple
|
|
|
|
import tomli
|
|
|
|
from engine.pipeline import Pipeline, PipelineConfig, PipelineContext
|
|
from engine.pipeline.params import PipelineParams
|
|
|
|
|
|
def load_comparison_preset(
    preset_name: str,
    presets_file: Path = Path("tests/comparison_presets.toml"),
) -> Dict[str, Any]:
    """Load a comparison preset from a TOML presets file.

    Args:
        preset_name: Name of the preset to load. Both the bare form
            ("basic") and the qualified form ("presets.basic") are accepted.
        presets_file: Path to the TOML file containing a [presets.*] table.
            Defaults to the repository's comparison presets file.

    Returns:
        Preset configuration dictionary.

    Raises:
        FileNotFoundError: If the presets file does not exist.
        ValueError: If the named preset is not present in the file.
    """
    # Prefer the stdlib TOML parser (Python 3.11+); fall back to the
    # tomli backport this project already depends on.
    try:
        import tomllib as _toml
    except ImportError:  # pragma: no cover - Python < 3.11
        import tomli as _toml  # type: ignore[no-redef]

    if not presets_file.exists():
        raise FileNotFoundError(f"Comparison presets file not found: {presets_file}")

    with open(presets_file, "rb") as f:
        config = _toml.load(f)

    presets = config.get("presets", {})

    # Accept both qualified and bare preset names by trying each spelling.
    if preset_name.startswith("presets."):
        full_name = preset_name
        simple_name = preset_name[len("presets.") :]
    else:
        full_name = f"presets.{preset_name}"
        simple_name = preset_name

    if full_name in presets:
        return presets[full_name]
    if simple_name in presets:
        return presets[simple_name]
    raise ValueError(
        f"Preset '{preset_name}' not found in {presets_file}. Available: {list(presets.keys())}"
    )
def capture_frames(
    preset_name: str,
    frame_count: int = 30,
    output_dir: Path = Path("tests/comparison_output"),
) -> Dict[str, Any]:
    """Capture frames from the sideline pipeline using a preset.

    Args:
        preset_name: Name of preset to use (comparison preset first, then
            built-in pipeline presets as a fallback).
        frame_count: Number of frames to capture.
        output_dir: Directory to save the captured-frames JSON file.

    Returns:
        Dictionary with the captured frames, the effective config, and
        capture statistics; the same payload is written to
        ``<output_dir>/<preset_name>_sideline.json``.

    Raises:
        ValueError: If the preset is not found in either preset source.
    """
    from engine.pipeline.presets import get_preset

    output_dir.mkdir(parents=True, exist_ok=True)

    # Load preset - try comparison presets first, then built-in presets
    try:
        preset = load_comparison_preset(preset_name)
        # Wrap the dict so both preset sources expose the same
        # attribute-style access used below.
        from types import SimpleNamespace

        preset = SimpleNamespace(**preset)
    except (FileNotFoundError, ValueError):
        # Fall back to built-in presets
        preset = get_preset(preset_name)
        if not preset:
            raise ValueError(
                f"Preset '{preset_name}' not found in comparison or built-in presets"
            )

    # Create pipeline config from preset
    config = PipelineConfig(
        source=preset.source,
        display="null",  # Always use null display for capture
        camera=preset.camera,
        effects=preset.effects,
    )

    # Create pipeline context sized to the preset's viewport.
    ctx = PipelineContext()
    ctx.terminal_width = preset.viewport_width
    ctx.terminal_height = preset.viewport_height
    pipeline = Pipeline(config=config, context=ctx)

    # Create params
    params = PipelineParams(
        viewport_width=preset.viewport_width,
        viewport_height=preset.viewport_height,
    )
    ctx.params = params

    # Add message overlay stage if the preset opts in.
    if getattr(preset, "enable_message_overlay", False):
        from engine.pipeline.adapters import MessageOverlayConfig, MessageOverlayStage

        overlay_config = MessageOverlayConfig(
            enabled=True,
            display_secs=30,
        )
        pipeline.add_stage(
            "message_overlay", MessageOverlayStage(config=overlay_config)
        )

    # Build pipeline
    pipeline.build()

    # Capture frames, timing each execute() call individually.
    frames = []
    start_time = time.time()

    for i in range(frame_count):
        frame_start = time.time()
        stage_result = pipeline.execute()
        frame_time = time.time() - frame_start

        # Extract buffer from result; failed stages yield an empty buffer.
        buffer = stage_result.data if stage_result.success else []

        frames.append(
            {
                "frame_number": i,
                "buffer": buffer,
                "width": preset.viewport_width,
                "height": preset.viewport_height,
                "render_time_ms": frame_time * 1000,
            }
        )

    total_time = time.time() - start_time

    # Save captured data
    output_file = output_dir / f"{preset_name}_sideline.json"
    captured_data = {
        "preset": preset_name,
        "config": {
            "source": preset.source,
            "camera": preset.camera,
            "effects": preset.effects,
            "viewport_width": preset.viewport_width,
            "viewport_height": preset.viewport_height,
            "enable_message_overlay": getattr(preset, "enable_message_overlay", False),
        },
        "capture_stats": {
            "frame_count": frame_count,
            "total_time_ms": total_time * 1000,
            # Guard both divisions: frame_count=0 previously raised
            # ZeroDivisionError here while fps already had a guard.
            "avg_frame_time_ms": (total_time * 1000) / frame_count
            if frame_count > 0
            else 0,
            "fps": frame_count / total_time if total_time > 0 else 0,
        },
        "frames": frames,
    }

    with open(output_file, "w") as f:
        json.dump(captured_data, f, indent=2)

    return captured_data
def compare_captured_outputs(
    sideline_file: Path,
    upstream_file: Path,
    output_dir: Path = Path("tests/comparison_output"),
) -> Dict[str, Any]:
    """Compare captured pipeline outputs from sideline and upstream runs.

    Args:
        sideline_file: Path to the sideline capture JSON file.
        upstream_file: Path to the upstream capture JSON file.
        output_dir: Directory where the comparison JSON is written.

    Returns:
        Dictionary describing config, frame-by-frame, and performance
        differences; also written to ``<output_dir>/<preset>_comparison.json``.
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    # Load both capture payloads.
    with open(sideline_file) as fh:
        local = json.load(fh)
    with open(upstream_file) as fh:
        remote = json.load(fh)

    # Record any config fields that disagree between the two captures.
    watched_keys = (
        "source",
        "camera",
        "effects",
        "viewport_width",
        "viewport_height",
        "enable_message_overlay",
    )
    config_diff = {
        key: {
            "sideline": local["config"].get(key),
            "upstream": remote["config"].get(key),
        }
        for key in watched_keys
        if local["config"].get(key) != remote["config"].get(key)
    }

    n_local = len(local["frames"])
    n_remote = len(remote["frames"])
    compared = min(n_local, n_remote)

    # Walk frames pairwise, collecting per-line mismatches.
    frame_comparisons = []
    total_diff = 0
    max_diff = 0
    identical_frames = 0

    for idx in range(compared):
        lf = local["frames"][idx]
        rf = remote["frames"][idx]
        left_buf = lf["buffer"]
        right_buf = rf["buffer"]

        # Pad the shorter buffer with empty lines so extra rows count as diffs.
        mismatches = []
        for row in range(max(len(left_buf), len(right_buf))):
            left = left_buf[row] if row < len(left_buf) else ""
            right = right_buf[row] if row < len(right_buf) else ""
            if left != right:
                mismatches.append(
                    {"line": row, "sideline": left, "upstream": right}
                )

        diff_count = len(mismatches)
        if diff_count == 0:
            identical_frames += 1
        total_diff += diff_count
        if diff_count > max_diff:
            max_diff = diff_count

        frame_comparisons.append(
            {
                "frame_number": idx,
                "differences": diff_count,
                # Keep only the first five mismatching lines per frame.
                "line_diffs": mismatches[:5],
                "render_time_diff_ms": lf.get("render_time_ms", 0)
                - rf.get("render_time_ms", 0),
            }
        )

    # Aggregate frame statistics.
    stats = {
        "total_frames_compared": compared,
        "identical_frames": identical_frames,
        "frames_with_differences": compared - identical_frames,
        "total_differences": total_diff,
        "max_differences_per_frame": max_diff,
        "avg_differences_per_frame": total_diff / compared if compared > 0 else 0,
        "match_percentage": (identical_frames / compared * 100)
        if compared > 0
        else 0,
    }

    # Side-by-side performance stats plus their delta.
    s_stats = local.get("capture_stats", {})
    u_stats = remote.get("capture_stats", {})
    perf_keys = ("total_time_ms", "avg_frame_time_ms", "fps")
    performance_comparison = {
        "sideline": {k: s_stats.get(k, 0) for k in perf_keys},
        "upstream": {k: u_stats.get(k, 0) for k in perf_keys},
        "diff": {k: s_stats.get(k, 0) - u_stats.get(k, 0) for k in perf_keys},
    }

    result = {
        "preset": local["preset"],
        "config_diff": config_diff,
        "frame_count_match": n_local == n_remote,
        "stats": stats,
        "performance_comparison": performance_comparison,
        "frame_comparisons": frame_comparisons,
        "sideline_file": str(sideline_file),
        "upstream_file": str(upstream_file),
    }

    # Persist the comparison next to the captures.
    output_file = output_dir / f"{local['preset']}_comparison.json"
    with open(output_file, "w") as fh:
        json.dump(result, fh, indent=2)

    return result
def generate_html_report(
    comparison_results: List[Dict[str, Any]],
    output_dir: Path = Path("tests/comparison_output"),
) -> Path:
    """Generate a standalone HTML report from comparison results.

    Args:
        comparison_results: List of comparison result dictionaries, as
            produced by compare_captured_outputs().
        output_dir: Directory to save the HTML report in.

    Returns:
        Path to the generated HTML report
        (``<output_dir>/comparison_report.html``).
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    html_content = """
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Mainline Comparison Report</title>
    <style>
        body { font-family: Arial, sans-serif; margin: 20px; background: #f5f5f5; }
        .header { background: #2c3e50; color: white; padding: 20px; border-radius: 5px; }
        .summary { background: white; padding: 15px; margin: 10px 0; border-radius: 5px; }
        .preset { background: white; margin: 10px 0; padding: 15px; border-radius: 5px; }
        .preset-header { font-size: 1.2em; font-weight: bold; margin-bottom: 10px; }
        .stats { display: grid; grid-template-columns: repeat(3, 1fr); gap: 10px; margin: 10px 0; }
        .stat-box { background: #ecf0f1; padding: 10px; border-radius: 3px; text-align: center; }
        .stat-value { font-size: 1.5em; font-weight: bold; }
        .stat-label { font-size: 0.9em; color: #7f8c8d; }
        .match { color: #27ae60; }
        .mismatch { color: #e74c3c; }
        .warning { color: #f39c12; }
        .frame-comparison { margin: 10px 0; }
        .frame-grid { display: grid; grid-template-columns: 1fr 1fr; gap: 10px; }
        .frame-box { background: #ecf0f1; padding: 10px; border-radius: 3px; }
        .frame-header { font-weight: bold; margin-bottom: 5px; }
        .diff-line { background: #ffeaa7; padding: 2px 5px; margin: 2px 0; font-family: monospace; font-size: 0.8em; }
        .performance { background: #e8f4f8; padding: 15px; margin: 10px 0; border-radius: 5px; }
        .performance-grid { display: grid; grid-template-columns: 1fr 1fr 1fr; gap: 10px; }
        .perf-box { text-align: center; padding: 10px; }
        .perf-sideline { background: #d5f4e6; }
        .perf-upstream { background: #fde8e8; }
        .perf-diff { background: #fff3cd; }
        .timestamp { color: #7f8c8d; font-size: 0.9em; }
    </style>
</head>
<body>
    <div class="header">
        <h1>Mainline Pipeline Comparison Report</h1>
        <p class="timestamp">Generated: {{timestamp}}</p>
    </div>

    <div class="summary">
        <h2>Summary</h2>
        <div class="stats">
            <div class="stat-box">
                <div class="stat-value" id="total-presets">0</div>
                <div class="stat-label">Presets Tested</div>
            </div>
            <div class="stat-box">
                <div class="stat-value" id="total-match">0%</div>
                <div class="stat-label">Average Match Rate</div>
            </div>
            <div class="stat-box">
                <div class="stat-value" id="total-frames">0</div>
                <div class="stat-label">Total Frames Compared</div>
            </div>
        </div>
    </div>

    <div id="preset-results">
        <!-- Preset results will be inserted here -->
    </div>

    <script>
        const comparisonData = {{comparison_data}};
        const summary = {{summary}};

        // Update summary
        document.getElementById('total-presets').textContent = summary.total_presets;
        document.getElementById('total-match').textContent = summary.average_match.toFixed(1) + '%';
        document.getElementById('total-frames').textContent = summary.total_frames;

        // Generate preset results
        const resultsContainer = document.getElementById('preset-results');

        comparisonData.forEach(result => {
            const presetDiv = document.createElement('div');
            presetDiv.className = 'preset';

            const matchClass = result.stats.match_percentage >= 95 ? 'match' :
                               result.stats.match_percentage >= 80 ? 'warning' : 'mismatch';

            presetDiv.innerHTML = `
                <div class="preset-header">${result.preset}</div>
                <div class="stats">
                    <div class="stat-box">
                        <div class="stat-value ${matchClass}">${result.stats.match_percentage.toFixed(1)}%</div>
                        <div class="stat-label">Frame Match Rate</div>
                    </div>
                    <div class="stat-box">
                        <div class="stat-value">${result.stats.total_frames_compared}</div>
                        <div class="stat-label">Frames Compared</div>
                    </div>
                    <div class="stat-box">
                        <div class="stat-value">${result.stats.identical_frames}</div>
                        <div class="stat-label">Identical Frames</div>
                    </div>
                </div>

                <div class="performance">
                    <h3>Performance Comparison</h3>
                    <div class="performance-grid">
                        <div class="perf-box perf-sideline">
                            <div>Sideline</div>
                            <div class="stat-value">${result.performance_comparison.sideline.avg_frame_time_ms.toFixed(2)}ms</div>
                            <div class="stat-label">${result.performance_comparison.sideline.fps.toFixed(1)} FPS</div>
                        </div>
                        <div class="perf-box perf-upstream">
                            <div>Upstream</div>
                            <div class="stat-value">${result.performance_comparison.upstream.avg_frame_time_ms.toFixed(2)}ms</div>
                            <div class="stat-label">${result.performance_comparison.upstream.fps.toFixed(1)} FPS</div>
                        </div>
                        <div class="perf-box perf-diff">
                            <div>Difference</div>
                            <div class="stat-value">${result.performance_comparison.diff.avg_frame_time_ms.toFixed(2)}ms</div>
                            <div class="stat-label">${result.performance_comparison.diff.fps.toFixed(1)} FPS</div>
                        </div>
                    </div>
                </div>
            `;

            resultsContainer.appendChild(presetDiv);
        });
    </script>
</body>
</html>
"""

    # Embed the JSON payload. Escape "</" as "<\\/" (a valid JSON/JS
    # string escape for "/") so data containing "</script>" cannot
    # terminate the inline <script> element early and break the report.
    comparison_data_json = json.dumps(comparison_results).replace("</", "<\\/")

    # Calculate summary statistics
    total_presets = len(comparison_results)
    total_frames = sum(r["stats"]["total_frames_compared"] for r in comparison_results)
    total_identical = sum(r["stats"]["identical_frames"] for r in comparison_results)
    average_match = (total_identical / total_frames * 100) if total_frames > 0 else 0

    summary = {
        "total_presets": total_presets,
        "total_frames": total_frames,
        "total_identical": total_identical,
        "average_match": average_match,
    }
    summary_json = json.dumps(summary).replace("</", "<\\/")

    # Replace placeholders
    html_content = html_content.replace(
        "{{timestamp}}", time.strftime("%Y-%m-%d %H:%M:%S")
    )
    html_content = html_content.replace("{{comparison_data}}", comparison_data_json)
    html_content = html_content.replace("{{summary}}", summary_json)

    # Save HTML report
    output_file = output_dir / "comparison_report.html"
    with open(output_file, "w") as f:
        f.write(html_content)

    return output_file