forked from genewildish/Mainline
feat(integration): Complete feature rewrite with pipeline architecture, effects system, and display improvements
Major changes: - Pipeline architecture with capability-based dependency resolution - Effects plugin system with performance monitoring - Display abstraction with multiple backends (terminal, null, websocket) - Camera system for viewport scrolling - Sensor framework for real-time input - Command-and-control system via ntfy - WebSocket display backend for browser clients - Comprehensive test suite and documentation Issue #48: ADR for preset scripting language included This commit consolidates 110 individual commits into a single feature integration that can be reviewed and tested before further refinement.
This commit is contained in:
473
tests/acceptance_report.py
Normal file
473
tests/acceptance_report.py
Normal file
@@ -0,0 +1,473 @@
|
||||
"""
|
||||
HTML Acceptance Test Report Generator
|
||||
|
||||
Generates HTML reports showing frame buffers from acceptance tests.
|
||||
Uses NullDisplay to capture frames and renders them with monospace font.
|
||||
"""
|
||||
|
||||
import html
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
# RGB values for the 16 basic ANSI colors: 0-7 are the normal palette,
# 8-15 the bright palette.  Codes 16-255 are derived arithmetically in
# ansi_to_rgb() and are not listed here.
ANSI_256_TO_RGB = {
    0: (0, 0, 0),          # black
    1: (128, 0, 0),        # red
    2: (0, 128, 0),        # green
    3: (128, 128, 0),      # yellow
    4: (0, 0, 128),        # blue
    5: (128, 0, 128),      # magenta
    6: (0, 128, 128),      # cyan
    7: (192, 192, 192),    # white (light gray)
    8: (128, 128, 128),    # bright black (gray)
    9: (255, 0, 0),        # bright red
    10: (0, 255, 0),       # bright green
    11: (255, 255, 0),     # bright yellow
    12: (0, 0, 255),       # bright blue
    13: (255, 0, 255),     # bright magenta
    14: (0, 255, 255),     # bright cyan
    15: (255, 255, 255),   # bright white
}
|
||||
|
||||
|
||||
def ansi_to_rgb(color_code: int) -> tuple[int, int, int]:
    """Convert an ANSI 256-color code to an ``(r, g, b)`` tuple.

    Codes 0-15 come from the base palette table, 16-231 from the 6x6x6
    color cube (a linear 51-step approximation of the xterm cube), and
    232-255 from the 24-step grayscale ramp.  Any out-of-range code
    falls back to white.
    """
    if 0 <= color_code <= 15:
        return ANSI_256_TO_RGB.get(color_code, (255, 255, 255))
    if 16 <= color_code <= 231:
        # 6x6x6 cube: each channel is a base-6 digit scaled to 0-255.
        cube = color_code - 16
        red = (cube // 36) * 51
        green = ((cube % 36) // 6) * 51
        blue = (cube % 6) * 51
        return (red, green, blue)
    if 232 <= color_code <= 255:
        # Grayscale ramp from 8 to 238 in steps of 10.
        level = (color_code - 232) * 10 + 8
        return (level, level, level)
    return (255, 255, 255)
|
||||
|
||||
|
||||
def parse_ansi_line(line: str) -> list[dict[str, Any]]:
    """Parse a single line with ANSI escape codes into styled segments.

    Returns a list of dicts with 'text', 'fg', 'bg', 'bold' keys, where
    'fg'/'bg' are RGB tuples or None for the default color.
    """
    import re

    escape_pattern = re.compile(r"\x1b\[([0-9;]*)m")

    segments: list[dict[str, Any]] = []
    fg = None
    bg = None
    bold = False

    def emit(text: str) -> None:
        # Record a run of text under the styling active at call time.
        if text:
            segments.append({"text": text, "fg": fg, "bg": bg, "bold": bold})

    pos = 0
    while pos < len(line):
        match = escape_pattern.search(line, pos)
        if match is None:
            # No more escapes: flush the tail and stop.
            emit(line[pos:])
            break

        # Text preceding the escape sequence keeps the previous style.
        emit(line[pos : match.start()])

        raw = match.group(1)
        for code in (raw.split(";") if raw else ["0"]):
            code = code.strip()
            if not code or code == "0":
                fg, bg, bold = None, None, False
            elif code == "1":
                bold = True
            elif code.isdigit():
                value = int(code)
                if 30 <= value <= 37:
                    # NOTE(review): standard colors are mapped to the
                    # bright palette (offset +8) and 90-97 to the normal
                    # one -- presumably for visibility on the dark report
                    # background; confirm this inversion is intended.
                    fg = ansi_to_rgb(value - 30 + 8)
                elif 90 <= value <= 97:
                    fg = ansi_to_rgb(value - 90)
                elif value == 38:
                    # Extended-color introducer is not parsed; white stub.
                    fg = (255, 255, 255)
                elif value == 39:
                    fg = None

        pos = match.end()

    return segments
|
||||
|
||||
|
||||
def render_line_to_html(line: str) -> str:
    """Render a single terminal line to HTML with inline styling.

    SGR color/bold sequences become ``<span style=...>`` runs, cursor
    positioning sequences are dropped, and all text is HTML-escaped.
    If nothing styled was produced, the escaped raw line is returned.
    """
    import re

    result = ""
    pos = 0
    current_fg: tuple[int, int, int] | None = None
    current_bg: tuple[int, int, int] | None = None
    current_bold = False

    # Group 1: SGR style sequences (\x1b[...m); group 2: cursor
    # positioning (\x1b[row;colH).
    escape_pattern = re.compile(r"(\x1b\[[0-9;]*m)|(\x1b\[([0-9]+);([0-9]+)H)")

    def styled_span(text: str) -> str:
        # Wrap escaped text in a span carrying the current style.
        style = _build_style(current_fg, current_bg, current_bold)
        return f"<span{style}>{text}</span>"

    while pos < len(line):
        match = escape_pattern.search(line, pos)
        if not match:
            # Remaining text with current styling.
            text = html.escape(line[pos:])
            if text:
                result += styled_span(text)
            # BUG FIX: advance pos before leaving the loop; previously the
            # trailing text was emitted here AND again by a post-loop
            # fallback, duplicating it in the output.
            pos = len(line)
            break

        # BUG FIX: plain text preceding an escape sequence was previously
        # discarded entirely; emit it with the styling in effect.
        if match.start() > pos:
            text = html.escape(line[pos : match.start()])
            if text:
                result += styled_span(text)

        # Handle cursor positioning - just skip it for rendering.
        if match.group(2):
            pos = match.end()
            continue

        # Handle style codes.
        if match.group(1):
            codes = match.group(1)[2:-1].split(";") if match.group(1) else ["0"]
            for code in codes:
                code = code.strip()
                if not code or code == "0":
                    current_fg = None
                    current_bg = None
                    current_bold = False
                elif code == "1":
                    current_bold = True
                elif code.isdigit():
                    code_int = int(code)
                    if 30 <= code_int <= 37:
                        # Standard colors use the bright palette (+8),
                        # matching parse_ansi_line.
                        current_fg = ansi_to_rgb(code_int - 30 + 8)
                    elif 90 <= code_int <= 97:
                        current_fg = ansi_to_rgb(code_int - 90)
            pos = match.end()
            continue

        pos = match.end()

    return result or html.escape(line)
|
||||
|
||||
|
||||
def _build_style(
|
||||
fg: tuple[int, int, int] | None, bg: tuple[int, int, int] | None, bold: bool
|
||||
) -> str:
|
||||
"""Build CSS style string from color values."""
|
||||
styles = []
|
||||
if fg:
|
||||
styles.append(f"color: rgb({fg[0]},{fg[1]},{fg[2]})")
|
||||
if bg:
|
||||
styles.append(f"background-color: rgb({bg[0]},{bg[1]},{bg[2]})")
|
||||
if bold:
|
||||
styles.append("font-weight: bold")
|
||||
if not styles:
|
||||
return ""
|
||||
return f' style="{"; ".join(styles)}"'
|
||||
|
||||
|
||||
def render_frame_to_html(frame: list[str], frame_number: int = 0) -> str:
    """Render a complete frame (list of terminal lines) to an HTML block."""
    rendered_lines = []
    for index, raw_line in enumerate(frame):
        # Strip the cursor-positioning prefixes for the first three rows;
        # color escapes are preserved for render_line_to_html.
        clean_line = raw_line
        for prefix in ("\x1b[1;1H", "\x1b[2;1H", "\x1b[3;1H"):
            clean_line = clean_line.replace(prefix, "")
        rendered_lines.append(
            f'<div class="frame-line" data-line="{index}">'
            f"{render_line_to_html(clean_line)}</div>"
        )

    joined = "".join(rendered_lines)
    return f"""<div class="frame" id="frame-{frame_number}">
<div class="frame-header">Frame {frame_number} ({len(frame)} lines)</div>
<div class="frame-content">
{joined}
</div>
</div>"""
|
||||
|
||||
|
||||
def generate_test_report(
    test_name: str,
    frames: list[list[str]],
    status: str = "PASS",
    duration_ms: float = 0.0,
    metadata: dict[str, Any] | None = None,
) -> str:
    """Generate a self-contained HTML report page for a single test.

    Args:
        test_name: Title shown in the page header and <title>.
        frames: Captured frame buffers, each a list of terminal lines.
        status: "PASS" selects the green badge, anything else the red one.
        duration_ms: Accepted for API symmetry with save_report.
            NOTE(review): currently not rendered anywhere in the page.
        metadata: Optional key/value rows shown above the frames.

    Returns:
        The complete HTML document as a string (UTF-8 content; the page
        declares <meta charset="UTF-8">).
    """
    # Render every frame as its own bordered block.
    frames_html = ""
    for i, frame in enumerate(frames):
        frames_html += render_frame_to_html(frame, i)

    # Optional metadata table; values are interpolated as-is (callers
    # supply trusted values, no HTML escaping is applied here).
    metadata_html = ""
    if metadata:
        metadata_html = '<div class="metadata">'
        for key, value in metadata.items():
            metadata_html += f'<div class="meta-row"><span class="meta-key">{key}:</span> <span class="meta-value">{value}</span></div>'
        metadata_html += "</div>"

    status_class = "pass" if status == "PASS" else "fail"

    return f"""<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>{test_name} - Acceptance Test Report</title>
<style>
body {{
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
background: #1a1a2e;
color: #eee;
margin: 0;
padding: 20px;
}}
.test-report {{
max-width: 1200px;
margin: 0 auto;
}}
.test-header {{
background: #16213e;
padding: 20px;
border-radius: 8px;
margin-bottom: 20px;
display: flex;
justify-content: space-between;
align-items: center;
}}
.test-name {{
font-size: 24px;
font-weight: bold;
color: #fff;
}}
.status {{
padding: 8px 16px;
border-radius: 4px;
font-weight: bold;
}}
.status.pass {{
background: #28a745;
color: white;
}}
.status.fail {{
background: #dc3545;
color: white;
}}
.frame {{
background: #0f0f1a;
border: 1px solid #333;
border-radius: 4px;
margin-bottom: 20px;
overflow: hidden;
}}
.frame-header {{
background: #16213e;
padding: 10px 15px;
font-size: 14px;
color: #888;
border-bottom: 1px solid #333;
}}
.frame-content {{
padding: 15px;
font-family: 'Fira Code', 'Consolas', 'Monaco', monospace;
font-size: 13px;
line-height: 1.4;
white-space: pre;
overflow-x: auto;
}}
.frame-line {{
min-height: 1.4em;
}}
.metadata {{
background: #16213e;
padding: 15px;
border-radius: 4px;
margin-bottom: 20px;
}}
.meta-row {{
display: flex;
gap: 20px;
font-size: 14px;
}}
.meta-key {{
color: #888;
}}
.meta-value {{
color: #fff;
}}
.footer {{
text-align: center;
color: #666;
font-size: 12px;
margin-top: 40px;
}}
</style>
</head>
<body>
<div class="test-report">
<div class="test-header">
<div class="test-name">{test_name}</div>
<div class="status {status_class}">{status}</div>
</div>
{metadata_html}
{frames_html}
<div class="footer">
Generated: {datetime.now().isoformat()}
</div>
</div>
</body>
</html>"""
|
||||
|
||||
|
||||
def save_report(
    test_name: str,
    frames: list[list[str]],
    output_dir: str = "test-reports",
    status: str = "PASS",
    duration_ms: float = 0.0,
    metadata: dict[str, Any] | None = None,
) -> str:
    """Save an HTML report to disk and return the file path.

    Creates *output_dir* (and parents) if needed; the test name is
    sanitized to alphanumerics, dashes and underscores for the filename.
    """
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    # Sanitize test name for filename.
    safe_name = "".join(c if c.isalnum() or c in "-_" else "_" for c in test_name)
    filename = f"{safe_name}.html"
    filepath = output_path / filename

    html_content = generate_test_report(
        test_name, frames, status, duration_ms, metadata
    )
    # BUG FIX: the generated page declares <meta charset="UTF-8">, so
    # write it as UTF-8 explicitly instead of relying on the platform's
    # default encoding (which can differ, e.g. on Windows).
    filepath.write_text(html_content, encoding="utf-8")

    return str(filepath)
|
||||
|
||||
|
||||
def save_index_report(
    reports: list[dict[str, Any]],
    output_dir: str = "test-reports",
) -> str:
    """Generate an index HTML page linking to all test reports.

    Each entry in *reports* must have "test_name" and "status" keys and
    may provide "duration_ms" and "frame_count".  Returns the path of
    the written index.html.
    """
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    rows = ""
    for report in reports:
        # Same sanitization as save_report so the links match the files
        # it actually wrote.
        safe_name = "".join(
            c if c.isalnum() or c in "-_" else "_" for c in report["test_name"]
        )
        filename = f"{safe_name}.html"
        status_class = "pass" if report["status"] == "PASS" else "fail"
        # BUG FIX: the anchor previously pointed at a literal placeholder
        # string instead of the per-test report file, producing dead links.
        rows += f"""
<tr>
<td><a href="{filename}">{report["test_name"]}</a></td>
<td class="status {status_class}">{report["status"]}</td>
<td>{report.get("duration_ms", 0):.1f}ms</td>
<td>{report.get("frame_count", 0)}</td>
</tr>
"""

    # Renamed from `html` to avoid shadowing the stdlib html module
    # imported at the top of this file.
    index_html = f"""<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Acceptance Test Reports</title>
<style>
body {{
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
background: #1a1a2e;
color: #eee;
margin: 0;
padding: 40px;
}}
h1 {{
color: #fff;
margin-bottom: 30px;
}}
table {{
width: 100%;
border-collapse: collapse;
}}
th, td {{
padding: 12px;
text-align: left;
border-bottom: 1px solid #333;
}}
th {{
background: #16213e;
color: #888;
font-weight: normal;
}}
a {{
color: #4dabf7;
text-decoration: none;
}}
a:hover {{
text-decoration: underline;
}}
.status {{
padding: 4px 8px;
border-radius: 4px;
font-size: 12px;
font-weight: bold;
}}
.status.pass {{
background: #28a745;
color: white;
}}
.status.fail {{
background: #dc3545;
color: white;
}}
</style>
</head>
<body>
<h1>Acceptance Test Reports</h1>
<table>
<thead>
<tr>
<th>Test</th>
<th>Status</th>
<th>Duration</th>
<th>Frames</th>
</tr>
</thead>
<tbody>
{rows}
</tbody>
</table>
</body>
</html>"""

    index_path = output_path / "index.html"
    # The page declares UTF-8; write it as UTF-8 explicitly.
    index_path.write_text(index_html, encoding="utf-8")
    return str(index_path)
|
||||
36
tests/conftest.py
Normal file
36
tests/conftest.py
Normal file
@@ -0,0 +1,36 @@
|
||||
"""
|
||||
Pytest configuration for mainline.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
def pytest_configure(config):
    """Register custom markers so pytest does not warn about them."""
    marker_lines = (
        "integration: marks tests as integration tests (require external services)",
        "ntfy: marks tests that require ntfy service",
    )
    for marker_line in marker_lines:
        config.addinivalue_line("markers", marker_line)
|
||||
|
||||
|
||||
def pytest_collection_modifyitems(config, items):
    """Skip integration/e2e tests unless explicitly requested with -m.

    Adds skip markers to collected items so a plain `pytest` run only
    executes the fast unit tests.
    """
    # Get the current marker expression passed via -m.
    marker_expr = config.getoption("-m", default="")

    # If explicitly running integration or e2e, don't skip them.
    # NOTE(review): this is an exact-string match, so composite
    # expressions such as "e2e or integration" (reversed order) or
    # "integration and ntfy" fall through and the skip markers are still
    # added -- confirm whether that is intended.
    if marker_expr in ("integration", "e2e", "integration or e2e"):
        return

    # Skip integration tests (they require external services).
    skip_integration = pytest.mark.skip(reason="need -m integration to run")
    for item in items:
        if "integration" in item.keywords:
            item.add_marker(skip_integration)

    # Skip e2e tests by default (they require browser/display).
    # Items marked both e2e AND integration already got the integration
    # skip above, so they are excluded here.
    skip_e2e = pytest.mark.skip(reason="need -m e2e to run")
    for item in items:
        if "e2e" in item.keywords and "integration" not in item.keywords:
            item.add_marker(skip_e2e)
|
||||
133
tests/e2e/test_web_client.py
Normal file
133
tests/e2e/test_web_client.py
Normal file
@@ -0,0 +1,133 @@
|
||||
"""
|
||||
End-to-end tests for web client with headless browser.
|
||||
"""
|
||||
|
||||
import os
|
||||
import socketserver
|
||||
import threading
|
||||
from http.server import HTTPServer, SimpleHTTPRequestHandler
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
CLIENT_DIR = Path(__file__).parent.parent.parent / "client"
|
||||
|
||||
|
||||
class ThreadedHTTPServer(socketserver.ThreadingMixIn, HTTPServer):
    """Threaded HTTP server for handling concurrent requests."""

    # Worker threads die with the main process, so a hung request cannot
    # keep the test run alive after shutdown.
    daemon_threads = True
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def http_server():
    """Serve the client directory over HTTP for the duration of the module.

    Yields the base URL of a threaded server bound to an ephemeral port,
    and shuts the server down afterwards.
    """
    from functools import partial

    # BUG FIX: the previous implementation called os.chdir(CLIENT_DIR)
    # and never restored the working directory, leaking the change into
    # every test that runs afterwards.  SimpleHTTPRequestHandler's
    # `directory` parameter serves the same files without touching
    # process-global state.
    handler = partial(SimpleHTTPRequestHandler, directory=str(CLIENT_DIR))
    server = ThreadedHTTPServer(("127.0.0.1", 0), handler)
    # Port 0 asks the OS for a free port; read back the one assigned.
    port = server.server_address[1]

    thread = threading.Thread(target=server.serve_forever, daemon=True)
    thread.start()

    yield f"http://127.0.0.1:{port}"

    server.shutdown()
|
||||
|
||||
|
||||
class TestWebClient:
    """Tests for the web client using Playwright.

    Skipped automatically (via importorskip) when the playwright package
    is not installed.  Each test gets a fresh headless Chromium page.
    """

    @pytest.fixture(autouse=True)
    def setup_browser(self):
        """Start a headless Chromium page before each test, tear it down after."""
        pytest.importorskip("playwright")
        from playwright.sync_api import sync_playwright

        self.playwright = sync_playwright().start()
        self.browser = self.playwright.chromium.launch(headless=True)
        self.context = self.browser.new_context()
        self.page = self.context.new_page()

        yield

        # Tear down in reverse order of creation.
        self.page.close()
        self.context.close()
        self.browser.close()
        self.playwright.stop()

    def test_client_loads(self, http_server):
        """Web client loads without errors."""
        response = self.page.goto(http_server)
        assert response.status == 200, f"Page load failed with status {response.status}"

        self.page.wait_for_load_state("domcontentloaded")

        content = self.page.content()
        assert "<canvas" in content, "Canvas element not found in page"

        canvas = self.page.locator("#terminal")
        assert canvas.count() > 0, "Canvas not found"

    def test_status_shows_connecting(self, http_server):
        """Status element is present after initial load."""
        self.page.goto(http_server)
        self.page.wait_for_load_state("domcontentloaded")

        status = self.page.locator("#status")
        assert status.count() > 0, "Status element not found"

    def test_canvas_has_dimensions(self, http_server):
        """Canvas element exists after load.

        NOTE(review): despite the name, this only checks presence of the
        #terminal canvas, not its width/height -- consider asserting the
        actual dimensions.
        """
        self.page.goto(http_server)
        self.page.wait_for_load_state("domcontentloaded")

        canvas = self.page.locator("#terminal")
        assert canvas.count() > 0, "Canvas not found"

    def test_no_console_errors_on_load(self, http_server):
        """No JavaScript errors on page load (websocket errors are expected without server)."""
        js_errors = []

        def handle_console(msg):
            # WebSocket failures are expected here (no engine backend is
            # running), so only collect unrelated JS errors.
            if msg.type == "error":
                text = msg.text
                if "WebSocket" not in text:
                    js_errors.append(text)

        self.page.on("console", handle_console)
        self.page.goto(http_server)
        self.page.wait_for_load_state("domcontentloaded")

        assert len(js_errors) == 0, f"JavaScript errors: {js_errors}"
|
||||
|
||||
|
||||
class TestWebClientProtocol:
    """Tests for WebSocket protocol handling in the client.

    NOTE(review): this fixture duplicates TestWebClient.setup_browser --
    consider hoisting it into a shared conftest fixture.
    """

    @pytest.fixture(autouse=True)
    def setup_browser(self):
        """Start a headless Chromium page before each test, tear it down after."""
        pytest.importorskip("playwright")
        from playwright.sync_api import sync_playwright

        self.playwright = sync_playwright().start()
        self.browser = self.playwright.chromium.launch(headless=True)
        self.context = self.browser.new_context()
        self.page = self.context.new_page()

        yield

        # Tear down in reverse order of creation.
        self.page.close()
        self.context.close()
        self.browser.close()
        self.playwright.stop()

    def test_websocket_reconnection(self, http_server):
        """Client attempts reconnection on disconnect.

        NOTE(review): currently only asserts the #status element exists;
        it does not observe an actual reconnect attempt.
        """
        self.page.goto(http_server)
        self.page.wait_for_load_state("domcontentloaded")

        status = self.page.locator("#status")
        assert status.count() > 0, "Status element not found"
|
||||
3
tests/fixtures/test.svg
vendored
3
tests/fixtures/test.svg
vendored
@@ -1,3 +0,0 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="100" height="100" viewBox="0 0 100 100">
|
||||
<rect x="10" y="10" width="80" height="80" fill="black"/>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 155 B |
31
tests/kitty_test.py
Normal file
31
tests/kitty_test.py
Normal file
@@ -0,0 +1,31 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Test script for Kitty graphics display."""
|
||||
|
||||
import sys
|
||||
|
||||
|
||||
def test_kitty_simple():
    """Test simple Kitty graphics output with embedded PNG."""
    import base64

    # Minimal valid 1x1 red-pixel PNG, pre-encoded so no image library
    # is needed.
    png_red_1x1 = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00"
        b"\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS\xde"
        b"\x00\x00\x00\x0cIDATx\x9cc\xf8\xcf\xc0\x00\x00\x00"
        b"\x03\x00\x01\x00\x05\xfe\xd4\x00\x00\x00\x00IEND\xaeB`\x82"
    )

    payload = base64.b64encode(png_red_1x1).decode("ascii")

    # Kitty graphics protocol escape (f=100 -> PNG, t=d -> direct
    # base64 payload, shown in a 1x1 cell region).
    escape_seq = f"\x1b_Gf=100,t=d,s=1,v=1,c=1,r=1;{payload}\x1b\\"
    sys.stdout.buffer.write(escape_seq.encode("utf-8"))
    sys.stdout.flush()

    print("\n[If you see a red dot above, Kitty graphics is working!]")
    print("[If you see nothing or garbage, it's not working]")
|
||||
|
||||
|
||||
# Allow running directly against a real terminal:
#   python tests/kitty_test.py
if __name__ == "__main__":
    test_kitty_simple()
|
||||
290
tests/test_acceptance.py
Normal file
290
tests/test_acceptance.py
Normal file
@@ -0,0 +1,290 @@
|
||||
"""
|
||||
Acceptance tests for HUD visibility and positioning.
|
||||
|
||||
These tests verify that HUD appears in the final output frame.
|
||||
Frames are captured and saved as HTML reports for visual verification.
|
||||
"""
|
||||
|
||||
import queue
|
||||
|
||||
from engine.data_sources.sources import ListDataSource, SourceItem
|
||||
from engine.effects.plugins.hud import HudEffect
|
||||
from engine.pipeline import Pipeline, PipelineConfig
|
||||
from engine.pipeline.adapters import (
|
||||
DataSourceStage,
|
||||
DisplayStage,
|
||||
EffectPluginStage,
|
||||
SourceItemsToBufferStage,
|
||||
)
|
||||
from engine.pipeline.core import PipelineContext
|
||||
from engine.pipeline.params import PipelineParams
|
||||
from tests.acceptance_report import save_report
|
||||
|
||||
|
||||
class FrameCaptureDisplay:
    """Display backend that records every frame it is asked to show.

    Frames are exposed both through a thread-safe queue (``frames``) and
    an in-order list (``get_recorded_frames``) for report generation.
    """

    def __init__(self):
        self.frames: queue.Queue[list[str]] = queue.Queue()
        self.width = 80
        self.height = 24
        self._recorded_frames: list[list[str]] = []

    def init(self, width: int, height: int, reuse: bool = False) -> None:
        """Adopt the requested viewport dimensions."""
        self.width, self.height = width, height

    def show(self, buffer: list[str], border: bool = False) -> None:
        """Capture independent copies of the frame buffer."""
        self._recorded_frames.append(list(buffer))
        self.frames.put(list(buffer))

    def clear(self) -> None:
        """No-op: nothing is drawn to a real screen."""

    def cleanup(self) -> None:
        """No-op: no resources to release."""

    def get_dimensions(self) -> tuple[int, int]:
        """Return (width, height) of the virtual display."""
        return (self.width, self.height)

    def get_recorded_frames(self) -> list[list[str]]:
        """Return all captured frames in display order."""
        return self._recorded_frames
|
||||
|
||||
|
||||
def _build_pipeline_with_hud(
    items: list[SourceItem],
) -> tuple[Pipeline, FrameCaptureDisplay, PipelineContext]:
    """Build a pipeline with HUD effect.

    Wires source -> render -> hud -> display onto a FrameCaptureDisplay
    so tests can inspect what the display actually received.  The HUD
    stage is added before the display stage, which is the behavior the
    Issue #47 regression tests in this module depend on.

    Returns the built-and-initialized pipeline, the capturing display,
    and the shared PipelineContext.
    """
    display = FrameCaptureDisplay()

    ctx = PipelineContext()
    params = PipelineParams()
    params.viewport_width = display.width
    params.viewport_height = display.height
    params.frame_number = 0
    # "noise" is listed in the order but disabled, so only HUD runs.
    params.effect_order = ["noise", "hud"]
    params.effect_enabled = {"noise": False}
    ctx.params = params

    pipeline = Pipeline(
        config=PipelineConfig(
            source="list",
            display="terminal",
            effects=["hud"],
            enable_metrics=True,
        ),
        context=ctx,
    )

    source = ListDataSource(items, name="test-source")
    pipeline.add_stage("source", DataSourceStage(source, name="test-source"))
    pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer"))

    hud_effect = HudEffect()
    pipeline.add_stage("hud", EffectPluginStage(hud_effect, name="hud"))

    pipeline.add_stage("display", DisplayStage(display, name="terminal"))

    pipeline.build()
    pipeline.initialize()

    return pipeline, display, ctx
|
||||
|
||||
|
||||
class TestHUDAcceptance:
    """Acceptance tests for HUD visibility.

    Each test saves its captured frames as an HTML report via
    save_report for visual verification.
    """

    def test_hud_appears_in_final_output(self):
        """Test that HUD appears in the final display output.

        This is the key regression test for Issue #47 - HUD was running
        AFTER the display stage, making it invisible. Now it should appear
        in the frame captured by the display.
        """
        items = [SourceItem(content="Test content line", source="test", timestamp="0")]
        pipeline, display, ctx = _build_pipeline_with_hud(items)

        result = pipeline.execute(items)
        assert result.success, f"Pipeline execution failed: {result.error}"

        # The display must have received at least one frame containing
        # the HUD text markers.
        frame = display.frames.get(timeout=1)
        frame_text = "\n".join(frame)

        assert "MAINLINE" in frame_text, "HUD header not found in final output"
        assert "EFFECT:" in frame_text, "EFFECT line not found in final output"
        assert "PIPELINE:" in frame_text, "PIPELINE line not found in final output"

        # Persist the captured frames for human inspection.
        save_report(
            test_name="test_hud_appears_in_final_output",
            frames=display.get_recorded_frames(),
            status="PASS",
            metadata={
                "description": "Verifies HUD appears in final display output (Issue #47 fix)",
                "frame_lines": len(frame),
                "has_mainline": "MAINLINE" in frame_text,
                "has_effect": "EFFECT:" in frame_text,
                "has_pipeline": "PIPELINE:" in frame_text,
            },
        )

    def test_hud_cursor_positioning(self):
        """Test that HUD uses correct cursor positioning.

        NOTE(review): `has_cursor_pos` is recorded in the report metadata
        but never asserted -- this test cannot fail on positioning.
        """
        items = [SourceItem(content="Sample content", source="test", timestamp="0")]
        pipeline, display, ctx = _build_pipeline_with_hud(items)

        result = pipeline.execute(items)
        assert result.success

        frame = display.frames.get(timeout=1)
        # Heuristic check for an ANSI cursor-positioning sequence
        # (\x1b[row;colH) anywhere in the frame.
        has_cursor_pos = any("\x1b[" in line and "H" in line for line in frame)

        save_report(
            test_name="test_hud_cursor_positioning",
            frames=display.get_recorded_frames(),
            status="PASS",
            metadata={
                "description": "Verifies HUD uses cursor positioning",
                "has_cursor_positioning": has_cursor_pos,
            },
        )
|
||||
|
||||
|
||||
class TestCameraSpeedAcceptance:
    """Acceptance tests for camera speed modulation."""

    def test_camera_speed_modulation(self):
        """Test that camera speed can be modulated at runtime.

        This verifies the camera speed modulation feature added in Phase 1:
        changing params.camera_speed between executions is picked up by
        the camera (the camera is constructed with speed=0.5 but the
        params value wins once the pipeline runs).
        """
        from engine.camera import Camera
        from engine.pipeline.adapters import CameraClockStage, CameraStage

        display = FrameCaptureDisplay()
        items = [
            SourceItem(content=f"Line {i}", source="test", timestamp=str(i))
            for i in range(50)
        ]

        ctx = PipelineContext()
        params = PipelineParams()
        params.viewport_width = display.width
        params.viewport_height = display.height
        params.frame_number = 0
        params.camera_speed = 1.0
        ctx.params = params

        pipeline = Pipeline(
            config=PipelineConfig(
                source="list",
                display="terminal",
                camera="scroll",
                enable_metrics=False,
            ),
            context=ctx,
        )

        source = ListDataSource(items, name="test")
        pipeline.add_stage("source", DataSourceStage(source, name="test"))
        pipeline.add_stage("render", SourceItemsToBufferStage(name="render"))

        camera = Camera.scroll(speed=0.5)
        pipeline.add_stage(
            "camera_update", CameraClockStage(camera, name="camera-clock")
        )
        pipeline.add_stage("camera", CameraStage(camera, name="camera"))
        pipeline.add_stage("display", DisplayStage(display, name="terminal"))

        pipeline.build()
        pipeline.initialize()

        initial_camera_speed = camera.speed

        # A few runs with the initial params.camera_speed of 1.0.
        for _ in range(3):
            pipeline.execute(items)

        speed_after_first_run = camera.speed

        # Speed up: the camera should follow the new params value.
        params.camera_speed = 5.0
        ctx.params = params

        for _ in range(3):
            pipeline.execute(items)

        speed_after_increase = camera.speed

        assert speed_after_increase == 5.0, (
            f"Camera speed should be modulated to 5.0, got {speed_after_increase}"
        )

        # Stop: speed 0.0 should also propagate.
        params.camera_speed = 0.0
        ctx.params = params

        for _ in range(3):
            pipeline.execute(items)

        speed_after_stop = camera.speed
        assert speed_after_stop == 0.0, (
            f"Camera speed should be 0.0, got {speed_after_stop}"
        )

        # Keep only the first few frames to keep the HTML report small.
        save_report(
            test_name="test_camera_speed_modulation",
            frames=display.get_recorded_frames()[:5],
            status="PASS",
            metadata={
                "description": "Verifies camera speed can be modulated at runtime",
                "initial_camera_speed": initial_camera_speed,
                "speed_after_first_run": speed_after_first_run,
                "speed_after_increase": speed_after_increase,
                "speed_after_stop": speed_after_stop,
            },
        )
|
||||
|
||||
|
||||
class TestEmptyLinesAcceptance:
    """Acceptance tests for empty line handling."""

    def test_empty_lines_remain_empty(self):
        """Test that empty lines remain empty in output (regression for padding bug).

        The source content contains blank lines; at least one line of the
        displayed frame must be a truly empty string (not padded with
        spaces).
        """
        items = [
            SourceItem(content="Line1\n\nLine3\n\nLine5", source="test", timestamp="0")
        ]

        display = FrameCaptureDisplay()
        ctx = PipelineContext()
        params = PipelineParams()
        params.viewport_width = display.width
        params.viewport_height = display.height
        ctx.params = params

        pipeline = Pipeline(
            config=PipelineConfig(enable_metrics=False),
            context=ctx,
        )

        source = ListDataSource(items, name="test")
        pipeline.add_stage("source", DataSourceStage(source, name="test"))
        pipeline.add_stage("render", SourceItemsToBufferStage(name="render"))
        pipeline.add_stage("display", DisplayStage(display, name="terminal"))

        pipeline.build()
        pipeline.initialize()

        result = pipeline.execute(items)
        assert result.success

        frame = display.frames.get(timeout=1)
        # An empty string is falsy; a space-padded "empty" line is not.
        has_truly_empty = any(not line for line in frame)

        # Save the report before asserting so a failure still leaves
        # frames on disk for inspection.
        save_report(
            test_name="test_empty_lines_remain_empty",
            frames=display.get_recorded_frames(),
            status="PASS",
            metadata={
                "description": "Verifies empty lines remain empty (not padded)",
                "has_truly_empty_lines": has_truly_empty,
            },
        )

        assert has_truly_empty, f"Expected at least one empty line, got: {frame[1]!r}"
|
||||
345
tests/test_adapters.py
Normal file
345
tests/test_adapters.py
Normal file
@@ -0,0 +1,345 @@
|
||||
"""
|
||||
Tests for engine/pipeline/adapters.py - Stage adapters for the pipeline.
|
||||
|
||||
Tests Stage adapters that bridge existing components to the Stage interface:
|
||||
- DataSourceStage: Wraps DataSource objects
|
||||
- DisplayStage: Wraps Display backends
|
||||
- PassthroughStage: Simple pass-through stage for pre-rendered data
|
||||
- SourceItemsToBufferStage: Converts SourceItem objects to text buffers
|
||||
- EffectPluginStage: Wraps effect plugins
|
||||
"""
|
||||
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from engine.data_sources.sources import SourceItem
|
||||
from engine.pipeline.adapters import (
|
||||
DataSourceStage,
|
||||
DisplayStage,
|
||||
EffectPluginStage,
|
||||
PassthroughStage,
|
||||
SourceItemsToBufferStage,
|
||||
)
|
||||
from engine.pipeline.core import PipelineContext
|
||||
|
||||
|
||||
class TestDataSourceStage:
    """Tests for the DataSourceStage adapter."""

    @staticmethod
    def _make_stage():
        """Return (mock source, DataSourceStage wrapping it) named 'headlines'."""
        source = MagicMock()
        return source, DataSourceStage(source, name="headlines")

    def test_datasource_stage_name(self):
        """The adapter keeps the name it was constructed with."""
        _, stage = self._make_stage()
        assert stage.name == "headlines"

    def test_datasource_stage_category(self):
        """The adapter reports the 'source' category."""
        _, stage = self._make_stage()
        assert stage.category == "source"

    def test_datasource_stage_capabilities(self):
        """The adapter advertises source.<name> as a capability."""
        _, stage = self._make_stage()
        assert "source.headlines" in stage.capabilities

    def test_datasource_stage_dependencies(self):
        """Source stages sit at the head of the pipeline: no dependencies."""
        _, stage = self._make_stage()
        assert stage.dependencies == set()

    def test_datasource_stage_process_calls_get_items(self):
        """process() delegates to the wrapped source's get_items()."""
        expected = [SourceItem(content="Item 1", source="headlines", timestamp="12:00")]
        source, stage = self._make_stage()
        source.get_items.return_value = expected

        result = stage.process(None, PipelineContext())

        assert result == expected
        source.get_items.assert_called_once()

    def test_datasource_stage_process_fallback_returns_data(self):
        """Without a get_items method, process() passes the data through."""
        source = MagicMock(spec=[])  # deliberately has no get_items attribute
        stage = DataSourceStage(source, name="headlines")
        payload = [{"content": "test"}]

        assert stage.process(payload, PipelineContext()) == payload
||||
|
||||
class TestDisplayStage:
    """Tests for the DisplayStage adapter."""

    @staticmethod
    def _make_stage():
        """Return (mock display, DisplayStage wrapping it) named 'terminal'."""
        display = MagicMock()
        return display, DisplayStage(display, name="terminal")

    def test_display_stage_name(self):
        """The adapter keeps the name it was constructed with."""
        _, stage = self._make_stage()
        assert stage.name == "terminal"

    def test_display_stage_category(self):
        """The adapter reports the 'display' category."""
        _, stage = self._make_stage()
        assert stage.category == "display"

    def test_display_stage_capabilities(self):
        """The adapter advertises the display.output capability."""
        _, stage = self._make_stage()
        assert "display.output" in stage.capabilities

    def test_display_stage_dependencies(self):
        """The adapter declares a dependency on render.output."""
        _, stage = self._make_stage()
        assert "render.output" in stage.dependencies

    def test_display_stage_init(self):
        """init() forwards the viewport dimensions to display.init()."""
        display, stage = self._make_stage()
        display.init.return_value = True

        ctx = PipelineContext()
        ctx.params = MagicMock()
        ctx.params.viewport_width = 100
        ctx.params.viewport_height = 30

        assert stage.init(ctx) is True
        display.init.assert_called_once_with(100, 30, reuse=False)

    def test_display_stage_init_uses_defaults(self):
        """init() falls back to 80x24 when no params are present."""
        display, stage = self._make_stage()
        display.init.return_value = True

        ctx = PipelineContext()
        ctx.params = None

        assert stage.init(ctx) is True
        display.init.assert_called_once_with(80, 24, reuse=False)

    def test_display_stage_process_calls_show(self):
        """process() hands the buffer to display.show() and returns it."""
        display, stage = self._make_stage()
        frame = [[["A", "red"] for _ in range(80)] for _ in range(24)]

        result = stage.process(frame, PipelineContext())

        assert result == frame
        display.show.assert_called_once_with(frame)

    def test_display_stage_process_skips_none_data(self):
        """process() returns None without calling show() when data is None."""
        display, stage = self._make_stage()

        assert stage.process(None, PipelineContext()) is None
        display.show.assert_not_called()

    def test_display_stage_cleanup(self):
        """cleanup() delegates to display.cleanup()."""
        display, stage = self._make_stage()

        stage.cleanup()

        display.cleanup.assert_called_once()
||||
|
||||
class TestPassthroughStage:
    """Tests for the PassthroughStage adapter."""

    def test_passthrough_stage_name(self):
        """The stage keeps the name it was constructed with."""
        assert PassthroughStage(name="test").name == "test"

    def test_passthrough_stage_category(self):
        """The stage reports the 'render' category."""
        assert PassthroughStage().category == "render"

    def test_passthrough_stage_is_optional(self):
        """The stage is flagged optional."""
        assert PassthroughStage().optional is True

    def test_passthrough_stage_capabilities(self):
        """The stage advertises the render.output capability."""
        assert "render.output" in PassthroughStage().capabilities

    def test_passthrough_stage_dependencies(self):
        """The stage declares a dependency on a source."""
        assert "source" in PassthroughStage().dependencies

    def test_passthrough_stage_process_returns_data_unchanged(self):
        """process() returns the very same object it was given."""
        payload = [
            SourceItem(content="Line 1", source="test", timestamp="12:00"),
        ]

        result = PassthroughStage().process(payload, PipelineContext())

        assert result == payload
        assert result is payload
||||
|
||||
class TestSourceItemsToBufferStage:
    """Tests for the SourceItemsToBufferStage adapter."""

    @staticmethod
    def _render(items):
        """Run a fresh stage over *items* and return the produced buffer."""
        return SourceItemsToBufferStage().process(items, PipelineContext())

    def test_source_items_to_buffer_stage_name(self):
        """The stage keeps the name it was constructed with."""
        assert SourceItemsToBufferStage(name="custom-name").name == "custom-name"

    def test_source_items_to_buffer_stage_category(self):
        """The stage reports the 'render' category."""
        assert SourceItemsToBufferStage().category == "render"

    def test_source_items_to_buffer_stage_is_optional(self):
        """The stage is flagged optional."""
        assert SourceItemsToBufferStage().optional is True

    def test_source_items_to_buffer_stage_capabilities(self):
        """The stage advertises the render.output capability."""
        assert "render.output" in SourceItemsToBufferStage().capabilities

    def test_source_items_to_buffer_stage_dependencies(self):
        """The stage declares a dependency on a source."""
        assert "source" in SourceItemsToBufferStage().dependencies

    def test_source_items_to_buffer_stage_process_single_line_item(self):
        """A single-line SourceItem becomes at least one text line."""
        buffer = self._render(
            [SourceItem(content="Single line content", source="test", timestamp="12:00")]
        )

        assert isinstance(buffer, list)
        assert len(buffer) >= 1
        assert all(isinstance(line, str) for line in buffer)

    def test_source_items_to_buffer_stage_process_multiline_item(self):
        """Multiline content is split into separate text lines."""
        buffer = self._render(
            [SourceItem(content="Line 1\nLine 2\nLine 3", source="test", timestamp="12:00")]
        )

        # One buffer line per content line, at minimum.
        assert len(buffer) >= 3
        assert all(isinstance(line, str) for line in buffer)

    def test_source_items_to_buffer_stage_process_multiple_items(self):
        """Several SourceItems each contribute at least one line."""
        buffer = self._render(
            [
                SourceItem(content="Item 1", source="test", timestamp="12:00"),
                SourceItem(content="Item 2", source="test", timestamp="12:01"),
                SourceItem(content="Item 3", source="test", timestamp="12:02"),
            ]
        )

        assert len(buffer) >= 3
        assert all(isinstance(line, str) for line in buffer)
||||
|
||||
class TestEffectPluginStage:
    """Tests for the EffectPluginStage adapter."""

    @staticmethod
    def _make_stage(name="blur"):
        """Return (mock effect, EffectPluginStage wrapping it)."""
        effect = MagicMock()
        return effect, EffectPluginStage(effect, name=name)

    def test_effect_plugin_stage_name(self):
        """The adapter keeps the name it was constructed with."""
        _, stage = self._make_stage()
        assert stage.name == "blur"

    def test_effect_plugin_stage_category(self):
        """The adapter reports the 'effect' category."""
        _, stage = self._make_stage()
        assert stage.category == "effect"

    def test_effect_plugin_stage_is_not_optional(self):
        """Configured effects are required, not optional."""
        _, stage = self._make_stage()
        assert stage.optional is False

    def test_effect_plugin_stage_capabilities(self):
        """The adapter advertises effect.<name> as a capability."""
        _, stage = self._make_stage()
        assert "effect.blur" in stage.capabilities

    def test_effect_plugin_stage_dependencies(self):
        """Static dependencies are empty; they are resolved dynamically."""
        _, stage = self._make_stage()
        assert stage.dependencies == set()

    def test_effect_plugin_stage_stage_type(self):
        """Non-HUD effects report the plain 'effect' stage type."""
        _, stage = self._make_stage()
        assert stage.stage_type == "effect"

    def test_effect_plugin_stage_hud_special_handling(self):
        """The HUD effect is treated as a late-rendered overlay."""
        _, stage = self._make_stage(name="hud")
        assert stage.stage_type == "overlay"
        assert stage.is_overlay is True
        assert stage.render_order == 100

    def test_effect_plugin_stage_process(self):
        """process() delegates to the wrapped effect's process()."""
        effect, stage = self._make_stage()
        effect.process.return_value = "processed_data"

        result = stage.process("test_buffer", PipelineContext())

        assert result == "processed_data"
        effect.process.assert_called_once()
||||
215
tests/test_app.py
Normal file
215
tests/test_app.py
Normal file
@@ -0,0 +1,215 @@
|
||||
"""
|
||||
Integration tests for engine/app.py - pipeline orchestration.
|
||||
|
||||
Tests the main entry point and pipeline mode initialization.
|
||||
"""
|
||||
|
||||
import sys
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from engine.app import main, run_pipeline_mode
|
||||
from engine.pipeline import get_preset
|
||||
|
||||
|
||||
class TestMain:
    """Test main() entry point.

    Each test replaces sys.argv via patch.object so the original value is
    restored afterwards; the previous bare ``sys.argv = [...]`` assignments
    leaked fake argv into every subsequently collected test.
    """

    def test_main_calls_run_pipeline_mode_with_default_preset(self):
        """main() runs default preset (demo) when no args provided."""
        with (
            patch("engine.app.main.run_pipeline_mode") as mock_run,
            patch.object(sys, "argv", ["mainline.py"]),
        ):
            main()
            mock_run.assert_called_once_with("demo")

    def test_main_calls_run_pipeline_mode_with_config_preset(self):
        """main() uses PRESET from config if set."""
        with (
            patch("engine.config.PIPELINE_DIAGRAM", False),
            patch("engine.config.PRESET", "demo"),
            patch("engine.config.PIPELINE_MODE", False),
            patch("engine.app.main.run_pipeline_mode") as mock_run,
            patch.object(sys, "argv", ["mainline.py"]),
        ):
            main()
            mock_run.assert_called_once_with("demo")

    def test_main_exits_on_unknown_preset(self):
        """main() exits with code 1 for an unknown preset."""
        with (
            patch("engine.config.PIPELINE_DIAGRAM", False),
            patch("engine.config.PRESET", "nonexistent"),
            patch("engine.config.PIPELINE_MODE", False),
            patch("engine.pipeline.list_presets", return_value=["demo", "poetry"]),
            patch.object(sys, "argv", ["mainline.py"]),
            pytest.raises(SystemExit) as exc_info,
        ):
            main()
        assert exc_info.value.code == 1
||||
|
||||
class TestRunPipelineMode:
    """Test run_pipeline_mode() initialization."""

    @staticmethod
    def _make_mock_display():
        """Build a mock display backend whose quit flag is already set.

        is_quit_requested returning True makes the run loop exit on its
        first iteration, keeping every test fast.  Extracted because the
        same seven-line setup was duplicated in five tests.
        """
        display = Mock()
        display.init = Mock()
        display.get_dimensions = Mock(return_value=(80, 24))
        display.is_quit_requested = Mock(return_value=True)
        display.clear_quit_request = Mock()
        display.show = Mock()
        display.cleanup = Mock()
        return display

    def test_run_pipeline_mode_loads_valid_preset(self):
        """run_pipeline_mode() loads a valid preset."""
        preset = get_preset("demo")
        assert preset is not None
        assert preset.name == "demo"
        assert preset.source == "headlines"

    def test_run_pipeline_mode_exits_on_invalid_preset(self):
        """run_pipeline_mode() exits if preset not found."""
        with pytest.raises(SystemExit) as exc_info:
            run_pipeline_mode("invalid-preset-xyz")
        assert exc_info.value.code == 1

    def test_run_pipeline_mode_exits_when_no_content_available(self):
        """run_pipeline_mode() exits if no content can be fetched."""
        with (
            patch("engine.app.pipeline_runner.load_cache", return_value=None),
            patch("engine.app.pipeline_runner.fetch_all_fast", return_value=[]),
            patch(
                "engine.app.pipeline_runner.fetch_all", return_value=([], None, None)
            ),  # Mock background thread
            patch("engine.app.pipeline_runner.save_cache"),  # Prevent disk I/O
            patch("engine.effects.plugins.discover_plugins"),
            pytest.raises(SystemExit) as exc_info,
        ):
            run_pipeline_mode("demo")
        assert exc_info.value.code == 1

    def test_run_pipeline_mode_uses_cache_over_fetch(self):
        """run_pipeline_mode() uses cached content if available."""
        cached = ["cached_item"]
        with (
            patch(
                "engine.app.pipeline_runner.load_cache", return_value=cached
            ) as mock_load,
            patch("engine.app.pipeline_runner.fetch_all") as mock_fetch,
            patch("engine.app.pipeline_runner.fetch_all_fast"),
            patch("engine.app.pipeline_runner.DisplayRegistry.create") as mock_create,
        ):
            mock_create.return_value = self._make_mock_display()

            try:
                run_pipeline_mode("demo")
            except (KeyboardInterrupt, SystemExit):
                pass

            # fetch_all must NOT have run: the cache satisfied the request.
            mock_fetch.assert_not_called()
            mock_load.assert_called_once()

    def test_run_pipeline_mode_creates_display(self):
        """run_pipeline_mode() creates a display backend."""
        with (
            patch("engine.app.pipeline_runner.load_cache", return_value=["item"]),
            patch("engine.app.pipeline_runner.fetch_all_fast", return_value=[]),
            patch("engine.app.DisplayRegistry.create") as mock_create,
        ):
            mock_create.return_value = self._make_mock_display()

            try:
                run_pipeline_mode("demo-base")
            except (KeyboardInterrupt, SystemExit):
                pass

            # The preset requests the 'terminal' display backend.
            mock_create.assert_called_once_with("terminal")

    def test_run_pipeline_mode_respects_display_cli_flag(self):
        """run_pipeline_mode() uses --display CLI flag if provided."""
        with (
            # patch.object restores sys.argv afterwards; the original bare
            # assignment leaked '--display websocket' into later tests.
            patch.object(sys, "argv", ["mainline.py", "--display", "websocket"]),
            patch("engine.app.pipeline_runner.load_cache", return_value=["item"]),
            patch("engine.app.pipeline_runner.fetch_all_fast", return_value=[]),
            patch("engine.app.DisplayRegistry.create") as mock_create,
        ):
            mock_create.return_value = self._make_mock_display()

            try:
                run_pipeline_mode("demo")
            except (KeyboardInterrupt, SystemExit):
                pass

            # The CLI flag overrides the preset's display choice.
            mock_create.assert_called_once_with("websocket")

    def test_run_pipeline_mode_fetches_poetry_for_poetry_source(self):
        """run_pipeline_mode() fetches poetry for poetry preset."""
        with (
            patch("engine.app.pipeline_runner.load_cache", return_value=None),
            patch(
                "engine.app.pipeline_runner.fetch_poetry",
                return_value=(["poem"], None, None),
            ) as mock_fetch_poetry,
            patch("engine.app.pipeline_runner.fetch_all") as mock_fetch_all,
            patch("engine.app.pipeline_runner.fetch_all_fast", return_value=[]),
            patch("engine.app.pipeline_runner.DisplayRegistry.create") as mock_create,
        ):
            mock_create.return_value = self._make_mock_display()

            try:
                run_pipeline_mode("poetry")
            except (KeyboardInterrupt, SystemExit):
                pass

            # The poetry preset must use the poetry fetcher exclusively.
            mock_fetch_poetry.assert_called_once()
            mock_fetch_all.assert_not_called()

    def test_run_pipeline_mode_discovers_effect_plugins(self):
        """run_pipeline_mode() discovers available effect plugins."""
        with (
            patch("engine.app.pipeline_runner.load_cache", return_value=["item"]),
            patch("engine.app.pipeline_runner.fetch_all_fast", return_value=[]),
            patch("engine.effects.plugins.discover_plugins") as mock_discover,
            patch("engine.app.pipeline_runner.DisplayRegistry.create") as mock_create,
        ):
            mock_create.return_value = self._make_mock_display()

            try:
                run_pipeline_mode("demo")
            except (KeyboardInterrupt, SystemExit):
                pass

            mock_discover.assert_called_once()
||||
380
tests/test_benchmark.py
Normal file
380
tests/test_benchmark.py
Normal file
@@ -0,0 +1,380 @@
|
||||
"""
|
||||
Tests for engine.benchmark module - performance regression tests.
|
||||
"""
|
||||
|
||||
import os
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from engine.display import MultiDisplay, NullDisplay, TerminalDisplay
|
||||
from engine.effects import EffectContext, get_registry
|
||||
from engine.effects.plugins import discover_plugins
|
||||
|
||||
|
||||
def _is_coverage_active():
|
||||
"""Check if coverage is active."""
|
||||
# Check if coverage module is loaded
|
||||
import sys
|
||||
|
||||
return "coverage" in sys.modules or "cov" in sys.modules
|
||||
|
||||
|
||||
def _get_min_fps_threshold(base_threshold: int) -> int:
    """Return *base_threshold* scaled down when coverage instrumentation runs.

    Coverage tracing typically slows execution by 2-5x, so under coverage
    the threshold drops to 25% of the base (never below 500 FPS) to avoid
    spurious benchmark failures.
    """
    if not _is_coverage_active():
        return base_threshold
    # Conservative floor: a quarter of the base, but at least 500 FPS.
    return max(500, int(base_threshold * 0.25))
||||
|
||||
def _get_iterations() -> int:
|
||||
"""Get number of iterations for benchmarks."""
|
||||
# Check for environment variable override
|
||||
env_iterations = os.environ.get("BENCHMARK_ITERATIONS")
|
||||
if env_iterations:
|
||||
try:
|
||||
return int(env_iterations)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
# Default based on coverage mode
|
||||
if _is_coverage_active():
|
||||
return 100 # Fewer iterations when coverage is active
|
||||
return 500 # Default iterations
|
||||
|
||||
|
||||
class TestBenchmarkNullDisplay:
    """Performance tests for NullDisplay - regression tests."""

    @pytest.mark.benchmark
    def test_null_display_minimum_fps(self):
        """NullDisplay should meet minimum performance threshold."""
        import time

        display = NullDisplay()
        display.init(80, 24)
        buffer = ["x" * 80 for _ in range(24)]

        iterations = _get_iterations()
        start = time.perf_counter()
        for _ in range(iterations):
            display.show(buffer)
        elapsed = time.perf_counter() - start

        fps = iterations / elapsed
        min_fps = _get_min_fps_threshold(20000)

        assert fps >= min_fps, f"NullDisplay FPS {fps:.0f} below minimum {min_fps}"

    @pytest.mark.benchmark
    def test_effects_minimum_throughput(self):
        """Effects should meet minimum processing throughput."""
        import time

        # EffectContext, get_registry and discover_plugins are already
        # imported at module scope; the local re-imports here were redundant.
        discover_plugins()
        registry = get_registry()
        effect = registry.get("noise")
        assert effect is not None, "Noise effect should be registered"

        buffer = ["x" * 80 for _ in range(24)]
        ctx = EffectContext(
            terminal_width=80,
            terminal_height=24,
            scroll_cam=0,
            ticker_height=20,
            mic_excess=0.0,
            grad_offset=0.0,
            frame_number=0,
            has_message=False,
        )

        iterations = _get_iterations()
        start = time.perf_counter()
        for _ in range(iterations):
            effect.process(buffer, ctx)
        elapsed = time.perf_counter() - start

        fps = iterations / elapsed
        min_fps = _get_min_fps_threshold(10000)

        assert fps >= min_fps, (
            f"Effect processing FPS {fps:.0f} below minimum {min_fps}"
        )
||||
|
||||
class TestBenchmarkWebSocketDisplay:
    """Performance regression tests for WebSocketDisplay."""

    @pytest.mark.benchmark
    def test_websocket_display_minimum_fps(self):
        """WebSocketDisplay.show() must sustain the minimum frame rate."""
        import time

        # Force the no-websockets code path so the benchmark needs no server.
        with patch("engine.display.backends.websocket.websockets", None):
            from engine.display import WebSocketDisplay

            display = WebSocketDisplay()
            display.init(80, 24)
            frame = ["x" * 80 for _ in range(24)]

            loops = _get_iterations()
            t0 = time.perf_counter()
            for _ in range(loops):
                display.show(frame)
            duration = time.perf_counter() - t0

            fps = loops / duration
            floor = _get_min_fps_threshold(10000)

            assert fps >= floor, (
                f"WebSocketDisplay FPS {fps:.0f} below minimum {floor}"
            )
||||
|
||||
class TestBenchmarkTerminalDisplay:
    """Performance regression tests for TerminalDisplay."""

    @pytest.mark.benchmark
    def test_terminal_display_minimum_fps(self):
        """TerminalDisplay.show() must sustain the minimum frame rate."""
        import time

        display = TerminalDisplay()
        display.init(80, 24)
        frame = ["x" * 80 for _ in range(24)]

        loops = _get_iterations()
        t0 = time.perf_counter()
        for _ in range(loops):
            display.show(frame)
        duration = time.perf_counter() - t0

        fps = loops / duration
        floor = _get_min_fps_threshold(10000)

        assert fps >= floor, f"TerminalDisplay FPS {fps:.0f} below minimum {floor}"
||||
|
||||
class TestBenchmarkMultiDisplay:
    """Performance regression tests for MultiDisplay fan-out."""

    @pytest.mark.benchmark
    def test_multi_display_minimum_fps(self):
        """MultiDisplay.show() over two backends must sustain the minimum FPS."""
        import time

        # Force the no-websockets code path so no server is needed.
        with patch("engine.display.backends.websocket.websockets", None):
            from engine.display import WebSocketDisplay

            backends = [NullDisplay(), WebSocketDisplay()]
            for backend in backends:
                backend.init(80, 24)

            display = MultiDisplay(backends)
            display.init(80, 24)
            frame = ["x" * 80 for _ in range(24)]

            loops = _get_iterations()
            t0 = time.perf_counter()
            for _ in range(loops):
                display.show(frame)
            duration = time.perf_counter() - t0

            fps = loops / duration
            floor = _get_min_fps_threshold(5000)

            assert fps >= floor, f"MultiDisplay FPS {fps:.0f} below minimum {floor}"
|
||||
class TestBenchmarkEffects:
    """Performance tests for various effects.

    The four effect benchmarks were byte-identical except for the effect
    name and the FPS threshold, so the shared machinery now lives in
    _make_ctx() and _assert_effect_fps().
    """

    @staticmethod
    def _make_ctx() -> EffectContext:
        """Standard 80x24 EffectContext used by every effect benchmark."""
        return EffectContext(
            terminal_width=80,
            terminal_height=24,
            scroll_cam=0,
            ticker_height=20,
            mic_excess=0.0,
            grad_offset=0.0,
            frame_number=0,
            has_message=False,
        )

    def _assert_effect_fps(self, effect_name: str, base_threshold: int) -> None:
        """Benchmark one registered effect and assert it meets the threshold.

        Args:
            effect_name: registry name of the effect to benchmark.
            base_threshold: minimum FPS before coverage adjustment.
        """
        import time

        discover_plugins()
        registry = get_registry()
        effect = registry.get(effect_name)
        assert effect is not None, (
            f"{effect_name.capitalize()} effect should be registered"
        )

        buffer = ["x" * 80 for _ in range(24)]
        ctx = self._make_ctx()

        iterations = _get_iterations()
        start = time.perf_counter()
        for _ in range(iterations):
            effect.process(buffer, ctx)
        elapsed = time.perf_counter() - start

        fps = iterations / elapsed
        min_fps = _get_min_fps_threshold(base_threshold)

        assert fps >= min_fps, (
            f"{effect_name.capitalize()} effect FPS {fps:.0f} below minimum {min_fps}"
        )

    @pytest.mark.benchmark
    def test_fade_effect_minimum_fps(self):
        """Fade effect should meet minimum performance threshold."""
        self._assert_effect_fps("fade", 7000)

    @pytest.mark.benchmark
    def test_glitch_effect_minimum_fps(self):
        """Glitch effect should meet minimum performance threshold."""
        self._assert_effect_fps("glitch", 5000)

    @pytest.mark.benchmark
    def test_border_effect_minimum_fps(self):
        """Border effect should meet minimum performance threshold."""
        self._assert_effect_fps("border", 5000)

    @pytest.mark.benchmark
    def test_tint_effect_minimum_fps(self):
        """Tint effect should meet minimum performance threshold."""
        self._assert_effect_fps("tint", 8000)
||||
|
||||
class TestBenchmarkPipeline:
    """Performance regression tests for full pipeline execution."""

    @pytest.mark.benchmark
    def test_pipeline_execution_minimum_fps(self):
        """A minimal source -> render -> display pipeline must meet the FPS floor."""
        import time

        from engine.data_sources.sources import EmptyDataSource
        from engine.pipeline import Pipeline, StageRegistry, discover_stages
        from engine.pipeline.adapters import DataSourceStage, SourceItemsToBufferStage

        discover_stages()

        pipeline = Pipeline()

        # EmptyDataSource is built directly (it is not in the stage registry)
        # so the benchmark never touches the network.
        empty_source = EmptyDataSource(width=80, height=24)
        pipeline.add_stage("source", DataSourceStage(empty_source, name="empty"))

        # Converts the (empty) source items into a text buffer.
        pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer"))

        sink = StageRegistry.create("display", "null")
        assert sink is not None, "null display should be registered"
        pipeline.add_stage("display", sink)

        pipeline.build()

        loops = _get_iterations()
        t0 = time.perf_counter()
        for _ in range(loops):
            pipeline.execute()
        duration = time.perf_counter() - t0

        fps = loops / duration
        floor = _get_min_fps_threshold(1000)

        assert fps >= floor, (
            f"Pipeline execution FPS {fps:.0f} below minimum {floor}"
        )
||||
111
tests/test_border_effect.py
Normal file
111
tests/test_border_effect.py
Normal file
@@ -0,0 +1,111 @@
|
||||
"""
|
||||
Tests for BorderEffect.
|
||||
"""
|
||||
|
||||
from engine.effects.plugins.border import BorderEffect
|
||||
from engine.effects.types import EffectContext
|
||||
|
||||
|
||||
def make_ctx(terminal_width: int = 80, terminal_height: int = 24) -> EffectContext:
    """Build a minimal EffectContext sized to the given terminal.

    The ticker height is pinned to the full terminal height and the scroll
    position starts at zero.
    """
    return EffectContext(
        scroll_cam=0,
        ticker_height=terminal_height,
        terminal_width=terminal_width,
        terminal_height=terminal_height,
    )
|
||||
|
||||
|
||||
class TestBorderEffect:
    """Behavioral tests for the border effect plugin."""

    def test_basic_init(self):
        """A freshly constructed BorderEffect exposes its defaults."""
        border = BorderEffect()
        assert border.name == "border"
        assert border.config.enabled is True

    def test_adds_border(self):
        """Processing wraps the content rows in box-drawing borders."""
        content = [
            "Hello World",
            "Test Content",
            "Third Line",
        ]
        context = make_ctx(terminal_width=20, terminal_height=10)

        framed = BorderEffect().process(content, context)

        # Top border, content, bottom border -> at least three rows.
        assert len(framed) >= 3
        # Top row begins with a top-left corner glyph.
        assert framed[0][0] in "┌┎┍"
        # Bottom row ends with a bottom-right corner glyph.
        assert framed[-1][-1] in "┘┖┚"

    def test_border_with_small_buffer(self):
        """A buffer too small for a proper border still yields output."""
        context = make_ctx(terminal_width=10, terminal_height=5)

        framed = BorderEffect().process(["ab"], context)

        # The effect may degrade gracefully here; it must at least emit rows.
        assert len(framed) >= 1

    def test_metrics_in_border(self):
        """FPS and frame-time metrics from context state appear in the border."""
        context = make_ctx(terminal_width=20, terminal_height=10)
        context.set_state(
            "metrics",
            {
                "avg_ms": 16.5,
                "frame_count": 100,
                "fps": 60.0,
            },
        )

        framed = BorderEffect().process(["x" * 10] * 5, context)

        # The FPS figure is rendered into the top border...
        assert "FPS" in framed[0] or "60" in framed[0]
        # ...and the frame time into the bottom border.
        assert "ms" in framed[-1] or "16" in framed[-1]

    def test_no_metrics(self):
        """Without metrics in context state the border still renders."""
        context = make_ctx(terminal_width=20, terminal_height=10)

        framed = BorderEffect().process(["content"] * 5, context)

        assert len(framed) >= 3
        assert framed[0][0] in "┌┎┍"

    def test_crops_before_bordering(self):
        """Oversized input is cropped to the terminal before bordering."""
        context = make_ctx(terminal_width=20, terminal_height=10)
        oversized = ["x" * 100] * 50

        framed = BorderEffect().process(oversized, context)

        # Height fits the terminal after crop + border...
        assert len(framed) <= context.terminal_height
        # ...and so does every row's width.
        assert all(len(row) <= context.terminal_width for row in framed)
|
||||
68
tests/test_camera.py
Normal file
68
tests/test_camera.py
Normal file
@@ -0,0 +1,68 @@
|
||||
from engine.camera import Camera, CameraMode
|
||||
|
||||
|
||||
def test_camera_vertical_default():
    """A default-constructed Camera is a FEED camera parked at the origin."""
    camera = Camera()
    assert camera.x == 0
    assert camera.y == 0
    assert camera.mode == CameraMode.FEED
|
||||
|
||||
|
||||
def test_camera_vertical_factory():
    """The feed() factory applies the requested speed and selects FEED mode."""
    camera = Camera.feed(speed=2.0)
    assert camera.speed == 2.0
    assert camera.mode == CameraMode.FEED
|
||||
|
||||
|
||||
def test_camera_horizontal():
    """A horizontal camera pans right (x grows) as time advances."""
    camera = Camera.horizontal(speed=1.5)
    assert camera.mode == CameraMode.HORIZONTAL
    dt = 1.0
    camera.update(dt)
    assert camera.x > 0
|
||||
|
||||
|
||||
def test_camera_omni():
    """An omni camera drifts along both axes simultaneously."""
    camera = Camera.omni(speed=1.0)
    assert camera.mode == CameraMode.OMNI
    dt = 1.0
    camera.update(dt)
    assert camera.x > 0
    assert camera.y > 0
|
||||
|
||||
|
||||
def test_camera_floating():
    """A floating camera bobs: its y position changes between updates."""
    camera = Camera.floating(speed=1.0)
    assert camera.mode == CameraMode.FLOATING
    before = camera.y
    camera.update(0.5)
    assert camera.y != before
|
||||
|
||||
|
||||
def test_camera_reset():
    """reset() returns a moved camera to the origin.

    Moves the camera for one second, confirms it left the origin, then
    resets and confirms both coordinates are back at zero.
    """
    # Fix: the factory appears to have been renamed vertical() -> feed()
    # during the camera rework (test_camera_vertical_factory's docstring
    # still says "vertical" but already calls Camera.feed()), so the old
    # Camera.vertical() call raised AttributeError here. Using feed() keeps
    # this test consistent with the rest of the module.
    cam = Camera.feed()
    cam.update(1.0)
    assert cam.y > 0
    cam.reset()
    assert cam.x == 0
    assert cam.y == 0
|
||||
|
||||
|
||||
def test_camera_custom_update():
    """A custom update callback runs once per update() and mutates the camera."""
    calls = []

    def advance(camera, dt):
        # Record the invocation and move the camera proportionally to dt.
        calls.append(dt)
        camera.x += int(10 * dt)

    cam = Camera.custom(advance)
    cam.update(1.0)

    assert len(calls) == 1
    assert cam.x == 10
|
||||
826
tests/test_camera_acceptance.py
Normal file
826
tests/test_camera_acceptance.py
Normal file
@@ -0,0 +1,826 @@
|
||||
"""
|
||||
Camera acceptance tests using NullDisplay frame recording and ReplayDisplay.
|
||||
|
||||
Tests all camera modes by:
|
||||
1. Creating deterministic source data (numbered lines)
|
||||
2. Running pipeline with small viewport (40x15)
|
||||
3. Recording frames with NullDisplay
|
||||
4. Asserting expected viewport content for each mode
|
||||
|
||||
Usage:
|
||||
pytest tests/test_camera_acceptance.py -v
|
||||
pytest tests/test_camera_acceptance.py --show-frames -v
|
||||
|
||||
The --show-frames flag displays recorded frames for visual verification.
|
||||
"""
|
||||
|
||||
import math
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from engine.camera import Camera, CameraMode
|
||||
from engine.display import DisplayRegistry
|
||||
from engine.effects import get_registry
|
||||
from engine.pipeline import Pipeline, PipelineConfig, PipelineContext
|
||||
from engine.pipeline.adapters import (
|
||||
CameraClockStage,
|
||||
CameraStage,
|
||||
FontStage,
|
||||
ViewportFilterStage,
|
||||
create_stage_from_display,
|
||||
create_stage_from_effect,
|
||||
)
|
||||
from engine.pipeline.params import PipelineParams
|
||||
|
||||
|
||||
def get_camera_position(pipeline, camera):
    """Return the camera's absolute (x, y) position.

    The pipeline context's camera_y/camera_x values may be transformed by
    ViewportFilterStage (filtered relative position), so the true position
    is read from the camera object itself.

    Args:
        pipeline: The pipeline instance (unused; kept for call-site symmetry).
        camera: The camera object whose position is reported.

    Returns:
        Tuple ``(x, y)`` with the camera's absolute coordinates.
    """
    del pipeline  # intentionally unused; see docstring
    return camera.x, camera.y
|
||||
|
||||
|
||||
# Register custom CLI option for showing frames
|
||||
def pytest_addoption(parser):
    """Register the --show-frames flag for visual frame verification.

    NOTE(review): pytest only collects pytest_addoption hooks from
    conftest.py or installed plugins, not from test modules — confirm this
    hook is actually picked up. The show_frames fixture already guards
    against the option being unregistered.
    """
    parser.addoption(
        "--show-frames",
        default=False,
        action="store_true",
        help="Display recorded frames for visual verification",
    )
|
||||
|
||||
|
||||
@pytest.fixture
def show_frames(request):
    """Return True when the --show-frames CLI flag was passed.

    Falls back to False when the option was never registered (pytest raises
    ValueError for unknown option names).
    """
    try:
        enabled = request.config.getoption("--show-frames")
    except ValueError:
        # Option not registered with this pytest invocation.
        return False
    return enabled
|
||||
|
||||
|
||||
@pytest.fixture
def viewport_dims():
    """Deliberately tiny (width, height) viewport so scrolling is visible."""
    width, height = 40, 15
    return (width, height)
|
||||
|
||||
|
||||
@pytest.fixture
def items():
    """100 numbered lines so any viewport offset is identifiable on sight."""
    template = "LINE {0:03d} - This is line number {0}"
    return [{"text": template.format(i)} for i in range(100)]
|
||||
|
||||
|
||||
@pytest.fixture
def null_display(viewport_dims):
    """A NullDisplay initialized to the test viewport size."""
    width, height = viewport_dims
    display = DisplayRegistry.create("null")
    display.init(width, height)
    return display
|
||||
|
||||
|
||||
def create_pipeline_with_camera(
    camera, items, null_display, viewport_dims, effects=None
):
    """Helper to create a pipeline with a specific camera.

    Builds the stage chain used by all camera acceptance tests:
    source -> camera-clock -> viewport-filter -> font -> camera ->
    [optional effects] -> display, then initializes it and seeds the
    pipeline context.

    Args:
        camera: Camera instance driving the viewport.
        items: List of source items fed to a ListDataSource.
        null_display: Display used as the final stage (frame recorder).
        viewport_dims: (width, height) tuple for the viewport.
        effects: Optional list of effect names to insert before the display.

    Returns:
        The initialized Pipeline, or None if pipeline.initialize() failed.
    """
    effects = effects or []
    width, height = viewport_dims

    params = PipelineParams()
    params.viewport_width = width
    params.viewport_height = height

    # NOTE(review): config names the source "fixture" and the camera
    # "scroll" regardless of the actual camera passed in — presumably only
    # used as labels, since stages are added explicitly below; confirm.
    config = PipelineConfig(
        source="fixture",
        display="null",
        camera="scroll",
        effects=effects,
    )

    pipeline = Pipeline(config=config, context=PipelineContext())

    # Imported locally to keep the module import list focused on what the
    # tests themselves reference.
    from engine.data_sources.sources import ListDataSource
    from engine.pipeline.adapters import DataSourceStage

    list_source = ListDataSource(items, name="fixture")
    pipeline.add_stage("source", DataSourceStage(list_source, name="fixture"))

    # Add camera update stage to ensure camera_y is available for viewport filter
    pipeline.add_stage("camera_update", CameraClockStage(camera, name="camera-clock"))

    # Note: camera should come after font/viewport_filter, before effects
    pipeline.add_stage("viewport_filter", ViewportFilterStage(name="viewport-filter"))
    pipeline.add_stage("font", FontStage(name="font"))
    pipeline.add_stage(
        "camera",
        CameraStage(
            camera, name="radial" if camera.mode == CameraMode.RADIAL else "vertical"
        ),
    )

    # Effects are looked up by name; unknown names are silently skipped.
    if effects:
        effect_registry = get_registry()
        for effect_name in effects:
            effect = effect_registry.get(effect_name)
            if effect:
                pipeline.add_stage(
                    f"effect_{effect_name}",
                    create_stage_from_effect(effect, effect_name),
                )

    pipeline.add_stage("display", create_stage_from_display(null_display, "null"))
    pipeline.build()

    # Callers treat a None return as "pipeline creation failed".
    if not pipeline.initialize():
        return None

    # Seed the context with the objects tests inspect after execution.
    ctx = pipeline.context
    ctx.params = params
    ctx.set("display", null_display)
    ctx.set("items", items)
    ctx.set("pipeline", pipeline)
    ctx.set("pipeline_order", pipeline.execution_order)

    return pipeline
|
||||
|
||||
|
||||
class DisplayHelper:
    """Render captured frames to stdout for manual inspection."""

    @staticmethod
    def show_frame(buffer, title, viewport_dims, marker_line=None):
        """Print one frame, optionally highlighting a single row with '>>>'."""
        width, height = viewport_dims
        rule = "=" * (width + 20)

        print(f"\n{rule}")
        print(f" {title}")
        print(rule)

        for row, text in enumerate(buffer[:height]):
            # Prefix the highlighted row, if any, with a visual marker.
            prefix = ">>>" if row == marker_line else " "
            print(f"{prefix} [{row:2}] {text[:width]}")

        print(f"{rule}\n")
|
||||
|
||||
|
||||
class TestFeedCamera:
    """Test FEED mode: rapid single-item scrolling (1 row/frame at speed=1.0)."""

    def test_feed_camera_scrolls_down(
        self, items, null_display, viewport_dims, show_frames
    ):
        """FEED camera should move content down (y increases) at 1 row/frame.

        NOTE(review): this test depends on wall-clock timing (time.sleep and
        the camera's internal perf_counter-based dt), so it may be sensitive
        to heavily loaded CI machines — confirm acceptable flakiness budget.
        """
        camera = Camera.feed(speed=1.0)
        camera.set_canvas_size(200, 100)

        pipeline = create_pipeline_with_camera(
            camera, items, null_display, viewport_dims
        )
        assert pipeline is not None, "Pipeline creation failed"

        null_display.start_recording()

        # Run for 10 frames with small delay between frames
        # to ensure camera has time to move (dt calculation relies on time.perf_counter())
        import time

        for frame in range(10):
            pipeline.context.set("frame_number", frame)
            result = pipeline.execute(items)
            assert result.success, f"Frame {frame} execution failed"
            if frame < 9:  # No need to sleep after last frame
                time.sleep(0.02)  # Wait 20ms so dt~0.02, camera moves ~1.2 rows

        null_display.stop_recording()
        frames = null_display.get_frames()

        if show_frames:
            DisplayHelper.show_frame(frames[0], "FEED Camera - Frame 0", viewport_dims)
            DisplayHelper.show_frame(frames[5], "FEED Camera - Frame 5", viewport_dims)
            DisplayHelper.show_frame(frames[9], "FEED Camera - Frame 9", viewport_dims)

        # FEED mode: each frame y increases by speed*dt*60
        # At dt=1.0, speed=1.0: y increases by 60 per frame
        # But clamp to canvas bounds (200)
        # Frame 0: y=0, should show LINE 000
        # Frame 1: y=60, should show LINE 060

        # Verify frame 0 contains ASCII art content (rendered from LINE 000)
        # The text is converted to block characters, so check for non-empty frames
        assert len(frames[0]) > 0, "Frame 0 should not be empty"
        assert frames[0][0].strip() != "", "Frame 0 should have visible content"

        # Verify camera position changed between frames
        # Feed mode moves 1 row per frame at speed=1.0 with dt~0.02
        # After 5 frames, camera should have moved down
        assert camera.y > 0, f"Camera should have moved down, y={camera.y}"

        # Verify different frames show different content (camera is scrolling)
        # Check that frame 0 and frame 5 are different
        frame_0_str = "\n".join(frames[0])
        frame_5_str = "\n".join(frames[5])
        assert frame_0_str != frame_5_str, (
            "Frame 0 and Frame 5 should show different content"
        )
|
||||
|
||||
|
||||
class TestScrollCamera:
    """Test SCROLL mode: smooth vertical scrolling with float accumulation."""

    def test_scroll_camera_smooth_movement(
        self, items, null_display, viewport_dims, show_frames
    ):
        """SCROLL camera should move content smoothly with sub-integer precision."""
        camera = Camera.scroll(speed=0.5)
        camera.set_canvas_size(0, 200)  # Match viewport width for text wrapping

        pipeline = create_pipeline_with_camera(
            camera, items, null_display, viewport_dims
        )
        assert pipeline is not None, "Pipeline creation failed"

        null_display.start_recording()

        # Run for 20 frames
        for frame in range(20):
            pipeline.context.set("frame_number", frame)
            result = pipeline.execute(items)
            assert result.success, f"Frame {frame} execution failed"

        null_display.stop_recording()
        frames = null_display.get_frames()

        if show_frames:
            DisplayHelper.show_frame(
                frames[0], "SCROLL Camera - Frame 0", viewport_dims
            )
            DisplayHelper.show_frame(
                frames[10], "SCROLL Camera - Frame 10", viewport_dims
            )

        # SCROLL mode uses float accumulation for smooth scrolling
        # At speed=0.5, dt=1.0: y increases by 0.5 * 60 = 30 pixels per frame
        # Verify camera_y is increasing (which causes the scroll)
        # NOTE(review): these extra executions continue from the camera's
        # post-recording state — positions sampled here are cumulative, not
        # frame-0-relative.
        camera_y_values = []
        for frame in range(5):
            # Get camera.y directly (not filtered context value)
            pipeline.context.set("frame_number", frame)
            pipeline.execute(items)
            camera_y_values.append(camera.y)

        print(f"\nSCROLL test - camera_y positions: {camera_y_values}")

        # Verify camera_y is non-zero (camera is moving)
        assert camera_y_values[-1] > 0, (
            "Camera should have scrolled down (camera_y > 0)"
        )

        # Verify camera_y is increasing
        for i in range(len(camera_y_values) - 1):
            assert camera_y_values[i + 1] >= camera_y_values[i], (
                f"Camera_y should be non-decreasing: {camera_y_values}"
            )
|
||||
|
||||
|
||||
class TestHorizontalCamera:
    """Test HORIZONTAL mode: left/right scrolling."""

    def test_horizontal_camera_scrolls_right(
        self, items, null_display, viewport_dims, show_frames
    ):
        """HORIZONTAL camera should move content right (x increases)."""
        camera = Camera.horizontal(speed=1.0)
        camera.set_canvas_size(200, 200)

        pipeline = create_pipeline_with_camera(
            camera, items, null_display, viewport_dims
        )
        assert pipeline is not None, "Pipeline creation failed"

        null_display.start_recording()

        for frame in range(10):
            pipeline.context.set("frame_number", frame)
            result = pipeline.execute(items)
            assert result.success, f"Frame {frame} execution failed"

        null_display.stop_recording()
        frames = null_display.get_frames()

        if show_frames:
            DisplayHelper.show_frame(
                frames[0], "HORIZONTAL Camera - Frame 0", viewport_dims
            )
            DisplayHelper.show_frame(
                frames[5], "HORIZONTAL Camera - Frame 5", viewport_dims
            )

        # HORIZONTAL mode: x increases by speed*dt*60
        # At dt=1.0, speed=1.0: x increases by 60 per frame
        # Frame 0: x=0
        # Frame 5: x=300 (clamped to canvas_width-viewport_width)

        # Verify frame 0 contains content (ASCII art of LINE 000)
        assert len(frames[0]) > 0, "Frame 0 should not be empty"
        assert frames[0][0].strip() != "", "Frame 0 should have visible content"

        # Verify camera x is increasing
        # NOTE(review): this manual update loop continues from the camera's
        # post-pipeline state, so the printed positions are cumulative.
        print("\nHORIZONTAL test - camera positions:")
        for i in range(10):
            print(f" Frame {i}: x={camera.x}, y={camera.y}")
            camera.update(1.0)

        # Verify camera moved
        assert camera.x > 0, f"Camera should have moved right, x={camera.x}"
|
||||
|
||||
|
||||
class TestOmniCamera:
    """Test OMNI mode: diagonal scrolling (x and y increase together)."""

    def test_omni_camera_diagonal_movement(
        self, items, null_display, viewport_dims, show_frames
    ):
        """OMNI camera should move content diagonally (both x and y increase)."""
        camera = Camera.omni(speed=1.0)
        camera.set_canvas_size(200, 200)

        pipeline = create_pipeline_with_camera(
            camera, items, null_display, viewport_dims
        )
        assert pipeline is not None, "Pipeline creation failed"

        null_display.start_recording()

        for frame in range(10):
            pipeline.context.set("frame_number", frame)
            result = pipeline.execute(items)
            assert result.success, f"Frame {frame} execution failed"

        null_display.stop_recording()
        frames = null_display.get_frames()

        if show_frames:
            DisplayHelper.show_frame(frames[0], "OMNI Camera - Frame 0", viewport_dims)
            DisplayHelper.show_frame(frames[5], "OMNI Camera - Frame 5", viewport_dims)

        # OMNI mode: y increases by speed*dt*60, x increases by speed*dt*60*0.5
        # At dt=1.0, speed=1.0: y += 60, x += 30

        # Verify frame 0 contains content (ASCII art)
        assert len(frames[0]) > 0, "Frame 0 should not be empty"
        assert frames[0][0].strip() != "", "Frame 0 should have visible content"

        # Reset before the print loop so positions start from the origin.
        print("\nOMNI test - camera positions:")
        camera.reset()
        for frame in range(5):
            print(f" Frame {frame}: x={camera.x}, y={camera.y}")
            camera.update(1.0)

        # Verify camera moved
        assert camera.y > 0, f"Camera should have moved down, y={camera.y}"
|
||||
|
||||
|
||||
class TestFloatingCamera:
    """Test FLOATING mode: sinusoidal bobbing motion."""

    def test_floating_camera_bobbing(
        self, items, null_display, viewport_dims, show_frames
    ):
        """FLOATING camera should move content in a sinusoidal pattern."""
        camera = Camera.floating(speed=1.0)
        camera.set_canvas_size(200, 200)

        pipeline = create_pipeline_with_camera(
            camera, items, null_display, viewport_dims
        )
        assert pipeline is not None, "Pipeline creation failed"

        null_display.start_recording()

        for frame in range(32):
            pipeline.context.set("frame_number", frame)
            result = pipeline.execute(items)
            assert result.success, f"Frame {frame} execution failed"

        null_display.stop_recording()
        frames = null_display.get_frames()

        if show_frames:
            DisplayHelper.show_frame(
                frames[0], "FLOATING Camera - Frame 0", viewport_dims
            )
            DisplayHelper.show_frame(
                frames[8], "FLOATING Camera - Frame 8 (quarter cycle)", viewport_dims
            )
            DisplayHelper.show_frame(
                frames[16], "FLOATING Camera - Frame 16 (half cycle)", viewport_dims
            )

        # FLOATING mode: y = sin(time*2) * speed * 30
        # Period: 2π / 2 = π ≈ 3.14 seconds (or ~3.14 frames at dt=1.0)
        # Full cycle ~32 frames
        # NOTE(review): the formula above is restated from elsewhere —
        # confirm it matches the Camera implementation.

        print("\nFLOATING test - sinusoidal motion:")
        camera.reset()
        for frame in range(16):
            print(f" Frame {frame}: y={camera.y}, x={camera.x}")
            camera.update(1.0)

        # Verify y oscillates around 0
        camera.reset()
        camera.update(1.0)  # Frame 1
        y1 = camera.y
        camera.update(1.0)  # Frame 2
        y2 = camera.y
        camera.update(1.0)  # Frame 3
        y3 = camera.y

        # After a few frames, y should oscillate (not monotonic)
        assert y1 != y2 or y2 != y3, "FLOATING camera should oscillate"
|
||||
|
||||
|
||||
class TestBounceCamera:
    """Test BOUNCE mode: bouncing DVD-style motion."""

    def test_bounce_camera_reverses_at_edges(
        self, items, null_display, viewport_dims, show_frames
    ):
        """BOUNCE camera should reverse direction when hitting canvas edges."""
        camera = Camera.bounce(speed=5.0)  # Faster for quicker test
        # Set zoom > 1.0 so viewport is smaller than canvas, allowing movement
        camera.set_zoom(2.0)  # Zoom out 2x, viewport is half the canvas size
        camera.set_canvas_size(400, 400)

        pipeline = create_pipeline_with_camera(
            camera, items, null_display, viewport_dims
        )
        assert pipeline is not None, "Pipeline creation failed"

        null_display.start_recording()

        for frame in range(50):
            pipeline.context.set("frame_number", frame)
            result = pipeline.execute(items)
            assert result.success, f"Frame {frame} execution failed"

        null_display.stop_recording()
        frames = null_display.get_frames()

        if show_frames:
            DisplayHelper.show_frame(
                frames[0], "BOUNCE Camera - Frame 0", viewport_dims
            )
            DisplayHelper.show_frame(
                frames[25], "BOUNCE Camera - Frame 25", viewport_dims
            )

        # BOUNCE mode: moves until it hits edge, then reverses
        # Verify the camera moves and changes direction

        print("\nBOUNCE test - bouncing motion:")
        camera.reset()
        camera.set_zoom(2.0)  # Reset also resets zoom, so set it again
        for frame in range(20):
            print(f" Frame {frame}: x={camera.x}, y={camera.y}")
            camera.update(1.0)

        # Check that camera hits bounds and reverses
        camera.reset()
        camera.set_zoom(2.0)  # Reset also resets zoom, so set it again
        for _ in range(51):  # Odd number ensures ending at opposite corner
            camera.update(1.0)

        # Camera should have hit an edge and reversed direction
        # With 400x400 canvas, viewport 200x200 (zoom=2), max_x = 200, max_y = 200
        # Starting at (0,0), after 51 updates it should be at (200, 200)
        # NOTE(review): this exact-final-position assertion assumes the
        # camera moves exactly one viewport-step per update(1.0) — confirm
        # against the Camera.bounce implementation.
        max_x = max(0, camera.canvas_width - camera.viewport_width)
        print(f"BOUNCE camera final position: x={camera.x}, y={camera.y}")
        assert camera.x == max_x, (
            f"Camera should be at max_x ({max_x}), got x={camera.x}"
        )

        # Check bounds are respected
        vw = camera.viewport_width
        vh = camera.viewport_height
        assert camera.x >= 0 and camera.x <= camera.canvas_width - vw
        assert camera.y >= 0 and camera.y <= camera.canvas_height - vh
|
||||
|
||||
|
||||
class TestRadialCamera:
|
||||
"""Test RADIAL mode: polar coordinate scanning (rotation around center)."""
|
||||
|
||||
    def test_radial_camera_rotates_around_center(
        self, items, null_display, viewport_dims, show_frames
    ):
        """RADIAL camera should rotate around the center of the canvas."""
        camera = Camera.radial(speed=0.5)
        camera.set_canvas_size(200, 200)

        pipeline = create_pipeline_with_camera(
            camera, items, null_display, viewport_dims
        )
        assert pipeline is not None, "Pipeline creation failed"

        null_display.start_recording()

        for frame in range(32):  # 32 frames = 2π at ~0.2 rad/frame
            pipeline.context.set("frame_number", frame)
            result = pipeline.execute(items)
            assert result.success, f"Frame {frame} execution failed"

        null_display.stop_recording()
        frames = null_display.get_frames()

        if show_frames:
            DisplayHelper.show_frame(
                frames[0], "RADIAL Camera - Frame 0", viewport_dims
            )
            DisplayHelper.show_frame(
                frames[8], "RADIAL Camera - Frame 8 (quarter turn)", viewport_dims
            )
            DisplayHelper.show_frame(
                frames[16], "RADIAL Camera - Frame 16 (half turn)", viewport_dims
            )
            DisplayHelper.show_frame(
                frames[24], "RADIAL Camera - Frame 24 (3/4 turn)", viewport_dims
            )

        # RADIAL mode: rotates around center with smooth angular motion
        # At speed=0.5: theta increases by ~0.2 rad/frame (0.5 * dt * 1.0)
        # NOTE(review): _theta_float is a private Camera attribute — this
        # test is coupled to the implementation; consider a public accessor.

        print("\nRADIAL test - rotational motion:")
        camera.reset()
        for frame in range(32):
            theta_deg = (camera._theta_float * 180 / math.pi) % 360
            print(
                f" Frame {frame}: theta={theta_deg:.1f}°, x={camera.x}, y={camera.y}"
            )
            camera.update(1.0)

        # Verify rotation occurs (angle should change)
        camera.reset()
        theta_start = camera._theta_float
        camera.update(1.0)  # Frame 1
        theta_mid = camera._theta_float
        camera.update(1.0)  # Frame 2
        theta_end = camera._theta_float

        assert theta_mid > theta_start, "Theta should increase (rotation)"
        assert theta_end > theta_mid, "Theta should continue increasing"
|
||||
|
||||
    def test_radial_camera_with_sensor_integration(
        self, items, null_display, viewport_dims, show_frames
    ):
        """RADIAL camera can be driven by external sensor (OSC integration test)."""
        from engine.sensors.oscillator import (
            OscillatorSensor,
            register_oscillator_sensor,
        )

        # Create an oscillator sensor for testing
        # NOTE(review): a sensor is registered AND a separate instance is
        # constructed with the same name — confirm the registration is
        # actually needed here, since only the local instance is read.
        register_oscillator_sensor(name="test_osc", waveform="sine", frequency=0.5)
        osc = OscillatorSensor(name="test_osc", waveform="sine", frequency=0.5)

        camera = Camera.radial(speed=0.3)
        camera.set_canvas_size(200, 200)

        pipeline = create_pipeline_with_camera(
            camera, items, null_display, viewport_dims
        )
        assert pipeline is not None, "Pipeline creation failed"

        null_display.start_recording()

        # Run frames while modulating camera with oscillator
        # NOTE(review): osc.start() is only called later, below — presumably
        # read() returns a falsy value until started, so set_radial_input is
        # skipped during this loop; confirm that is intentional.
        for frame in range(32):
            # Read oscillator value and set as radial input
            osc_value = osc.read()
            if osc_value:
                camera.set_radial_input(osc_value.value)

            pipeline.context.set("frame_number", frame)
            result = pipeline.execute(items)
            assert result.success, f"Frame {frame} execution failed"

        null_display.stop_recording()
        frames = null_display.get_frames()

        if show_frames:
            DisplayHelper.show_frame(
                frames[0], "RADIAL+OSC Camera - Frame 0", viewport_dims
            )
            DisplayHelper.show_frame(
                frames[8], "RADIAL+OSC Camera - Frame 8", viewport_dims
            )
            DisplayHelper.show_frame(
                frames[16], "RADIAL+OSC Camera - Frame 16", viewport_dims
            )

        print("\nRADIAL+OSC test - sensor-driven rotation:")
        osc.start()
        camera.reset()
        for frame in range(16):
            osc_value = osc.read()
            if osc_value:
                camera.set_radial_input(osc_value.value)
            camera.update(1.0)
            theta_deg = (camera._theta_float * 180 / math.pi) % 360
            print(
                f" Frame {frame}: osc={osc_value.value if osc_value else 0:.3f}, theta={theta_deg:.1f}°"
            )

        # Verify camera position changes when driven by sensor
        camera.reset()
        x_start = camera.x
        camera.update(1.0)
        x_mid = camera.x
        assert x_start != x_mid, "Camera should move when driven by oscillator"

        osc.stop()
|
||||
|
||||
def test_radial_camera_with_direct_angle_setting(
    self, items, null_display, viewport_dims, show_frames
):
    """RADIAL camera can have angle set directly for OSC integration.

    Drives a zero-speed radial camera through a full 0..2π sweep by calling
    set_radial_angle() each frame, executes the pipeline for every frame,
    then verifies that the camera's Cartesian position actually changes
    with the requested angle.
    """
    camera = Camera.radial(speed=0.0)  # No auto-rotation
    camera.set_canvas_size(200, 200)
    camera._r_float = 80.0  # Set initial radius to see movement

    pipeline = create_pipeline_with_camera(
        camera, items, null_display, viewport_dims
    )
    assert pipeline is not None, "Pipeline creation failed"

    null_display.start_recording()

    # Set angle directly to sweep through full rotation
    for frame in range(32):
        angle = (frame / 32) * 2 * math.pi  # 0 to 2π over 32 frames
        camera.set_radial_angle(angle)
        camera.update(1.0)  # Must update to convert polar to Cartesian

        pipeline.context.set("frame_number", frame)
        result = pipeline.execute(items)
        assert result.success, f"Frame {frame} execution failed"

    null_display.stop_recording()
    frames = null_display.get_frames()

    if show_frames:
        DisplayHelper.show_frame(
            frames[0], "RADIAL Direct Angle - Frame 0", viewport_dims
        )
        DisplayHelper.show_frame(
            frames[8], "RADIAL Direct Angle - Frame 8", viewport_dims
        )
        DisplayHelper.show_frame(
            frames[16], "RADIAL Direct Angle - Frame 16", viewport_dims
        )

    # Diagnostic sweep: log the requested angle against the resulting
    # camera position (not asserted; useful when a run fails).
    print("\nRADIAL Direct Angle test - sweeping rotation:")
    for frame in range(32):
        angle = (frame / 32) * 2 * math.pi
        camera.set_radial_angle(angle)
        camera.update(1.0)  # Update converts angle to x,y position
        theta_deg = angle * 180 / math.pi
        print(
            f" Frame {frame}: set_angle={theta_deg:.1f}°, actual_x={camera.x}, actual_y={camera.y}"
        )

    # Verify camera position changes as angle sweeps
    camera.reset()
    camera._r_float = 80.0  # Set radius for testing
    camera.set_radial_angle(0)
    camera.update(1.0)
    x0 = camera.x
    camera.set_radial_angle(math.pi / 2)
    camera.update(1.0)
    x90 = camera.x
    assert x0 != x90, (
        f"Camera position should change with angle (x0={x0}, x90={x90})"
    )
|
||||
|
||||
|
||||
class TestCameraModeEnum:
    """Sanity checks on the CameraMode enum."""

    def test_all_modes_exist(self):
        """Every expected camera mode is a member of the enum."""
        defined = {member.name for member in CameraMode}
        wanted = (
            "FEED",
            "SCROLL",
            "HORIZONTAL",
            "OMNI",
            "FLOATING",
            "BOUNCE",
            "RADIAL",
        )
        for mode in wanted:
            assert mode in defined, f"CameraMode.{mode} should exist"

    def test_radial_mode_exists(self):
        """RADIAL is a real, properly-named member of CameraMode."""
        radial = CameraMode.RADIAL
        assert radial is not None
        assert isinstance(radial, CameraMode)
        assert radial.name == "RADIAL"
|
||||
|
||||
|
||||
class TestCameraFactoryMethods:
    """Verify Camera factory helpers build correctly-configured instances."""

    def test_radial_factory(self):
        """Camera.radial() yields a RADIAL-mode camera with polar state attrs."""
        cam = Camera.radial(speed=2.0)
        assert cam.mode == CameraMode.RADIAL
        assert cam.speed == 2.0
        for attr in ("_r_float", "_theta_float"):
            assert hasattr(cam, attr)

    def test_radial_factory_initializes_state(self):
        """A fresh radial camera starts at the polar origin."""
        cam = Camera.radial()
        assert cam._r_float == 0.0
        assert cam._theta_float == 0.0
|
||||
|
||||
|
||||
class TestCameraStateSaveRestore:
    """Hot-rebuild support: camera state must survive a save/restore cycle."""

    def test_radial_camera_state_save(self):
        """Saving a radial camera captures both polar coordinates."""
        cam = Camera.radial()
        cam._theta_float = math.pi / 4
        cam._r_float = 50.0

        # State travels through the pipeline's CameraStage adapter.
        from engine.pipeline.adapters.camera import CameraStage

        snapshot = CameraStage(cam).save_state()

        assert "_theta_float" in snapshot
        assert "_r_float" in snapshot
        assert snapshot["_theta_float"] == math.pi / 4
        assert snapshot["_r_float"] == 50.0

    def test_radial_camera_state_restore(self):
        """Restoring onto a fresh camera reproduces the saved coordinates."""
        from engine.pipeline.adapters.camera import CameraStage

        donor = Camera.radial()
        donor._theta_float = math.pi / 3
        donor._r_float = 75.0
        snapshot = CameraStage(donor).save_state()

        # A brand-new camera picks up the donor's polar state.
        receiver = Camera.radial()
        CameraStage(receiver).restore_state(snapshot)

        assert abs(receiver._theta_float - math.pi / 3) < 0.001
        assert abs(receiver._r_float - 75.0) < 0.001
|
||||
|
||||
|
||||
class TestCameraViewportApplication:
    """Exercise camera.apply() buffer slicing."""

    def test_radial_camera_viewport_slicing(self):
        """apply() must return a viewport-sized slice of the canvas buffer."""
        cam = Camera.radial(speed=0.5)
        cam.set_canvas_size(200, 200)
        cam.update(1.0)  # advance once so the camera is off the origin

        canvas = ["LINE {:03d}".format(n) for n in range(200)]
        view = cam.apply(canvas, viewport_width=40, viewport_height=15)

        # Exactly viewport_height rows, and no row wider than viewport_width.
        assert len(view) == 15
        assert all(len(row) <= 40 for row in view)
|
||||
@@ -1,117 +0,0 @@
|
||||
"""
|
||||
Tests for engine.controller module.
|
||||
"""
|
||||
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from engine import config
|
||||
from engine.controller import StreamController
|
||||
|
||||
|
||||
class TestStreamController:
    """Tests for StreamController class."""

    def test_init_default_config(self):
        """StreamController initializes with default config."""
        controller = StreamController()
        # A default Config instance is created when none is supplied.
        assert controller.config is not None
        assert isinstance(controller.config, config.Config)

    def test_init_custom_config(self):
        """StreamController accepts custom config."""
        custom_config = config.Config(headline_limit=500)
        controller = StreamController(config=custom_config)
        # The caller-supplied value must survive construction unchanged.
        assert controller.config.headline_limit == 500

    def test_init_sources_none_by_default(self):
        """Sources are None until initialized."""
        controller = StreamController()
        # Neither input source is constructed until initialize_sources() runs.
        assert controller.mic is None
        assert controller.ntfy is None

    @patch("engine.controller.MicMonitor")
    @patch("engine.controller.NtfyPoller")
    def test_initialize_sources(self, mock_ntfy, mock_mic):
        """initialize_sources creates mic and ntfy instances."""
        # NOTE: @patch decorators apply bottom-up, so the first mock argument
        # (mock_ntfy) is the NtfyPoller patch and the second (mock_mic) is
        # the MicMonitor patch.
        mock_mic_instance = MagicMock()
        mock_mic_instance.available = True
        mock_mic_instance.start.return_value = True
        mock_mic.return_value = mock_mic_instance

        mock_ntfy_instance = MagicMock()
        mock_ntfy_instance.start.return_value = True
        mock_ntfy.return_value = mock_ntfy_instance

        controller = StreamController()
        mic_ok, ntfy_ok = controller.initialize_sources()

        # Both sources report success and are retained on the controller.
        assert mic_ok is True
        assert ntfy_ok is True
        assert controller.mic is not None
        assert controller.ntfy is not None

    @patch("engine.controller.MicMonitor")
    @patch("engine.controller.NtfyPoller")
    def test_initialize_sources_mic_unavailable(self, mock_ntfy, mock_mic):
        """initialize_sources handles unavailable mic."""
        mock_mic_instance = MagicMock()
        # Simulate a machine with no usable audio input device.
        mock_mic_instance.available = False
        mock_mic.return_value = mock_mic_instance

        mock_ntfy_instance = MagicMock()
        mock_ntfy_instance.start.return_value = True
        mock_ntfy.return_value = mock_ntfy_instance

        controller = StreamController()
        mic_ok, ntfy_ok = controller.initialize_sources()

        # A mic failure must not prevent the ntfy source from starting.
        assert mic_ok is False
        assert ntfy_ok is True
|
||||
|
||||
|
||||
class TestStreamControllerCleanup:
    """Verify StreamController.cleanup() releases its sources."""

    @patch("engine.controller.MicMonitor")
    def test_cleanup_stops_mic(self, mock_mic):
        """cleanup() must stop an attached microphone exactly once."""
        fake_mic = MagicMock()
        mock_mic.return_value = fake_mic

        ctrl = StreamController()
        ctrl.mic = fake_mic
        ctrl.cleanup()

        fake_mic.stop.assert_called_once()
|
||||
|
||||
|
||||
class TestStreamControllerWarmup:
    """Behaviour of the class-level ntfy topic warmup."""

    def test_warmup_topics_idempotent(self):
        """Calling warmup twice is safe."""
        StreamController._topics_warmed = False

        with patch("urllib.request.urlopen") as fake_open:
            StreamController.warmup_topics()
            StreamController.warmup_topics()

        # The warmup pass should have hit at least three topics in total.
        assert fake_open.call_count >= 3

    def test_warmup_topics_sets_flag(self):
        """A completed warmup flips the class-level warmed flag."""
        StreamController._topics_warmed = False

        with patch("urllib.request.urlopen"):
            StreamController.warmup_topics()

        assert StreamController._topics_warmed is True

    def test_warmup_topics_skips_after_first(self):
        """With the flag already set, warmup makes no network calls."""
        StreamController._topics_warmed = True

        with patch("urllib.request.urlopen") as fake_open:
            StreamController.warmup_topics()

        fake_open.assert_not_called()
|
||||
99
tests/test_crop_effect.py
Normal file
99
tests/test_crop_effect.py
Normal file
@@ -0,0 +1,99 @@
|
||||
"""
|
||||
Tests for CropEffect.
|
||||
"""
|
||||
|
||||
from engine.effects.plugins.crop import CropEffect
|
||||
from engine.effects.types import EffectContext
|
||||
|
||||
|
||||
def make_ctx(terminal_width: int = 80, terminal_height: int = 24) -> EffectContext:
    """Build a minimal EffectContext for effect tests.

    The ticker occupies the full terminal height and the camera scroll
    offset is pinned at zero.
    """
    settings = {
        "terminal_width": terminal_width,
        "terminal_height": terminal_height,
        "scroll_cam": 0,
        "ticker_height": terminal_height,
    }
    return EffectContext(**settings)
|
||||
|
||||
|
||||
class TestCropEffect:
    """Tests for CropEffect."""

    def test_basic_init(self):
        """CropEffect initializes with defaults."""
        effect = CropEffect()
        assert effect.name == "crop"
        assert effect.config.enabled is True

    def test_crop_wider_buffer(self):
        """CropEffect crops wide buffer to terminal width."""
        effect = CropEffect()
        buf = [
            "This is a very long line that exceeds the terminal width of eighty characters!",
            "Another long line that should also be cropped to fit within the terminal bounds!",
            "Short",
        ]
        ctx = make_ctx(terminal_width=40, terminal_height=10)

        result = effect.process(buf, ctx)

        # Lines should be cropped to 40 chars
        assert len(result[0]) == 40
        assert len(result[1]) == 40
        # "Short" is 5 chars; padding of 35 spaces brings it to width 40.
        assert result[2] == "Short" + " " * 35  # padded to width

    def test_crop_taller_buffer(self):
        """CropEffect crops tall buffer to terminal height."""
        effect = CropEffect()
        buf = ["line"] * 30  # 30 lines
        ctx = make_ctx(terminal_width=80, terminal_height=10)

        result = effect.process(buf, ctx)

        # Should be cropped to 10 lines
        assert len(result) == 10

    def test_pad_shorter_lines(self):
        """CropEffect pads lines shorter than width."""
        effect = CropEffect()
        buf = ["short", "medium length", ""]
        ctx = make_ctx(terminal_width=20, terminal_height=5)

        result = effect.process(buf, ctx)

        assert len(result[0]) == 20  # padded
        assert len(result[1]) == 20  # padded
        assert len(result[2]) == 20  # padded (was empty)

    def test_pad_to_height(self):
        """CropEffect pads with empty lines if buffer is too short."""
        effect = CropEffect()
        buf = ["line1", "line2"]
        ctx = make_ctx(terminal_width=20, terminal_height=10)

        result = effect.process(buf, ctx)

        # Should have 10 lines
        assert len(result) == 10
        # Last 8 should be empty padding
        for i in range(2, 10):
            assert result[i] == " " * 20

    def test_empty_buffer(self):
        """CropEffect handles empty buffer."""
        effect = CropEffect()
        ctx = make_ctx()

        result = effect.process([], ctx)

        # An empty input stays empty rather than being padded to height.
        assert result == []

    def test_uses_context_dimensions(self):
        """CropEffect uses context terminal_width/terminal_height."""
        effect = CropEffect()
        buf = ["x" * 100]
        ctx = make_ctx(terminal_width=50, terminal_height=1)

        result = effect.process(buf, ctx)

        assert len(result[0]) == 50
|
||||
220
tests/test_data_sources.py
Normal file
220
tests/test_data_sources.py
Normal file
@@ -0,0 +1,220 @@
|
||||
"""
|
||||
Tests for engine/data_sources/sources.py - data source implementations.
|
||||
|
||||
Tests HeadlinesDataSource, PoetryDataSource, EmptyDataSource, and the
|
||||
base DataSource class functionality.
|
||||
"""
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from engine.data_sources.sources import (
|
||||
EmptyDataSource,
|
||||
HeadlinesDataSource,
|
||||
PoetryDataSource,
|
||||
SourceItem,
|
||||
)
|
||||
|
||||
|
||||
class TestSourceItem:
    """Behavioural checks for the SourceItem dataclass."""

    def test_source_item_creation(self):
        """Required fields round-trip; metadata defaults to None."""
        record = SourceItem(
            content="Test headline",
            source="test_source",
            timestamp="2024-01-01",
        )
        assert (record.content, record.source, record.timestamp) == (
            "Test headline",
            "test_source",
            "2024-01-01",
        )
        assert record.metadata is None

    def test_source_item_with_metadata(self):
        """The optional metadata mapping is stored exactly as supplied."""
        extra = {"author": "John", "category": "tech"}
        record = SourceItem(
            content="Test",
            source="test",
            timestamp="2024-01-01",
            metadata=extra,
        )
        assert record.metadata == extra
|
||||
|
||||
|
||||
class TestEmptyDataSource:
    """Behaviour of the blank placeholder data source."""

    def test_empty_source_name(self):
        """The source identifies itself as 'empty'."""
        assert EmptyDataSource().name == "empty"

    def test_empty_source_is_not_dynamic(self):
        """The blank source never streams; it is static."""
        assert EmptyDataSource().is_dynamic is False

    def test_empty_source_fetch_returns_blank_content(self):
        """fetch() yields one item: `height` blank lines, each `width` wide."""
        src = EmptyDataSource(width=80, height=24)
        fetched = src.fetch()

        assert len(fetched) == 1
        blank = fetched[0]
        assert isinstance(blank, SourceItem)
        assert blank.source == "empty"
        rows = blank.content.split("\n")
        assert len(rows) == 24
        assert all(len(row) == 80 for row in rows)

    def test_empty_source_get_items_caches_result(self):
        """get_items() hands back the identical cached list on repeat calls."""
        src = EmptyDataSource()
        first = src.get_items()
        assert src.get_items() is first
|
||||
|
||||
|
||||
class TestHeadlinesDataSource:
    """Test HeadlinesDataSource."""

    def test_headlines_source_name(self):
        """HeadlinesDataSource has correct name."""
        source = HeadlinesDataSource()
        assert source.name == "headlines"

    def test_headlines_source_is_static(self):
        """HeadlinesDataSource is static."""
        source = HeadlinesDataSource()
        assert source.is_dynamic is False

    def test_headlines_fetch_returns_source_items(self):
        """HeadlinesDataSource.fetch() returns SourceItem list."""
        # fetch_all is mocked to return (rows, ok_count, err_count), where
        # each row is a (content, source, timestamp) tuple.
        mock_items = [
            ("Test Article 1", "source1", "10:30"),
            ("Test Article 2", "source2", "11:45"),
        ]
        with patch("engine.fetch.fetch_all") as mock_fetch_all:
            mock_fetch_all.return_value = (mock_items, 2, 0)

            source = HeadlinesDataSource()
            items = source.fetch()

            assert len(items) == 2
            assert all(isinstance(item, SourceItem) for item in items)
            assert items[0].content == "Test Article 1"
            assert items[0].source == "source1"
            assert items[0].timestamp == "10:30"

    def test_headlines_fetch_with_empty_feed(self):
        """HeadlinesDataSource handles empty feeds gracefully."""
        with patch("engine.fetch.fetch_all") as mock_fetch_all:
            # No rows, zero successes, one failed feed.
            mock_fetch_all.return_value = ([], 0, 1)

            source = HeadlinesDataSource()
            items = source.fetch()

            # Should return empty list
            assert isinstance(items, list)
            assert len(items) == 0

    def test_headlines_get_items_caches_result(self):
        """HeadlinesDataSource.get_items() caches the result."""
        mock_items = [("Test Article", "source", "12:00")]
        with patch("engine.fetch.fetch_all") as mock_fetch_all:
            mock_fetch_all.return_value = (mock_items, 1, 0)

            source = HeadlinesDataSource()
            items1 = source.get_items()
            items2 = source.get_items()

            # Should only call fetch once (cached)
            assert mock_fetch_all.call_count == 1
            assert items1 is items2

    def test_headlines_refresh_clears_cache(self):
        """HeadlinesDataSource.refresh() clears cache and refetches."""
        mock_items = [("Test Article", "source", "12:00")]
        with patch("engine.fetch.fetch_all") as mock_fetch_all:
            mock_fetch_all.return_value = (mock_items, 1, 0)

            source = HeadlinesDataSource()
            source.get_items()
            source.refresh()
            source.get_items()

            # Should call fetch twice (once for initial, once for refresh)
            assert mock_fetch_all.call_count == 2
|
||||
|
||||
|
||||
class TestPoetryDataSource:
    """Behaviour of the poetry-backed data source."""

    def test_poetry_source_name(self):
        """Name attribute is 'poetry'."""
        assert PoetryDataSource().name == "poetry"

    def test_poetry_source_is_static(self):
        """Poetry content does not stream."""
        assert PoetryDataSource().is_dynamic is False

    def test_poetry_fetch_returns_source_items(self):
        """fetch() wraps each (content, source, ts) tuple in a SourceItem."""
        fake_rows = [
            ("Poetry line 1", "Poetry Source 1", ""),
            ("Poetry line 2", "Poetry Source 2", ""),
        ]
        with patch("engine.fetch.fetch_poetry") as fake_fetch:
            fake_fetch.return_value = (fake_rows, 2, 0)

            wrapped = PoetryDataSource().fetch()

            assert len(wrapped) == 2
            assert all(isinstance(w, SourceItem) for w in wrapped)
            head = wrapped[0]
            assert head.content == "Poetry line 1"
            assert head.source == "Poetry Source 1"

    def test_poetry_get_items_caches_result(self):
        """Repeated get_items() hits the cache, not the fetcher."""
        fake_rows = [("Poetry line", "Poetry Source", "")]
        with patch("engine.fetch.fetch_poetry") as fake_fetch:
            fake_fetch.return_value = (fake_rows, 1, 0)

            src = PoetryDataSource()
            first = src.get_items()
            second = src.get_items()

            assert fake_fetch.call_count == 1
            assert first is second
|
||||
|
||||
|
||||
class TestDataSourceInterface:
    """Contract checks on the DataSource base class (via EmptyDataSource)."""

    def test_data_source_stream_not_implemented(self):
        """stream() is abstract-by-convention and must raise."""
        src = EmptyDataSource()
        with pytest.raises(NotImplementedError):
            src.stream()

    def test_data_source_is_dynamic_defaults_false(self):
        """Sources are static unless they opt in to streaming."""
        assert EmptyDataSource().is_dynamic is False

    def test_data_source_refresh_updates_cache(self):
        """refresh() re-fetches and hands back a fresh item list."""
        src = EmptyDataSource()
        src.get_items()
        refreshed = src.refresh()
        assert isinstance(refreshed, list)
|
||||
@@ -2,7 +2,13 @@
|
||||
Tests for engine.display module.
|
||||
"""
|
||||
|
||||
from engine.display import NullDisplay, TerminalDisplay
|
||||
import sys
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from engine.display import DisplayRegistry, NullDisplay, TerminalDisplay, render_border
|
||||
from engine.display.backends.multi import MultiDisplay
|
||||
|
||||
|
||||
class TestDisplayProtocol:
|
||||
@@ -25,6 +31,66 @@ class TestDisplayProtocol:
|
||||
assert hasattr(display, "cleanup")
|
||||
|
||||
|
||||
class TestDisplayRegistry:
    """Tests for DisplayRegistry class."""

    def setup_method(self):
        """Reset registry before each test."""
        # Clear class-level state so registrations cannot leak across tests.
        DisplayRegistry._backends = {}
        DisplayRegistry._initialized = False

    def test_register_adds_backend(self):
        """register adds a backend to the registry."""
        DisplayRegistry.register("test", TerminalDisplay)
        assert DisplayRegistry.get("test") == TerminalDisplay

    def test_register_case_insensitive(self):
        """register is case insensitive."""
        # Registered as upper-case, looked up as lower-case.
        DisplayRegistry.register("TEST", TerminalDisplay)
        assert DisplayRegistry.get("test") == TerminalDisplay

    def test_get_returns_none_for_unknown(self):
        """get returns None for unknown backend."""
        assert DisplayRegistry.get("unknown") is None

    def test_list_backends_returns_all(self):
        """list_backends returns all registered backends."""
        DisplayRegistry.register("a", TerminalDisplay)
        DisplayRegistry.register("b", NullDisplay)
        backends = DisplayRegistry.list_backends()
        assert "a" in backends
        assert "b" in backends

    def test_create_returns_instance(self):
        """create returns a display instance."""
        DisplayRegistry.register("test", NullDisplay)
        display = DisplayRegistry.create("test")
        assert isinstance(display, NullDisplay)

    def test_create_returns_none_for_unknown(self):
        """create returns None for unknown backend."""
        display = DisplayRegistry.create("unknown")
        assert display is None

    def test_initialize_registers_defaults(self):
        """initialize registers default backends."""
        DisplayRegistry.initialize()
        assert DisplayRegistry.get("terminal") == TerminalDisplay
        assert DisplayRegistry.get("null") == NullDisplay
        # Imported here rather than at module scope — presumably these
        # backends carry optional dependencies; TODO confirm.
        from engine.display.backends.pygame import PygameDisplay
        from engine.display.backends.websocket import WebSocketDisplay

        assert DisplayRegistry.get("websocket") == WebSocketDisplay
        assert DisplayRegistry.get("pygame") == PygameDisplay

    def test_initialize_idempotent(self):
        """initialize can be called multiple times safely."""
        DisplayRegistry.initialize()
        # A manual registration must survive a second initialize() call.
        DisplayRegistry._backends["custom"] = TerminalDisplay
        DisplayRegistry.initialize()
        assert "custom" in DisplayRegistry.list_backends()
|
||||
|
||||
|
||||
class TestTerminalDisplay:
|
||||
"""Tests for TerminalDisplay class."""
|
||||
|
||||
@@ -52,6 +118,119 @@ class TestTerminalDisplay:
|
||||
display = TerminalDisplay()
|
||||
display.cleanup()
|
||||
|
||||
def test_get_dimensions_returns_cached_value(self):
    """get_dimensions returns cached dimensions for stability."""
    import os
    from unittest.mock import patch

    # Mock terminal size to ensure deterministic dimensions
    term_size = os.terminal_size((80, 24))
    with patch("os.get_terminal_size", return_value=term_size):
        display = TerminalDisplay()
        display.init(80, 24)
        # Dimensions come back exactly as initialised.
        d1 = display.get_dimensions()
        assert d1 == (80, 24)
|
||||
|
||||
def test_show_clears_screen_before_each_frame(self):
    """show clears previous frame to prevent visual wobble.

    Regression test: Previously show() didn't clear the screen,
    causing old content to remain and creating visual wobble.
    The fix adds \\033[H\\033[J (cursor home + erase down) before each frame.
    """
    from io import BytesIO

    display = TerminalDisplay()
    display.init(80, 24)

    buffer = ["line1", "line2", "line3"]

    # Stand-in stdout whose .buffer captures the raw bytes show() writes.
    fake_buffer = BytesIO()
    fake_stdout = MagicMock()
    fake_stdout.buffer = fake_buffer
    with patch.object(sys, "stdout", fake_stdout):
        display.show(buffer)

    output = fake_buffer.getvalue().decode("utf-8")
    # The frame must begin with cursor-home + erase-down.
    assert output.startswith("\033[H\033[J"), (
        f"Output should start with clear sequence, got: {repr(output[:20])}"
    )
|
||||
|
||||
def test_show_clears_screen_on_subsequent_frames(self):
    """show clears screen on every frame, not just the first.

    Regression test: Ensures each show() call includes the clear sequence.
    """
    from io import BytesIO

    # Use target_fps=0 to disable frame skipping in test
    display = TerminalDisplay(target_fps=0)
    display.init(80, 24)

    buffer = ["line1", "line2"]

    # Capture each frame's raw output separately with a fresh BytesIO.
    for i in range(3):
        fake_buffer = BytesIO()
        fake_stdout = MagicMock()
        fake_stdout.buffer = fake_buffer
        with patch.object(sys, "stdout", fake_stdout):
            display.show(buffer)

        output = fake_buffer.getvalue().decode("utf-8")
        assert output.startswith("\033[H\033[J"), (
            f"Frame {i} should start with clear sequence"
        )
|
||||
|
||||
def test_get_dimensions_stable_across_rapid_calls(self):
    """get_dimensions should not fluctuate when called rapidly.

    This test catches the bug where os.get_terminal_size() returns
    inconsistent values, causing visual wobble.
    """
    display = TerminalDisplay()
    display.init(80, 24)

    # Get dimensions 10 times rapidly (simulating frame loop)
    dims = [display.get_dimensions() for _ in range(10)]

    # All should be the same - this would fail if os.get_terminal_size()
    # returns different values each call
    assert len(set(dims)) == 1, f"Dimensions should be stable, got: {set(dims)}"
|
||||
|
||||
def test_show_with_border_uses_render_border(self):
    """show with border=True calls render_border with FPS."""
    from unittest.mock import MagicMock

    display = TerminalDisplay()
    display.init(80, 24)

    buffer = ["line1", "line2"]

    # Mock get_monitor to provide FPS
    mock_monitor = MagicMock()
    mock_monitor.get_stats.return_value = {
        "pipeline": {"avg_ms": 16.5},
        "frame_count": 100,
    }

    # Mock render_border to verify it's called; wraps= keeps the real
    # implementation running so show() output is still valid.
    with (
        patch("engine.display.get_monitor", return_value=mock_monitor),
        patch("engine.display.render_border", wraps=render_border) as mock_render,
    ):
        display.show(buffer, border=True)

    # Verify render_border was called with correct arguments
    assert mock_render.called
    args, kwargs = mock_render.call_args
    # Arguments: buffer, width, height, fps, frame_time (positional)
    assert args[0] == buffer
    assert args[1] == 80
    assert args[2] == 24
    assert args[3] == pytest.approx(60.6, rel=0.1)  # fps = 1000/16.5
    assert args[4] == pytest.approx(16.5, rel=0.1)
    assert kwargs == {}  # no keyword arguments
|
||||
|
||||
|
||||
class TestNullDisplay:
|
||||
"""Tests for NullDisplay class."""
|
||||
@@ -77,3 +256,178 @@ class TestNullDisplay:
|
||||
"""cleanup does nothing."""
|
||||
display = NullDisplay()
|
||||
display.cleanup()
|
||||
|
||||
def test_show_stores_last_buffer(self):
    """After show(), the frame is retrievable via _last_buffer."""
    sink = NullDisplay()
    sink.init(80, 24)

    frame = ["line1", "line2", "line3"]
    sink.show(frame)

    assert sink._last_buffer == frame
|
||||
|
||||
def test_show_tracks_last_buffer_across_calls(self):
    """Each show() call overwrites the previously recorded buffer."""
    sink = NullDisplay()
    sink.init(80, 24)

    sink.show(["first"])
    assert sink._last_buffer == ["first"]

    sink.show(["second"])
    assert sink._last_buffer == ["second"]
|
||||
|
||||
|
||||
class TestRenderBorder:
    """Tests for render_border function."""

    def test_render_border_adds_corners(self):
        """render_border adds corner characters."""
        from engine.display import render_border

        buffer = ["hello", "world"]
        result = render_border(buffer, width=10, height=5)

        # Accept any of the box-drawing corner variants.
        assert result[0][0] in "┌┎┍"  # top-left
        assert result[0][-1] in "┐┒┓"  # top-right
        assert result[-1][0] in "└┚┖"  # bottom-left
        assert result[-1][-1] in "┘┛┙"  # bottom-right

    def test_render_border_dimensions(self):
        """render_border output matches requested dimensions."""
        from engine.display import render_border

        buffer = ["line1", "line2", "line3"]
        result = render_border(buffer, width=20, height=10)

        # Output should be exactly height lines
        assert len(result) == 10
        # Each line should be exactly width characters
        for line in result:
            assert len(line) == 20

    def test_render_border_with_fps(self):
        """render_border includes FPS in top border when provided."""
        from engine.display import render_border

        buffer = ["test"]
        result = render_border(buffer, width=20, height=5, fps=60.0)

        # Label spacing is unspecified, so accept either format.
        top_line = result[0]
        assert "FPS:60" in top_line or "FPS: 60" in top_line

    def test_render_border_with_frame_time(self):
        """render_border includes frame time in bottom border when provided."""
        from engine.display import render_border

        buffer = ["test"]
        result = render_border(buffer, width=20, height=5, frame_time=16.5)

        bottom_line = result[-1]
        assert "16.5ms" in bottom_line

    def test_render_border_crops_content_to_fit(self):
        """render_border crops content to fit within borders."""
        from engine.display import render_border

        # Buffer larger than viewport
        buffer = ["x" * 100] * 50
        result = render_border(buffer, width=20, height=10)

        # Result shrinks to fit viewport
        assert len(result) == 10
        for line in result[1:-1]:  # Skip border lines
            assert len(line) == 20

    def test_render_border_preserves_content(self):
        """render_border preserves content within borders."""
        from engine.display import render_border

        buffer = ["hello world", "test line"]
        result = render_border(buffer, width=20, height=5)

        # Content should appear in the middle rows
        content_lines = result[1:-1]
        assert any("hello world" in line for line in content_lines)

    def test_render_border_with_small_buffer(self):
        """render_border handles buffers smaller than viewport."""
        from engine.display import render_border

        buffer = ["hi"]
        result = render_border(buffer, width=20, height=10)

        # Should still produce full viewport with padding
        assert len(result) == 10
        # All lines should be full width
        for line in result:
            assert len(line) == 20
|
||||
|
||||
|
||||
class TestMultiDisplay:
|
||||
"""Tests for MultiDisplay class."""
|
||||
|
||||
def test_init_stores_dimensions(self):
|
||||
"""init stores dimensions and forwards to displays."""
|
||||
mock_display1 = MagicMock()
|
||||
mock_display2 = MagicMock()
|
||||
multi = MultiDisplay([mock_display1, mock_display2])
|
||||
|
||||
multi.init(120, 40)
|
||||
|
||||
assert multi.width == 120
|
||||
assert multi.height == 40
|
||||
mock_display1.init.assert_called_once_with(120, 40, reuse=False)
|
||||
mock_display2.init.assert_called_once_with(120, 40, reuse=False)
|
||||
|
||||
def test_show_forwards_to_all_displays(self):
|
||||
"""show forwards buffer to all displays."""
|
||||
mock_display1 = MagicMock()
|
||||
mock_display2 = MagicMock()
|
||||
multi = MultiDisplay([mock_display1, mock_display2])
|
||||
|
||||
buffer = ["line1", "line2"]
|
||||
multi.show(buffer, border=False)
|
||||
|
||||
mock_display1.show.assert_called_once_with(buffer, border=False)
|
||||
mock_display2.show.assert_called_once_with(buffer, border=False)
|
||||
|
||||
def test_clear_forwards_to_all_displays(self):
|
||||
"""clear forwards to all displays."""
|
||||
mock_display1 = MagicMock()
|
||||
mock_display2 = MagicMock()
|
||||
multi = MultiDisplay([mock_display1, mock_display2])
|
||||
|
||||
multi.clear()
|
||||
|
||||
mock_display1.clear.assert_called_once()
|
||||
mock_display2.clear.assert_called_once()
|
||||
|
||||
def test_cleanup_forwards_to_all_displays(self):
|
||||
"""cleanup forwards to all displays."""
|
||||
mock_display1 = MagicMock()
|
||||
mock_display2 = MagicMock()
|
||||
multi = MultiDisplay([mock_display1, mock_display2])
|
||||
|
||||
multi.cleanup()
|
||||
|
||||
mock_display1.cleanup.assert_called_once()
|
||||
mock_display2.cleanup.assert_called_once()
|
||||
|
||||
def test_empty_displays_list(self):
|
||||
"""handles empty displays list gracefully."""
|
||||
multi = MultiDisplay([])
|
||||
multi.init(80, 24)
|
||||
multi.show(["test"])
|
||||
multi.clear()
|
||||
multi.cleanup()
|
||||
|
||||
def test_init_with_reuse(self):
|
||||
"""init passes reuse flag to child displays."""
|
||||
mock_display = MagicMock()
|
||||
multi = MultiDisplay([mock_display])
|
||||
|
||||
multi.init(80, 24, reuse=True)
|
||||
|
||||
mock_display.init.assert_called_once_with(80, 24, reuse=True)
|
||||
|
||||
@@ -5,8 +5,10 @@ Tests for engine.effects.controller module.
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from engine.effects.controller import (
|
||||
_format_stats,
|
||||
handle_effects_command,
|
||||
set_effect_chain_ref,
|
||||
show_effects_menu,
|
||||
)
|
||||
|
||||
|
||||
@@ -92,6 +94,29 @@ class TestHandleEffectsCommand:
|
||||
assert "Reordered pipeline" in result
|
||||
mock_chain_instance.reorder.assert_called_once_with(["noise", "fade"])
|
||||
|
||||
def test_reorder_failure(self):
|
||||
"""reorder returns error on failure."""
|
||||
with patch("engine.effects.controller.get_registry") as mock_registry:
|
||||
mock_registry.return_value.list_all.return_value = {}
|
||||
|
||||
with patch("engine.effects.controller._get_effect_chain") as mock_chain:
|
||||
mock_chain_instance = MagicMock()
|
||||
mock_chain_instance.reorder.return_value = False
|
||||
mock_chain.return_value = mock_chain_instance
|
||||
|
||||
result = handle_effects_command("/effects reorder bad")
|
||||
|
||||
assert "Failed to reorder" in result
|
||||
|
||||
def test_unknown_effect(self):
|
||||
"""unknown effect returns error."""
|
||||
with patch("engine.effects.controller.get_registry") as mock_registry:
|
||||
mock_registry.return_value.list_all.return_value = {}
|
||||
|
||||
result = handle_effects_command("/effects unknown on")
|
||||
|
||||
assert "Unknown effect" in result
|
||||
|
||||
def test_unknown_command(self):
|
||||
"""unknown command returns error."""
|
||||
result = handle_effects_command("/unknown")
|
||||
@@ -102,6 +127,105 @@ class TestHandleEffectsCommand:
|
||||
result = handle_effects_command("not a command")
|
||||
assert "Unknown command" in result
|
||||
|
||||
def test_invalid_intensity_value(self):
|
||||
"""invalid intensity value returns error."""
|
||||
with patch("engine.effects.controller.get_registry") as mock_registry:
|
||||
mock_plugin = MagicMock()
|
||||
mock_registry.return_value.get.return_value = mock_plugin
|
||||
mock_registry.return_value.list_all.return_value = {"noise": mock_plugin}
|
||||
|
||||
result = handle_effects_command("/effects noise intensity bad")
|
||||
|
||||
assert "Invalid intensity" in result
|
||||
|
||||
def test_missing_action(self):
|
||||
"""missing action returns usage."""
|
||||
with patch("engine.effects.controller.get_registry") as mock_registry:
|
||||
mock_plugin = MagicMock()
|
||||
mock_registry.return_value.get.return_value = mock_plugin
|
||||
mock_registry.return_value.list_all.return_value = {"noise": mock_plugin}
|
||||
|
||||
result = handle_effects_command("/effects noise")
|
||||
|
||||
assert "Usage" in result
|
||||
|
||||
def test_stats_command(self):
|
||||
"""stats command returns formatted stats."""
|
||||
with patch("engine.effects.controller.get_monitor") as mock_monitor:
|
||||
mock_monitor.return_value.get_stats.return_value = {
|
||||
"frame_count": 100,
|
||||
"pipeline": {"avg_ms": 1.5, "min_ms": 1.0, "max_ms": 2.0},
|
||||
"effects": {},
|
||||
}
|
||||
|
||||
result = handle_effects_command("/effects stats")
|
||||
|
||||
assert "Performance Stats" in result
|
||||
|
||||
def test_list_only_effects(self):
|
||||
"""list command works with just /effects."""
|
||||
with patch("engine.effects.controller.get_registry") as mock_registry:
|
||||
mock_plugin = MagicMock()
|
||||
mock_plugin.config.enabled = False
|
||||
mock_plugin.config.intensity = 0.5
|
||||
mock_registry.return_value.list_all.return_value = {"noise": mock_plugin}
|
||||
|
||||
with patch("engine.effects.controller._get_effect_chain") as mock_chain:
|
||||
mock_chain.return_value = None
|
||||
|
||||
result = handle_effects_command("/effects")
|
||||
|
||||
assert "noise: OFF" in result
|
||||
|
||||
|
||||
class TestShowEffectsMenu:
|
||||
"""Tests for show_effects_menu function."""
|
||||
|
||||
def test_returns_formatted_menu(self):
|
||||
"""returns formatted effects menu."""
|
||||
with patch("engine.effects.controller.get_registry") as mock_registry:
|
||||
mock_plugin = MagicMock()
|
||||
mock_plugin.config.enabled = True
|
||||
mock_plugin.config.intensity = 0.75
|
||||
mock_registry.return_value.list_all.return_value = {"noise": mock_plugin}
|
||||
|
||||
with patch("engine.effects.controller._get_effect_chain") as mock_chain:
|
||||
mock_chain_instance = MagicMock()
|
||||
mock_chain_instance.get_order.return_value = ["noise"]
|
||||
mock_chain.return_value = mock_chain_instance
|
||||
|
||||
result = show_effects_menu()
|
||||
|
||||
assert "EFFECTS MENU" in result
|
||||
assert "noise" in result
|
||||
|
||||
|
||||
class TestFormatStats:
|
||||
"""Tests for _format_stats function."""
|
||||
|
||||
def test_returns_error_when_no_monitor(self):
|
||||
"""returns error when monitor unavailable."""
|
||||
with patch("engine.effects.controller.get_monitor") as mock_monitor:
|
||||
mock_monitor.return_value.get_stats.return_value = {"error": "No data"}
|
||||
|
||||
result = _format_stats()
|
||||
|
||||
assert "No data" in result
|
||||
|
||||
def test_formats_pipeline_stats(self):
|
||||
"""formats pipeline stats correctly."""
|
||||
with patch("engine.effects.controller.get_monitor") as mock_monitor:
|
||||
mock_monitor.return_value.get_stats.return_value = {
|
||||
"frame_count": 50,
|
||||
"pipeline": {"avg_ms": 2.5, "min_ms": 2.0, "max_ms": 3.0},
|
||||
"effects": {"noise": {"avg_ms": 0.5, "min_ms": 0.4, "max_ms": 0.6}},
|
||||
}
|
||||
|
||||
result = _format_stats()
|
||||
|
||||
assert "Pipeline" in result
|
||||
assert "noise" in result
|
||||
|
||||
|
||||
class TestSetEffectChainRef:
|
||||
"""Tests for set_effect_chain_ref function."""
|
||||
|
||||
@@ -1,69 +0,0 @@
|
||||
"""
|
||||
Tests for engine.emitters module.
|
||||
"""
|
||||
|
||||
from engine.emitters import EventEmitter, Startable, Stoppable
|
||||
|
||||
|
||||
class TestEventEmitterProtocol:
|
||||
"""Tests for EventEmitter protocol."""
|
||||
|
||||
def test_protocol_exists(self):
|
||||
"""EventEmitter protocol is defined."""
|
||||
assert EventEmitter is not None
|
||||
|
||||
def test_protocol_has_subscribe_method(self):
|
||||
"""EventEmitter has subscribe method in protocol."""
|
||||
assert hasattr(EventEmitter, "subscribe")
|
||||
|
||||
def test_protocol_has_unsubscribe_method(self):
|
||||
"""EventEmitter has unsubscribe method in protocol."""
|
||||
assert hasattr(EventEmitter, "unsubscribe")
|
||||
|
||||
|
||||
class TestStartableProtocol:
|
||||
"""Tests for Startable protocol."""
|
||||
|
||||
def test_protocol_exists(self):
|
||||
"""Startable protocol is defined."""
|
||||
assert Startable is not None
|
||||
|
||||
def test_protocol_has_start_method(self):
|
||||
"""Startable has start method in protocol."""
|
||||
assert hasattr(Startable, "start")
|
||||
|
||||
|
||||
class TestStoppableProtocol:
|
||||
"""Tests for Stoppable protocol."""
|
||||
|
||||
def test_protocol_exists(self):
|
||||
"""Stoppable protocol is defined."""
|
||||
assert Stoppable is not None
|
||||
|
||||
def test_protocol_has_stop_method(self):
|
||||
"""Stoppable has stop method in protocol."""
|
||||
assert hasattr(Stoppable, "stop")
|
||||
|
||||
|
||||
class TestProtocolCompliance:
|
||||
"""Tests that existing classes comply with protocols."""
|
||||
|
||||
def test_ntfy_poller_complies_with_protocol(self):
|
||||
"""NtfyPoller implements EventEmitter protocol."""
|
||||
from engine.ntfy import NtfyPoller
|
||||
|
||||
poller = NtfyPoller("http://example.com/topic")
|
||||
assert hasattr(poller, "subscribe")
|
||||
assert hasattr(poller, "unsubscribe")
|
||||
assert callable(poller.subscribe)
|
||||
assert callable(poller.unsubscribe)
|
||||
|
||||
def test_mic_monitor_complies_with_protocol(self):
|
||||
"""MicMonitor implements EventEmitter and Startable protocols."""
|
||||
from engine.mic import MicMonitor
|
||||
|
||||
monitor = MicMonitor()
|
||||
assert hasattr(monitor, "subscribe")
|
||||
assert hasattr(monitor, "unsubscribe")
|
||||
assert hasattr(monitor, "start")
|
||||
assert hasattr(monitor, "stop")
|
||||
234
tests/test_fetch.py
Normal file
234
tests/test_fetch.py
Normal file
@@ -0,0 +1,234 @@
|
||||
"""
|
||||
Tests for engine.fetch module.
|
||||
"""
|
||||
|
||||
import json
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from engine.fetch import (
|
||||
_fetch_gutenberg,
|
||||
fetch_all,
|
||||
fetch_feed,
|
||||
fetch_poetry,
|
||||
load_cache,
|
||||
save_cache,
|
||||
)
|
||||
|
||||
|
||||
class TestFetchFeed:
|
||||
"""Tests for fetch_feed function."""
|
||||
|
||||
@patch("engine.fetch.urllib.request.urlopen")
|
||||
def test_fetch_success(self, mock_urlopen):
|
||||
"""Successful feed fetch returns parsed feed."""
|
||||
mock_response = MagicMock()
|
||||
mock_response.read.return_value = b"<rss>test</rss>"
|
||||
mock_urlopen.return_value = mock_response
|
||||
|
||||
result = fetch_feed("http://example.com/feed")
|
||||
|
||||
assert result is not None
|
||||
|
||||
@patch("engine.fetch.urllib.request.urlopen")
|
||||
def test_fetch_network_error(self, mock_urlopen):
|
||||
"""Network error returns tuple with None feed."""
|
||||
mock_urlopen.side_effect = Exception("Network error")
|
||||
|
||||
url, feed = fetch_feed("http://example.com/feed")
|
||||
|
||||
assert feed is None
|
||||
|
||||
|
||||
class TestFetchAll:
|
||||
"""Tests for fetch_all function."""
|
||||
|
||||
@patch("engine.fetch.fetch_feed")
|
||||
@patch("engine.fetch.strip_tags")
|
||||
@patch("engine.fetch.skip")
|
||||
@patch("engine.fetch.boot_ln")
|
||||
def test_fetch_all_success(self, mock_boot, mock_skip, mock_strip, mock_fetch_feed):
|
||||
"""Successful fetch returns items."""
|
||||
mock_feed = MagicMock()
|
||||
mock_feed.bozo = False
|
||||
mock_feed.entries = [
|
||||
{"title": "Headline 1", "published_parsed": (2024, 1, 1, 12, 0, 0)},
|
||||
{"title": "Headline 2", "updated_parsed": (2024, 1, 2, 12, 0, 0)},
|
||||
]
|
||||
mock_fetch_feed.return_value = ("http://example.com", mock_feed)
|
||||
mock_skip.return_value = False
|
||||
mock_strip.side_effect = lambda x: x
|
||||
|
||||
items, linked, failed = fetch_all()
|
||||
|
||||
assert linked > 0
|
||||
assert failed == 0
|
||||
|
||||
@patch("engine.fetch.fetch_feed")
|
||||
@patch("engine.fetch.boot_ln")
|
||||
def test_fetch_all_feed_error(self, mock_boot, mock_fetch_feed):
|
||||
"""Feed error increments failed count."""
|
||||
mock_fetch_feed.return_value = ("http://example.com", None)
|
||||
|
||||
items, linked, failed = fetch_all()
|
||||
|
||||
assert failed > 0
|
||||
|
||||
@patch("engine.fetch.fetch_feed")
|
||||
@patch("engine.fetch.strip_tags")
|
||||
@patch("engine.fetch.skip")
|
||||
@patch("engine.fetch.boot_ln")
|
||||
def test_fetch_all_skips_filtered(
|
||||
self, mock_boot, mock_skip, mock_strip, mock_fetch_feed
|
||||
):
|
||||
"""Filtered headlines are skipped."""
|
||||
mock_feed = MagicMock()
|
||||
mock_feed.bozo = False
|
||||
mock_feed.entries = [
|
||||
{"title": "Sports scores"},
|
||||
{"title": "Valid headline"},
|
||||
]
|
||||
mock_fetch_feed.return_value = ("http://example.com", mock_feed)
|
||||
mock_skip.side_effect = lambda x: x == "Sports scores"
|
||||
mock_strip.side_effect = lambda x: x
|
||||
|
||||
items, linked, failed = fetch_all()
|
||||
|
||||
assert any("Valid headline" in item[0] for item in items)
|
||||
|
||||
|
||||
class TestFetchGutenberg:
|
||||
"""Tests for _fetch_gutenberg function."""
|
||||
|
||||
@patch("engine.fetch.urllib.request.urlopen")
|
||||
def test_gutenberg_success(self, mock_urlopen):
|
||||
"""Successful gutenberg fetch returns items."""
|
||||
text = """Project Gutenberg
|
||||
|
||||
*** START OF THE PROJECT GUTENBERG ***
|
||||
This is a test poem with multiple lines
|
||||
that should be parsed as a block.
|
||||
|
||||
Another stanza with more content here.
|
||||
|
||||
*** END OF THE PROJECT GUTENBERG ***
|
||||
"""
|
||||
mock_response = MagicMock()
|
||||
mock_response.read.return_value = text.encode("utf-8")
|
||||
mock_urlopen.return_value = mock_response
|
||||
|
||||
result = _fetch_gutenberg("http://example.com/test", "Test")
|
||||
|
||||
assert len(result) > 0
|
||||
|
||||
@patch("engine.fetch.urllib.request.urlopen")
|
||||
def test_gutenberg_network_error(self, mock_urlopen):
|
||||
"""Network error returns empty list."""
|
||||
mock_urlopen.side_effect = Exception("Network error")
|
||||
|
||||
result = _fetch_gutenberg("http://example.com/test", "Test")
|
||||
|
||||
assert result == []
|
||||
|
||||
@patch("engine.fetch.urllib.request.urlopen")
|
||||
def test_gutenberg_skips_short_blocks(self, mock_urlopen):
|
||||
"""Blocks shorter than 20 chars are skipped."""
|
||||
text = """*** START OF THE ***
|
||||
Short
|
||||
*** END OF THE ***
|
||||
"""
|
||||
mock_response = MagicMock()
|
||||
mock_response.read.return_value = text.encode("utf-8")
|
||||
mock_urlopen.return_value = mock_response
|
||||
|
||||
result = _fetch_gutenberg("http://example.com/test", "Test")
|
||||
|
||||
assert result == []
|
||||
|
||||
@patch("engine.fetch.urllib.request.urlopen")
|
||||
def test_gutenberg_skips_all_caps_headers(self, mock_urlopen):
|
||||
"""All-caps lines are skipped as headers."""
|
||||
text = """*** START OF THE ***
|
||||
THIS IS ALL CAPS HEADER
|
||||
more content here
|
||||
*** END OF THE ***
|
||||
"""
|
||||
mock_response = MagicMock()
|
||||
mock_response.read.return_value = text.encode("utf-8")
|
||||
mock_urlopen.return_value = mock_response
|
||||
|
||||
result = _fetch_gutenberg("http://example.com/test", "Test")
|
||||
|
||||
assert len(result) > 0
|
||||
|
||||
|
||||
class TestFetchPoetry:
|
||||
"""Tests for fetch_poetry function."""
|
||||
|
||||
@patch("engine.fetch._fetch_gutenberg")
|
||||
@patch("engine.fetch.boot_ln")
|
||||
def test_fetch_poetry_success(self, mock_boot, mock_fetch):
|
||||
"""Successful poetry fetch returns items."""
|
||||
mock_fetch.return_value = [
|
||||
("Stanza 1 content here", "Test", ""),
|
||||
("Stanza 2 content here", "Test", ""),
|
||||
]
|
||||
|
||||
items, linked, failed = fetch_poetry()
|
||||
|
||||
assert linked > 0
|
||||
assert failed == 0
|
||||
|
||||
@patch("engine.fetch._fetch_gutenberg")
|
||||
@patch("engine.fetch.boot_ln")
|
||||
def test_fetch_poetry_failure(self, mock_boot, mock_fetch):
|
||||
"""Failed fetch increments failed count."""
|
||||
mock_fetch.return_value = []
|
||||
|
||||
items, linked, failed = fetch_poetry()
|
||||
|
||||
assert failed > 0
|
||||
|
||||
|
||||
class TestCache:
|
||||
"""Tests for cache functions."""
|
||||
|
||||
@patch("engine.fetch._cache_path")
|
||||
def test_load_cache_success(self, mock_path):
|
||||
"""Successful cache load returns items."""
|
||||
mock_path.return_value.__str__ = MagicMock(return_value="/tmp/cache")
|
||||
mock_path.return_value.exists.return_value = True
|
||||
mock_path.return_value.read_text.return_value = json.dumps(
|
||||
{"items": [("title", "source", "time")]}
|
||||
)
|
||||
|
||||
result = load_cache()
|
||||
|
||||
assert result is not None
|
||||
|
||||
@patch("engine.fetch._cache_path")
|
||||
def test_load_cache_missing_file(self, mock_path):
|
||||
"""Missing cache file returns None."""
|
||||
mock_path.return_value.exists.return_value = False
|
||||
|
||||
result = load_cache()
|
||||
|
||||
assert result is None
|
||||
|
||||
@patch("engine.fetch._cache_path")
|
||||
def test_load_cache_invalid_json(self, mock_path):
|
||||
"""Invalid JSON returns None."""
|
||||
mock_path.return_value.exists.return_value = True
|
||||
mock_path.return_value.read_text.side_effect = json.JSONDecodeError("", "", 0)
|
||||
|
||||
result = load_cache()
|
||||
|
||||
assert result is None
|
||||
|
||||
@patch("engine.fetch._cache_path")
|
||||
def test_save_cache_success(self, mock_path):
|
||||
"""Save cache writes to file."""
|
||||
mock_path.return_value.__truediv__ = MagicMock(
|
||||
return_value=mock_path.return_value
|
||||
)
|
||||
|
||||
save_cache([("title", "source", "time")])
|
||||
@@ -1,35 +0,0 @@
|
||||
import re
|
||||
|
||||
from engine.fetch_code import fetch_code
|
||||
|
||||
|
||||
def test_return_shape():
|
||||
items, line_count, ignored = fetch_code()
|
||||
assert isinstance(items, list)
|
||||
assert line_count == len(items)
|
||||
assert ignored == 0
|
||||
|
||||
|
||||
def test_items_are_tuples():
|
||||
items, _, _ = fetch_code()
|
||||
assert items, "expected at least one code line"
|
||||
for item in items:
|
||||
assert isinstance(item, tuple) and len(item) == 3
|
||||
text, src, ts = item
|
||||
assert isinstance(text, str)
|
||||
assert isinstance(src, str)
|
||||
assert isinstance(ts, str)
|
||||
|
||||
|
||||
def test_blank_and_comment_lines_excluded():
|
||||
items, _, _ = fetch_code()
|
||||
for text, _, _ in items:
|
||||
assert text.strip(), "blank line should have been filtered"
|
||||
assert not text.strip().startswith("#"), "comment line should have been filtered"
|
||||
|
||||
|
||||
def test_module_path_format():
|
||||
items, _, _ = fetch_code()
|
||||
pattern = re.compile(r"^engine\.\w+$")
|
||||
for _, _, ts in items:
|
||||
assert pattern.match(ts), f"unexpected module path: {ts!r}"
|
||||
@@ -1,151 +0,0 @@
|
||||
"""Tests for the FigmentEffect plugin."""
|
||||
|
||||
import os
|
||||
from enum import Enum
|
||||
|
||||
import pytest
|
||||
|
||||
pytest.importorskip("cairosvg", reason="cairosvg requires system Cairo library")
|
||||
|
||||
from effects_plugins.figment import FigmentEffect, FigmentPhase, FigmentState
|
||||
from engine.effects.types import EffectConfig, EffectContext
|
||||
|
||||
FIXTURE_SVG = os.path.join(os.path.dirname(__file__), "fixtures", "test.svg")
|
||||
FIGMENTS_DIR = os.path.join(os.path.dirname(__file__), "fixtures")
|
||||
|
||||
|
||||
class TestFigmentPhase:
|
||||
def test_is_enum(self):
|
||||
assert issubclass(FigmentPhase, Enum)
|
||||
|
||||
def test_has_all_phases(self):
|
||||
assert hasattr(FigmentPhase, "REVEAL")
|
||||
assert hasattr(FigmentPhase, "HOLD")
|
||||
assert hasattr(FigmentPhase, "DISSOLVE")
|
||||
|
||||
|
||||
class TestFigmentState:
|
||||
def test_creation(self):
|
||||
state = FigmentState(
|
||||
phase=FigmentPhase.REVEAL,
|
||||
progress=0.5,
|
||||
rows=["█▀▄", " █ "],
|
||||
gradient=[46, 40, 34, 28, 22, 22, 34, 40, 46, 82, 118, 231],
|
||||
center_row=5,
|
||||
center_col=10,
|
||||
)
|
||||
assert state.phase == FigmentPhase.REVEAL
|
||||
assert state.progress == 0.5
|
||||
assert len(state.rows) == 2
|
||||
|
||||
|
||||
class TestFigmentEffectInit:
|
||||
def test_name(self):
|
||||
effect = FigmentEffect(figment_dir=FIGMENTS_DIR)
|
||||
assert effect.name == "figment"
|
||||
|
||||
def test_default_config(self):
|
||||
effect = FigmentEffect(figment_dir=FIGMENTS_DIR)
|
||||
assert effect.config.enabled is False
|
||||
assert effect.config.intensity == 1.0
|
||||
assert effect.config.params["interval_secs"] == 60
|
||||
assert effect.config.params["display_secs"] == 4.5
|
||||
|
||||
def test_process_is_noop(self):
|
||||
effect = FigmentEffect(figment_dir=FIGMENTS_DIR)
|
||||
buf = ["line1", "line2"]
|
||||
ctx = EffectContext(
|
||||
terminal_width=80,
|
||||
terminal_height=24,
|
||||
scroll_cam=0,
|
||||
ticker_height=20,
|
||||
)
|
||||
result = effect.process(buf, ctx)
|
||||
assert result == buf
|
||||
assert result is buf
|
||||
|
||||
def test_configure(self):
|
||||
effect = FigmentEffect(figment_dir=FIGMENTS_DIR)
|
||||
new_cfg = EffectConfig(enabled=True, intensity=0.5)
|
||||
effect.configure(new_cfg)
|
||||
assert effect.config.enabled is True
|
||||
assert effect.config.intensity == 0.5
|
||||
|
||||
|
||||
class TestFigmentStateMachine:
|
||||
def test_idle_initially(self):
|
||||
effect = FigmentEffect(figment_dir=FIGMENTS_DIR)
|
||||
effect.config.enabled = True
|
||||
state = effect.get_figment_state(0, 80, 24)
|
||||
# Timer hasn't fired yet, should be None (idle)
|
||||
assert state is None
|
||||
|
||||
def test_trigger_starts_reveal(self):
|
||||
effect = FigmentEffect(figment_dir=FIGMENTS_DIR)
|
||||
effect.config.enabled = True
|
||||
effect.trigger(80, 24)
|
||||
state = effect.get_figment_state(1, 80, 24)
|
||||
assert state is not None
|
||||
assert state.phase == FigmentPhase.REVEAL
|
||||
|
||||
def test_full_cycle(self):
|
||||
effect = FigmentEffect(figment_dir=FIGMENTS_DIR)
|
||||
effect.config.enabled = True
|
||||
effect.config.params["display_secs"] = 0.15 # 3 phases x 0.05s
|
||||
|
||||
effect.trigger(40, 20)
|
||||
|
||||
# Advance through reveal (30 frames at 0.05s = 1.5s, but we shrunk it)
|
||||
# With display_secs=0.15, each phase is 0.05s = 1 frame
|
||||
state = effect.get_figment_state(1, 40, 20)
|
||||
assert state is not None
|
||||
assert state.phase == FigmentPhase.REVEAL
|
||||
|
||||
# Advance enough frames to get through all phases
|
||||
for frame in range(2, 100):
|
||||
state = effect.get_figment_state(frame, 40, 20)
|
||||
if state is None:
|
||||
break
|
||||
|
||||
# Should have completed the full cycle back to idle
|
||||
assert state is None
|
||||
|
||||
def test_timer_fires_at_interval(self):
|
||||
effect = FigmentEffect(figment_dir=FIGMENTS_DIR)
|
||||
effect.config.enabled = True
|
||||
effect.config.params["interval_secs"] = 0.1 # 2 frames at 20fps
|
||||
|
||||
# Frame 0: idle
|
||||
state = effect.get_figment_state(0, 40, 20)
|
||||
assert state is None
|
||||
|
||||
# Advance past interval (0.1s = 2 frames)
|
||||
state = effect.get_figment_state(1, 40, 20)
|
||||
state = effect.get_figment_state(2, 40, 20)
|
||||
state = effect.get_figment_state(3, 40, 20)
|
||||
# Timer should have fired by now
|
||||
assert state is not None
|
||||
|
||||
|
||||
class TestFigmentEdgeCases:
|
||||
def test_empty_figment_dir(self, tmp_path):
|
||||
effect = FigmentEffect(figment_dir=str(tmp_path))
|
||||
effect.config.enabled = True
|
||||
effect.trigger(40, 20)
|
||||
state = effect.get_figment_state(1, 40, 20)
|
||||
# No SVGs available — should stay idle
|
||||
assert state is None
|
||||
|
||||
def test_missing_figment_dir(self):
|
||||
effect = FigmentEffect(figment_dir="/nonexistent/path")
|
||||
effect.config.enabled = True
|
||||
effect.trigger(40, 20)
|
||||
state = effect.get_figment_state(1, 40, 20)
|
||||
assert state is None
|
||||
|
||||
def test_disabled_ignores_trigger(self):
|
||||
effect = FigmentEffect(figment_dir=FIGMENTS_DIR)
|
||||
effect.config.enabled = False
|
||||
effect.trigger(80, 24)
|
||||
state = effect.get_figment_state(1, 80, 24)
|
||||
assert state is None
|
||||
@@ -1,64 +0,0 @@
|
||||
"""Tests for render_figment_overlay in engine.layers."""
|
||||
|
||||
import pytest
|
||||
|
||||
pytest.importorskip("cairosvg", reason="cairosvg requires system Cairo library")
|
||||
|
||||
from effects_plugins.figment import FigmentPhase, FigmentState
|
||||
from engine.layers import render_figment_overlay
|
||||
|
||||
|
||||
def _make_state(phase=FigmentPhase.HOLD, progress=0.5):
|
||||
return FigmentState(
|
||||
phase=phase,
|
||||
progress=progress,
|
||||
rows=["█▀▄ █", " ▄█▀ ", "█ █"],
|
||||
gradient=[46, 40, 34, 28, 22, 22, 34, 40, 46, 82, 118, 231],
|
||||
center_row=10,
|
||||
center_col=37,
|
||||
)
|
||||
|
||||
|
||||
class TestRenderFigmentOverlay:
|
||||
def test_returns_list_of_strings(self):
|
||||
state = _make_state()
|
||||
result = render_figment_overlay(state, 80, 24)
|
||||
assert isinstance(result, list)
|
||||
assert all(isinstance(s, str) for s in result)
|
||||
|
||||
def test_contains_ansi_positioning(self):
|
||||
state = _make_state()
|
||||
result = render_figment_overlay(state, 80, 24)
|
||||
# Should contain cursor positioning escape codes
|
||||
assert any("\033[" in s for s in result)
|
||||
|
||||
def test_reveal_phase_partial(self):
|
||||
state = _make_state(phase=FigmentPhase.REVEAL, progress=0.0)
|
||||
result = render_figment_overlay(state, 80, 24)
|
||||
# At progress 0.0, very few cells should be visible
|
||||
# Result should still be a valid list
|
||||
assert isinstance(result, list)
|
||||
|
||||
def test_hold_phase_full(self):
|
||||
state = _make_state(phase=FigmentPhase.HOLD, progress=0.5)
|
||||
result = render_figment_overlay(state, 80, 24)
|
||||
# During hold, content should be present
|
||||
assert len(result) > 0
|
||||
|
||||
def test_dissolve_phase(self):
|
||||
state = _make_state(phase=FigmentPhase.DISSOLVE, progress=0.9)
|
||||
result = render_figment_overlay(state, 80, 24)
|
||||
# At high dissolve progress, most cells are gone
|
||||
assert isinstance(result, list)
|
||||
|
||||
def test_empty_rows(self):
|
||||
state = FigmentState(
|
||||
phase=FigmentPhase.HOLD,
|
||||
progress=0.5,
|
||||
rows=[],
|
||||
gradient=[46] * 12,
|
||||
center_row=0,
|
||||
center_col=0,
|
||||
)
|
||||
result = render_figment_overlay(state, 80, 24)
|
||||
assert result == []
|
||||
@@ -1,52 +0,0 @@
|
||||
"""Tests for engine.figment_render module."""
|
||||
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
pytest.importorskip("cairosvg", reason="cairosvg requires system Cairo library")
|
||||
|
||||
from engine.figment_render import rasterize_svg
|
||||
|
||||
FIXTURE_SVG = os.path.join(os.path.dirname(__file__), "fixtures", "test.svg")
|
||||
|
||||
|
||||
class TestRasterizeSvg:
|
||||
def test_returns_list_of_strings(self):
|
||||
rows = rasterize_svg(FIXTURE_SVG, 40, 20)
|
||||
assert isinstance(rows, list)
|
||||
assert all(isinstance(r, str) for r in rows)
|
||||
|
||||
def test_output_height_matches_terminal_height(self):
|
||||
rows = rasterize_svg(FIXTURE_SVG, 40, 20)
|
||||
assert len(rows) == 20
|
||||
|
||||
def test_output_contains_block_characters(self):
|
||||
rows = rasterize_svg(FIXTURE_SVG, 40, 20)
|
||||
all_chars = "".join(rows)
|
||||
block_chars = {"█", "▀", "▄"}
|
||||
assert any(ch in all_chars for ch in block_chars)
|
||||
|
||||
def test_different_sizes_produce_different_output(self):
|
||||
rows_small = rasterize_svg(FIXTURE_SVG, 20, 10)
|
||||
rows_large = rasterize_svg(FIXTURE_SVG, 80, 40)
|
||||
assert len(rows_small) == 10
|
||||
assert len(rows_large) == 40
|
||||
|
||||
def test_nonexistent_file_raises(self):
|
||||
import pytest
|
||||
|
||||
with pytest.raises((FileNotFoundError, OSError)):
|
||||
rasterize_svg("/nonexistent/file.svg", 40, 20)
|
||||
|
||||
|
||||
class TestRasterizeCache:
|
||||
def test_cache_returns_same_result(self):
|
||||
rows1 = rasterize_svg(FIXTURE_SVG, 40, 20)
|
||||
rows2 = rasterize_svg(FIXTURE_SVG, 40, 20)
|
||||
assert rows1 == rows2
|
||||
|
||||
def test_cache_invalidated_by_size_change(self):
|
||||
rows1 = rasterize_svg(FIXTURE_SVG, 40, 20)
|
||||
rows2 = rasterize_svg(FIXTURE_SVG, 60, 30)
|
||||
assert len(rows1) != len(rows2)
|
||||
@@ -1,40 +0,0 @@
|
||||
"""Tests for engine.figment_trigger module."""
|
||||
|
||||
from enum import Enum
|
||||
|
||||
from engine.figment_trigger import FigmentAction, FigmentCommand
|
||||
|
||||
|
||||
class TestFigmentAction:
|
||||
def test_is_enum(self):
|
||||
assert issubclass(FigmentAction, Enum)
|
||||
|
||||
def test_has_trigger(self):
|
||||
assert FigmentAction.TRIGGER.value == "trigger"
|
||||
|
||||
def test_has_set_intensity(self):
|
||||
assert FigmentAction.SET_INTENSITY.value == "set_intensity"
|
||||
|
||||
def test_has_set_interval(self):
|
||||
assert FigmentAction.SET_INTERVAL.value == "set_interval"
|
||||
|
||||
def test_has_set_color(self):
|
||||
assert FigmentAction.SET_COLOR.value == "set_color"
|
||||
|
||||
def test_has_stop(self):
|
||||
assert FigmentAction.STOP.value == "stop"
|
||||
|
||||
|
||||
class TestFigmentCommand:
|
||||
def test_trigger_command(self):
|
||||
cmd = FigmentCommand(action=FigmentAction.TRIGGER)
|
||||
assert cmd.action == FigmentAction.TRIGGER
|
||||
assert cmd.value is None
|
||||
|
||||
def test_set_intensity_command(self):
|
||||
cmd = FigmentCommand(action=FigmentAction.SET_INTENSITY, value=0.8)
|
||||
assert cmd.value == 0.8
|
||||
|
||||
def test_set_color_command(self):
|
||||
cmd = FigmentCommand(action=FigmentAction.SET_COLOR, value="orange")
|
||||
assert cmd.value == "orange"
|
||||
195
tests/test_framebuffer_acceptance.py
Normal file
195
tests/test_framebuffer_acceptance.py
Normal file
@@ -0,0 +1,195 @@
|
||||
"""Integration test: FrameBufferStage in the pipeline."""
|
||||
|
||||
import queue
|
||||
|
||||
from engine.data_sources.sources import ListDataSource, SourceItem
|
||||
from engine.effects.types import EffectConfig
|
||||
from engine.pipeline import Pipeline, PipelineConfig
|
||||
from engine.pipeline.adapters import (
|
||||
DataSourceStage,
|
||||
DisplayStage,
|
||||
SourceItemsToBufferStage,
|
||||
)
|
||||
from engine.pipeline.core import PipelineContext
|
||||
from engine.pipeline.stages.framebuffer import FrameBufferStage
|
||||
|
||||
|
||||
class QueueDisplay:
    """Stub display that records every rendered frame in a FIFO queue.

    Implements the same duck-typed display interface used by DisplayStage
    (init / show / clear / cleanup / get_dimensions) so tests can inspect
    exactly what the pipeline tried to draw.
    """

    def __init__(self):
        # Frames arrive oldest-first; each entry is a snapshot of the buffer.
        self.frames: queue.Queue[list[str]] = queue.Queue()
        self.width = 80
        self.height = 24
        self._init_called = False

    def init(self, width: int, height: int, reuse: bool = False) -> None:
        """Record the requested dimensions; `reuse` is accepted but ignored."""
        self.width, self.height = width, height
        self._init_called = True

    def show(self, buffer: list[str], border: bool = False) -> None:
        """Snapshot the buffer so later caller-side mutation can't corrupt it."""
        snapshot = list(buffer)
        self.frames.put(snapshot)

    def clear(self) -> None:
        """No-op: nothing to erase on a capture-only display."""
        pass

    def cleanup(self) -> None:
        """No-op: no resources to release."""
        pass

    def get_dimensions(self) -> tuple[int, int]:
        """Return the most recently configured (width, height)."""
        return (self.width, self.height)
|
||||
|
||||
def _build_pipeline(
    items: list[SourceItem],
    history_depth: int = 5,
    width: int = 80,
    height: int = 24,
) -> tuple[Pipeline, QueueDisplay, PipelineContext]:
    """Assemble a source -> render -> framebuffer -> display pipeline.

    Returns the built-and-initialized pipeline, the capturing display, and
    the shared context so tests can inspect framebuffer state.
    """
    sink = QueueDisplay()

    context = PipelineContext()
    context.set("items", items)

    pipe = Pipeline(
        config=PipelineConfig(enable_metrics=True),
        context=context,
    )

    # Stages are registered in execution order: source feeds render,
    # render feeds the framebuffer, which passes through to the display.
    pipe.add_stage(
        "source",
        DataSourceStage(ListDataSource(items, name="test-source"), name="test-source"),
    )
    pipe.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer"))
    pipe.add_stage(
        "framebuffer", FrameBufferStage(name="default", history_depth=history_depth)
    )
    pipe.add_stage("display", DisplayStage(sink, name="queue"))

    pipe.build()
    pipe.initialize()

    return pipe, sink, context
|
||||
|
||||
class TestFrameBufferAcceptance:
    """Test FrameBufferStage in a full pipeline."""

    def test_framebuffer_populates_history(self):
        """After several frames, framebuffer should have history stored."""
        items = [
            SourceItem(content="Frame\nBuffer\nTest", source="test", timestamp="0")
        ]
        pipeline, display, ctx = _build_pipeline(items, history_depth=5)

        # Run 3 frames
        for i in range(3):
            result = pipeline.execute([])
            assert result.success, f"Pipeline failed at frame {i}: {result.error}"

        # Check framebuffer history in context (published under the stage name)
        history = ctx.get("framebuffer.default.history")
        assert history is not None, "Framebuffer history not found in context"
        assert len(history) == 3, f"Expected 3 history frames, got {len(history)}"

    def test_framebuffer_respects_depth(self):
        """Framebuffer should not exceed configured history depth."""
        items = [SourceItem(content="Depth\nTest", source="test", timestamp="0")]
        pipeline, display, ctx = _build_pipeline(items, history_depth=3)

        # Run 5 frames -- two more than the configured depth of 3
        for i in range(5):
            result = pipeline.execute([])
            assert result.success

        history = ctx.get("framebuffer.default.history")
        assert history is not None
        assert len(history) == 3, f"Expected depth 3, got {len(history)}"

    def test_framebuffer_current_intensity(self):
        """Framebuffer should compute current intensity map."""
        items = [SourceItem(content="Intensity\nMap", source="test", timestamp="0")]
        pipeline, display, ctx = _build_pipeline(items, history_depth=5)

        # Run at least one frame
        result = pipeline.execute([])
        assert result.success

        intensity = ctx.get("framebuffer.default.current_intensity")
        assert intensity is not None, "No intensity map in context"
        # The exact shape of the intensity map is an implementation detail of
        # FrameBufferStage; this acceptance test only requires it to be non-empty.
        assert len(intensity) > 0, "Intensity map is empty"

    def test_framebuffer_get_frame(self):
        """Should be able to retrieve specific frames from history."""
        items = [SourceItem(content="Retrieve\nFrame", source="test", timestamp="0")]
        pipeline, display, ctx = _build_pipeline(items, history_depth=5)

        # Run 2 frames
        for i in range(2):
            result = pipeline.execute([])
            assert result.success

        # Retrieve frame 0 (most recent)
        recent = pipeline.get_stage("framebuffer").get_frame(0, ctx)
        assert recent is not None, "Cannot retrieve recent frame"
        assert len(recent) > 0, "Recent frame is empty"

        # Retrieve frame 1 (previous)
        previous = pipeline.get_stage("framebuffer").get_frame(1, ctx)
        assert previous is not None, "Cannot retrieve previous frame"

    def test_framebuffer_with_motionblur_effect(self):
        """MotionBlurEffect should work when depending on framebuffer."""
        from engine.effects.plugins.motionblur import MotionBlurEffect
        from engine.pipeline.adapters import EffectPluginStage

        items = [SourceItem(content="Motion\nBlur", source="test", timestamp="0")]
        display = QueueDisplay()
        ctx = PipelineContext()
        ctx.set("items", items)

        pipeline = Pipeline(
            config=PipelineConfig(enable_metrics=True),
            context=ctx,
        )

        source = ListDataSource(items, name="test")
        pipeline.add_stage("source", DataSourceStage(source, name="test"))
        pipeline.add_stage("render", SourceItemsToBufferStage(name="render"))

        framebuffer = FrameBufferStage(name="default", history_depth=3)
        pipeline.add_stage("framebuffer", framebuffer)

        # Wire the effect AFTER the framebuffer and declare an explicit
        # dependency on its capability so the resolver orders it correctly.
        motionblur = MotionBlurEffect()
        motionblur.configure(EffectConfig(enabled=True, intensity=0.5))
        pipeline.add_stage(
            "motionblur",
            EffectPluginStage(
                motionblur,
                name="motionblur",
                dependencies={"framebuffer.history.default"},
            ),
        )

        pipeline.add_stage("display", DisplayStage(display, name="queue"))

        pipeline.build()
        pipeline.initialize()

        # Run a few frames
        for i in range(5):
            result = pipeline.execute([])
            assert result.success, f"Motion blur pipeline failed at frame {i}"

        # Check that history exists
        history = ctx.get("framebuffer.default.history")
        assert history is not None
        assert len(history) > 0
||||
237
tests/test_framebuffer_stage.py
Normal file
237
tests/test_framebuffer_stage.py
Normal file
@@ -0,0 +1,237 @@
|
||||
"""
|
||||
Tests for FrameBufferStage.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from engine.pipeline.core import DataType, PipelineContext
|
||||
from engine.pipeline.params import PipelineParams
|
||||
from engine.pipeline.stages.framebuffer import FrameBufferConfig, FrameBufferStage
|
||||
|
||||
|
||||
def make_ctx(width: int = 80, height: int = 24) -> PipelineContext:
    """Build a PipelineContext whose params carry the given viewport size."""
    viewport = PipelineParams()
    viewport.viewport_width = width
    viewport.viewport_height = height

    context = PipelineContext()
    context.params = viewport
    return context
|
||||
|
||||
class TestFrameBufferStage:
    """Tests for FrameBufferStage."""

    def test_init(self):
        """FrameBufferStage initializes with default config."""
        stage = FrameBufferStage()
        assert stage.name == "framebuffer"
        assert stage.category == "effect"
        # Default history depth is 2 frames.
        assert stage.config.history_depth == 2

    def test_capabilities(self):
        """Stage provides framebuffer.history.{name} capability."""
        stage = FrameBufferStage()
        assert "framebuffer.history.default" in stage.capabilities

    def test_dependencies(self):
        """Stage depends on render.output."""
        stage = FrameBufferStage()
        assert "render.output" in stage.dependencies

    def test_inlet_outlet_types(self):
        """Stage accepts and produces TEXT_BUFFER."""
        stage = FrameBufferStage()
        assert DataType.TEXT_BUFFER in stage.inlet_types
        assert DataType.TEXT_BUFFER in stage.outlet_types

    def test_init_context(self):
        """init initializes context state with prefixed keys."""
        stage = FrameBufferStage()
        ctx = make_ctx()

        result = stage.init(ctx)

        assert result is True
        # Both histories start empty; keys are namespaced by the stage name.
        assert ctx.get("framebuffer.default.history") == []
        assert ctx.get("framebuffer.default.intensity_history") == []

    def test_process_stores_buffer_in_history(self):
        """process stores buffer in history."""
        stage = FrameBufferStage()
        ctx = make_ctx()
        stage.init(ctx)

        buffer = ["line1", "line2", "line3"]
        result = stage.process(buffer, ctx)

        assert result == buffer  # Pass-through
        history = ctx.get("framebuffer.default.history")
        assert len(history) == 1
        assert history[0] == buffer

    def test_process_computes_intensity(self):
        """process computes intensity map."""
        stage = FrameBufferStage()
        ctx = make_ctx()
        stage.init(ctx)

        buffer = ["hello world", "test line", ""]
        stage.process(buffer, ctx)

        intensity = ctx.get("framebuffer.default.current_intensity")
        assert intensity is not None
        assert len(intensity) == 3  # Three rows
        # Non-empty lines should have intensity > 0
        assert intensity[0] > 0
        assert intensity[1] > 0
        # Empty line should have intensity 0
        assert intensity[2] == 0.0

    def test_process_keeps_multiple_frames(self):
        """process keeps configured depth of frames."""
        config = FrameBufferConfig(history_depth=3, name="test")
        stage = FrameBufferStage(config)
        ctx = make_ctx()
        stage.init(ctx)

        # Process several frames (more than the depth, to force eviction)
        for i in range(5):
            buffer = [f"frame {i}"]
            stage.process(buffer, ctx)

        history = ctx.get("framebuffer.test.history")
        assert len(history) == 3  # Only last 3 kept
        # Should be in reverse chronological order (most recent first)
        assert history[0] == ["frame 4"]
        assert history[1] == ["frame 3"]
        assert history[2] == ["frame 2"]

    def test_process_keeps_intensity_sync(self):
        """process keeps intensity history in sync with frame history."""
        config = FrameBufferConfig(history_depth=3, name="sync")
        stage = FrameBufferStage(config)
        ctx = make_ctx()
        stage.init(ctx)

        buffers = [
            ["a"],
            ["bb"],
            ["ccc"],
        ]
        for buf in buffers:
            stage.process(buf, ctx)

        prefix = "framebuffer.sync"
        frame_hist = ctx.get(f"{prefix}.history")
        intensity_hist = ctx.get(f"{prefix}.intensity_history")
        assert len(frame_hist) == len(intensity_hist) == 3

        # Each frame's intensity should match a fresh recomputation.
        for i, frame in enumerate(frame_hist):
            computed_intensity = stage._compute_buffer_intensity(frame, len(frame))
            assert intensity_hist[i] == pytest.approx(computed_intensity)

    def test_get_frame(self):
        """get_frame retrieves frames from history by index."""
        config = FrameBufferConfig(history_depth=3)
        stage = FrameBufferStage(config)
        ctx = make_ctx()
        stage.init(ctx)

        buffers = [["f1"], ["f2"], ["f3"]]
        for buf in buffers:
            stage.process(buf, ctx)

        assert stage.get_frame(0, ctx) == ["f3"]  # Most recent
        assert stage.get_frame(1, ctx) == ["f2"]
        assert stage.get_frame(2, ctx) == ["f1"]
        assert stage.get_frame(3, ctx) is None  # Out of range

    def test_get_intensity(self):
        """get_intensity retrieves intensity maps by index."""
        stage = FrameBufferStage()
        ctx = make_ctx()
        stage.init(ctx)

        buffers = [["line"], ["longer line"]]
        for buf in buffers:
            stage.process(buf, ctx)

        intensity0 = stage.get_intensity(0, ctx)
        intensity1 = stage.get_intensity(1, ctx)
        assert intensity0 is not None
        assert intensity1 is not None
        # NOTE(review): index 0 is the most recent frame (["longer line"]),
        # so intensity1 here refers to ["line"]. The assertion depends on how
        # intensity is normalized per line -- confirm intent against
        # _compute_buffer_intensity.
        assert sum(intensity1) > sum(intensity0)

    def test_compute_buffer_intensity_simple(self):
        """_compute_buffer_intensity computes simple density."""
        stage = FrameBufferStage()

        buf = ["abc", "  ", "de"]
        intensities = stage._compute_buffer_intensity(buf, max_rows=3)

        assert len(intensities) == 3
        # "abc" -> 3/3 = 1.0
        assert pytest.approx(intensities[0]) == 1.0
        # "  " -> 0/2 = 0.0
        assert pytest.approx(intensities[1]) == 0.0
        # "de" -> 2/2 = 1.0
        assert pytest.approx(intensities[2]) == 1.0

    def test_compute_buffer_intensity_with_ansi(self):
        """_compute_buffer_intensity strips ANSI codes."""
        stage = FrameBufferStage()

        # Line with ANSI color codes
        buf = ["\033[31mred\033[0m", "normal"]
        intensities = stage._compute_buffer_intensity(buf, max_rows=2)

        assert len(intensities) == 2
        # Should treat "red" as 3 non-space chars
        assert pytest.approx(intensities[0]) == 1.0  # "red" = 3/3
        assert pytest.approx(intensities[1]) == 1.0  # "normal" = 6/6

    def test_compute_buffer_intensity_padding(self):
        """_compute_buffer_intensity pads to max_rows."""
        stage = FrameBufferStage()

        buf = ["short"]
        intensities = stage._compute_buffer_intensity(buf, max_rows=5)

        assert len(intensities) == 5
        assert intensities[0] > 0
        # Rows beyond the buffer are padded with zero intensity.
        assert all(i == 0.0 for i in intensities[1:])

    def test_thread_safety(self):
        """process is thread-safe."""
        from threading import Thread

        stage = FrameBufferStage(name="threadtest")
        ctx = make_ctx()
        stage.init(ctx)

        results = []

        def worker(idx):
            buffer = [f"thread {idx}"]
            stage.process(buffer, ctx)
            results.append(len(ctx.get("framebuffer.threadtest.history", [])))

        threads = [Thread(target=worker, args=(i,)) for i in range(10)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        # All threads should see consistent state (default depth is 2).
        assert len(ctx.get("framebuffer.threadtest.history")) <= 2  # Depth limit
        # All worker threads should have completed without errors
        assert len(results) == 10

    def test_cleanup(self):
        """cleanup does nothing but can be called."""
        stage = FrameBufferStage()
        # Should not raise
        stage.cleanup()
||||
240
tests/test_glitch_effect.py
Normal file
240
tests/test_glitch_effect.py
Normal file
@@ -0,0 +1,240 @@
|
||||
"""
|
||||
Tests for Glitch effect - regression tests for stability issues.
|
||||
"""
|
||||
|
||||
import re
|
||||
|
||||
import pytest
|
||||
|
||||
from engine.display import NullDisplay
|
||||
from engine.effects.types import EffectConfig, EffectContext
|
||||
|
||||
|
||||
def strip_ansi(s: str) -> str:
    """Return *s* with all ANSI CSI escape sequences removed."""
    ansi_csi = r"\x1b\[[0-9;]*[a-zA-Z]"
    return re.sub(ansi_csi, "", s)
|
||||
|
||||
class TestGlitchEffectStability:
    """Regression tests for Glitch effect stability."""

    @pytest.fixture
    def effect_context(self):
        """Create a consistent effect context for testing."""
        return EffectContext(
            terminal_width=80,
            terminal_height=24,
            scroll_cam=0,
            ticker_height=20,
            frame_number=0,
        )

    @pytest.fixture
    def stable_buffer(self):
        """Create a stable buffer for testing.

        24 lines of "lineNN" followed by 60 spaces (66 visible chars each).
        """
        return ["line" + str(i).zfill(2) + " " * 60 for i in range(24)]

    def test_glitch_preserves_line_count(self, effect_context, stable_buffer):
        """Glitch should not change the number of lines in buffer."""
        from engine.effects.plugins.glitch import GlitchEffect

        effect = GlitchEffect()
        result = effect.process(stable_buffer, effect_context)

        assert len(result) == len(stable_buffer), (
            f"Line count changed from {len(stable_buffer)} to {len(result)}"
        )

    def test_glitch_preserves_line_lengths(self, effect_context, stable_buffer):
        """Glitch should not change individual line lengths - prevents viewport jumping.

        Note: Effects may add ANSI color codes, so we check VISIBLE length (stripped).
        """
        from engine.effects.plugins.glitch import GlitchEffect

        effect = GlitchEffect()

        # Run multiple times to catch randomness
        for _ in range(10):
            result = effect.process(stable_buffer, effect_context)
            for i, (orig, new) in enumerate(zip(stable_buffer, result, strict=False)):
                visible_new = strip_ansi(new)
                assert len(visible_new) == len(orig), (
                    f"Line {i} visible length changed from {len(orig)} to {len(visible_new)}"
                )

    def test_glitch_no_cursor_positioning(self, effect_context, stable_buffer):
        """Glitch should not use cursor positioning escape sequences.

        Regression test: Previously glitch used \\033[{row};1H which caused
        conflicts with HUD and border rendering.
        """
        from engine.effects.plugins.glitch import GlitchEffect

        effect = GlitchEffect()
        result = effect.process(stable_buffer, effect_context)

        # Check no cursor positioning (ESC [ row ; col H) in output
        cursor_pos_pattern = re.compile(r"\033\[[0-9]+;[0-9]+H")
        for i, line in enumerate(result):
            match = cursor_pos_pattern.search(line)
            assert match is None, (
                f"Line {i} contains cursor positioning: {repr(line[:50])}"
            )

    def test_glitch_output_deterministic_given_seed(
        self, effect_context, stable_buffer
    ):
        """Glitch output should be deterministic given the same random seed."""
        from engine.effects.plugins.glitch import GlitchEffect

        effect = GlitchEffect()
        effect.config = EffectConfig(enabled=True, intensity=1.0)

        # With fixed random state, should get same result.
        # (Seeds the module-level `random` PRNG; assumes GlitchEffect draws
        # from it rather than a private Random instance -- TODO confirm.)
        import random

        random.seed(42)
        result1 = effect.process(stable_buffer, effect_context)

        random.seed(42)
        result2 = effect.process(stable_buffer, effect_context)

        assert result1 == result2, (
            "Glitch should be deterministic with fixed random seed"
        )
|
||||
|
||||
class TestEffectViewportStability:
    """Tests to catch effects that cause viewport instability."""

    def test_null_display_stable_without_effects(self):
        """NullDisplay should produce identical output without effects."""
        display = NullDisplay()
        display.init(80, 24)

        buffer = ["test line " + "x" * 60 for _ in range(24)]

        display.show(buffer)
        output1 = display._last_buffer

        display.show(buffer)
        output2 = display._last_buffer

        assert output1 == output2, (
            "NullDisplay output should be identical for identical inputs"
        )

    def test_effect_chain_preserves_dimensions(self):
        """Effect chain should preserve buffer dimensions."""
        from engine.effects.plugins.fade import FadeEffect
        from engine.effects.plugins.glitch import GlitchEffect
        from engine.effects.plugins.noise import NoiseEffect

        ctx = EffectContext(
            terminal_width=80,
            terminal_height=24,
            scroll_cam=0,
            ticker_height=20,
        )

        buffer = ["x" * 80 for _ in range(24)]
        original_len = len(buffer)
        original_widths = [len(line) for line in buffer]

        effects = [NoiseEffect(), FadeEffect(), GlitchEffect()]

        # Apply each effect in turn, checking dimensions after every step so
        # the failing effect is identified by name.
        for effect in effects:
            buffer = effect.process(buffer, ctx)

            # Check dimensions preserved (check VISIBLE length, not raw)
            # Effects may add ANSI codes which increase raw length but not visible width
            assert len(buffer) == original_len, (
                f"{effect.name} changed line count from {original_len} to {len(buffer)}"
            )
            for i, (orig_w, new_line) in enumerate(
                zip(original_widths, buffer, strict=False)
            ):
                visible_len = len(strip_ansi(new_line))
                assert visible_len == orig_w, (
                    f"{effect.name} changed line {i} visible width from {orig_w} to {visible_len}"
                )
|
||||
|
||||
class TestEffectTestMatrix:
    """Effect test matrix - test each effect for stability."""

    @pytest.fixture
    def effect_names(self):
        """List of all effect names to test.

        NOTE(review): the parametrized tests below hard-code their own
        subset; this fixture is currently unused by them.
        """
        return ["noise", "fade", "glitch", "firehose", "border"]

    @pytest.fixture
    def stable_input_buffer(self):
        """A predictable buffer for testing."""
        return [f"row{i:02d}" + " " * 70 for i in range(24)]

    @pytest.mark.parametrize("effect_name", ["noise", "fade", "glitch"])
    def test_effect_preserves_buffer_dimensions(self, effect_name, stable_input_buffer):
        """Each effect should preserve input buffer dimensions."""
        try:
            if effect_name == "border":
                # Border is handled differently
                pytest.skip("Border handled by display")
            else:
                # Dynamic import: module engine.effects.plugins.<name>,
                # class <Name>Effect (e.g. "noise" -> NoiseEffect).
                effect_module = __import__(
                    f"engine.effects.plugins.{effect_name}",
                    fromlist=[f"{effect_name.title()}Effect"],
                )
                effect_class = getattr(effect_module, f"{effect_name.title()}Effect")
                effect = effect_class()
        except ImportError:
            pytest.skip(f"Effect {effect_name} not available")

        ctx = EffectContext(
            terminal_width=80,
            terminal_height=24,
            scroll_cam=0,
            ticker_height=20,
        )

        result = effect.process(stable_input_buffer, ctx)

        # Check dimensions preserved (check VISIBLE length)
        assert len(result) == len(stable_input_buffer), (
            f"{effect_name} changed line count"
        )
        for i, (orig, new) in enumerate(zip(stable_input_buffer, result, strict=False)):
            visible_new = strip_ansi(new)
            assert len(visible_new) == len(orig), (
                f"{effect_name} changed line {i} visible length from {len(orig)} to {len(visible_new)}"
            )

    @pytest.mark.parametrize("effect_name", ["noise", "fade", "glitch"])
    def test_effect_no_cursor_positioning(self, effect_name, stable_input_buffer):
        """Effects should not use cursor positioning (causes display conflicts)."""
        try:
            effect_module = __import__(
                f"engine.effects.plugins.{effect_name}",
                fromlist=[f"{effect_name.title()}Effect"],
            )
            effect_class = getattr(effect_module, f"{effect_name.title()}Effect")
            effect = effect_class()
        except ImportError:
            pytest.skip(f"Effect {effect_name} not available")

        ctx = EffectContext(
            terminal_width=80,
            terminal_height=24,
            scroll_cam=0,
            ticker_height=20,
        )

        result = effect.process(stable_input_buffer, ctx)

        # ESC [ row ; col H is the CSI cursor-position sequence.
        cursor_pos_pattern = re.compile(r"\033\[[0-9]+;[0-9]+H")
        for i, line in enumerate(result):
            match = cursor_pos_pattern.search(line)
            assert match is None, (
                f"{effect_name} uses cursor positioning on line {i}: {repr(line[:50])}"
            )
106
tests/test_hud.py
Normal file
106
tests/test_hud.py
Normal file
@@ -0,0 +1,106 @@
|
||||
from engine.effects.performance import PerformanceMonitor, set_monitor
|
||||
from engine.effects.types import EffectContext
|
||||
|
||||
|
||||
def test_hud_effect_adds_hud_lines():
    """Test that HUD effect adds HUD lines to the buffer.

    The HUD effect should prepend a title line ("MAINLINE DEMO") and a
    status line ("EFFECT: ...") without dropping the original buffer rows.
    """
    from engine.effects.plugins.hud import HudEffect

    # Reset the global performance monitor so HUD stats start clean.
    set_monitor(PerformanceMonitor())

    hud = HudEffect()
    hud.config.params["display_effect"] = "noise"
    hud.config.params["display_intensity"] = 0.5

    ctx = EffectContext(
        terminal_width=80,
        terminal_height=24,
        scroll_cam=0,
        ticker_height=24,
        mic_excess=0.0,
        grad_offset=0.0,
        frame_number=0,
        has_message=False,
        items=[],
    )

    buf = [
        "A" * 80,
        "B" * 80,
        "C" * 80,
    ]

    result = hud.process(buf, ctx)

    assert len(result) >= 3, f"Expected at least 3 lines, got {len(result)}"

    first_line = result[0]
    assert "MAINLINE DEMO" in first_line, (
        f"HUD not found in first line: {first_line[:50]}"
    )

    second_line = result[1]
    assert "EFFECT:" in second_line, f"Effect line not found: {second_line[:50]}"
    # Leftover debug print() statements removed: assertions above already
    # pin the behavior, and prints only pollute the pytest output.
|
||||
|
||||
def test_hud_effect_shows_current_effect():
    """The HUD status line names the currently active effect."""
    from engine.effects.plugins.hud import HudEffect

    set_monitor(PerformanceMonitor())

    hud = HudEffect()
    hud.config.params.update(
        {"display_effect": "fade", "display_intensity": 0.75}
    )

    context = EffectContext(
        terminal_width=80,
        terminal_height=24,
        scroll_cam=0,
        ticker_height=24,
        mic_excess=0.0,
        grad_offset=0.0,
        frame_number=0,
        has_message=False,
        items=[],
    )

    lines = hud.process(["X" * 80], context)

    status_line = lines[1]
    assert "fade" in status_line, f"Effect name 'fade' not found in: {status_line}"
|
||||
|
||||
def test_hud_effect_shows_intensity():
    """The HUD status line reports intensity as a percentage."""
    from engine.effects.plugins.hud import HudEffect

    set_monitor(PerformanceMonitor())

    hud = HudEffect()
    hud.config.params.update(
        {"display_effect": "glitch", "display_intensity": 0.8}
    )

    context = EffectContext(
        terminal_width=80,
        terminal_height=24,
        scroll_cam=0,
        ticker_height=24,
        mic_excess=0.0,
        grad_offset=0.0,
        frame_number=0,
        has_message=False,
        items=[],
    )

    lines = hud.process(["Y" * 80], context)

    status_line = lines[1]
    assert "80%" in status_line, f"Intensity 80% not found in: {status_line}"
|
||||
@@ -1,96 +0,0 @@
|
||||
"""
|
||||
Tests for engine.layers module.
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
from engine import layers
|
||||
|
||||
|
||||
class TestRenderMessageOverlay:
    """Tests for render_message_overlay function."""

    def test_no_message_returns_empty(self):
        """A None message yields no overlay lines and an empty cache key."""
        lines, cache = layers.render_message_overlay(None, 80, 24, (None, None))
        assert lines == []
        assert cache[0] is None

    def test_message_returns_overlay_lines(self):
        """A present message yields overlay lines and a populated cache key."""
        msg = ("Test Title", "Test Body", time.monotonic())
        lines, cache = layers.render_message_overlay(msg, 80, 24, (None, None))
        assert len(lines) > 0
        assert cache[0] is not None

    def test_cache_key_changes_with_text(self):
        """Different message text produces a different cache key."""
        first = ("Title1", "Body1", time.monotonic())
        second = ("Title2", "Body2", time.monotonic())

        _, cache_a = layers.render_message_overlay(first, 80, 24, (None, None))
        _, cache_b = layers.render_message_overlay(second, 80, 24, cache_a)

        assert cache_a[0] != cache_b[0]

    def test_cache_reuse_avoids_recomputation(self):
        """Passing the same message with its cache keeps the key stable."""
        msg = ("Same Title", "Same Body", time.monotonic())

        out1, cache1 = layers.render_message_overlay(msg, 80, 24, (None, None))
        out2, cache2 = layers.render_message_overlay(msg, 80, 24, cache1)

        assert len(out1) > 0
        assert len(out2) > 0
        assert cache1[0] == cache2[0]
||||
|
||||
class TestRenderFirehose:
    """Tests for render_firehose function."""

    def test_no_firehose_returns_empty(self):
        """Zero firehose height renders nothing."""
        feed = [("Headline", "Source", "12:00")]
        assert layers.render_firehose(feed, 80, 0, 24) == []

    def test_firehose_returns_lines(self):
        """A positive height renders exactly that many lines."""
        feed = [("Headline", "Source", "12:00")]
        rendered = layers.render_firehose(feed, 80, 4, 24)
        assert len(rendered) == 4

    def test_firehose_includes_ansi_escapes(self):
        """Rendered firehose lines carry ANSI styling sequences."""
        feed = [("Headline", "Source", "12:00")]
        rendered = layers.render_firehose(feed, 80, 1, 24)
        assert "\033[" in rendered[0]
|
||||
class TestApplyGlitch:
    """Tests for apply_glitch function."""

    def test_empty_buffer_unchanged(self):
        """An empty buffer passes through untouched."""
        assert layers.apply_glitch([], 0, 0.0, 80) == []

    def test_buffer_length_preserved(self):
        """Glitching never adds or drops lines."""
        frame = [f"\033[{i + 1};1Htest\033[K" for i in range(10)]
        glitched = layers.apply_glitch(frame, 0, 0.5, 80)
        assert len(glitched) == len(frame)
||||
|
||||
class TestRenderTickerZone:
    """Tests for render_ticker_zone function - focusing on interface."""

    def test_returns_list(self):
        """The rendered zone is a list of lines."""
        zone, noise_cache = layers.render_ticker_zone([], 0, 10, 80, {}, 0.0)
        assert isinstance(zone, list)

    def test_returns_dict_for_cache(self):
        """The second return value is the (possibly updated) noise cache."""
        zone, noise_cache = layers.render_ticker_zone([], 0, 10, 80, {}, 0.0)
        assert isinstance(noise_cache, dict)
@@ -1,149 +0,0 @@
|
||||
"""
|
||||
Tests for engine.mic module.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from unittest.mock import patch
|
||||
|
||||
from engine.events import MicLevelEvent
|
||||
|
||||
|
||||
class TestMicMonitorImport:
    """Tests for module import behavior."""

    def test_mic_monitor_imports_without_error(self):
        """MicMonitor can be imported even without sounddevice.

        Guards the optional-dependency behavior: engine.mic must be
        importable on machines where the audio backend is absent.
        """
        from engine.mic import MicMonitor

        assert MicMonitor is not None
||||
|
||||
class TestMicMonitorInit:
    """Tests for MicMonitor initialization."""

    def test_init_sets_threshold(self):
        """An explicit threshold is stored as given."""
        from engine.mic import MicMonitor

        mic = MicMonitor(threshold_db=60)
        assert mic.threshold_db == 60

    def test_init_defaults(self):
        """Without arguments the threshold defaults to 50 dB."""
        from engine.mic import MicMonitor

        mic = MicMonitor()
        assert mic.threshold_db == 50

    def test_init_db_starts_at_negative(self):
        """Before any audio arrives, the level reads -99.0 dB."""
        from engine.mic import MicMonitor

        mic = MicMonitor()
        assert mic.db == -99.0
||||
|
||||
class TestMicMonitorProperties:
    """Tests for MicMonitor properties."""

    def test_excess_returns_positive_when_above_threshold(self):
        """excess is the positive distance above the threshold."""
        from engine.mic import MicMonitor

        mic = MicMonitor(threshold_db=50)
        with patch.object(mic, "_db", 60.0):
            assert mic.excess == 10.0

    def test_excess_returns_zero_when_below_threshold(self):
        """excess clamps to zero when the level is under the threshold."""
        from engine.mic import MicMonitor

        mic = MicMonitor(threshold_db=50)
        with patch.object(mic, "_db", 40.0):
            assert mic.excess == 0.0
||||
|
||||
class TestMicMonitorAvailable:
    """The MicMonitor.available property."""

    def test_available_is_bool(self):
        """available always reports a plain boolean."""
        from engine.mic import MicMonitor

        mon = MicMonitor()
        assert isinstance(mon.available, bool)
class TestMicMonitorStop:
    """The MicMonitor.stop method."""

    def test_stop_does_nothing_when_no_stream(self):
        """Calling stop() with no active stream is a harmless no-op."""
        from engine.mic import MicMonitor

        mon = MicMonitor()
        mon.stop()
        assert mon._stream is None
class TestMicMonitorEventEmission:
    """Subscriber management and event delivery on MicMonitor."""

    def test_subscribe_adds_callback(self):
        """subscribe() registers the given callback."""
        from engine.mic import MicMonitor

        mon = MicMonitor()

        def listener(e):
            return None

        mon.subscribe(listener)

        assert listener in mon._subscribers

    def test_unsubscribe_removes_callback(self):
        """unsubscribe() deregisters a previously added callback."""
        from engine.mic import MicMonitor

        mon = MicMonitor()

        def listener(e):
            return None

        mon.subscribe(listener)
        mon.unsubscribe(listener)

        assert listener not in mon._subscribers

    def test_emit_calls_subscribers(self):
        """_emit() delivers the event to every subscriber."""
        from engine.mic import MicMonitor

        mon = MicMonitor()
        seen = []

        # Collect delivered events directly via the bound list method.
        mon.subscribe(seen.append)

        mon._emit(
            MicLevelEvent(
                db_level=60.0, excess_above_threshold=10.0, timestamp=datetime.now()
            )
        )

        assert len(seen) == 1
        assert seen[0].db_level == 60.0

    def test_emit_handles_subscriber_exception(self):
        """A raising subscriber must not propagate out of _emit()."""
        from engine.mic import MicMonitor

        mon = MicMonitor()

        def exploding(event):
            raise RuntimeError("test")

        mon.subscribe(exploding)

        # Should swallow the subscriber's RuntimeError rather than raise.
        mon._emit(
            MicLevelEvent(
                db_level=60.0, excess_above_threshold=10.0, timestamp=datetime.now()
            )
        )
131
tests/test_ntfy_integration.py
Normal file
131
tests/test_ntfy_integration.py
Normal file
@@ -0,0 +1,131 @@
|
||||
"""
|
||||
Integration tests for ntfy topics.
|
||||
"""
|
||||
|
||||
import json
|
||||
import time
|
||||
import urllib.request
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.mark.integration
@pytest.mark.ntfy
class TestNtfyTopics:
    """Live integration checks against the configured ntfy topics.

    These tests hit the real ntfy service; they require network access
    and are gated behind the ``integration`` / ``ntfy`` marks.
    """

    @staticmethod
    def _post_plain(topic: str, message: str) -> int:
        """POST *message* as text/plain to *topic* and return the HTTP status.

        The configured topic URLs end in "/json" (the streaming endpoint);
        publishing goes to the bare topic URL, so that suffix is stripped.
        Any network/HTTP error propagates to the caller.
        """
        req = urllib.request.Request(
            topic.replace("/json", ""),
            data=message.encode("utf-8"),
            headers={
                "User-Agent": "mainline-test/0.1",
                "Content-Type": "text/plain",
            },
            method="POST",
        )
        with urllib.request.urlopen(req, timeout=10) as resp:
            return resp.status

    def test_cc_cmd_topic_exists_and_writable(self):
        """Verify C&C CMD topic exists and accepts messages."""
        from engine.config import NTFY_CC_CMD_TOPIC

        try:
            status = self._post_plain(NTFY_CC_CMD_TOPIC, f"test_{int(time.time())}")
            assert status == 200
        except Exception as e:
            raise AssertionError(f"Failed to write to C&C CMD topic: {e}") from e

    def test_cc_resp_topic_exists_and_writable(self):
        """Verify C&C RESP topic exists and accepts messages."""
        from engine.config import NTFY_CC_RESP_TOPIC

        try:
            status = self._post_plain(NTFY_CC_RESP_TOPIC, f"test_{int(time.time())}")
            assert status == 200
        except Exception as e:
            raise AssertionError(f"Failed to write to C&C RESP topic: {e}") from e

    def test_message_topic_exists_and_writable(self):
        """Verify message topic exists and accepts messages."""
        from engine.config import NTFY_TOPIC

        try:
            status = self._post_plain(NTFY_TOPIC, f"test_{int(time.time())}")
            assert status == 200
        except Exception as e:
            raise AssertionError(f"Failed to write to message topic: {e}") from e

    def test_cc_cmd_topic_readable(self):
        """Verify we can read messages from the C&C CMD topic."""
        from engine.config import NTFY_CC_CMD_TOPIC

        try:
            self._post_plain(NTFY_CC_CMD_TOPIC, f"integration_test_{int(time.time())}")
        except Exception as e:
            raise AssertionError(f"Failed to write to C&C CMD topic: {e}") from e

        # Give ntfy a moment to persist the message before polling.
        time.sleep(1)

        poll_req = urllib.request.Request(
            f"{NTFY_CC_CMD_TOPIC}?poll=1&limit=1",
            headers={"User-Agent": "mainline-test/0.1"},
        )
        try:
            with urllib.request.urlopen(poll_req, timeout=10) as resp:
                body = resp.read().decode("utf-8")
                if body.strip():
                    # The poll endpoint returns one JSON object per line.
                    data = json.loads(body.split("\n")[0])
                    assert isinstance(data, dict)
        except Exception as e:
            raise AssertionError(f"Failed to read from C&C CMD topic: {e}") from e

    def test_topics_are_different(self):
        """Verify the C&C CMD/RESP and message topics are all distinct."""
        from engine.config import NTFY_CC_CMD_TOPIC, NTFY_CC_RESP_TOPIC, NTFY_TOPIC

        assert NTFY_CC_CMD_TOPIC != NTFY_TOPIC
        assert NTFY_CC_RESP_TOPIC != NTFY_TOPIC
        assert NTFY_CC_CMD_TOPIC != NTFY_CC_RESP_TOPIC
        assert "_cc_cmd" in NTFY_CC_CMD_TOPIC
        assert "_cc_resp" in NTFY_CC_RESP_TOPIC
185
tests/test_performance_regression.py
Normal file
185
tests/test_performance_regression.py
Normal file
@@ -0,0 +1,185 @@
|
||||
"""Performance regression tests for pipeline stages with realistic data volumes.
|
||||
|
||||
These tests verify that the pipeline maintains performance with large datasets
|
||||
by ensuring ViewportFilterStage prevents FontStage from rendering excessive items.
|
||||
|
||||
Uses pytest-benchmark for statistical benchmarking with automatic regression detection.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from engine.data_sources.sources import SourceItem
|
||||
from engine.pipeline.adapters import FontStage, ViewportFilterStage
|
||||
from engine.pipeline.core import PipelineContext
|
||||
from engine.pipeline.params import PipelineParams
|
||||
|
||||
|
||||
class TestViewportFilterPerformance:
    """ViewportFilterStage throughput with realistic data volumes."""

    @pytest.mark.benchmark
    def test_filter_2000_items_to_viewport(self, benchmark):
        """Benchmark: reduce 2000 items down to the viewport.

        Performance threshold: Must complete in < 1ms per iteration.
        The filtering overhead itself must be negligible.
        """
        # More items than any real headline source produces.
        dataset = [
            SourceItem(f"Headline {i}", f"source-{i % 10}", str(i)) for i in range(2000)
        ]

        filter_stage = ViewportFilterStage()
        context = PipelineContext()
        context.params = PipelineParams(viewport_height=24)

        filtered = benchmark(filter_stage.process, dataset, context)

        # The filter keeps only what fits in the 24-line viewport.
        assert len(filtered) <= 24
        assert len(filtered) > 0

    @pytest.mark.benchmark
    def test_font_stage_with_filtered_items(self, benchmark):
        """Benchmark: FontStage rendering a filtered (5-item) batch.

        Performance threshold: Must complete in < 50ms per iteration.
        Filtering should shrink FontStage's workload dramatically.
        """
        # Five items is what ViewportFilterStage typically emits.
        small_batch = [SourceItem(f"Headline {i}", "source", str(i)) for i in range(5)]

        renderer = FontStage()
        context = PipelineContext()
        context.params = PipelineParams()

        rendered = benchmark(renderer.process, small_batch, context)

        assert rendered is not None
        assert isinstance(rendered, list)
        assert len(rendered) > 0

    def test_filter_reduces_work_by_288x(self):
        """ViewportFilterStage yields the expected work reduction.

        With 1438 items and a 24-line viewport:
        - Unfiltered, FontStage would render all 1438 items.
        - Filtered, it renders ~4 (height-based) items.
        - Expected improvement: 1438 / 4 ≈ 360x.
        """
        dataset = [SourceItem(f"Headline {i}", "source", str(i)) for i in range(1438)]

        filter_stage = ViewportFilterStage()
        context = PipelineContext()
        context.params = PipelineParams(viewport_height=24)

        kept = filter_stage.process(dataset, context)
        speedup = len(dataset) / len(kept)

        # Height-based filtering: 24 viewport rows / 6 rows per item -> 4 items.
        assert 300 < speedup < 500
        assert len(kept) == 4
class TestPipelinePerformanceWithRealData:
    """Full-pipeline performance with large datasets."""

    def test_pipeline_handles_large_item_count(self):
        """Filtering keeps the pipeline from hanging on 2000+ items."""
        bulk = [
            SourceItem(f"Headline {i}", f"source-{i % 5}", str(i)) for i in range(2000)
        ]

        filter_stage = ViewportFilterStage()
        renderer = FontStage()

        context = PipelineContext()
        context.params = PipelineParams(viewport_height=24)

        # The filter must shrink the dataset before rendering.
        kept = filter_stage.process(bulk, context)
        assert len(kept) < len(bulk)

        # Rendering the filtered subset should succeed quickly.
        rendered = renderer.process(kept, context)
        assert rendered is not None

    def test_multiple_viewports_filter_correctly(self):
        """Filtering scales with the configured viewport height."""
        bulk = [SourceItem(f"Headline {i}", "source", str(i)) for i in range(1000)]

        filter_stage = ViewportFilterStage()

        # (viewport height, expected maximum item count) pairs.
        for viewport_height, expected_max_items in [(12, 12), (24, 24), (48, 48)]:
            context = PipelineContext()
            context.params = PipelineParams(viewport_height=viewport_height)

            kept = filter_stage.process(bulk, context)

            # Item count should track the viewport size (small slack allowed).
            assert len(kept) <= expected_max_items + 1
            assert len(kept) > 0
class TestPerformanceRegressions:
    """Guards against common performance regressions."""

    def test_filter_doesnt_render_all_items(self):
        """Regression: the filter must never pass every item through.

        Passing all items would mean ViewportFilterStage is broken or bypassed.
        """
        bulk = [SourceItem(f"Headline {i}", "source", str(i)) for i in range(1438)]

        filter_stage = ViewportFilterStage()
        context = PipelineContext()
        context.params = PipelineParams()

        kept = filter_stage.process(bulk, context)

        assert len(kept) != len(bulk)
        # Height-based filtering: ~4 items fit a 24-row viewport at 6 rows each.
        assert len(kept) == 4

    def test_font_stage_doesnt_hang_with_filter(self):
        """Regression: FontStage must finish quickly on filtered input.

        It previously rendered every item, causing 10+ second hangs; with
        the filter in place it only ever receives ~5 items.
        """
        # Mimic what ViewportFilterStage hands downstream.
        small_batch = [SourceItem(f"Headline {i}", "source", str(i)) for i in range(5)]

        renderer = FontStage()
        context = PipelineContext()
        context.params = PipelineParams()

        # Should complete immediately rather than hang.
        rendered = renderer.process(small_batch, context)

        assert rendered is not None
        assert isinstance(rendered, list)
1844
tests/test_pipeline.py
Normal file
1844
tests/test_pipeline.py
Normal file
File diff suppressed because it is too large
Load Diff
552
tests/test_pipeline_e2e.py
Normal file
552
tests/test_pipeline_e2e.py
Normal file
@@ -0,0 +1,552 @@
|
||||
"""
|
||||
End-to-end pipeline integration tests.
|
||||
|
||||
Verifies that data actually flows through every pipeline stage
|
||||
(source -> render -> effects -> display) using a queue-backed
|
||||
stub display to capture output frames.
|
||||
|
||||
These tests catch dead-code paths and wiring bugs that unit tests miss.
|
||||
"""
|
||||
|
||||
import queue
|
||||
from unittest.mock import patch
|
||||
|
||||
from engine.data_sources.sources import ListDataSource, SourceItem
|
||||
from engine.effects import EffectContext
|
||||
from engine.effects.types import EffectPlugin
|
||||
from engine.pipeline import Pipeline, PipelineConfig
|
||||
from engine.pipeline.adapters import (
|
||||
DataSourceStage,
|
||||
DisplayStage,
|
||||
EffectPluginStage,
|
||||
FontStage,
|
||||
SourceItemsToBufferStage,
|
||||
ViewportFilterStage,
|
||||
)
|
||||
from engine.pipeline.core import PipelineContext
|
||||
from engine.pipeline.params import PipelineParams
|
||||
|
||||
# ─── FIXTURES ────────────────────────────────────────────
|
||||
|
||||
|
||||
class QueueDisplay:
    """Stub display that records every frame into a FIFO queue.

    Lets tests inspect exactly what the pipeline produced without
    touching a terminal or the network.
    """

    def __init__(self):
        self.frames: queue.Queue[list[str]] = queue.Queue()
        self.width = 80
        self.height = 24
        self._init_called = False

    def init(self, width: int, height: int, reuse: bool = False) -> None:
        self._init_called = True
        self.width = width
        self.height = height

    def show(self, buffer: list[str], border: bool = False) -> None:
        # Snapshot the list so later mutations by the pipeline don't
        # retroactively change captured frames (lines are immutable strings,
        # so a shallow copy is sufficient).
        self.frames.put(list(buffer))

    def clear(self) -> None:
        pass

    def cleanup(self) -> None:
        pass

    def get_dimensions(self) -> tuple[int, int]:
        return (self.width, self.height)
||||
class MarkerEffect(EffectPlugin):
    """Effect that prepends a tagged marker line to prove it ran.

    Each instance carries a unique tag, letting tests verify which
    effects executed and in what order.
    """

    def __init__(self, tag: str = "MARKER"):
        self._tag = tag
        self.call_count = 0
        super().__init__()

    @property
    def name(self) -> str:
        return f"marker-{self._tag}"

    def configure(self, config: dict) -> None:
        pass

    def process(self, buffer: list[str], ctx: EffectContext) -> list[str]:
        self.call_count += 1
        if buffer is None:
            return [f"[{self._tag}:EMPTY]"]
        return [f"[{self._tag}]", *buffer]
||||
# ─── HELPERS ─────────────────────────────────────────────
|
||||
|
||||
|
||||
def _build_pipeline(
    items: list,
    effects: list[tuple[str, EffectPlugin]] | None = None,
    use_font_stage: bool = False,
    width: int = 80,
    height: int = 24,
) -> tuple[Pipeline, QueueDisplay, PipelineContext]:
    """Assemble a fully-wired pipeline that sinks into a QueueDisplay.

    Args:
        items: Content items to feed into the source.
        effects: Optional list of (name, EffectPlugin) to add.
        use_font_stage: Use FontStage instead of SourceItemsToBufferStage.
        width: Viewport width.
        height: Viewport height.

    Returns:
        (pipeline, queue_display, context) tuple.
    """
    sink = QueueDisplay()

    context = PipelineContext()
    view_params = PipelineParams()
    view_params.viewport_width = width
    view_params.viewport_height = height
    view_params.frame_number = 0
    context.params = view_params
    context.set("items", items)

    pipeline = Pipeline(
        config=PipelineConfig(enable_metrics=True),
        context=context,
    )

    # Source stage feeds the provided items.
    source = ListDataSource(items, name="test-source")
    pipeline.add_stage("source", DataSourceStage(source, name="test-source"))

    # Render stage
    if use_font_stage:
        # FontStage needs a viewport_filter stage, which needs camera state.
        from engine.camera import Camera
        from engine.pipeline.adapters import CameraClockStage, CameraStage

        camera = Camera.scroll(speed=0.0)
        camera.set_canvas_size(200, 200)

        # CameraClockStage refreshes camera state; it must precede viewport_filter.
        pipeline.add_stage(
            "camera_update", CameraClockStage(camera, name="camera-clock")
        )

        # ViewportFilterStage reads camera.state.
        pipeline.add_stage(
            "viewport_filter", ViewportFilterStage(name="viewport-filter")
        )

        # FontStage turns items into a text buffer.
        pipeline.add_stage("render", FontStage(name="font"))

        # CameraStage applies the viewport transform to the rendered buffer.
        pipeline.add_stage("camera", CameraStage(camera, name="static"))
    else:
        pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer"))

    # Effect stages sit between render and display.
    for effect_name, plugin in effects or []:
        pipeline.add_stage(
            f"effect_{effect_name}",
            EffectPluginStage(plugin, name=effect_name),
        )

    # Display stage captures frames into the queue.
    pipeline.add_stage("display", DisplayStage(sink, name="queue"))

    pipeline.build()
    pipeline.initialize()

    return pipeline, sink, context
|
||||
|
||||
# ─── TESTS: HAPPY PATH ──────────────────────────────────
|
||||
|
||||
|
||||
class TestPipelineE2EHappyPath:
    """End-to-end: data flows source -> render -> display."""

    def test_items_reach_display(self):
        """Content fed to the source must show up in the display output."""
        items = [
            SourceItem(content="Hello World", source="test", timestamp="now"),
            SourceItem(content="Second Item", source="test", timestamp="now"),
        ]
        pipeline, display, ctx = _build_pipeline(items)

        result = pipeline.execute(items)

        assert result.success, f"Pipeline failed: {result.error}"
        joined = "\n".join(display.frames.get(timeout=1))
        assert "Hello World" in joined
        assert "Second Item" in joined

    def test_pipeline_output_is_list_of_strings(self):
        """The display must be handed list[str], never raw SourceItems."""
        items = [SourceItem(content="Line one", source="s", timestamp="t")]
        pipeline, display, ctx = _build_pipeline(items)

        result = pipeline.execute(items)

        assert result.success
        frame = display.frames.get(timeout=1)
        assert isinstance(frame, list)
        for line in frame:
            assert isinstance(line, str), f"Expected str, got {type(line)}: {line!r}"

    def test_multiline_items_are_split(self):
        """Newlines inside an item become separate buffer lines."""
        items = [
            SourceItem(content="Line A\nLine B\nLine C", source="s", timestamp="t")
        ]
        pipeline, display, ctx = _build_pipeline(items)

        result = pipeline.execute(items)

        assert result.success
        frame = display.frames.get(timeout=1)
        # The camera pads lines to viewport width, so match substrings.
        for expected in ("Line A", "Line B", "Line C"):
            assert any(expected in line for line in frame)

    def test_empty_source_produces_empty_buffer(self):
        """An empty source still yields a (possibly blank) frame."""
        items = []
        pipeline, display, ctx = _build_pipeline(items)

        result = pipeline.execute(items)

        assert result.success
        assert isinstance(display.frames.get(timeout=1), list)

    def test_multiple_frames_are_independent(self):
        """Every execute() call emits exactly one frame of its own."""
        items = [SourceItem(content="frame-content", source="s", timestamp="t")]
        pipeline, display, ctx = _build_pipeline(items)

        pipeline.execute(items)
        pipeline.execute(items)

        first = display.frames.get(timeout=1)
        second = display.frames.get(timeout=1)
        assert first == second  # identical input => identical output
        assert display.frames.empty()  # exactly two frames, no extras
|
||||
# ─── TESTS: EFFECTS IN THE PIPELINE ─────────────────────
|
||||
|
||||
|
||||
class TestPipelineE2EEffects:
    """End-to-end: effects transform the buffer between render and display."""

    def test_single_effect_modifies_output(self):
        """One effect should leave a visible mark on the frame."""
        items = [SourceItem(content="Original", source="s", timestamp="t")]
        fx = MarkerEffect("FX1")
        pipeline, display, ctx = _build_pipeline(items, effects=[("marker", fx)])

        result = pipeline.execute(items)

        assert result.success
        frame = display.frames.get(timeout=1)
        # The camera pads lines to viewport width, so match substrings.
        assert any("[FX1]" in line for line in frame), (
            f"Marker not found in frame: {frame}"
        )
        assert "Original" in "\n".join(frame)

    def test_effect_chain_ordering(self):
        """Effects run in the order they were registered."""
        items = [SourceItem(content="data", source="s", timestamp="t")]
        fx_a = MarkerEffect("A")
        fx_b = MarkerEffect("B")
        pipeline, display, ctx = _build_pipeline(
            items, effects=[("alpha", fx_a), ("beta", fx_b)]
        )

        result = pipeline.execute(items)

        assert result.success
        frame = display.frames.get(timeout=1)
        text = "\n".join(frame)
        # B executes after A; its marker is prepended last, so it appears first.
        idx_b = text.index("[B]")
        idx_a = text.index("[A]")
        assert idx_b < idx_a, f"Expected [B] before [A], got: {frame}"

    def test_effect_receives_list_of_strings(self):
        """The render stage must hand effects a list[str]."""
        items = [SourceItem(content="check-type", source="s", timestamp="t")]
        received_types = []

        class TypeCheckEffect(EffectPlugin):
            @property
            def name(self):
                return "typecheck"

            def configure(self, config):
                pass

            def process(self, buffer, ctx):
                received_types.append(type(buffer).__name__)
                if isinstance(buffer, list):
                    received_types.extend(type(entry).__name__ for entry in buffer)
                return buffer

        pipeline, display, ctx = _build_pipeline(
            items, effects=[("typecheck", TypeCheckEffect())]
        )

        pipeline.execute(items)

        assert received_types[0] == "list", f"Buffer type: {received_types[0]}"
        # Every buffer element must be a string.
        for t in received_types[1:]:
            assert t == "str", f"Buffer element type: {t}"

    def test_disabled_effect_is_skipped(self):
        """Disabling an effect stage prevents it from processing data."""
        items = [SourceItem(content="data", source="s", timestamp="t")]
        fx = MarkerEffect("DISABLED")
        pipeline, display, ctx = _build_pipeline(
            items, effects=[("disabled-fx", fx)]
        )

        # Switch the effect stage off before executing.
        pipeline.get_stage("effect_disabled-fx").set_enabled(False)

        result = pipeline.execute(items)

        assert result.success
        frame = display.frames.get(timeout=1)
        assert "[DISABLED]" not in frame, "Disabled effect should not run"
        assert fx.call_count == 0
|
||||
# ─── TESTS: STAGE EXECUTION ORDER & METRICS ─────────────
|
||||
|
||||
|
||||
class TestPipelineE2EStageOrder:
    """All stages execute in order and report metrics."""

    def test_all_stages_appear_in_execution_order(self):
        """The built pipeline contains source, render, and display."""
        items = [SourceItem(content="x", source="s", timestamp="t")]
        pipeline, display, ctx = _build_pipeline(items)

        order = pipeline.execution_order
        for stage_name in ("source", "render", "display"):
            assert stage_name in order

    def test_execution_order_is_source_render_display(self):
        """Stages are ordered: source before render, render before display."""
        items = [SourceItem(content="x", source="s", timestamp="t")]
        pipeline, display, ctx = _build_pipeline(items)

        order = pipeline.execution_order
        assert order.index("source") < order.index("render") < order.index("display")

    def test_effects_between_render_and_display(self):
        """Effect stages sit strictly between render and display."""
        items = [SourceItem(content="x", source="s", timestamp="t")]
        pipeline, display, ctx = _build_pipeline(
            items, effects=[("mid", MarkerEffect("MID"))]
        )

        order = pipeline.execution_order
        assert (
            order.index("render") < order.index("effect_mid") < order.index("display")
        )

    def test_metrics_collected_for_all_stages(self):
        """Executing the pipeline records metrics for every active stage."""
        items = [SourceItem(content="x", source="s", timestamp="t")]
        pipeline, display, ctx = _build_pipeline(
            items, effects=[("m", MarkerEffect("M"))]
        )

        pipeline.execute(items)

        summary = pipeline.get_metrics_summary()
        assert "stages" in summary
        recorded = set(summary["stages"].keys())
        # Every regular (non-overlay) stage should report metrics; the
        # display stage is named "queue" by _build_pipeline.
        for stage_name in ("source", "render", "queue", "effect_m"):
            assert stage_name in recorded
|
||||
|
||||
# ─── TESTS: FONT STAGE DATAFLOW ─────────────────────────
|
||||
|
||||
|
||||
class TestFontStageDataflow:
    """FontStage must render content via make_block correctly.

    These tests expose the tuple-unpacking bug in FontStage.process()
    where make_block returns (lines, color, meta_idx) but the code
    does result.extend(block) instead of result.extend(block[0]).
    """

    def test_font_stage_unpacks_make_block_correctly(self):
        """FontStage output must be list[str], never mixed types."""
        items = [
            SourceItem(content="Test Headline", source="test-src", timestamp="12345")
        ]

        # Stub make_block with its documented return signature.
        stub_lines = [" RENDERED LINE 1", " RENDERED LINE 2", "", " meta info"]
        stub_return = (stub_lines, "\033[38;5;46m", 3)

        with patch("engine.render.make_block", return_value=stub_return):
            pipeline, display, ctx = _build_pipeline(items, use_font_stage=True)

            result = pipeline.execute(items)

            assert result.success, f"Pipeline failed: {result.error}"
            frame = display.frames.get(timeout=1)

            # Every element of the frame must be a string.
            for i, line in enumerate(frame):
                assert isinstance(line, str), (
                    f"Frame line {i} is {type(line).__name__}: {line!r} "
                    f"(FontStage likely extended with raw tuple)"
                )

    def test_font_stage_output_contains_rendered_content(self):
        """Rendered lines, not color codes, make it into the frame."""
        items = [SourceItem(content="My Headline", source="src", timestamp="0")]

        stub_lines = [" BIG BLOCK TEXT", " MORE TEXT", "", " ░ src · 0"]
        stub_return = (stub_lines, "\033[38;5;46m", 3)

        with patch("engine.render.make_block", return_value=stub_return):
            pipeline, display, ctx = _build_pipeline(items, use_font_stage=True)

            result = pipeline.execute(items)

            assert result.success
            frame = display.frames.get(timeout=1)
            text = "\n".join(frame)
            assert "BIG BLOCK TEXT" in text
            assert "MORE TEXT" in text

    def test_font_stage_does_not_leak_color_codes_as_lines(self):
        """make_block's ANSI color must never appear as its own frame line."""
        items = [SourceItem(content="Headline", source="s", timestamp="0")]

        color_code = "\033[38;5;46m"
        stub_return = ([" rendered"], color_code, 0)

        with patch("engine.render.make_block", return_value=stub_return):
            pipeline, display, ctx = _build_pipeline(items, use_font_stage=True)

            result = pipeline.execute(items)

            assert result.success
            frame = display.frames.get(timeout=1)
            # The color code must not be a standalone frame line.
            assert color_code not in frame, (
                f"Color code leaked as a frame line: {frame}"
            )
            # Nor should the integer meta_row_index leak through.
            for line in frame:
                assert not isinstance(line, int), f"Integer leaked into frame: {line}"

    def test_font_stage_handles_multiple_items(self):
        """Every item goes through make_block exactly once."""
        items = [
            SourceItem(content="First", source="a", timestamp="1"),
            SourceItem(content="Second", source="b", timestamp="2"),
        ]

        call_count = 0

        def fake_make_block(title, src, ts, w):
            nonlocal call_count
            call_count += 1
            return ([f" [{title}]"], "\033[0m", 0)

        with patch("engine.render.make_block", side_effect=fake_make_block):
            pipeline, display, ctx = _build_pipeline(items, use_font_stage=True)

            result = pipeline.execute(items)

            assert result.success
            assert call_count == 2, f"make_block called {call_count} times, expected 2"
            frame = display.frames.get(timeout=1)
            text = "\n".join(frame)
            assert "[First]" in text
            assert "[Second]" in text
|
||||
# ─── TESTS: MIRROR OF app.py ASSEMBLY ───────────────────
|
||||
|
||||
|
||||
class TestAppPipelineAssembly:
    """Verify the pipeline as assembled by app.py works end-to-end.

    This mirrors how run_pipeline_mode() builds the pipeline but
    without any network or terminal dependencies.
    """

    def test_demo_preset_pipeline_produces_output(self):
        """Simulates the 'demo' preset pipeline with stub data."""
        # Simulate what app.py does for the demo preset
        items = [
            ("Breaking: Test passes", "UnitTest", "1234567890"),
            ("Update: Coverage improves", "CI", "1234567891"),
        ]

        display = QueueDisplay()
        ctx = PipelineContext()
        params = PipelineParams()
        params.viewport_width = 80
        params.viewport_height = 24
        params.frame_number = 0
        ctx.params = params
        ctx.set("items", items)

        pipeline = Pipeline(
            config=PipelineConfig(enable_metrics=True),
            context=ctx,
        )

        # Mirror app.py: ListDataSource -> SourceItemsToBufferStage -> display
        source = ListDataSource(items, name="headlines")
        pipeline.add_stage("source", DataSourceStage(source, name="headlines"))
        pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer"))
        pipeline.add_stage("display", DisplayStage(display, name="queue"))

        pipeline.build()
        pipeline.initialize()

        result = pipeline.execute(items)

        assert result.success, f"Pipeline failed: {result.error}"
        assert not display.frames.empty(), "Display received no frames"

        frame = display.frames.get(timeout=1)
        assert isinstance(frame, list)
        assert len(frame) > 0
        # All lines must be strings
        for line in frame:
            assert isinstance(line, str)
# ─── tests/test_pipeline_introspection.py (new file, 171 lines) ───
|
||||
"""
|
||||
Tests for PipelineIntrospectionSource.
|
||||
"""
|
||||
|
||||
from engine.data_sources.pipeline_introspection import PipelineIntrospectionSource
|
||||
|
||||
|
||||
class TestPipelineIntrospectionSource:
    """Tests for PipelineIntrospectionSource."""

    def test_basic_init(self):
        """Source initializes with defaults."""
        source = PipelineIntrospectionSource()
        assert source.name == "pipeline-inspect"
        assert source.is_dynamic is True
        assert source.frame == 0
        assert source.ready is False

    def test_init_with_params(self):
        """Source initializes with custom params."""
        source = PipelineIntrospectionSource(viewport_width=100, viewport_height=40)
        assert source.viewport_width == 100
        assert source.viewport_height == 40

    def test_inlet_outlet_types(self):
        """Source has correct inlet/outlet types."""
        source = PipelineIntrospectionSource()
        from engine.pipeline.core import DataType

        assert DataType.NONE in source.inlet_types
        assert DataType.SOURCE_ITEMS in source.outlet_types

    def test_fetch_returns_items(self):
        """fetch() returns SourceItem list."""
        source = PipelineIntrospectionSource()
        items = source.fetch()
        assert len(items) == 1
        assert items[0].source == "pipeline-inspect"

    def test_fetch_increments_frame(self):
        """fetch() increments frame counter when ready."""
        source = PipelineIntrospectionSource()
        assert source.frame == 0

        # Set pipeline first to make source ready
        class MockPipeline:
            stages = {}
            execution_order = []

            def get_metrics_summary(self):
                return {"avg_ms": 10.0, "fps": 60, "stages": {}}

            def get_frame_times(self):
                return [10.0, 12.0, 11.0]

        source.set_pipeline(MockPipeline())
        assert source.ready is True

        source.fetch()
        assert source.frame == 1
        source.fetch()
        assert source.frame == 2

    def test_get_items(self):
        """get_items() returns list of SourceItems."""
        source = PipelineIntrospectionSource()
        items = source.get_items()
        assert isinstance(items, list)
        assert len(items) > 0
        assert items[0].source == "pipeline-inspect"

    def test_set_pipeline(self):
        """set_pipeline() marks source as ready."""
        source = PipelineIntrospectionSource()
        assert source.ready is False

        class MockPipeline:
            stages = {}
            execution_order = []

            def get_metrics_summary(self):
                return {"avg_ms": 10.0, "fps": 60, "stages": {}}

            def get_frame_times(self):
                return [10.0, 12.0, 11.0]

        source.set_pipeline(MockPipeline())
        assert source.ready is True
||||
|
||||
class TestPipelineIntrospectionRender:
    """Tests for rendering methods."""

    def test_render_header_no_pipeline(self):
        """_render_header returns default when no pipeline."""
        source = PipelineIntrospectionSource()
        lines = source._render_header()
        assert len(lines) == 1
        assert "PIPELINE INTROSPECTION" in lines[0]

    def test_render_bar(self):
        """_render_bar creates correct bar."""
        source = PipelineIntrospectionSource()
        bar = source._render_bar(50, 10)
        assert len(bar) == 10
        assert bar.count("█") == 5
        assert bar.count("░") == 5

    def test_render_bar_zero(self):
        """_render_bar handles zero percentage."""
        source = PipelineIntrospectionSource()
        bar = source._render_bar(0, 10)
        assert bar == "░" * 10

    def test_render_bar_full(self):
        """_render_bar handles 100%."""
        source = PipelineIntrospectionSource()
        bar = source._render_bar(100, 10)
        assert bar == "█" * 10

    def test_render_sparkline(self):
        """_render_sparkline creates sparkline."""
        source = PipelineIntrospectionSource()
        values = [1.0, 2.0, 3.0, 4.0, 5.0]
        sparkline = source._render_sparkline(values, 10)
        assert len(sparkline) == 10

    def test_render_sparkline_empty(self):
        """_render_sparkline handles empty values."""
        source = PipelineIntrospectionSource()
        sparkline = source._render_sparkline([], 10)
        assert sparkline == " " * 10

    def test_render_footer_no_pipeline(self):
        """_render_footer shows collecting data when no pipeline."""
        source = PipelineIntrospectionSource()
        lines = source._render_footer()
        assert len(lines) >= 2
||||
|
||||
class TestPipelineIntrospectionFull:
    """Integration tests."""

    def test_render_empty(self):
        """_render works when not ready."""
        source = PipelineIntrospectionSource()
        lines = source._render()
        assert len(lines) > 0
        assert "PIPELINE INTROSPECTION" in lines[0]

    def test_render_with_mock_pipeline(self):
        """_render works with mock pipeline."""
        source = PipelineIntrospectionSource()

        class MockStage:
            category = "source"
            name = "test"

        class MockPipeline:
            stages = {"test": MockStage()}
            execution_order = ["test"]

            def get_metrics_summary(self):
                return {"stages": {"test": {"avg_ms": 1.5}}, "avg_ms": 2.0, "fps": 60}

            def get_frame_times(self):
                return [1.0, 2.0, 3.0]

        source.set_pipeline(MockPipeline())
        lines = source._render()
        assert len(lines) > 0
||||
# ─── tests/test_pipeline_introspection_demo.py (new file, 167 lines) ───
|
||||
"""
|
||||
Tests for PipelineIntrospectionDemo.
|
||||
"""
|
||||
|
||||
from engine.pipeline.pipeline_introspection_demo import (
|
||||
DemoConfig,
|
||||
DemoPhase,
|
||||
PhaseState,
|
||||
PipelineIntrospectionDemo,
|
||||
)
|
||||
|
||||
|
||||
class MockPipeline:
    """Minimal stand-in for a Pipeline; the demo only needs an object reference."""

    pass
||||
|
||||
class MockEffectConfig:
    """Mock effect config with the two fields the demo mutates."""

    def __init__(self):
        # Effects start disabled at half intensity.
        self.enabled = False
        self.intensity = 0.5
|
||||
class MockEffect:
    """Mock effect for testing: a name plus a fresh MockEffectConfig."""

    def __init__(self, name):
        self.name = name
        self.config = MockEffectConfig()
|
||||
class MockRegistry:
    """Mock effect registry: name -> effect lookup over a fixed list."""

    def __init__(self, effects):
        # Index effects by their .name attribute for O(1) lookup.
        self._effects = {e.name: e for e in effects}

    def get(self, name):
        """Return the effect registered under *name*, or None."""
        return self._effects.get(name)
||||
|
||||
class TestDemoPhase:
    """Tests for DemoPhase enum."""

    def test_phases_exist(self):
        """All three phases exist."""
        assert DemoPhase.PHASE_1_TOGGLE is not None
        assert DemoPhase.PHASE_2_LFO is not None
        assert DemoPhase.PHASE_3_SHARED_LFO is not None
|
||||
class TestDemoConfig:
    """Tests for DemoConfig."""

    def test_defaults(self):
        """Default config has sensible values."""
        config = DemoConfig()
        assert config.effect_cycle_duration == 3.0
        assert config.gap_duration == 1.0
        assert config.lfo_duration == 4.0
        assert config.phase_2_effect_duration == 4.0
        assert config.phase_3_lfo_duration == 6.0
||||
|
||||
class TestPhaseState:
    """Tests for PhaseState."""

    def test_defaults(self):
        """PhaseState initializes correctly."""
        state = PhaseState(phase=DemoPhase.PHASE_1_TOGGLE, start_time=0.0)
        assert state.phase == DemoPhase.PHASE_1_TOGGLE
        assert state.start_time == 0.0
        assert state.current_effect_index == 0
||||
|
||||
class TestPipelineIntrospectionDemo:
    """Tests for PipelineIntrospectionDemo."""

    def test_basic_init(self):
        """Demo initializes with defaults."""
        demo = PipelineIntrospectionDemo(pipeline=None)
        assert demo.phase == DemoPhase.PHASE_1_TOGGLE
        assert demo.effect_names == ["noise", "fade", "glitch", "firehose"]

    def test_init_with_custom_effects(self):
        """Demo initializes with custom effects."""
        demo = PipelineIntrospectionDemo(pipeline=None, effect_names=["noise", "fade"])
        assert demo.effect_names == ["noise", "fade"]

    def test_phase_display(self):
        """phase_display returns correct string."""
        demo = PipelineIntrospectionDemo(pipeline=None)
        assert "Phase 1" in demo.phase_display

    def test_shared_oscillator_created(self):
        """Shared oscillator is created."""
        demo = PipelineIntrospectionDemo(pipeline=None)
        assert demo.shared_oscillator is not None
        assert demo.shared_oscillator.name == "demo-lfo"
||||
|
||||
class TestPipelineIntrospectionDemoUpdate:
    """Tests for update method."""

    def test_update_returns_dict(self):
        """update() returns a dict with expected keys."""
        demo = PipelineIntrospectionDemo(pipeline=None)
        result = demo.update()
        assert "phase" in result
        assert "phase_display" in result
        assert "effect_states" in result

    def test_update_phase_1_structure(self):
        """Phase 1 has correct structure."""
        demo = PipelineIntrospectionDemo(pipeline=None)
        result = demo.update()
        assert result["phase"] == "PHASE_1_TOGGLE"
        assert "current_effect" in result

    def test_effect_states_structure(self):
        """effect_states has correct structure."""
        demo = PipelineIntrospectionDemo(pipeline=None)
        result = demo.update()
        states = result["effect_states"]
        for name in demo.effect_names:
            assert name in states
            assert "enabled" in states[name]
            assert "intensity" in states[name]
||||
|
||||
class TestPipelineIntrospectionDemoPhases:
    """Tests for phase transitions."""

    def test_phase_1_initial(self):
        """Starts in phase 1."""
        demo = PipelineIntrospectionDemo(pipeline=None)
        assert demo.phase == DemoPhase.PHASE_1_TOGGLE

    def test_shared_oscillator_not_started_initially(self):
        """Shared oscillator not started in phase 1."""
        demo = PipelineIntrospectionDemo(pipeline=None)
        assert demo.shared_oscillator is not None
        # The oscillator.start() is called when transitioning to phase 3
||||
|
||||
class TestPipelineIntrospectionDemoCleanup:
    """Tests for cleanup method."""

    def test_cleanup_no_error(self):
        """cleanup() runs without error."""
        demo = PipelineIntrospectionDemo(pipeline=None)
        demo.cleanup()  # Should not raise

    def test_cleanup_resets_effects(self):
        """cleanup() resets effects."""
        demo = PipelineIntrospectionDemo(pipeline=None)
        demo._apply_effect_states(
            {
                "noise": {"enabled": True, "intensity": 1.0},
                "fade": {"enabled": True, "intensity": 1.0},
            }
        )
        demo.cleanup()
        # If we had a mock registry, we could verify effects were reset
||||
# ─── tests/test_pipeline_metrics_sensor.py (new file, 113 lines) ───
|
||||
"""
|
||||
Tests for PipelineMetricsSensor.
|
||||
"""
|
||||
|
||||
from engine.sensors.pipeline_metrics import PipelineMetricsSensor
|
||||
|
||||
|
||||
class MockPipeline:
    """Mock pipeline that returns a canned metrics summary."""

    def __init__(self, metrics=None):
        # Default to an empty summary rather than sharing a mutable default.
        self._metrics = metrics or {}

    def get_metrics_summary(self):
        """Return the metrics dict supplied at construction time."""
        return self._metrics
||||
|
||||
class TestPipelineMetricsSensor:
    """Tests for PipelineMetricsSensor."""

    def test_basic_init(self):
        """Sensor initializes with defaults."""
        sensor = PipelineMetricsSensor()
        assert sensor.name == "pipeline"
        assert sensor.available is False

    def test_init_with_pipeline(self):
        """Sensor initializes with pipeline."""
        mock = MockPipeline()
        sensor = PipelineMetricsSensor(mock)
        assert sensor.available is True

    def test_set_pipeline(self):
        """set_pipeline() updates pipeline."""
        sensor = PipelineMetricsSensor()
        assert sensor.available is False
        sensor.set_pipeline(MockPipeline())
        assert sensor.available is True

    def test_read_no_pipeline(self):
        """read() returns None when no pipeline."""
        sensor = PipelineMetricsSensor()
        assert sensor.read() is None

    def test_read_with_metrics(self):
        """read() returns sensor value with metrics."""
        mock = MockPipeline(
            {
                "total_ms": 18.5,
                "fps": 54.0,
                "avg_ms": 18.5,
                "min_ms": 15.0,
                "max_ms": 22.0,
                "stages": {"render": {"avg_ms": 12.0}, "noise": {"avg_ms": 3.0}},
            }
        )
        sensor = PipelineMetricsSensor(mock)
        val = sensor.read()
        assert val is not None
        assert val.sensor_name == "pipeline"
        assert val.value == 18.5

    def test_read_with_error(self):
        """read() returns None when metrics have error."""
        mock = MockPipeline({"error": "No metrics collected"})
        sensor = PipelineMetricsSensor(mock)
        assert sensor.read() is None

    def test_get_stage_timing(self):
        """get_stage_timing() returns stage timing."""
        mock = MockPipeline(
            {
                "stages": {"render": {"avg_ms": 12.0}, "noise": {"avg_ms": 3.0}},
            }
        )
        sensor = PipelineMetricsSensor(mock)
        assert sensor.get_stage_timing("render") == 12.0
        assert sensor.get_stage_timing("noise") == 3.0
        assert sensor.get_stage_timing("nonexistent") == 0.0

    def test_get_stage_timing_no_pipeline(self):
        """get_stage_timing() returns 0 when no pipeline."""
        sensor = PipelineMetricsSensor()
        assert sensor.get_stage_timing("test") == 0.0

    def test_get_all_timings(self):
        """get_all_timings() returns all stage timings."""
        mock = MockPipeline(
            {
                "stages": {"render": {"avg_ms": 12.0}, "noise": {"avg_ms": 3.0}},
            }
        )
        sensor = PipelineMetricsSensor(mock)
        timings = sensor.get_all_timings()
        assert timings == {"render": {"avg_ms": 12.0}, "noise": {"avg_ms": 3.0}}

    def test_get_frame_history(self):
        """get_frame_history() returns frame times."""

        class MockPipelineWithFrames:
            def get_frame_times(self):
                return [1.0, 2.0, 3.0]

        sensor = PipelineMetricsSensor(MockPipelineWithFrames())
        history = sensor.get_frame_history()
        assert history == [1.0, 2.0, 3.0]

    def test_start_stop(self):
        """start() and stop() work."""
        sensor = PipelineMetricsSensor()
        assert sensor.start() is True
        sensor.stop()  # Should not raise
||||
# ─── tests/test_pipeline_mutation_commands.py (new file, 259 lines) ───
|
||||
"""
|
||||
Integration tests for pipeline mutation commands via WebSocket/UI panel.
|
||||
|
||||
Tests the mutation API through the command interface.
|
||||
"""
|
||||
|
||||
from unittest.mock import Mock
|
||||
|
||||
from engine.app.pipeline_runner import _handle_pipeline_mutation
|
||||
from engine.pipeline import Pipeline
|
||||
from engine.pipeline.ui import UIConfig, UIPanel
|
||||
|
||||
|
||||
class TestPipelineMutationCommands:
    """Test pipeline mutation commands through the mutation API."""

    def test_can_hot_swap_existing_stage(self):
        """Test can_hot_swap returns True for existing, non-critical stage."""
        pipeline = Pipeline()

        # Add a test stage
        mock_stage = Mock()
        mock_stage.capabilities = {"test_capability"}
        pipeline.add_stage("test_stage", mock_stage)
        pipeline._capability_map = {"test_capability": ["test_stage"]}

        # Test that we can check hot-swap capability
        result = pipeline.can_hot_swap("test_stage")
        assert result is True

    def test_can_hot_swap_nonexistent_stage(self):
        """Test can_hot_swap returns False for non-existent stage."""
        pipeline = Pipeline()
        result = pipeline.can_hot_swap("nonexistent_stage")
        assert result is False

    def test_can_hot_swap_minimum_capability(self):
        """Test can_hot_swap with minimum capability stage."""
        pipeline = Pipeline()

        # Add a source stage (minimum capability)
        mock_stage = Mock()
        mock_stage.capabilities = {"source"}
        pipeline.add_stage("source", mock_stage)
        pipeline._capability_map = {"source": ["source"]}

        # Initialize pipeline to trigger capability validation
        pipeline._initialized = True

        # Source is the only provider of minimum capability
        result = pipeline.can_hot_swap("source")
        # Should be False because it's the sole provider of a minimum capability
        assert result is False

    def test_cleanup_stage(self):
        """Test cleanup_stage calls cleanup on specific stage."""
        pipeline = Pipeline()

        # Add a stage with a mock cleanup method
        mock_stage = Mock()
        pipeline.add_stage("test_stage", mock_stage)

        # Cleanup the specific stage
        pipeline.cleanup_stage("test_stage")

        # Verify cleanup was called
        mock_stage.cleanup.assert_called_once()

    def test_cleanup_stage_nonexistent(self):
        """Test cleanup_stage on non-existent stage doesn't crash."""
        pipeline = Pipeline()
        pipeline.cleanup_stage("nonexistent_stage")
        # Should not raise an exception

    def test_remove_stage_rebuilds_execution_order(self):
        """Test that remove_stage rebuilds execution order."""
        pipeline = Pipeline()

        # Add two independent stages
        stage1 = Mock()
        stage1.capabilities = {"source"}
        stage1.dependencies = set()
        stage1.stage_dependencies = []  # Add empty list for stage dependencies

        stage2 = Mock()
        stage2.capabilities = {"render.output"}
        stage2.dependencies = set()  # No dependencies
        stage2.stage_dependencies = []  # No stage dependencies

        pipeline.add_stage("stage1", stage1)
        pipeline.add_stage("stage2", stage2)

        # Build pipeline to establish execution order
        pipeline._initialized = True
        pipeline._capability_map = {"source": ["stage1"], "render.output": ["stage2"]}
        pipeline._execution_order = ["stage1", "stage2"]

        # Remove stage1
        pipeline.remove_stage("stage1")

        # Verify execution order was rebuilt
        assert "stage1" not in pipeline._execution_order
        assert "stage2" in pipeline._execution_order

    def test_handle_pipeline_mutation_remove_stage(self):
        """Test _handle_pipeline_mutation with remove_stage command."""
        pipeline = Pipeline()

        # Add a mock stage
        mock_stage = Mock()
        pipeline.add_stage("test_stage", mock_stage)

        # Create remove command
        command = {"action": "remove_stage", "stage": "test_stage"}

        # Handle the mutation
        result = _handle_pipeline_mutation(pipeline, command)

        # Verify it was handled and stage was removed
        assert result is True
        assert "test_stage" not in pipeline._stages

    def test_handle_pipeline_mutation_swap_stages(self):
        """Test _handle_pipeline_mutation with swap_stages command."""
        pipeline = Pipeline()

        # Add two mock stages
        stage1 = Mock()
        stage2 = Mock()
        pipeline.add_stage("stage1", stage1)
        pipeline.add_stage("stage2", stage2)

        # Create swap command
        command = {"action": "swap_stages", "stage1": "stage1", "stage2": "stage2"}

        # Handle the mutation
        result = _handle_pipeline_mutation(pipeline, command)

        # Verify it was handled
        assert result is True

    def test_handle_pipeline_mutation_enable_stage(self):
        """Test _handle_pipeline_mutation with enable_stage command."""
        pipeline = Pipeline()

        # Add a mock stage with set_enabled method
        mock_stage = Mock()
        mock_stage.set_enabled = Mock()
        pipeline.add_stage("test_stage", mock_stage)

        # Create enable command
        command = {"action": "enable_stage", "stage": "test_stage"}

        # Handle the mutation
        result = _handle_pipeline_mutation(pipeline, command)

        # Verify it was handled
        assert result is True
        mock_stage.set_enabled.assert_called_once_with(True)

    def test_handle_pipeline_mutation_disable_stage(self):
        """Test _handle_pipeline_mutation with disable_stage command."""
        pipeline = Pipeline()

        # Add a mock stage with set_enabled method
        mock_stage = Mock()
        mock_stage.set_enabled = Mock()
        pipeline.add_stage("test_stage", mock_stage)

        # Create disable command
        command = {"action": "disable_stage", "stage": "test_stage"}

        # Handle the mutation
        result = _handle_pipeline_mutation(pipeline, command)

        # Verify it was handled
        assert result is True
        mock_stage.set_enabled.assert_called_once_with(False)

    def test_handle_pipeline_mutation_cleanup_stage(self):
        """Test _handle_pipeline_mutation with cleanup_stage command."""
        pipeline = Pipeline()

        # Add a mock stage
        mock_stage = Mock()
        pipeline.add_stage("test_stage", mock_stage)

        # Create cleanup command
        command = {"action": "cleanup_stage", "stage": "test_stage"}

        # Handle the mutation
        result = _handle_pipeline_mutation(pipeline, command)

        # Verify it was handled and cleanup was called
        assert result is True
        mock_stage.cleanup.assert_called_once()

    def test_handle_pipeline_mutation_can_hot_swap(self):
        """Test _handle_pipeline_mutation with can_hot_swap command."""
        pipeline = Pipeline()

        # Add a mock stage
        mock_stage = Mock()
        mock_stage.capabilities = {"test"}
        pipeline.add_stage("test_stage", mock_stage)
        pipeline._capability_map = {"test": ["test_stage"]}

        # Create can_hot_swap command
        command = {"action": "can_hot_swap", "stage": "test_stage"}

        # Handle the mutation
        result = _handle_pipeline_mutation(pipeline, command)

        # Verify it was handled
        assert result is True

    def test_handle_pipeline_mutation_move_stage(self):
        """Test _handle_pipeline_mutation with move_stage command."""
        pipeline = Pipeline()

        # Add two mock stages
        stage1 = Mock()
        stage2 = Mock()
        pipeline.add_stage("stage1", stage1)
        pipeline.add_stage("stage2", stage2)

        # Initialize execution order
        pipeline._execution_order = ["stage1", "stage2"]

        # Create move command to move stage1 after stage2
        command = {"action": "move_stage", "stage": "stage1", "after": "stage2"}

        # Handle the mutation
        result = _handle_pipeline_mutation(pipeline, command)

        # Verify it was handled (result might be True or False depending on validation)
        # The key is that the command was processed
        assert result in (True, False)

    def test_ui_panel_execute_command_mutation_actions(self):
        """Test UI panel execute_command with mutation actions."""
        ui_panel = UIPanel(UIConfig())

        # Test that mutation actions return False (not handled by UI panel)
        # These should be handled by the WebSocket command handler instead
        mutation_actions = [
            {"action": "remove_stage", "stage": "test"},
            {"action": "swap_stages", "stage1": "a", "stage2": "b"},
            {"action": "enable_stage", "stage": "test"},
            {"action": "disable_stage", "stage": "test"},
            {"action": "cleanup_stage", "stage": "test"},
            {"action": "can_hot_swap", "stage": "test"},
        ]

        for command in mutation_actions:
            result = ui_panel.execute_command(command)
            assert result is False, (
                f"Mutation action {command['action']} should not be handled by UI panel"
            )
||||
# ─── tests/test_pipeline_rebuild.py (new file, 405 lines) ───
|
||||
"""
|
||||
Integration tests for pipeline hot-rebuild and state preservation.
|
||||
|
||||
Tests:
|
||||
1. Viewport size control via --viewport flag
|
||||
2. NullDisplay recording and save/load functionality
|
||||
3. Pipeline state preservation during hot-rebuild
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from engine.display import DisplayRegistry
|
||||
from engine.display.backends.null import NullDisplay
|
||||
from engine.display.backends.replay import ReplayDisplay
|
||||
from engine.effects import get_registry
|
||||
from engine.fetch import load_cache
|
||||
from engine.pipeline import Pipeline, PipelineConfig, PipelineContext
|
||||
from engine.pipeline.adapters import (
|
||||
EffectPluginStage,
|
||||
FontStage,
|
||||
ViewportFilterStage,
|
||||
create_stage_from_display,
|
||||
create_stage_from_effect,
|
||||
)
|
||||
from engine.pipeline.params import PipelineParams
|
||||
|
||||
|
||||
@pytest.fixture
def viewport_dims():
    """Small viewport dimensions (width, height) for testing."""
    return (40, 15)
|
||||
|
||||
@pytest.fixture
def items():
    """Load cached source items; skip the test if no fixture cache exists."""
    items = load_cache()
    if not items:
        pytest.skip("No fixture cache available")
    return items
|
||||
|
||||
@pytest.fixture
def null_display(viewport_dims):
    """Create an initialized NullDisplay for testing."""
    display = DisplayRegistry.create("null")
    display.init(viewport_dims[0], viewport_dims[1])
    return display
|
||||
|
||||
@pytest.fixture
def pipeline_with_null_display(items, null_display):
    """Create a fully-built pipeline with NullDisplay for testing.

    Yields (pipeline, params, null_display) and tears both down afterwards.
    """
    import engine.effects.plugins as effects_plugins

    effects_plugins.discover_plugins()

    width, height = null_display.width, null_display.height
    params = PipelineParams()
    params.viewport_width = width
    params.viewport_height = height

    config = PipelineConfig(
        source="fixture",
        display="null",
        camera="scroll",
        effects=["noise", "fade"],
    )

    pipeline = Pipeline(config=config, context=PipelineContext())

    from engine.camera import Camera
    from engine.data_sources.sources import ListDataSource
    from engine.pipeline.adapters import CameraClockStage, CameraStage, DataSourceStage

    list_source = ListDataSource(items, name="fixture")
    pipeline.add_stage("source", DataSourceStage(list_source, name="fixture"))

    # Add camera stages (required by ViewportFilterStage)
    camera = Camera.scroll(speed=0.3)
    camera.set_canvas_size(200, 200)
    pipeline.add_stage("camera_update", CameraClockStage(camera, name="camera-clock"))
    pipeline.add_stage("camera", CameraStage(camera, name="scroll"))

    pipeline.add_stage("viewport_filter", ViewportFilterStage(name="viewport-filter"))
    pipeline.add_stage("font", FontStage(name="font"))

    effect_registry = get_registry()
    for effect_name in config.effects:
        effect = effect_registry.get(effect_name)
        if effect:
            pipeline.add_stage(
                f"effect_{effect_name}",
                create_stage_from_effect(effect, effect_name),
            )

    pipeline.add_stage("display", create_stage_from_display(null_display, "null"))
    pipeline.build()

    if not pipeline.initialize():
        pytest.fail("Failed to initialize pipeline")

    ctx = pipeline.context
    ctx.params = params
    ctx.set("display", null_display)
    ctx.set("items", items)
    ctx.set("pipeline", pipeline)
    ctx.set("pipeline_order", pipeline.execution_order)
    ctx.set("camera_y", 0)

    yield pipeline, params, null_display

    # Teardown: release pipeline and display resources.
    pipeline.cleanup()
    null_display.cleanup()
|
||||
|
||||
class TestNullDisplayRecording:
|
||||
"""Tests for NullDisplay recording functionality."""
|
||||
|
||||
def test_null_display_initialization(self, viewport_dims):
    """NullDisplay initializes with correct dimensions."""
    display = NullDisplay()
    display.init(viewport_dims[0], viewport_dims[1])
    assert display.width == viewport_dims[0]
    assert display.height == viewport_dims[1]
|
||||
def test_start_stop_recording(self, null_display):
|
||||
"""NullDisplay can start and stop recording."""
|
||||
assert not null_display._is_recording
|
||||
|
||||
null_display.start_recording()
|
||||
assert null_display._is_recording is True
|
||||
|
||||
null_display.stop_recording()
|
||||
assert null_display._is_recording is False
|
||||
|
||||
def test_record_frames(self, null_display, pipeline_with_null_display):
|
||||
"""NullDisplay records frames when recording is enabled."""
|
||||
pipeline, params, display = pipeline_with_null_display
|
||||
|
||||
display.start_recording()
|
||||
assert len(display._recorded_frames) == 0
|
||||
|
||||
for frame in range(5):
|
||||
params.frame_number = frame
|
||||
pipeline.context.params = params
|
||||
pipeline.execute([])
|
||||
|
||||
assert len(display._recorded_frames) == 5
|
||||
|
||||
def test_get_frames(self, null_display, pipeline_with_null_display):
|
||||
"""NullDisplay.get_frames() returns recorded buffers."""
|
||||
pipeline, params, display = pipeline_with_null_display
|
||||
|
||||
display.start_recording()
|
||||
|
||||
for frame in range(3):
|
||||
params.frame_number = frame
|
||||
pipeline.context.params = params
|
||||
pipeline.execute([])
|
||||
|
||||
frames = display.get_frames()
|
||||
assert len(frames) == 3
|
||||
assert all(isinstance(f, list) for f in frames)
|
||||
|
||||
def test_clear_recording(self, null_display, pipeline_with_null_display):
|
||||
"""NullDisplay.clear_recording() clears recorded frames."""
|
||||
pipeline, params, display = pipeline_with_null_display
|
||||
|
||||
display.start_recording()
|
||||
for frame in range(3):
|
||||
params.frame_number = frame
|
||||
pipeline.context.params = params
|
||||
pipeline.execute([])
|
||||
|
||||
assert len(display._recorded_frames) == 3
|
||||
|
||||
display.clear_recording()
|
||||
assert len(display._recorded_frames) == 0
|
||||
|
||||
def test_save_load_recording(self, null_display, pipeline_with_null_display):
|
||||
"""NullDisplay can save and load recordings."""
|
||||
pipeline, params, display = pipeline_with_null_display
|
||||
|
||||
display.start_recording()
|
||||
for frame in range(3):
|
||||
params.frame_number = frame
|
||||
pipeline.context.params = params
|
||||
pipeline.execute([])
|
||||
|
||||
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
|
||||
temp_path = f.name
|
||||
|
||||
try:
|
||||
display.save_recording(temp_path)
|
||||
|
||||
with open(temp_path) as f:
|
||||
data = json.load(f)
|
||||
|
||||
assert data["version"] == 1
|
||||
assert data["display"] == "null"
|
||||
assert data["frame_count"] == 3
|
||||
assert len(data["frames"]) == 3
|
||||
|
||||
display2 = NullDisplay()
|
||||
display2.load_recording(temp_path)
|
||||
assert len(display2._recorded_frames) == 3
|
||||
|
||||
finally:
|
||||
Path(temp_path).unlink(missing_ok=True)
|
||||
|
||||
|
||||
class TestReplayDisplay:
    """Tests for ReplayDisplay playback behaviour."""

    @staticmethod
    def _frame(buffer):
        """Build a recorded-frame dict with the standard test geometry."""
        return {"buffer": buffer, "width": 40, "height": 15}

    def test_replay_display_initialization(self, viewport_dims):
        """ReplayDisplay initializes correctly."""
        width, height = viewport_dims
        display = ReplayDisplay()
        display.init(width, height)
        assert display.width == width
        assert display.height == height

    def test_set_and_get_frames(self):
        """ReplayDisplay can set and retrieve frames."""
        display = ReplayDisplay()
        display.set_frames(
            [self._frame(["line1", "line2"]), self._frame(["line3", "line4"])]
        )

        assert display.get_next_frame() == ["line1", "line2"]
        assert display.get_next_frame() == ["line3", "line4"]
        # Past the end, playback yields None (no looping by default).
        assert display.get_next_frame() is None

    def test_replay_loop_mode(self):
        """ReplayDisplay can loop playback."""
        display = ReplayDisplay()
        display.set_loop(True)
        display.set_frames([self._frame(["frame1"]), self._frame(["frame2"])])

        # Two full passes: looping wraps back to the first frame.
        for _ in range(2):
            assert display.get_next_frame() == ["frame1"]
            assert display.get_next_frame() == ["frame2"]

    def test_replay_seek_and_reset(self):
        """ReplayDisplay supports seek and reset."""
        display = ReplayDisplay()
        display.set_frames([self._frame([f"frame{i}"]) for i in range(5)])

        display.seek(3)
        assert display.get_next_frame() == ["frame3"]

        display.reset()
        assert display.get_next_frame() == ["frame0"]
|
||||
|
||||
|
||||
class TestPipelineHotRebuild:
    """Tests for pipeline hot-rebuild and state preservation."""

    @staticmethod
    def _run_frames(pipeline, params, frame_numbers):
        """Execute the pipeline once per frame number; return the last result."""
        result = None
        for frame in frame_numbers:
            params.frame_number = frame
            pipeline.context.params = params
            result = pipeline.execute([])
        return result

    def test_pipeline_runs_with_null_display(self, pipeline_with_null_display):
        """Pipeline executes successfully with NullDisplay."""
        pipeline, params, display = pipeline_with_null_display

        result = self._run_frames(pipeline, params, range(5))

        assert result.success
        assert display._last_buffer is not None

    def test_effect_toggle_during_execution(self, pipeline_with_null_display):
        """Effects can be toggled during pipeline execution."""
        pipeline, params, display = pipeline_with_null_display

        self._run_frames(pipeline, params, [0])
        buffer_with_effect = display._last_buffer

        fade_stage = pipeline.get_stage("effect_fade")
        assert fade_stage is not None
        assert isinstance(fade_stage, EffectPluginStage)

        # Disable both the stage and the underlying effect config so the
        # next frame renders without the fade applied.
        fade_stage._enabled = False
        fade_stage._effect.config.enabled = False

        self._run_frames(pipeline, params, [1])
        buffer_without_effect = display._last_buffer

        assert buffer_with_effect != buffer_without_effect

    def test_state_preservation_across_rebuild(self, pipeline_with_null_display):
        """Pipeline state is preserved across hot-rebuild events."""
        pipeline, params, display = pipeline_with_null_display

        self._run_frames(pipeline, params, range(5))
        camera_y_before = pipeline.context.get("camera_y")

        # Simulate a hot-rebuild event by flipping an effect's enabled state.
        fade_stage = pipeline.get_stage("effect_fade")
        if fade_stage and isinstance(fade_stage, EffectPluginStage):
            fade_stage.set_enabled(not fade_stage.is_enabled())
            fade_stage._effect.config.enabled = fade_stage.is_enabled()

        self._run_frames(pipeline, params, [5])

        # Bug fix: the original read the post-rebuild value and discarded it,
        # so state preservation was never actually checked.  Assert the key
        # is still present after the rebuild.
        camera_y_after = pipeline.context.get("camera_y")

        assert camera_y_before is not None
        assert camera_y_after is not None
|
||||
|
||||
|
||||
class TestViewportControl:
    """Tests for viewport size control."""

    def test_viewport_dimensions_applied(self, items):
        """Viewport dimensions are correctly applied to pipeline.

        Builds a full pipeline by hand (same wiring as the shared fixture
        above) so the viewport size can be chosen locally, runs one frame,
        and checks that the display produced a buffer.
        """
        width, height = 40, 15

        # Display backend that records buffers instead of writing to a terminal.
        display = DisplayRegistry.create("null")
        display.init(width, height)

        params = PipelineParams()
        params.viewport_width = width
        params.viewport_height = height

        config = PipelineConfig(
            source="fixture",
            display="null",
            camera="scroll",
            effects=[],
        )

        pipeline = Pipeline(config=config, context=PipelineContext())

        # Function-scope imports, matching the style used by the shared fixture.
        from engine.camera import Camera
        from engine.data_sources.sources import ListDataSource
        from engine.pipeline.adapters import (
            CameraClockStage,
            CameraStage,
            DataSourceStage,
        )

        list_source = ListDataSource(items, name="fixture")
        pipeline.add_stage("source", DataSourceStage(list_source, name="fixture"))

        # Add camera stages (required by ViewportFilterStage)
        camera = Camera.scroll(speed=0.3)
        camera.set_canvas_size(200, 200)
        pipeline.add_stage(
            "camera_update", CameraClockStage(camera, name="camera-clock")
        )
        pipeline.add_stage("camera", CameraStage(camera, name="scroll"))

        pipeline.add_stage(
            "viewport_filter", ViewportFilterStage(name="viewport-filter")
        )
        pipeline.add_stage("font", FontStage(name="font"))
        pipeline.add_stage("display", create_stage_from_display(display, "null"))
        pipeline.build()

        assert pipeline.initialize()

        # Seed the context with the same keys the shared fixture sets.
        ctx = pipeline.context
        ctx.params = params
        ctx.set("display", display)
        ctx.set("items", items)
        ctx.set("pipeline", pipeline)
        ctx.set("camera_y", 0)

        result = pipeline.execute(items)

        assert result.success
        assert display._last_buffer is not None

        pipeline.cleanup()
        display.cleanup()
|
||||
@@ -1,301 +0,0 @@
|
||||
"""
|
||||
Tests for engine.render module.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from engine import config, render
|
||||
|
||||
|
||||
class TestDefaultGradients:
    """Tests for default gradient fallback functions."""

    def test_default_green_gradient_length(self):
        """_default_green_gradient returns 12 colors."""
        assert len(render._default_green_gradient()) == 12

    def test_default_green_gradient_is_list(self):
        """_default_green_gradient returns a list."""
        assert isinstance(render._default_green_gradient(), list)

    def test_default_green_gradient_all_strings(self):
        """_default_green_gradient returns list of ANSI code strings."""
        colors = render._default_green_gradient()
        assert all(isinstance(entry, str) for entry in colors)

    def test_default_magenta_gradient_length(self):
        """_default_magenta_gradient returns 12 colors."""
        assert len(render._default_magenta_gradient()) == 12

    def test_default_magenta_gradient_is_list(self):
        """_default_magenta_gradient returns a list."""
        assert isinstance(render._default_magenta_gradient(), list)

    def test_default_magenta_gradient_all_strings(self):
        """_default_magenta_gradient returns list of ANSI code strings."""
        colors = render._default_magenta_gradient()
        assert all(isinstance(entry, str) for entry in colors)
|
||||
|
||||
|
||||
class TestLrGradientUsesActiveTheme:
    """Tests for lr_gradient using active theme."""

    @staticmethod
    def _with_theme(theme_name, fn):
        """Run *fn* with config.ACTIVE_THEME set (or cleared), then restore it.

        Pass ``theme_name=None`` to clear the active theme entirely.
        """
        saved_theme = config.ACTIVE_THEME
        try:
            if theme_name is None:
                config.ACTIVE_THEME = None
            else:
                config.set_active_theme(theme_name)
            return fn()
        finally:
            config.ACTIVE_THEME = saved_theme

    def test_lr_gradient_uses_active_theme_when_cols_none(self):
        """lr_gradient uses ACTIVE_THEME.main_gradient when cols=None."""
        result = self._with_theme(
            "green", lambda: render.lr_gradient(["text"], offset=0.0)
        )

        # Should not raise and should return colored output.
        assert isinstance(result, list)
        assert len(result) == 1
        # The text must have been wrapped in ANSI codes.
        assert result[0] != "text"

    def test_lr_gradient_fallback_when_no_theme(self):
        """lr_gradient uses fallback green when ACTIVE_THEME is None."""
        result = self._with_theme(
            None, lambda: render.lr_gradient(["text"], offset=0.0)
        )

        assert isinstance(result, list)
        assert len(result) == 1
        assert result[0] != "text"

    def test_lr_gradient_explicit_cols_parameter_still_works(self):
        """lr_gradient with explicit cols parameter overrides theme."""
        custom_cols = ["\033[38;5;1m", "\033[38;5;2m"] * 6

        result = render.lr_gradient(["xy"], offset=0.0, cols=custom_cols)

        # Should use the provided cols without touching the theme.
        assert isinstance(result, list)
        assert len(result) == 1

    def test_lr_gradient_respects_cols_parameter_name(self):
        """lr_gradient accepts cols as keyword argument."""
        custom_cols = ["\033[38;5;1m", "\033[38;5;2m"] * 6

        result = render.lr_gradient(["xy"], offset=0.0, cols=custom_cols)

        assert isinstance(result, list)
|
||||
|
||||
|
||||
class TestLrGradientBasicFunctionality:
    """Tests to ensure lr_gradient basic functionality still works."""

    @staticmethod
    def _gradient_with_green_theme(rows):
        """Call lr_gradient under the 'green' theme, restoring state after."""
        saved_theme = config.ACTIVE_THEME
        try:
            config.set_active_theme("green")
            return render.lr_gradient(rows, offset=0.0)
        finally:
            config.ACTIVE_THEME = saved_theme

    def test_lr_gradient_colors_non_space_chars(self):
        """lr_gradient colors non-space characters."""
        result = self._gradient_with_green_theme(["hello"])
        assert any("\033[" in row for row in result), "Expected ANSI codes in result"

    def test_lr_gradient_preserves_spaces(self):
        """lr_gradient preserves spaces in output."""
        result = self._gradient_with_green_theme(["a b c"])
        # Spaces must pass through uncolored.
        assert " " in result[0]

    def test_lr_gradient_empty_rows(self):
        """lr_gradient handles empty rows correctly."""
        assert self._gradient_with_green_theme([""]) == [""]

    def test_lr_gradient_multiple_rows(self):
        """lr_gradient handles multiple rows."""
        result = self._gradient_with_green_theme(["row1", "row2", "row3"])
        assert len(result) == 3
|
||||
|
||||
|
||||
class TestMsgGradient:
    """Tests for msg_gradient function (message/ntfy overlay coloring)."""

    @staticmethod
    def _msg_with_theme(theme_name, rows, offset=0.0):
        """Call msg_gradient under a theme (None clears it), then restore state."""
        saved_theme = config.ACTIVE_THEME
        try:
            if theme_name is None:
                config.ACTIVE_THEME = None
            else:
                config.set_active_theme(theme_name)
            return render.msg_gradient(rows, offset=offset)
        finally:
            config.ACTIVE_THEME = saved_theme

    def test_msg_gradient_uses_active_theme(self):
        """msg_gradient uses ACTIVE_THEME.message_gradient when theme is set."""
        result = self._msg_with_theme("green", ["MESSAGE"])

        assert isinstance(result, list)
        assert len(result) == 1
        # Output must be colorized, not the raw text.
        assert result[0] != "MESSAGE"
        assert "\033[" in result[0]

    def test_msg_gradient_fallback_when_no_theme(self):
        """msg_gradient uses fallback magenta when ACTIVE_THEME is None."""
        result = self._msg_with_theme(None, ["MESSAGE"])

        assert isinstance(result, list)
        assert len(result) == 1
        assert result[0] != "MESSAGE"
        assert "\033[" in result[0]

    def test_msg_gradient_returns_colored_rows(self):
        """msg_gradient returns properly colored rows with animation offset."""
        rows = ["NTFY", "ALERT"]
        result = self._msg_with_theme("orange", rows, offset=0.5)

        # Same number of rows back, all colored, none left as raw text.
        assert len(result) == 2
        assert all("\033[" in row for row in result)
        assert result != rows

    def test_msg_gradient_different_themes_produce_different_results(self):
        """msg_gradient produces different colors for different themes."""
        rows = ["TEST"]
        result_green = self._msg_with_theme("green", rows)
        result_orange = self._msg_with_theme("orange", rows)

        # Different themes carry different message gradients.
        assert result_green != result_orange

    def test_msg_gradient_preserves_spacing(self):
        """msg_gradient preserves spaces in rows."""
        result = self._msg_with_theme("purple", ["M E S S A G E"])
        assert " " in result[0]

    def test_msg_gradient_empty_rows(self):
        """msg_gradient handles empty rows correctly."""
        assert self._msg_with_theme("green", [""]) == [""]
|
||||
473
tests/test_sensors.py
Normal file
473
tests/test_sensors.py
Normal file
@@ -0,0 +1,473 @@
|
||||
"""
|
||||
Tests for the sensor framework.
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
from engine.sensors import Sensor, SensorRegistry, SensorStage, SensorValue
|
||||
|
||||
|
||||
class TestSensorValue:
    """Tests for SensorValue dataclass."""

    def test_create_sensor_value(self):
        """SensorValue stores sensor data correctly."""
        reading = SensorValue(
            sensor_name="mic",
            value=42.5,
            timestamp=1234567890.0,
            unit="dB",
        )

        # Every field round-trips unchanged.
        assert reading.sensor_name == "mic"
        assert reading.value == 42.5
        assert reading.timestamp == 1234567890.0
        assert reading.unit == "dB"
|
||||
|
||||
|
||||
class DummySensor(Sensor):
    """Test double: a Sensor that always reports one fixed value."""

    def __init__(self, name: str = "dummy", value: float = 1.0):
        self.name = name
        self.unit = "units"
        self._fixed_value = value

    def start(self) -> bool:
        # Nothing to acquire — the dummy is always ready.
        return True

    def stop(self) -> None:
        pass

    def read(self) -> SensorValue | None:
        # Timestamp reflects read time, mirroring a real sensor.
        return SensorValue(
            sensor_name=self.name,
            value=self._fixed_value,
            timestamp=time.time(),
            unit=self.unit,
        )
|
||||
|
||||
|
||||
class TestSensorRegistry:
    """Tests for SensorRegistry."""

    def setup_method(self):
        """Reset the class-level registry so each test starts empty."""
        SensorRegistry._sensors.clear()
        SensorRegistry._started = False

    def test_register_sensor(self):
        """SensorRegistry registers sensors."""
        dummy = DummySensor()
        SensorRegistry.register(dummy)

        # Lookup returns the exact registered instance.
        assert SensorRegistry.get("dummy") is dummy

    def test_list_sensors(self):
        """SensorRegistry lists registered sensors."""
        for label in ("a", "b"):
            SensorRegistry.register(DummySensor(label))

        registered = SensorRegistry.list_sensors()
        assert "a" in registered
        assert "b" in registered

    def test_read_all(self):
        """SensorRegistry reads all sensor values."""
        SensorRegistry.register(DummySensor("a", 1.0))
        SensorRegistry.register(DummySensor("b", 2.0))

        readings = SensorRegistry.read_all()
        assert readings["a"] == 1.0
        assert readings["b"] == 2.0
|
||||
|
||||
|
||||
class TestSensorStage:
    """Tests for SensorStage pipeline adapter."""

    def setup_method(self):
        """Reset the global sensor registry between tests."""
        SensorRegistry._sensors.clear()
        SensorRegistry._started = False

    def test_sensor_stage_capabilities(self):
        """SensorStage declares correct capabilities."""
        stage = SensorStage(DummySensor("mic"))
        assert "sensor.mic" in stage.capabilities

    def test_sensor_stage_process(self):
        """SensorStage reads sensor and stores in context."""
        from engine.pipeline.core import PipelineContext

        stage = SensorStage(DummySensor("test", 42.0), "test")
        ctx = PipelineContext()

        result = stage.process(None, ctx)

        # The reading lands in context state; the stage itself passes
        # the buffer through unchanged (None in, None out).
        assert ctx.get_state("sensor.test") == 42.0
        assert result is None
|
||||
|
||||
|
||||
class TestApplyParamBindings:
    """Tests for sensor param bindings."""

    def test_no_bindings_returns_original(self):
        """Effect without bindings returns original config."""
        from engine.effects.types import (
            EffectConfig,
            EffectPlugin,
            apply_param_bindings,
        )

        class UnboundEffect(EffectPlugin):
            name = "test"
            config = EffectConfig()

            def process(self, buf, ctx):
                return buf

            def configure(self, config):
                pass

        effect = UnboundEffect()

        # With no bindings, the exact same config object comes back.
        assert apply_param_bindings(effect, object()) is effect.config

    def test_bindings_read_sensor_values(self):
        """Param bindings read sensor values from context."""
        from engine.effects.types import (
            EffectConfig,
            EffectContext,
            EffectPlugin,
            apply_param_bindings,
        )

        class BoundEffect(EffectPlugin):
            name = "test"
            config = EffectConfig(intensity=1.0)
            param_bindings = {
                "intensity": {"sensor": "mic", "transform": "linear"},
            }

            def process(self, buf, ctx):
                return buf

            def configure(self, config):
                pass

        ctx = EffectContext(
            terminal_width=80,
            terminal_height=24,
            scroll_cam=0,
            ticker_height=20,
        )
        ctx.set_state("sensor.mic", 0.8)

        result = apply_param_bindings(BoundEffect(), ctx)
        assert "intensity_sensor" in result.params
|
||||
|
||||
|
||||
class TestSensorLifecycle:
    """Tests for sensor start/stop lifecycle."""

    def setup_method(self):
        """Reset the global registry so each test starts empty."""
        SensorRegistry._sensors.clear()
        SensorRegistry._started = False

    def test_start_all(self):
        """SensorRegistry starts all sensors."""
        lifecycle_events = []

        class StartTrackingSensor(Sensor):
            name = "stateful"

            def start(self) -> bool:
                lifecycle_events.append("start")
                return True

            def stop(self) -> None:
                lifecycle_events.append("stop")

            def read(self) -> SensorValue | None:
                return SensorValue("stateful", 1.0, 0.0)

        SensorRegistry.register(StartTrackingSensor())
        SensorRegistry.start_all()

        assert "start" in lifecycle_events
        assert SensorRegistry._started is True

    def test_stop_all(self):
        """SensorRegistry stops all sensors."""
        lifecycle_events = []

        class StopTrackingSensor(Sensor):
            name = "stateful"

            def start(self) -> bool:
                return True

            def stop(self) -> None:
                lifecycle_events.append("stop")

            def read(self) -> SensorValue | None:
                return SensorValue("stateful", 1.0, 0.0)

        SensorRegistry.register(StopTrackingSensor())
        SensorRegistry.start_all()
        SensorRegistry.stop_all()

        assert "stop" in lifecycle_events
        assert SensorRegistry._started is False

    def test_unavailable_sensor(self):
        """Unavailable sensor returns None from read."""

        class OfflineSensor(Sensor):
            name = "unavailable"

            @property
            def available(self) -> bool:
                return False

            def start(self) -> bool:
                return False

            def stop(self) -> None:
                pass

            def read(self) -> SensorValue | None:
                return None

        sensor = OfflineSensor()
        assert sensor.available is False
        assert sensor.read() is None
|
||||
|
||||
|
||||
class TestTransforms:
    """Tests for sensor value transforms."""

    @staticmethod
    def _effect_bound_to_mic(transform, **extra):
        """Build an effect whose 'intensity' param is bound to the mic sensor."""
        from engine.effects.types import EffectConfig, EffectPlugin

        binding = {"sensor": "mic", "transform": transform}
        binding.update(extra)

        class MicBoundEffect(EffectPlugin):
            name = "test"
            config = EffectConfig(intensity=1.0)
            param_bindings = {"intensity": binding}

            def process(self, buf, ctx):
                return buf

            def configure(self, config):
                pass

        return MicBoundEffect()

    @staticmethod
    def _context_with_mic(level):
        """Build an EffectContext carrying a mic sensor reading."""
        from engine.effects.types import EffectContext

        ctx = EffectContext(80, 24, 0, 20)
        ctx.set_state("sensor.mic", level)
        return ctx

    def test_exponential_transform(self):
        """Exponential transform squares the value."""
        from engine.effects.types import apply_param_bindings

        effect = self._effect_bound_to_mic("exponential")
        result = apply_param_bindings(effect, self._context_with_mic(0.5))

        # 0.5^2 = 0.25, then scaled: 0.5 + 0.25*0.5 = 0.625
        assert result.intensity != effect.config.intensity

    def test_inverse_transform(self):
        """Inverse transform inverts the value."""
        from engine.effects.types import apply_param_bindings

        effect = self._effect_bound_to_mic("inverse")
        result = apply_param_bindings(effect, self._context_with_mic(0.8))

        # 1.0 - 0.8 = 0.2
        assert abs(result.params["intensity_sensor"] - 0.2) < 0.001

    def test_threshold_transform(self):
        """Threshold transform applies binary threshold."""
        from engine.effects.types import apply_param_bindings

        effect = self._effect_bound_to_mic("threshold", threshold=0.5)

        # Above threshold -> full on.
        result = apply_param_bindings(effect, self._context_with_mic(0.8))
        assert result.params["intensity_sensor"] == 1.0

        # Below threshold -> fully off.
        result = apply_param_bindings(effect, self._context_with_mic(0.3))
        assert result.params["intensity_sensor"] == 0.0
|
||||
|
||||
|
||||
class TestOscillatorSensor:
    """Tests for OscillatorSensor."""

    def setup_method(self):
        """Reset the global sensor registry between tests."""
        SensorRegistry._sensors.clear()
        SensorRegistry._started = False

    def test_sine_waveform(self):
        """Oscillator generates sine wave."""
        from engine.sensors.oscillator import OscillatorSensor

        osc = OscillatorSensor(name="test", waveform="sine", frequency=1.0)
        osc.start()

        samples = [osc.read().value for _ in range(10)]
        # Output is normalised into the unit interval.
        assert all(0 <= sample <= 1 for sample in samples)

    def test_square_waveform(self):
        """Oscillator generates square wave."""
        from engine.sensors.oscillator import OscillatorSensor

        osc = OscillatorSensor(name="test", waveform="square", frequency=10.0)
        osc.start()

        samples = [osc.read().value for _ in range(10)]
        # A square wave only ever sits at its two extremes.
        assert all(sample in (0.0, 1.0) for sample in samples)

    def test_waveform_types(self):
        """All waveform types work."""
        from engine.sensors.oscillator import OscillatorSensor

        for waveform in ["sine", "square", "sawtooth", "triangle", "noise"]:
            osc = OscillatorSensor(name=waveform, waveform=waveform, frequency=1.0)
            osc.start()
            reading = osc.read()
            assert reading is not None
            assert 0 <= reading.value <= 1

    def test_invalid_waveform_raises(self):
        """Invalid waveform returns None."""
        # NOTE(review): the method name says "raises" but the contract
        # exercised here is "read() returns None" — consider renaming.
        from engine.sensors.oscillator import OscillatorSensor

        osc = OscillatorSensor(waveform="invalid")
        osc.start()
        assert osc.read() is None

    def test_sensor_driven_oscillator(self):
        """Oscillator can be driven by another sensor."""
        from engine.sensors.oscillator import OscillatorSensor

        class ConstantModSensor(Sensor):
            name = "mod"

            def start(self) -> bool:
                return True

            def stop(self) -> None:
                pass

            def read(self) -> SensorValue | None:
                return SensorValue("mod", 0.5, 0.0)

        SensorRegistry.register(ConstantModSensor())

        lfo = OscillatorSensor(
            name="lfo", waveform="sine", frequency=0.1, input_sensor="mod"
        )
        lfo.start()

        reading = lfo.read()
        assert reading is not None
        assert 0 <= reading.value <= 1
|
||||
|
||||
|
||||
class TestMicSensor:
    """Tests for MicSensor."""

    def setup_method(self):
        """Reset the global sensor registry between tests."""
        SensorRegistry._sensors.clear()
        SensorRegistry._started = False

    def test_mic_sensor_creation(self):
        """MicSensor can be created."""
        from engine.sensors.mic import MicSensor

        mic = MicSensor()
        assert mic.name == "mic"
        assert mic.unit == "dB"

    def test_mic_sensor_custom_name(self):
        """MicSensor can have custom name."""
        from engine.sensors.mic import MicSensor

        assert MicSensor(name="my_mic").name == "my_mic"

    def test_mic_sensor_start(self):
        """MicSensor.start returns bool."""
        from engine.sensors.mic import MicSensor

        assert isinstance(MicSensor().start(), bool)

    def test_mic_sensor_read_returns_value_or_none(self):
        """MicSensor.read returns SensorValue or None."""
        from engine.sensors.mic import MicSensor

        mic = MicSensor()
        mic.start()
        # read() may legitimately be None when no microphone is present;
        # this test only checks that it does not raise.
        reading = mic.read()
        assert reading is None or isinstance(reading, SensorValue)
|
||||
223
tests/test_streaming.py
Normal file
223
tests/test_streaming.py
Normal file
@@ -0,0 +1,223 @@
|
||||
"""
|
||||
Tests for streaming protocol utilities.
|
||||
"""
|
||||
|
||||
from engine.display.streaming import (
|
||||
FrameDiff,
|
||||
MessageType,
|
||||
apply_diff,
|
||||
compress_frame,
|
||||
compute_diff,
|
||||
decode_binary_message,
|
||||
decode_diff_message,
|
||||
decode_rle,
|
||||
decompress_frame,
|
||||
encode_binary_message,
|
||||
encode_diff_message,
|
||||
encode_rle,
|
||||
should_use_diff,
|
||||
)
|
||||
|
||||
|
||||
class TestFrameDiff:
    """Tests for FrameDiff computation."""

    def test_compute_diff_all_changed(self):
        """Every differing line is reported, with buffer dimensions."""
        diff = compute_diff(["a", "b", "c"], ["x", "y", "z"])
        assert len(diff.changed_lines) == 3
        assert diff.width == 1
        assert diff.height == 3

    def test_compute_diff_no_changes(self):
        """Identical buffers produce an empty change list."""
        same = ["a", "b", "c"]
        diff = compute_diff(same, list(same))
        assert len(diff.changed_lines) == 0

    def test_compute_diff_partial_changes(self):
        """Only the modified line is reported, as (index, new_text)."""
        diff = compute_diff(["a", "b", "c"], ["a", "x", "c"])
        assert len(diff.changed_lines) == 1
        assert diff.changed_lines[0] == (1, "x")

    def test_compute_diff_new_lines(self):
        """A line appended to the buffer counts as a change."""
        diff = compute_diff(["a", "b"], ["a", "b", "c"])
        assert len(diff.changed_lines) == 1
        assert diff.changed_lines[0] == (2, "c")

    def test_compute_diff_empty_old(self):
        """With an empty old buffer, every new line is a change."""
        diff = compute_diff([], ["a", "b", "c"])
        assert len(diff.changed_lines) == 3
|
||||
class TestRLE:
    """Tests for run-length encoding."""

    def test_encode_rle_no_repeats(self):
        """Distinct lines encode as runs of length one."""
        encoded = encode_rle([(0, "a"), (1, "b"), (2, "c")])
        assert encoded == [(0, "a", 1), (1, "b", 1), (2, "c", 1)]

    def test_encode_rle_with_repeats(self):
        """Consecutive identical lines collapse into a single run."""
        encoded = encode_rle([(0, "a"), (1, "a"), (2, "a"), (3, "b")])
        assert encoded == [(0, "a", 3), (3, "b", 1)]

    def test_decode_rle(self):
        """Runs expand back to individual (index, text) pairs."""
        decoded = decode_rle([(0, "a", 3), (3, "b", 1)])
        assert decoded == [(0, "a"), (1, "a"), (2, "a"), (3, "b")]

    def test_encode_decode_roundtrip(self):
        """encode followed by decode is lossless."""
        # Repeating every third line exercises both run and non-run paths.
        original = [(i, f"line{i % 3}") for i in range(10)]
        assert decode_rle(encode_rle(original)) == original
||||
|
||||
class TestCompression:
    """Tests for frame compression."""

    def test_compress_decompress(self):
        """A compress/decompress roundtrip reproduces the buffer."""
        frame = [f"Line {i:02d}" for i in range(24)]
        assert decompress_frame(compress_frame(frame), 24) == frame

    def test_compress_empty(self):
        """An empty buffer survives the roundtrip."""
        assert decompress_frame(compress_frame([]), 0) == []
||||
|
||||
class TestBinaryProtocol:
    """Tests for binary message encoding."""

    def test_encode_decode_message(self):
        """Header fields and payload survive an encode/decode roundtrip."""
        payload = b"test payload"
        wire = encode_binary_message(MessageType.FULL_FRAME, 80, 24, payload)
        msg_type, width, height, body = decode_binary_message(wire)

        assert msg_type == MessageType.FULL_FRAME
        assert (width, height) == (80, 24)
        assert body == payload

    def test_encode_decode_all_types(self):
        """Every MessageType value roundtrips through the wire format."""
        for msg_type in MessageType:
            wire = encode_binary_message(msg_type, 80, 24, b"test")
            decoded_type, _, _, _ = decode_binary_message(wire)
            assert decoded_type == msg_type
||||
|
||||
class TestDiffProtocol:
    """Tests for diff message encoding."""

    def test_encode_decode_diff(self):
        """Changed lines survive an encode/decode roundtrip."""
        changes = [(0, "a"), (5, "b")]
        diff = FrameDiff(width=80, height=24, changed_lines=changes)
        assert decode_diff_message(encode_diff_message(diff)) == changes
||||
|
||||
class TestApplyDiff:
    """Tests for applying diffs."""

    def test_apply_diff(self):
        """Changed lines overwrite the corresponding old lines."""
        diff = FrameDiff(width=1, height=4, changed_lines=[(1, "x"), (2, "y")])
        assert apply_diff(["a", "b", "c", "d"], diff) == ["a", "x", "y", "d"]

    def test_apply_diff_new_lines(self):
        """Lines beyond the old buffer's length are appended."""
        diff = FrameDiff(width=1, height=4, changed_lines=[(2, "c"), (3, "d")])
        assert apply_diff(["a", "b"], diff) == ["a", "b", "c", "d"]
||||
|
||||
class TestShouldUseDiff:
    """Tests for diff threshold decision."""

    def test_uses_diff_when_small_changes(self):
        """Few changed lines -> diff transmission is preferred."""
        base = ["a"] * 100
        updated = ["a"] * 95 + ["b"] * 5
        assert should_use_diff(base, updated, threshold=0.3) is True

    def test_uses_full_when_many_changes(self):
        """A fully rewritten buffer -> send a full frame instead."""
        assert should_use_diff(["a"] * 100, ["b"] * 100, threshold=0.3) is False

    def test_uses_diff_at_threshold(self):
        """Exactly at the threshold the result is still a strict bool."""
        base = ["a"] * 100
        updated = ["a"] * 70 + ["b"] * 30
        verdict = should_use_diff(base, updated, threshold=0.3)
        # Boundary behavior is unspecified; either strict boolean is accepted.
        assert verdict is True or verdict is False

    def test_returns_false_for_empty(self):
        """Empty buffers never use diffs."""
        assert should_use_diff([], ["a", "b"]) is False
        assert should_use_diff(["a", "b"], []) is False
||||
@@ -1,169 +0,0 @@
|
||||
"""
|
||||
Tests for engine.themes module.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from engine import themes
|
||||
|
||||
|
||||
class TestThemeConstruction:
    """Tests for Theme class initialization."""

    def test_theme_construction(self):
        """Theme stores its name and both gradients unmodified."""
        primary = ["color1", "color2", "color3"]
        message = ["msg1", "msg2", "msg3"]

        built = themes.Theme("test_theme", primary, message)

        assert built.name == "test_theme"
        assert built.main_gradient == primary
        assert built.message_gradient == message
||||
|
||||
class TestGradientLength:
    """Tests for gradient length validation (12 colors per gradient)."""

    def test_gradient_length_green(self):
        """Green theme gradients each hold exactly 12 colors."""
        theme = themes.THEME_REGISTRY["green"]
        assert len(theme.main_gradient) == 12
        assert len(theme.message_gradient) == 12

    def test_gradient_length_orange(self):
        """Orange theme gradients each hold exactly 12 colors."""
        theme = themes.THEME_REGISTRY["orange"]
        assert len(theme.main_gradient) == 12
        assert len(theme.message_gradient) == 12

    def test_gradient_length_purple(self):
        """Purple theme gradients each hold exactly 12 colors."""
        theme = themes.THEME_REGISTRY["purple"]
        assert len(theme.main_gradient) == 12
        assert len(theme.message_gradient) == 12
||||
|
||||
class TestThemeRegistry:
    """Tests for THEME_REGISTRY dictionary."""

    def test_theme_registry_has_three_themes(self):
        """Registry holds exactly green, orange, and purple."""
        assert len(themes.THEME_REGISTRY) == 3
        assert set(themes.THEME_REGISTRY) == {"green", "orange", "purple"}

    def test_registry_values_are_themes(self):
        """Each registry entry is a Theme whose name matches its key."""
        for key, theme in themes.THEME_REGISTRY.items():
            assert isinstance(theme, themes.Theme)
            assert theme.name == key
||||
|
||||
class TestGetTheme:
    """Tests for get_theme function."""

    def test_get_theme_valid_green(self):
        """get_theme('green') returns the green Theme."""
        theme = themes.get_theme("green")
        assert isinstance(theme, themes.Theme)
        assert theme.name == "green"

    def test_get_theme_valid_orange(self):
        """get_theme('orange') returns the orange Theme."""
        theme = themes.get_theme("orange")
        assert isinstance(theme, themes.Theme)
        assert theme.name == "orange"

    def test_get_theme_valid_purple(self):
        """get_theme('purple') returns the purple Theme."""
        theme = themes.get_theme("purple")
        assert isinstance(theme, themes.Theme)
        assert theme.name == "purple"

    def test_get_theme_invalid(self):
        """An unknown theme ID raises KeyError."""
        with pytest.raises(KeyError):
            themes.get_theme("invalid_theme")

    def test_get_theme_invalid_none(self):
        """None as a theme ID raises KeyError."""
        with pytest.raises(KeyError):
            themes.get_theme(None)
||||
|
||||
class TestGreenTheme:
    """Tests for green theme specific values."""

    def test_green_theme_unchanged(self):
        """Green theme keeps its original ANSI-256 color sequences."""
        theme = themes.get_theme("green")

        # Main: 231→195→123→118→82→46→40→34→28→22→22(dim)→235
        assert theme.main_gradient == [
            231, 195, 123, 118, 82, 46, 40, 34, 28, 22, 22, 235,
        ]
        # Message: 231→225→219→213→207→201→165→161→125→89→89(dim)→235
        assert theme.message_gradient == [
            231, 225, 219, 213, 207, 201, 165, 161, 125, 89, 89, 235,
        ]

    def test_green_theme_name(self):
        """Green theme reports its own name."""
        assert themes.get_theme("green").name == "green"
||||
|
||||
class TestOrangeTheme:
    """Tests for orange theme specific values."""

    def test_orange_theme_unchanged(self):
        """Orange theme keeps its original ANSI-256 color sequences."""
        theme = themes.get_theme("orange")

        # Main: 231→215→209→208→202→166→130→94→58→94→94(dim)→235
        assert theme.main_gradient == [
            231, 215, 209, 208, 202, 166, 130, 94, 58, 94, 94, 235,
        ]
        # Message: 231→195→33→27→21→21→21→18→18→18→18(dim)→235
        assert theme.message_gradient == [
            231, 195, 33, 27, 21, 21, 21, 18, 18, 18, 18, 235,
        ]

    def test_orange_theme_name(self):
        """Orange theme reports its own name."""
        assert themes.get_theme("orange").name == "orange"
||||
|
||||
class TestPurpleTheme:
    """Tests for purple theme specific values."""

    def test_purple_theme_unchanged(self):
        """Purple theme keeps its original ANSI-256 color sequences."""
        theme = themes.get_theme("purple")

        # Main: 231→225→177→171→165→135→129→93→57→57→57(dim)→235
        assert theme.main_gradient == [
            231, 225, 177, 171, 165, 135, 129, 93, 57, 57, 57, 235,
        ]
        # Message: 231→226→226→220→220→184→184→178→178→172→172(dim)→235
        assert theme.message_gradient == [
            231, 226, 226, 220, 220, 184, 184, 178, 178, 172, 172, 235,
        ]

    def test_purple_theme_name(self):
        """Purple theme reports its own name."""
        assert themes.get_theme("purple").name == "purple"
||||
|
||||
class TestThemeDataOnly:
    """Tests to ensure themes module has no problematic imports."""

    def test_themes_module_imports(self):
        """themes module should be data-only without config/render imports."""
        import inspect

        source = inspect.getsource(themes)
        # Collect actual import statements (line-based scan; docstring lines
        # that happen to start with "import"/"from" would also match).
        imports = [
            stripped
            for stripped in (line.strip() for line in source.split("\n"))
            if stripped.startswith(("import ", "from "))
        ]
        assert not imports, f"Found unexpected imports: {imports}"
||||
206
tests/test_tint_acceptance.py
Normal file
206
tests/test_tint_acceptance.py
Normal file
@@ -0,0 +1,206 @@
|
||||
"""Integration test: TintEffect in the pipeline."""
|
||||
|
||||
import queue
|
||||
|
||||
from engine.data_sources.sources import ListDataSource, SourceItem
|
||||
from engine.effects.plugins.tint import TintEffect
|
||||
from engine.effects.types import EffectConfig
|
||||
from engine.pipeline import Pipeline, PipelineConfig
|
||||
from engine.pipeline.adapters import (
|
||||
DataSourceStage,
|
||||
DisplayStage,
|
||||
EffectPluginStage,
|
||||
SourceItemsToBufferStage,
|
||||
)
|
||||
from engine.pipeline.core import PipelineContext
|
||||
from engine.pipeline.params import PipelineParams
|
||||
|
||||
|
||||
class QueueDisplay:
    """Stub display that captures every frame into a queue."""

    def __init__(self):
        # Each show() call enqueues a snapshot of the buffer.
        self.frames: queue.Queue[list[str]] = queue.Queue()
        self.width = 80
        self.height = 24
        self._init_called = False

    def init(self, width: int, height: int, reuse: bool = False) -> None:
        """Record the requested dimensions and mark init as done."""
        self.width, self.height = width, height
        self._init_called = True

    def show(self, buffer: list[str], border: bool = False) -> None:
        """Capture a copy of the frame so later mutation can't corrupt it."""
        self.frames.put(list(buffer))

    def clear(self) -> None:
        """No-op for the stub."""

    def cleanup(self) -> None:
        """No-op for the stub."""

    def get_dimensions(self) -> tuple[int, int]:
        """Return the current (width, height)."""
        return (self.width, self.height)
||||
|
||||
def _build_pipeline(
    items: list[SourceItem],
    tint_config: EffectConfig | None = None,
    width: int = 80,
    height: int = 24,
) -> tuple[Pipeline, QueueDisplay, PipelineContext]:
    """Build pipeline: source -> render -> tint effect -> display."""
    capture = QueueDisplay()

    # Seed the context with viewport parameters and the input items.
    ctx = PipelineContext()
    params = PipelineParams()
    params.viewport_width = width
    params.viewport_height = height
    params.frame_number = 0
    ctx.params = params
    ctx.set("items", items)

    pipeline = Pipeline(
        config=PipelineConfig(enable_metrics=True),
        context=ctx,
    )

    # Stage 1: feed the items from a list-backed source.
    pipeline.add_stage(
        "source", DataSourceStage(ListDataSource(items, name="test-source"), name="test-source")
    )

    # Stage 2: render items into a text buffer.
    pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer"))

    # Stage 3: tint effect, optionally pre-configured by the caller.
    tint = TintEffect()
    if tint_config is not None:
        tint.configure(tint_config)
    pipeline.add_stage("tint", EffectPluginStage(tint, name="tint"))

    # Stage 4: capture the final frames.
    pipeline.add_stage("display", DisplayStage(capture, name="queue"))

    pipeline.build()
    pipeline.initialize()

    return pipeline, capture, ctx
||||
|
||||
class TestTintAcceptance:
    """Test TintEffect in a full pipeline."""

    def test_tint_applies_default_color(self):
        """Default tint should apply ANSI color codes to output."""
        items = [SourceItem(content="Hello World", source="test", timestamp="0")]
        pipeline, display, ctx = _build_pipeline(items)

        outcome = pipeline.execute(items)
        assert outcome.success, f"Pipeline failed: {outcome.error}"

        rendered = "\n".join(display.frames.get(timeout=1))
        assert "\033[" in rendered, f"Expected ANSI codes in frame: {rendered!r}"
        assert "Hello World" in rendered

    def test_tint_applies_red_color(self):
        """Configured red tint should produce red ANSI code (196-197)."""
        items = [SourceItem(content="Red Text", source="test", timestamp="0")]
        red = EffectConfig(
            enabled=True,
            intensity=1.0,
            params={"r": 255, "g": 0, "b": 0, "a": 0.8},
        )
        pipeline, display, ctx = _build_pipeline(items, tint_config=red)

        outcome = pipeline.execute(items)
        assert outcome.success

        first_line = display.frames.get(timeout=1)[0]
        # Pure red maps to 196 or 197 in the 256-color cube.
        has_red = "\033[38;5;196m" in first_line or "\033[38;5;197m" in first_line
        assert has_red, f"Missing red tint: {first_line}"
        assert "Red Text" in first_line

    def test_tint_disabled_does_nothing(self):
        """Disabled tint stage should pass through buffer unchanged."""
        items = [SourceItem(content="Plain Text", source="test", timestamp="0")]
        pipeline, display, ctx = _build_pipeline(items)

        pipeline.get_stage("tint").set_enabled(False)

        outcome = pipeline.execute(items)
        assert outcome.success

        rendered = "\n".join(display.frames.get(timeout=1))
        # Content intact, no escape sequences introduced.
        assert "Plain Text" in rendered
        assert "\033[" not in rendered, f"Unexpected ANSI codes in frame: {rendered!r}"

    def test_tint_zero_transparency(self):
        """Alpha=0 should pass through buffer unchanged (no tint)."""
        items = [SourceItem(content="Transparent", source="test", timestamp="0")]
        invisible = EffectConfig(
            enabled=True,
            intensity=1.0,
            params={"r": 255, "g": 128, "b": 64, "a": 0.0},
        )
        pipeline, display, ctx = _build_pipeline(items, tint_config=invisible)

        outcome = pipeline.execute(items)
        assert outcome.success

        rendered = "\n".join(display.frames.get(timeout=1))
        assert "Transparent" in rendered
        assert "\033[" not in rendered, f"Expected no ANSI codes with alpha=0: {rendered!r}"

    def test_tint_with_multiples_lines(self):
        """Tint should apply to all non-empty lines."""
        items = [
            SourceItem(content="Line1\nLine2\n\nLine4", source="test", timestamp="0")
        ]
        green = EffectConfig(
            enabled=True,
            intensity=1.0,
            params={"r": 0, "g": 255, "b": 0, "a": 0.7},
        )
        pipeline, display, ctx = _build_pipeline(items, tint_config=green)

        outcome = pipeline.execute(items)
        assert outcome.success

        for line in display.frames.get(timeout=1):
            if not line.strip():
                assert line == "", f"Empty lines should be exactly empty: {line!r}"
                continue
            # Non-empty lines carry a 256-color escape sequence.
            assert "\033[38;5;" in line and "m" in line, f"Missing green tint: {line}"

    def test_tint_preserves_empty_lines(self):
        """Empty lines should remain empty (no ANSI codes)."""
        items = [SourceItem(content="A\n\nB", source="test", timestamp="0")]
        pipeline, display, ctx = _build_pipeline(items)

        outcome = pipeline.execute(items)
        assert outcome.success

        frame = display.frames.get(timeout=1)
        assert frame[0].strip() != ""
        assert frame[1] == ""  # Empty line unchanged
        assert frame[2].strip() != ""
||||
125
tests/test_tint_effect.py
Normal file
125
tests/test_tint_effect.py
Normal file
@@ -0,0 +1,125 @@
|
||||
import pytest
|
||||
|
||||
from engine.effects.plugins.tint import TintEffect
|
||||
from engine.effects.types import EffectConfig
|
||||
|
||||
|
||||
@pytest.fixture
def effect():
    """A fresh TintEffect with its default configuration."""
    return TintEffect()
||||
|
||||
@pytest.fixture
def effect_with_params():
    """A TintEffect configured with a fixed orange tint (r=255, g=128, b=64, a=0.5).

    Fix: the original declared ``r``/``g``/``b``/``a`` as defaulted
    parameters, but pytest never passes arguments to fixtures, so those
    parameters were dead code that suggested a configurability the fixture
    does not have.  The values are now inlined; tests that need different
    values mutate ``effect.config.params`` directly (as they already do).
    """
    e = TintEffect()
    config = EffectConfig(
        enabled=True,
        intensity=1.0,
        params={"r": 255, "g": 128, "b": 64, "a": 0.5},
    )
    e.configure(config)
    return e
||||
|
||||
@pytest.fixture
def mock_context():
    """A minimal stand-in for the pipeline context used by effects."""

    class MockContext:
        # Fixed terminal geometry; effects only read these attributes.
        terminal_width = 80
        terminal_height = 24

        def get_state(self, key):
            # No persisted state in tests.
            return None

    return MockContext()
||||
|
||||
class TestTintEffect:
    """Unit tests for TintEffect behavior and RGB→ANSI-256 mapping."""

    def test_name(self, effect):
        assert effect.name == "tint"

    def test_enabled_by_default(self, effect):
        assert effect.config.enabled is True

    def test_returns_input_when_empty(self, effect, mock_context):
        assert effect.process([], mock_context) == []

    def test_returns_input_when_transparency_zero(
        self, effect_with_params, mock_context
    ):
        effect_with_params.config.params["a"] = 0.0
        original = ["hello world"]
        assert effect_with_params.process(original, mock_context) == original

    def test_applies_tint_to_plain_text(self, effect_with_params, mock_context):
        tinted = effect_with_params.process(["hello world"], mock_context)
        assert len(tinted) == 1
        assert "\033[" in tinted[0]  # Has ANSI codes
        assert "hello world" in tinted[0]

    def test_tint_preserves_content(self, effect_with_params, mock_context):
        tinted = effect_with_params.process(["hello world", "test line"], mock_context)
        assert "hello world" in tinted[0]
        assert "test line" in tinted[1]

    def test_rgb_to_ansi256_black(self, effect):
        assert effect._rgb_to_ansi256(0, 0, 0) == 16

    def test_rgb_to_ansi256_white(self, effect):
        assert effect._rgb_to_ansi256(255, 255, 255) == 231

    def test_rgb_to_ansi256_red(self, effect):
        # Pure red lands on 196 or 197 in the 6x6x6 color cube.
        assert 196 <= effect._rgb_to_ansi256(255, 0, 0) <= 197

    def test_rgb_to_ansi256_green(self, effect):
        assert 34 <= effect._rgb_to_ansi256(0, 255, 0) <= 46

    def test_rgb_to_ansi256_blue(self, effect):
        assert 20 <= effect._rgb_to_ansi256(0, 0, 255) <= 33

    def test_configure_updates_params(self, effect):
        effect.configure(
            EffectConfig(
                enabled=True,
                intensity=1.0,
                params={"r": 100, "g": 150, "b": 200, "a": 0.8},
            )
        )
        params = effect.config.params
        assert params["r"] == 100
        assert params["g"] == 150
        assert params["b"] == 200
        assert params["a"] == 0.8

    def test_clamp_rgb_values(self, effect_with_params, mock_context):
        # Out-of-range channels must be clamped rather than crash.
        effect_with_params.config.params["r"] = 300
        effect_with_params.config.params["g"] = -10
        effect_with_params.config.params["b"] = 1.5
        tinted = effect_with_params.process(["test"], mock_context)
        assert "\033[" in tinted[0]

    def test_clamp_alpha_above_one(self, effect_with_params, mock_context):
        effect_with_params.config.params["a"] = 1.5
        tinted = effect_with_params.process(["test"], mock_context)
        assert "\033[" in tinted[0]

    def test_preserves_empty_lines(self, effect_with_params, mock_context):
        tinted = effect_with_params.process(["hello", "", "world"], mock_context)
        assert tinted[1] == ""

    def test_inlet_types_includes_text_buffer(self, effect):
        from engine.pipeline.core import DataType

        assert DataType.TEXT_BUFFER in effect.inlet_types

    def test_outlet_types_includes_text_buffer(self, effect):
        from engine.pipeline.core import DataType

        assert DataType.TEXT_BUFFER in effect.outlet_types
||||
115
tests/test_translate.py
Normal file
115
tests/test_translate.py
Normal file
@@ -0,0 +1,115 @@
|
||||
"""
|
||||
Tests for engine.translate module.
|
||||
"""
|
||||
|
||||
import json
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from engine.translate import (
|
||||
_translate_cached,
|
||||
detect_location_language,
|
||||
translate_headline,
|
||||
)
|
||||
|
||||
|
||||
def clear_translate_cache():
    """Reset the LRU cache so tests do not observe each other's results."""
    _translate_cached.cache_clear()
||||
|
||||
class TestDetectLocationLanguage:
    """Tests for detect_location_language function."""

    def test_returns_none_for_unknown_location(self):
        """No matching location pattern -> None."""
        assert detect_location_language("Breaking news about technology") is None

    def test_detects_berlin(self):
        """Berlin maps to German."""
        assert detect_location_language("Berlin police arrest protesters") == "de"

    def test_detects_paris(self):
        """Paris maps to French."""
        assert detect_location_language("Paris fashion week begins") == "fr"

    def test_detects_tokyo(self):
        """Tokyo maps to Japanese."""
        assert detect_location_language("Tokyo stocks rise") == "ja"

    def test_detects_berlin_again(self):
        """A different Berlin headline still maps to German."""
        assert detect_location_language("Berlin marathon set to begin") == "de"

    def test_case_insensitive(self):
        """Matching ignores letter case."""
        assert detect_location_language("BERLIN SUMMER FESTIVAL") == "de"

    def test_returns_first_match(self):
        """When several locations appear, the first match wins."""
        assert detect_location_language("Berlin in Paris for the event") == "de"
||||
|
||||
class TestTranslateHeadline:
    """Tests for translate_headline function."""

    def test_returns_translated_text(self):
        """translate_headline delegates to the cached translator.

        Fix: the original patched ``translate_headline`` itself and then
        imported and called that same (now mocked) name, so the assertion
        only verified that ``mock.patch`` works — none of the real code ran.
        Patching the underlying ``_translate_cached`` exercises the actual
        wrapper while still avoiding network access.
        """
        clear_translate_cache()
        with patch("engine.translate._translate_cached") as mock_cached:
            mock_cached.return_value = "Translated title"
            result = translate_headline("Original title", "de")

        assert result == "Translated title"

    def test_uses_cached_result(self):
        """Repeated calls with identical arguments hit the LRU cache."""
        clear_translate_cache()
        first = translate_headline("Test unique", "es")
        second = translate_headline("Test unique", "es")
        assert first == second
||||
|
||||
class TestTranslateCached:
    """Tests for _translate_cached function."""

    def test_translation_network_error(self):
        """Network failure falls back to the original text."""
        clear_translate_cache()
        with patch("engine.translate.urllib.request.urlopen") as mock_urlopen:
            mock_urlopen.side_effect = Exception("Network error")

            assert _translate_cached("Hello world", "de") == "Hello world"

    def test_translation_invalid_json(self):
        """An unparseable response body falls back to the original text."""
        clear_translate_cache()
        with patch("engine.translate.urllib.request.urlopen") as mock_urlopen:
            mock_response = MagicMock()
            mock_response.read.return_value = b"invalid json"
            mock_urlopen.return_value = mock_response

            assert _translate_cached("Hello", "de") == "Hello"

    def test_translation_empty_response(self):
        """A well-formed but empty translation falls back to the original.

        Fix: the mock previously returned a ``str`` from ``read()`` while a
        real HTTP response (and the other tests here) return ``bytes``, so
        this case was silently exercising the decode-error path instead of
        the empty-translation path.  The payload is now encoded to bytes.
        """
        clear_translate_cache()
        with patch("engine.translate.urllib.request.urlopen") as mock_urlopen:
            mock_response = MagicMock()
            payload = json.dumps([[[""], None, "de"], None]).encode("utf-8")
            mock_response.read.return_value = payload
            mock_urlopen.return_value = mock_response

            assert _translate_cached("Hello", "de") == "Hello"
||||
184
tests/test_ui_panel.py
Normal file
184
tests/test_ui_panel.py
Normal file
@@ -0,0 +1,184 @@
|
||||
"""
|
||||
Tests for UIPanel.
|
||||
"""
|
||||
|
||||
from engine.pipeline.ui import StageControl, UIConfig, UIPanel
|
||||
|
||||
|
||||
class MockStage:
    """Minimal stand-in for a pipeline stage used by UIPanel tests."""

    def __init__(self, name, category="effect"):
        self.name = name
        self.category = category
        # Stages start enabled; tests toggle via the panel, not directly.
        self._enabled = True

    def is_enabled(self):
        """Report whether this stage is currently enabled."""
        return self._enabled
||||
|
||||
class TestUIPanel:
|
||||
"""Tests for UIPanel."""
|
||||
|
||||
def test_init(self):
    """A default-constructed UIPanel starts with default config and no selection."""
    panel = UIPanel()
    assert panel.config.panel_width == 24
    assert panel.config.stage_list_height == 12
    assert panel.scroll_offset == 0
    assert panel.selected_stage is None
||||
def test_register_stage(self):
    """Registering a stage creates an enabled, unselected control entry."""
    panel = UIPanel()
    panel.register_stage(MockStage("noise"), enabled=True)

    assert "noise" in panel.stages
    control = panel.stages["noise"]
    assert control.name == "noise"
    assert control.enabled is True
    assert control.selected is False
|
||||
def test_select_stage(self):
    """Selecting a stage marks it selected and deselects the others."""
    panel = UIPanel()
    for stage_name in ("noise", "fade"):
        panel.register_stage(MockStage(stage_name))

    panel.select_stage("fade")

    assert panel.selected_stage == "fade"
    assert panel.stages["fade"].selected is True
    assert panel.stages["noise"].selected is False
|
||||
def test_toggle_stage(self):
|
||||
"""toggle_stage flips enabled state."""
|
||||
panel = UIPanel()
|
||||
stage = MockStage("glitch")
|
||||
panel.register_stage(stage, enabled=True)
|
||||
result = panel.toggle_stage("glitch")
|
||||
assert result is False
|
||||
assert panel.stages["glitch"].enabled is False
|
||||
result = panel.toggle_stage("glitch")
|
||||
assert result is True
|
||||
|
||||
def test_get_enabled_stages(self):
|
||||
"""get_enabled_stages returns only enabled stage names."""
|
||||
panel = UIPanel()
|
||||
panel.register_stage(MockStage("noise"), enabled=True)
|
||||
panel.register_stage(MockStage("fade"), enabled=False)
|
||||
panel.register_stage(MockStage("glitch"), enabled=True)
|
||||
enabled = panel.get_enabled_stages()
|
||||
assert set(enabled) == {"noise", "glitch"}
|
||||
|
||||
def test_scroll_stages(self):
|
||||
"""scroll_stages moves the view."""
|
||||
panel = UIPanel(UIConfig(stage_list_height=3))
|
||||
for i in range(10):
|
||||
panel.register_stage(MockStage(f"stage{i}"))
|
||||
assert panel.scroll_offset == 0
|
||||
panel.scroll_stages(1)
|
||||
assert panel.scroll_offset == 1
|
||||
panel.scroll_stages(-1)
|
||||
assert panel.scroll_offset == 0
|
||||
# Clamp at max
|
||||
panel.scroll_stages(100)
|
||||
assert panel.scroll_offset == 7 # 10 - 3 = 7
|
||||
|
||||
def test_render_produces_lines(self):
|
||||
"""render produces list of strings of correct width."""
|
||||
panel = UIPanel(UIConfig(panel_width=20))
|
||||
panel.register_stage(MockStage("noise"), enabled=True)
|
||||
panel.register_stage(MockStage("fade"), enabled=False)
|
||||
panel.select_stage("noise")
|
||||
lines = panel.render(80, 24)
|
||||
# All lines should be exactly panel_width chars (20)
|
||||
for line in lines:
|
||||
assert len(line) == 20
|
||||
# Should have header, stage rows, separator, params area, footer
|
||||
assert len(lines) >= 5
|
||||
|
||||
def test_process_key_event_space_toggles_stage(self):
|
||||
"""process_key_event with space toggles UI panel visibility."""
|
||||
panel = UIPanel()
|
||||
stage = MockStage("glitch")
|
||||
panel.register_stage(stage, enabled=True)
|
||||
panel.select_stage("glitch")
|
||||
# Space should now toggle UI panel visibility, not stage
|
||||
assert panel._show_panel is True
|
||||
handled = panel.process_key_event(" ")
|
||||
assert handled is True
|
||||
assert panel._show_panel is False
|
||||
# Pressing space again should show panel
|
||||
handled = panel.process_key_event(" ")
|
||||
assert panel._show_panel is True
|
||||
|
||||
def test_process_key_event_space_does_not_toggle_in_picker(self):
|
||||
"""Space should not toggle UI panel when preset picker is active."""
|
||||
panel = UIPanel()
|
||||
panel._show_panel = True
|
||||
panel._show_preset_picker = True
|
||||
handled = panel.process_key_event(" ")
|
||||
assert handled is False # Not handled when picker active
|
||||
assert panel._show_panel is True # Unchanged
|
||||
|
||||
def test_process_key_event_s_selects_next(self):
|
||||
"""process_key_event with s cycles selection."""
|
||||
panel = UIPanel()
|
||||
panel.register_stage(MockStage("noise"))
|
||||
panel.register_stage(MockStage("fade"))
|
||||
panel.register_stage(MockStage("glitch"))
|
||||
panel.select_stage("noise")
|
||||
handled = panel.process_key_event("s")
|
||||
assert handled is True
|
||||
assert panel.selected_stage == "fade"
|
||||
|
||||
def test_process_key_event_hjkl_navigation(self):
|
||||
"""process_key_event with HJKL keys."""
|
||||
panel = UIPanel()
|
||||
stage = MockStage("noise")
|
||||
panel.register_stage(stage)
|
||||
panel.select_stage("noise")
|
||||
|
||||
# J or Down should scroll or adjust param
|
||||
assert panel.scroll_stages(1) is None # Just test it doesn't error
|
||||
# H or Left should adjust param (when param selected)
|
||||
panel.selected_stage = "noise"
|
||||
panel._focused_param = "intensity"
|
||||
panel.stages["noise"].params["intensity"] = 0.5
|
||||
|
||||
# Left/H should decrease
|
||||
handled = panel.process_key_event("h")
|
||||
assert handled is True
|
||||
# L or Right should increase
|
||||
handled = panel.process_key_event("l")
|
||||
assert handled is True
|
||||
|
||||
# K should scroll up
|
||||
panel.selected_stage = None
|
||||
handled = panel.process_key_event("k")
|
||||
assert handled is True
|
||||
|
||||
def test_set_event_callback(self):
|
||||
"""set_event_callback registers callback."""
|
||||
panel = UIPanel()
|
||||
called = []
|
||||
|
||||
def callback(stage_name, enabled):
|
||||
called.append((stage_name, enabled))
|
||||
|
||||
panel.set_event_callback("stage_toggled", callback)
|
||||
panel.toggle_stage("test") # No stage, won't trigger
|
||||
# Simulate toggle through event
|
||||
panel._emit_event("stage_toggled", stage_name="noise", enabled=False)
|
||||
assert called == [("noise", False)]
|
||||
|
||||
def test_register_stage_returns_control(self):
|
||||
"""register_stage should return the StageControl instance."""
|
||||
panel = UIPanel()
|
||||
stage = MockStage("noise_effect")
|
||||
control = panel.register_stage(stage, enabled=True)
|
||||
assert control is not None
|
||||
assert isinstance(control, StageControl)
|
||||
assert control.name == "noise_effect"
|
||||
assert control.enabled is True
|
||||
252
tests/test_viewport_filter_performance.py
Normal file
252
tests/test_viewport_filter_performance.py
Normal file
@@ -0,0 +1,252 @@
|
||||
"""Integration tests for ViewportFilterStage with realistic data volumes.
|
||||
|
||||
These tests verify that the ViewportFilterStage effectively reduces the number
|
||||
of items processed by FontStage, preventing the 10+ second hangs observed with
|
||||
large headline sources.
|
||||
"""
|
||||
|
||||
from engine.data_sources.sources import SourceItem
|
||||
from engine.pipeline.adapters import ViewportFilterStage
|
||||
from engine.pipeline.core import PipelineContext
|
||||
|
||||
|
||||
class MockParams:
    """Lightweight substitute for pipeline parameters in viewport tests."""

    def __init__(self, viewport_width: int = 80, viewport_height: int = 24):
        # ViewportFilterStage only consults these two dimensions.
        self.viewport_width = viewport_width
        self.viewport_height = viewport_height
class TestViewportFilterStage:
    """Unit tests for ViewportFilterStage filtering behaviour."""

    def test_filter_stage_exists(self):
        """The stage can be instantiated and reports its name."""
        stage = ViewportFilterStage()
        assert stage is not None
        assert stage.name == "viewport-filter"

    def test_filter_stage_properties(self):
        """Inlet and outlet both advertise SOURCE_ITEMS."""
        from engine.pipeline.core import DataType

        stage = ViewportFilterStage()
        assert DataType.SOURCE_ITEMS in stage.inlet_types
        assert DataType.SOURCE_ITEMS in stage.outlet_types

    def test_filter_large_item_count_to_viewport(self):
        """1438 items (a realistic headline feed) collapse to viewport size."""
        # 1438 matches the volume of the real headline source.
        items = [
            SourceItem(f"Headline {i}", f"source-{i % 5}", str(i)) for i in range(1438)
        ]

        ctx = PipelineContext()
        ctx.params = MockParams(viewport_width=80, viewport_height=24)

        visible = ViewportFilterStage().process(items, ctx)

        # Dramatically fewer items, but never zero.
        assert len(visible) < len(items)
        assert len(visible) <= 5  # 24 height / 6 lines per item + 1
        assert len(visible) > 0

    def test_filter_respects_viewport_height(self):
        """Taller viewports admit more items, bounded by height // 6 + 1."""
        items = [SourceItem(f"Headline {i}", "source", str(i)) for i in range(100)]
        stage = ViewportFilterStage()

        for height in (12, 24, 48):
            ctx = PipelineContext()
            ctx.params = MockParams(viewport_height=height)

            visible = stage.process(items, ctx)

            assert len(visible) <= max(1, height // 6 + 1)
            assert len(visible) > 0

    def test_filter_handles_empty_list(self):
        """An empty input list passes through as an empty list."""
        ctx = PipelineContext()
        ctx.params = MockParams()

        assert ViewportFilterStage().process([], ctx) == []

    def test_filter_handles_none(self):
        """A None input passes through unchanged."""
        ctx = PipelineContext()
        ctx.params = MockParams()

        assert ViewportFilterStage().process(None, ctx) is None

    def test_filter_performance_improvement(self):
        """Filtering yields a several-hundred-fold reduction in rendered items."""
        # With 1438 items and a 24-line viewport, FontStage would otherwise
        # render everything; layout-based filtering keeps only a handful,
        # for roughly a 300-500x reduction in rendered items.
        items = [SourceItem(f"Headline {i}", "source", str(i)) for i in range(1438)]

        ctx = PipelineContext()
        ctx.params = MockParams(viewport_height=24)

        visible = ViewportFilterStage().process(items, ctx)
        improvement_factor = len(items) / len(visible)

        assert improvement_factor > 300
        assert 300 < improvement_factor < 500
class TestViewportFilterIntegration:
    """ViewportFilterStage behaviour in a pipeline context."""

    def test_filter_output_is_source_items(self):
        """Filtered output remains SourceItems consumable by FontStage."""
        from engine.pipeline.adapters import FontStage

        items = [SourceItem("Test Headline", "test-source", "123") for _ in range(10)]

        ctx = PipelineContext()
        ctx.params = MockParams()

        filtered = ViewportFilterStage().process(items, ctx)

        assert isinstance(filtered, list)
        assert all(isinstance(entry, SourceItem) for entry in filtered)

        # FontStage accepts the filtered items (would raise on a type mismatch).
        assert FontStage().process(filtered, ctx) is not None

    def test_filter_preserves_item_order(self):
        """The filter keeps the first N items in their original order."""
        items = [SourceItem(f"Headline {i}", "source", str(i)) for i in range(20)]

        ctx = PipelineContext()
        ctx.params = MockParams(viewport_height=24)

        filtered = ViewportFilterStage().process(items, ctx)

        for index, entry in enumerate(filtered):
            assert entry.content == f"Headline {index}"
class TestViewportResize:
    """ViewportFilterStage behaviour across viewport resizes."""

    def test_layout_recomputes_on_width_change(self):
        """Changing viewport_width invalidates the cached layout."""
        stage = ViewportFilterStage()
        # Long headlines wrap differently at different widths.
        items = [
            SourceItem(
                f"This is a very long headline number {i} that will definitely wrap at narrow widths",
                "test",
                str(i),
            )
            for i in range(50)
        ]

        # Initial render at 80 columns.
        ctx = PipelineContext()
        ctx.params = MockParams(viewport_width=80, viewport_height=24)
        ctx.set("camera_y", 0)

        stage.process(items, ctx)
        layout_at_80 = stage._layout.copy()

        # Narrowing the viewport forces more wrapping, so the cumulative
        # heights must differ after reprocessing.
        ctx.params.viewport_width = 40
        stage.process(items, ctx)
        layout_at_40 = stage._layout.copy()

        assert layout_at_40 != layout_at_80, (
            "Layout should recompute when viewport_width changes"
        )

    def test_layout_recomputes_on_height_change(self):
        """A taller viewport exposes at least as many items."""
        stage = ViewportFilterStage()
        items = [SourceItem(f"Headline {i}", "test", str(i)) for i in range(100)]

        ctx = PipelineContext()
        ctx.set("camera_y", 0)

        # Small viewport first, then a larger one.
        ctx.params = MockParams(viewport_width=80, viewport_height=12)
        visible_small = stage.process(items, ctx)

        ctx.params.viewport_height = 48
        visible_large = stage.process(items, ctx)

        assert len(visible_large) >= len(visible_small)

    def test_camera_y_propagates_to_filter(self):
        """The stage reads camera_y from the context to pick visible items."""
        stage = ViewportFilterStage()
        items = [SourceItem(f"Headline {i}", "test", str(i)) for i in range(100)]

        ctx = PipelineContext()
        ctx.params = MockParams(viewport_width=80, viewport_height=24)

        ctx.set("camera_y", 0)
        visible_top = stage.process(items, ctx)

        ctx.set("camera_y", 100)
        visible_scrolled = stage.process(items, ctx)

        first_top = visible_top[0].content if visible_top else None
        first_scrolled = visible_scrolled[0].content if visible_scrolled else None

        # Different camera positions should surface different leading items
        # (unless nothing is visible at all).
        assert first_top != first_scrolled or first_top is None

    def test_resize_handles_edge_case_small_width(self):
        """A very narrow viewport still yields output without crashing."""
        stage = ViewportFilterStage()

        ctx = PipelineContext()
        ctx.params = MockParams(viewport_width=10, viewport_height=5)
        ctx.set("camera_y", 0)

        result = stage.process([SourceItem("Short", "test", "1")], ctx)

        assert result is not None
        assert len(result) > 0
31
tests/test_vis_offset.py
Normal file
31
tests/test_vis_offset.py
Normal file
@@ -0,0 +1,31 @@
|
||||
from engine.effects.legacy import vis_offset, vis_trunc
|
||||
|
||||
|
||||
def test_vis_offset_no_change():
    """A zero offset leaves the string untouched."""
    assert vis_offset("hello", 0) == "hello"
||||
def test_vis_offset_trims_start():
    """A positive offset drops that many visible characters from the front."""
    assert vis_offset("hello world", 6) == "world"
||||
def test_vis_offset_handles_ansi():
    """ANSI escape sequences do not count toward the visible offset."""
    trimmed = vis_offset("\033[31mhello\033[0m", 3)
    # Accept either an exact trailing-reset result or any output that still
    # contains the remaining visible text (implementations may keep the
    # leading color code).
    assert trimmed == "lo\x1b[0m" or "lo" in trimmed
||||
def test_vis_offset_greater_than_length():
    """Offsetting past the end of the string produces an empty result."""
    assert vis_offset("hi", 10) == ""
||||
def test_vis_trunc_still_works():
    """vis_trunc keeps its original truncation behaviour after the changes."""
    assert vis_trunc("hello world", 5) == "hello"
395
tests/test_websocket.py
Normal file
395
tests/test_websocket.py
Normal file
@@ -0,0 +1,395 @@
|
||||
"""
|
||||
Tests for engine.display.backends.websocket module.
|
||||
"""
|
||||
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from engine.display.backends.websocket import WebSocketDisplay
|
||||
|
||||
|
||||
class TestWebSocketDisplayImport:
    """Smoke test that the websocket backend package imports cleanly."""

    def test_import_does_not_error(self):
        """Importing engine.display.backends raises nothing."""
        from engine.display import backends

        assert backends is not None
class TestWebSocketDisplayInit:
    """Tests for WebSocketDisplay initialization."""

    def test_default_init(self):
        """Default initialization sets correct defaults."""
        with patch("engine.display.backends.websocket.websockets", None):
            display = WebSocketDisplay()
            assert display.host == "0.0.0.0"
            assert display.port == 8765
            assert display.http_port == 8766
            assert display.width == 80
            assert display.height == 24

    def test_custom_init(self):
        """Custom initialization uses provided values."""
        with patch("engine.display.backends.websocket.websockets", None):
            display = WebSocketDisplay(host="localhost", port=9000, http_port=9001)
            assert display.host == "localhost"
            assert display.port == 9000
            assert display.http_port == 9001

    def test_is_available_when_websockets_present(self):
        """is_available returns True when websockets is importable."""
        pytest.importorskip("websockets")
        display = WebSocketDisplay()
        assert display.is_available() is True

    def test_is_available_when_websockets_missing(self):
        """is_available returns False when the websockets module is absent.

        BUG FIX: this test was previously guarded with
        ``@pytest.mark.skipif(pytest.importorskip("websockets") is not None, ...)``.
        ``importorskip`` either skips at collection time (module missing) or
        returns the module object (truthy) when present, so the skipif
        condition was always true and the test could never execute.  Patching
        the module reference to ``None`` — the same idiom used elsewhere in
        this file — exercises the missing-dependency path deterministically.
        """
        with patch("engine.display.backends.websocket.websockets", None):
            display = WebSocketDisplay()
            assert display.is_available() is False
||||
class TestWebSocketDisplayProtocol:
    """Structural checks that WebSocketDisplay satisfies the Display protocol."""

    def test_websocket_display_is_display(self):
        """The display exposes every method the Display protocol requires."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            for required in ("init", "show", "clear", "cleanup"):
                assert hasattr(display, required)
class TestWebSocketDisplayMethods:
    """Tests for WebSocketDisplay accessors and mutators."""

    def test_init_stores_dimensions(self):
        """init records the requested terminal dimensions."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            display.init(100, 40)
            assert display.width == 100
            assert display.height == 40

    @pytest.mark.skip(reason="port binding conflict in CI environment")
    def test_client_count_initially_zero(self):
        """client_count reports 0 before any client connects."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            assert WebSocketDisplay().client_count() == 0

    def test_get_ws_port(self):
        """get_ws_port echoes the configured websocket port."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            assert WebSocketDisplay(port=9000).get_ws_port() == 9000

    def test_get_http_port(self):
        """get_http_port echoes the configured HTTP port."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            assert WebSocketDisplay(http_port=9001).get_http_port() == 9001

    def test_frame_delay_defaults_to_zero(self):
        """A new display starts with no frame delay."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            assert WebSocketDisplay().get_frame_delay() == 0.0

    def test_set_frame_delay(self):
        """set_frame_delay round-trips through get_frame_delay."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            display.set_frame_delay(0.05)
            assert display.get_frame_delay() == 0.05
class TestWebSocketDisplayCallbacks:
    """Tests for client connect/disconnect callback registration."""

    def test_set_client_connected_callback(self):
        """The connected callback is stored on the display instance."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            hook = MagicMock()
            display.set_client_connected_callback(hook)
            assert display._client_connected_callback is hook

    def test_set_client_disconnected_callback(self):
        """The disconnected callback is stored on the display instance."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            hook = MagicMock()
            display.set_client_disconnected_callback(hook)
            assert display._client_disconnected_callback is hook
class TestWebSocketDisplayUnavailable:
    """Behaviour when the optional websockets dependency is absent.

    BUG FIX: these tests were previously guarded with
    ``@pytest.mark.skipif(pytest.importorskip("websockets") is not None, ...)``.
    ``importorskip`` either skips at collection time (dependency missing) or
    returns the module object (truthy) when present, so the guarded tests
    could never run.  The unavailable state is now simulated by patching the
    backend's ``websockets`` reference to ``None`` — the same idiom the rest
    of this file already uses — so the tests execute on every run.
    """

    def test_start_server_noop_when_unavailable(self):
        """start_server does nothing when websockets is unavailable."""
        with patch("engine.display.backends.websocket.websockets", None):
            display = WebSocketDisplay()
            display.start_server()
            assert display._server_thread is None

    def test_start_http_server_noop_when_unavailable(self):
        """start_http_server does nothing when websockets is unavailable."""
        with patch("engine.display.backends.websocket.websockets", None):
            display = WebSocketDisplay()
            display.start_http_server()
            assert display._http_thread is None

    def test_show_noops_when_unavailable(self):
        """show does nothing (and does not raise) when websockets is unavailable."""
        with patch("engine.display.backends.websocket.websockets", None):
            display = WebSocketDisplay()
            display.show(["line1", "line2"])
class TestWebSocketUIPanelIntegration:
    """Tests for the WebSocket/UIPanel integration used for remote control."""

    def test_set_controller_stores_controller(self):
        """set_controller stores the controller reference."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            controller = MagicMock()
            display.set_controller(controller)
            assert display._controller is controller

    def test_set_command_callback_stores_callback(self):
        """set_command_callback stores the callback."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            hook = MagicMock()
            display.set_command_callback(hook)
            assert display._command_callback is hook

    def test_get_state_snapshot_returns_none_without_controller(self):
        """Without a controller, no state snapshot can be produced."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            assert WebSocketDisplay()._get_state_snapshot() is None

    def test_get_state_snapshot_returns_controller_state(self):
        """The snapshot mirrors the controller's stages, presets and selection."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()

            # Build a controller exposing the attributes the snapshot reads.
            controller = MagicMock()
            controller.stages = {
                "test_stage": MagicMock(
                    enabled=True, params={"intensity": 0.5}, selected=False
                )
            }
            controller._current_preset = "demo"
            controller._presets = ["demo", "test"]
            controller.selected_stage = "test_stage"

            display.set_controller(controller)
            state = display._get_state_snapshot()

            assert state is not None
            assert "stages" in state
            assert "test_stage" in state["stages"]
            stage_state = state["stages"]["test_stage"]
            assert stage_state["enabled"] is True
            assert stage_state["params"] == {"intensity": 0.5}
            assert state["preset"] == "demo"
            assert state["presets"] == ["demo", "test"]
            assert state["selected_stage"] == "test_stage"

    def test_get_state_snapshot_handles_missing_attributes(self):
        """A controller lacking the expected attributes yields an empty snapshot."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()

            # spec=[] stops MagicMock from auto-creating attributes, so the
            # controller genuinely has no `stages` etc.
            bare_controller = MagicMock(spec=[])

            display.set_controller(bare_controller)

            assert display._get_state_snapshot() == {}

    def test_broadcast_state_sends_to_clients(self):
        """broadcast_state pushes a state message to every connected client."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()

            client = MagicMock()
            client.send = MagicMock()
            display._clients.add(client)

            display.broadcast_state({"test": "state"})

            # The client received one JSON message tagged as a state update.
            client.send.assert_called_once()
            payload = client.send.call_args[0][0]
            assert '"type": "state"' in payload
            assert '"test"' in payload

    def test_broadcast_state_noop_when_no_clients(self):
        """broadcast_state is a harmless no-op with no connected clients."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            display._clients.clear()

            # Must not raise.
            display.broadcast_state({"test": "state"})
class TestWebSocketHTTPServerPath:
    """Tests for WebSocket HTTP server client directory path calculation.

    The three tests previously duplicated the project-root/path computation
    inline; it is now factored into ``_websocket_file`` / ``_client_dir``
    helpers so the logic under test is stated once.
    """

    @staticmethod
    def _websocket_file():
        """Return the filesystem path of the real websocket backend module."""
        module = __import__(
            "engine.display.backends.websocket", fromlist=["WebSocketDisplay"]
        )
        return module.__file__

    @classmethod
    def _client_dir(cls):
        """Compute the client directory the HTTP server should serve.

        Mirrors the fixed production logic: the project root is the parent of
        the top-level ``engine`` package, and client assets live in
        ``<project_root>/client``.
        """
        import os

        websocket_file = cls._websocket_file()
        parts = websocket_file.split(os.sep)
        if "engine" in parts:
            project_root = os.sep.join(parts[: parts.index("engine")])
            return os.path.join(project_root, "client")
        # Fallback calculation (shouldn't happen in normal test runs):
        # four levels up from engine/display/backends/websocket.py.
        return os.path.join(
            os.path.dirname(
                os.path.dirname(os.path.dirname(os.path.dirname(websocket_file)))
            ),
            "client",
        )

    def test_client_dir_path_calculation(self):
        """Client directory path is correctly calculated from websocket.py location."""
        import os

        client_dir = self._client_dir()

        # The directory must exist and contain the expected client assets.
        assert os.path.exists(client_dir), f"Client directory not found: {client_dir}"
        assert "index.html" in os.listdir(client_dir), (
            "index.html not found in client directory"
        )
        assert "editor.html" in os.listdir(client_dir), (
            "editor.html not found in client directory"
        )

        # The path should be .../Mainline/client.
        assert client_dir.endswith("client"), (
            f"Client dir should end with 'client': {client_dir}"
        )
        assert "Mainline" in client_dir, (
            f"Client dir should contain 'Mainline': {client_dir}"
        )

    def test_http_server_directory_serves_client_files(self):
        """HTTP server directory correctly serves client files."""
        import os

        client_dir = self._client_dir()

        # We can't instantiate the handler without a live request, but we can
        # verify the directory and its key files are readable.
        assert os.access(client_dir, os.R_OK), (
            f"Client directory not readable: {client_dir}"
        )

        index_path = os.path.join(client_dir, "index.html")
        editor_path = os.path.join(client_dir, "editor.html")

        assert os.path.exists(index_path), f"index.html not found at: {index_path}"
        assert os.path.exists(editor_path), f"editor.html not found at: {editor_path}"
        assert os.access(index_path, os.R_OK), "index.html not readable"
        assert os.access(editor_path, os.R_OK), "editor.html not readable"

    def test_old_buggy_path_does_not_find_client_directory(self):
        """The old buggy path (3 dirname calls) should NOT find the client directory.

        The old code used::

            client_dir = os.path.join(
                os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "client"
            )

        which resolves to ``.../engine/client`` (nonexistent) instead of
        ``.../Mainline/client`` (which exists).
        """
        import os

        websocket_file = self._websocket_file()

        # OLD BUGGY CODE: 3 dirname calls.
        old_buggy_client_dir = os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(websocket_file))), "client"
        )

        assert not os.path.exists(old_buggy_client_dir), (
            f"Old buggy path should not exist: {old_buggy_client_dir}\n"
            f"If this assertion fails, the bug may have been fixed elsewhere or "
            f"the test needs updating."
        )

        # Use os.path.join for the suffix so the check also holds on Windows
        # (the previous hard-coded "engine/client" assumed POSIX separators).
        assert old_buggy_client_dir.endswith(os.path.join("engine", "client")), (
            f"Old buggy path should end with 'engine/client': {old_buggy_client_dir}"
        )

        # Going up one more level (4 dirname calls) finds the correct path.
        correct_client_dir = os.path.join(
            os.path.dirname(
                os.path.dirname(os.path.dirname(os.path.dirname(websocket_file)))
            ),
            "client",
        )
        assert os.path.exists(correct_client_dir), (
            f"Correct path should exist: {correct_client_dir}"
        )
        assert "index.html" in os.listdir(correct_client_dir), (
            f"index.html should exist in correct path: {correct_client_dir}"
        )
78
tests/test_websocket_e2e.py
Normal file
78
tests/test_websocket_e2e.py
Normal file
@@ -0,0 +1,78 @@
|
||||
"""
|
||||
End-to-end tests for WebSocket display using Playwright.
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
class TestWebSocketE2E:
    """End-to-end tests for WebSocket display with a real browser.

    These tests bind real loopback sockets and (optionally) drive a
    headless Chromium via Playwright; they are marked ``e2e`` so they can
    be excluded from fast unit-test runs.
    """

    @pytest.mark.e2e
    def test_websocket_server_starts(self):
        """The HTTP server should start and serve a non-empty page."""
        import threading

        from engine.display.backends.websocket import WebSocketDisplay

        display = WebSocketDisplay(host="127.0.0.1", port=18765)

        # Run the HTTP server in a daemon thread so a hung server cannot
        # block test-process exit.
        server_thread = threading.Thread(target=display.start_http_server)
        server_thread.daemon = True
        server_thread.start()

        # Give the server a moment to bind its socket before connecting.
        time.sleep(1)

        try:
            import urllib.request

            # Context manager closes the response so the socket is not
            # leaked (the original left it open).
            with urllib.request.urlopen(
                "http://127.0.0.1:18765", timeout=5
            ) as response:
                assert response.status == 200
                content = response.read().decode("utf-8")
                assert len(content) > 0
        finally:
            display.cleanup()
            # Brief pause so background threads observe cleanup before the
            # next test reuses loopback ports.
            time.sleep(0.5)

    @pytest.mark.e2e
    def test_websocket_browser_connection(self):
        """A headless browser should be able to load the served page.

        Skips (rather than fails) when Playwright is not installed.

        NOTE: the previous decorator
        ``@pytest.mark.skipif(not pytest.importorskip("playwright", ...))``
        was broken: ``importorskip`` raises ``Skipped`` at class-definition
        time when the package is missing (skipping the entire module during
        collection), and when it *is* installed it returns the module
        object, so ``not module`` is always False and the skipif never
        applied.  Calling ``importorskip`` inside the test body is the
        supported pattern.
        """
        import threading

        # Skip this single test cleanly when playwright is unavailable.
        sync_api = pytest.importorskip(
            "playwright.sync_api", reason="playwright not installed"
        )
        sync_playwright = sync_api.sync_playwright

        from engine.display.backends.websocket import WebSocketDisplay

        display = WebSocketDisplay(host="127.0.0.1", port=18767)

        # WebSocket server and HTTP server each run in their own daemon
        # thread so neither can block interpreter shutdown.
        server_thread = threading.Thread(target=display.start_server)
        server_thread.daemon = True
        server_thread.start()

        http_thread = threading.Thread(target=display.start_http_server)
        http_thread.daemon = True
        http_thread.start()

        # Give both servers a moment to bind before driving the browser.
        time.sleep(1)

        try:
            with sync_playwright() as p:
                browser = p.chromium.launch(headless=True)
                page = browser.new_page()

                page.goto("http://127.0.0.1:18767")
                # Allow the page's WebSocket handshake to complete.
                time.sleep(0.5)

                # The original ``assert len(title) >= 0`` was vacuously
                # true; the meaningful check is that the page loaded and
                # reported a title string at all.
                title = page.title()
                assert isinstance(title, str)

                browser.close()
        finally:
            display.cleanup()
            time.sleep(0.5)
|
||||
Reference in New Issue
Block a user