diff --git a/engine/app/main.py b/engine/app/main.py
index 183a245..b38829e 100644
--- a/engine/app/main.py
+++ b/engine/app/main.py
@@ -9,20 +9,24 @@ from engine import config
from engine.display import BorderMode, DisplayRegistry
from engine.effects import get_registry
from engine.fetch import fetch_all, fetch_all_fast, fetch_poetry, load_cache, save_cache
-from engine.pipeline import (
+
+# Import from sideline (the framework)
+from sideline.pipeline import (
Pipeline,
PipelineConfig,
PipelineContext,
- list_presets,
+ StageRegistry,
)
-from engine.pipeline.adapters import (
+from sideline.pipeline.adapters import (
CameraStage,
DataSourceStage,
EffectPluginStage,
create_stage_from_display,
create_stage_from_effect,
)
-from engine.pipeline.params import PipelineParams
+from sideline.pipeline.params import PipelineParams
+
+# Import from engine (Mainline-specific)
from engine.pipeline.ui import UIConfig, UIPanel
from engine.pipeline.validation import validate_pipeline_config
@@ -34,11 +38,39 @@ except ImportError:
from .pipeline_runner import run_pipeline_mode
+def _register_mainline_stages():
+    """Register Mainline-specific stage components with Sideline.
+
+    This should be called early in application startup to ensure
+    all Mainline stages are available for pipeline construction.
+
+    Failures are deliberately non-fatal: the app continues with whatever
+    stages did register.
+    """
+    try:
+        from sideline.pipeline import StageRegistry
+
+        # Method 1: Explicit registration via engine.plugins
+        # NOTE(review): passes the StageRegistry *class* itself, not an
+        # instance — presumably the registry API is classmethod-based;
+        # confirm against sideline.pipeline.StageRegistry.
+        try:
+            from engine.plugins import register_stages
+
+            register_stages(StageRegistry)
+        except ImportError as e:
+            print(f"Warning: Failed to register Mainline stages: {e}")
+
+        # Method 2: Register via plugin module (for entry point discovery)
+        # NOTE(review): if discovery later imports engine.plugins and calls
+        # register_stages again, every stage gets registered twice — confirm
+        # registration is idempotent, or drop one of the two methods.
+        StageRegistry.register_plugin_module("engine.plugins")
+
+        print("Mainline stage components registered successfully")
+    except Exception as e:
+        # Broad catch on purpose: startup must not abort on registration problems.
+        print(f"Warning: Stage registration failed: {e}")
+
+
def main():
"""Main entry point - all modes now use presets or CLI construction."""
+ # Register Mainline stages with Sideline
+ _register_mainline_stages()
+
if config.PIPELINE_DIAGRAM:
try:
- from engine.pipeline import generate_pipeline_diagram
+ from sideline.pipeline import generate_pipeline_diagram
except ImportError:
print("Error: pipeline diagram not available")
return
diff --git a/engine/pipeline/__init__.py b/engine/pipeline/__init__.py
index ff03c3f..ff740d0 100644
--- a/engine/pipeline/__init__.py
+++ b/engine/pipeline/__init__.py
@@ -1,50 +1,32 @@
"""
-Unified Pipeline Architecture.
+Unified Pipeline Architecture (Compatibility Shim).
-This module provides a clean, dependency-managed pipeline system:
-- Stage: Base class for all pipeline components
-- Pipeline: DAG-based execution orchestrator
-- PipelineParams: Runtime configuration for animation
-- PipelinePreset: Pre-configured pipeline configurations
-- StageRegistry: Unified registration for all stage types
+This module re-exports the pipeline architecture from Sideline for backward
+compatibility with existing Mainline code. New code should import directly
+from sideline.pipeline.
-The pipeline architecture supports:
-- Sources: Data providers (headlines, poetry, pipeline viz)
-- Effects: Post-processors (noise, fade, glitch, hud)
-- Displays: Output backends (terminal, pygame, websocket)
-- Cameras: Viewport controllers (vertical, horizontal, omni)
-
-Example:
- from engine.pipeline import Pipeline, PipelineConfig, StageRegistry
-
- pipeline = Pipeline(PipelineConfig(source="headlines", display="terminal"))
- pipeline.add_stage("source", StageRegistry.create("source", "headlines"))
- pipeline.add_stage("display", StageRegistry.create("display", "terminal"))
- pipeline.build().initialize()
-
- result = pipeline.execute(initial_data)
+Note: This module is deprecated and will be removed in future versions.
"""
-from engine.pipeline.controller import (
+# Re-export from sideline for backward compatibility
+from sideline.pipeline import (
Pipeline,
PipelineConfig,
- PipelineRunner,
- create_default_pipeline,
- create_pipeline_from_params,
-)
-from engine.pipeline.core import (
PipelineContext,
Stage,
StageConfig,
StageError,
StageResult,
-)
-from engine.pipeline.params import (
- DEFAULT_HEADLINE_PARAMS,
- DEFAULT_PIPELINE_PARAMS,
- DEFAULT_PYGAME_PARAMS,
PipelineParams,
+ StageRegistry,
+ discover_stages,
+ register_camera,
+ register_display,
+ register_effect,
+ register_source,
)
+
+# Re-export from engine.pipeline.presets (Mainline-specific)
from engine.pipeline.presets import (
DEMO_PRESET,
FIREHOSE_PRESET,
@@ -57,34 +39,40 @@ from engine.pipeline.presets import (
get_preset,
list_presets,
)
-from engine.pipeline.registry import (
- StageRegistry,
- discover_stages,
- register_camera,
- register_display,
- register_effect,
- register_source,
+
+# Re-export from sideline.pipeline.params
+from sideline.pipeline.params import (
+ DEFAULT_HEADLINE_PARAMS,
+ DEFAULT_PIPELINE_PARAMS,
+ DEFAULT_PYGAME_PARAMS,
+)
+
+# Re-export additional functions from sideline.pipeline
+from sideline.pipeline import (
+ create_default_pipeline,
+ create_pipeline_from_params,
+ PipelineRunner,
)
__all__ = [
- # Core
+ # Core (from sideline)
"Stage",
"StageConfig",
"StageError",
"StageResult",
"PipelineContext",
- # Controller
+ # Controller (from sideline)
"Pipeline",
"PipelineConfig",
"PipelineRunner",
"create_default_pipeline",
"create_pipeline_from_params",
- # Params
+ # Params (from sideline)
"PipelineParams",
"DEFAULT_HEADLINE_PARAMS",
"DEFAULT_PIPELINE_PARAMS",
"DEFAULT_PYGAME_PARAMS",
- # Presets
+ # Presets (from engine)
"PipelinePreset",
"PRESETS",
"DEMO_PRESET",
@@ -96,7 +84,7 @@ __all__ = [
"get_preset",
"list_presets",
"create_preset_from_params",
- # Registry
+ # Registry (from sideline)
"StageRegistry",
"discover_stages",
"register_source",
diff --git a/engine/plugins.py b/engine/plugins.py
new file mode 100644
index 0000000..b7978b5
--- /dev/null
+++ b/engine/plugins.py
@@ -0,0 +1,97 @@
+"""
+Mainline stage component registration.
+
+This module registers all Mainline-specific stage components with the Sideline framework.
+It should be called during application startup to ensure all components are available.
+
+Terminology:
+- Stage: A pipeline component (source, effect, display, camera, overlay)
+- Plugin: A distributable package containing one or more stages
+- This module registers stage components, not plugins themselves
+"""
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def register_stages(registry):
+    """Register Mainline-specific stage components with the Sideline registry.
+
+    Entry point invoked by Sideline's plugin discovery system; delegates to
+    one private helper per stage category.
+
+    Args:
+        registry: StageRegistry instance from Sideline
+    """
+    logger.info("Registering Mainline stage components")
+
+    # Data sources, then effects, then remaining Mainline stages — same
+    # order as before, expressed as a dispatch sequence.
+    for register_category in (
+        _register_data_sources,
+        _register_effects,
+        _register_other_stages,
+    ):
+        register_category(registry)
+
+
+def _register_data_sources(registry):
+    """Register Mainline data source stages.
+
+    Args:
+        registry: StageRegistry (class or instance) from Sideline.
+
+    Missing engine modules downgrade to a warning so startup can proceed.
+    """
+    try:
+        from engine.data_sources.sources import HeadlinesDataSource, PoetryDataSource
+        from engine.data_sources.pipeline_introspection import (
+            PipelineIntrospectionSource,
+        )
+
+        registry.register("source", HeadlinesDataSource)
+        registry.register("source", PoetryDataSource)
+        registry.register("source", PipelineIntrospectionSource)
+
+        # Register with friendly aliases
+        # NOTE(review): this reaches into the registry's private _categories
+        # dict, bypassing the public API; it will silently break if
+        # StageRegistry's internals change. Prefer a public alias/name
+        # parameter on register() if Sideline provides one — confirm.
+        registry._categories["source"]["headlines"] = HeadlinesDataSource
+        registry._categories["source"]["poetry"] = PoetryDataSource
+        registry._categories["source"]["pipeline-inspect"] = PipelineIntrospectionSource
+
+        logger.info("Registered Mainline data sources")
+    except ImportError as e:
+        logger.warning(f"Failed to register data sources: {e}")
+
+
+def _register_effects(registry):
+    """Register Mainline effect stages.
+
+    Currently a deliberate no-op: Sideline's EffectRegistry stores effect
+    *instances* rather than stage classes, so effects cannot be registered
+    as stage components until they are refactored. The previous version
+    imported EffectRegistry/get_registry and bound an unused local before
+    skipping; that dead code is removed here.
+
+    Args:
+        registry: StageRegistry (class or instance) from Sideline; unused
+            until effect registration is implemented.
+    """
+    logger.info("Effect registration skipped (requires effect refactoring)")
+
+
+def _register_other_stages(registry):
+    """Register other Mainline-specific stage components.
+
+    Args:
+        registry: StageRegistry (class or instance) from Sideline.
+    """
+    try:
+        # Register buffer stages
+        # NOTE(review): FrameBufferStage is imported from *sideline*, not
+        # engine, yet the log calls it a "Mainline buffer stage" — confirm
+        # this is intentional and not a leftover from the framework split.
+        from sideline.pipeline.stages.framebuffer import FrameBufferStage
+
+        registry.register("effect", FrameBufferStage)
+        logger.info("Registered Mainline buffer stages")
+    except ImportError as e:
+        logger.warning(f"Failed to register buffer stages: {e}")
+
+
+# Convenience function for explicit registration
+def register_all_stages():
+    """Explicitly register all Mainline stages.
+
+    This can be called directly instead of using plugin discovery.
+
+    Mirrors the startup path in engine.app.main: the StageRegistry *class*
+    (not an instance) is handed to register_stages — assumes the registry
+    API is classmethod-based; TODO confirm against Sideline.
+    """
+    from sideline.pipeline import StageRegistry
+
+    register_stages(StageRegistry)
diff --git a/sideline/__init__.py b/sideline/__init__.py
new file mode 100644
index 0000000..35068e9
--- /dev/null
+++ b/sideline/__init__.py
@@ -0,0 +1,85 @@
+"""
+Sideline - A modular pipeline framework for real-time terminal visualization.
+
+Sideline provides a Stage-based pipeline architecture with capability-based
+dependency resolution for building real-time visualization applications.
+
+Features:
+- Stage-based pipeline execution with DAG dependency resolution
+- Capability-based dependency injection
+- Display backends (Terminal, WebSocket, Null, etc.)
+- Effect plugin system with param bindings
+- Sensor framework for real-time input
+- Canvas and Camera for 2D rendering
+
+Example:
+ from sideline.pipeline import Pipeline, PipelineConfig, StageRegistry
+
+ pipeline = Pipeline(PipelineConfig(source="custom", display="terminal"))
+ pipeline.add_stage("source", MyDataSourceStage())
+ pipeline.add_stage("display", StageRegistry.create("display", "terminal"))
+ pipeline.build().initialize()
+
+ result = pipeline.execute(initial_data)
+"""
+
+__version__ = "0.1.0"
+
+# Re-export core components for convenience
+from sideline.pipeline import (
+ Pipeline,
+ PipelineConfig,
+ PipelineContext,
+ Stage,
+ StageRegistry,
+)
+
+from sideline.display import Display, DisplayRegistry
+
+from sideline.effects import Effect, EffectPlugin, EffectRegistry
+
+from sideline.plugins import (
+ StagePlugin,
+ Plugin, # Backward compatibility
+ PluginMetadata,
+ SecurityCapability,
+ SecurityManager,
+ VersionConstraint,
+ CompatibilityManager,
+)
+
+from sideline.preset_packs import (
+ PresetPack,
+ PresetPackMetadata,
+ PresetPackManager,
+ PresetPackEncoder,
+)
+
+__all__ = [
+ # Pipeline
+ "Pipeline",
+ "PipelineConfig",
+ "PipelineContext",
+ "Stage",
+ "StageRegistry",
+ # Display
+ "Display",
+ "DisplayRegistry",
+ # Effects
+ "Effect", # Primary class name
+ "EffectPlugin", # Backward compatibility alias
+ "EffectRegistry",
+ # Plugins
+ "StagePlugin",
+ "Plugin", # Backward compatibility alias
+ "PluginMetadata",
+ "SecurityCapability",
+ "SecurityManager",
+ "VersionConstraint",
+ "CompatibilityManager",
+ # Preset Packs
+ "PresetPack",
+ "PresetPackMetadata",
+ "PresetPackManager",
+ "PresetPackEncoder",
+]
diff --git a/sideline/camera.py b/sideline/camera.py
new file mode 100644
index 0000000..93b8c48
--- /dev/null
+++ b/sideline/camera.py
@@ -0,0 +1,473 @@
+"""
+Camera system for viewport scrolling.
+
+Provides abstraction for camera motion in different modes:
+- Vertical: traditional upward scroll
+- Horizontal: left/right movement
+- Omni: combination of both
+- Floating: sinusoidal/bobbing motion
+
+The camera defines a visible viewport into a larger Canvas.
+"""
+
+import math
+from collections.abc import Callable
+from dataclasses import dataclass, field
+from enum import Enum, auto
+
+
+class CameraMode(Enum):
+ FEED = auto() # Single item view (static or rapid cycling)
+ SCROLL = auto() # Smooth vertical scrolling (movie credits style)
+ HORIZONTAL = auto()
+ OMNI = auto()
+ FLOATING = auto()
+ BOUNCE = auto()
+ RADIAL = auto() # Polar coordinates (r, theta) for radial scanning
+
+
+@dataclass
+class CameraViewport:
+ """Represents the visible viewport."""
+
+ x: int
+ y: int
+ width: int
+ height: int
+
+
+@dataclass
+class Camera:
+ """Camera for viewport scrolling.
+
+ The camera defines a visible viewport into a Canvas.
+ It can be smaller than the canvas to allow scrolling,
+ and supports zoom to scale the view.
+
+ Attributes:
+ x: Current horizontal offset (positive = scroll left)
+ y: Current vertical offset (positive = scroll up)
+ mode: Current camera mode
+ speed: Base scroll speed
+ zoom: Zoom factor (1.0 = 100%, 2.0 = 200% zoom out)
+ canvas_width: Width of the canvas being viewed
+ canvas_height: Height of the canvas being viewed
+ custom_update: Optional custom update function
+ """
+
+ x: int = 0
+ y: int = 0
+ mode: CameraMode = CameraMode.FEED
+ speed: float = 1.0
+ zoom: float = 1.0
+ canvas_width: int = 200 # Larger than viewport for scrolling
+ canvas_height: int = 200
+ custom_update: Callable[["Camera", float], None] | None = None
+ _x_float: float = field(default=0.0, repr=False)
+ _y_float: float = field(default=0.0, repr=False)
+ _time: float = field(default=0.0, repr=False)
+
+ @property
+ def w(self) -> int:
+ """Shorthand for viewport_width."""
+ return self.viewport_width
+
+ def set_speed(self, speed: float) -> None:
+ """Set the camera scroll speed dynamically.
+
+ This allows camera speed to be modulated during runtime
+ via PipelineParams or directly.
+
+ Args:
+ speed: New speed value (0.0 = stopped, >0 = movement)
+ """
+ self.speed = max(0.0, speed)
+
+ @property
+ def h(self) -> int:
+ """Shorthand for viewport_height."""
+ return self.viewport_height
+
+ @property
+ def viewport_width(self) -> int:
+ """Get the visible viewport width.
+
+ This is the canvas width divided by zoom.
+ """
+ return max(1, int(self.canvas_width / self.zoom))
+
+ @property
+ def viewport_height(self) -> int:
+ """Get the visible viewport height.
+
+ This is the canvas height divided by zoom.
+ """
+ return max(1, int(self.canvas_height / self.zoom))
+
+    def get_viewport(self, viewport_height: int | None = None) -> CameraViewport:
+        """Get the current viewport bounds.
+
+        Args:
+            viewport_height: Optional viewport height to use instead of camera's viewport_height
+
+        Returns:
+            CameraViewport with position and size (clamped to canvas bounds)
+        """
+        vw = self.viewport_width
+        vh = viewport_height if viewport_height is not None else self.viewport_height
+
+        # Clamp so the viewport never extends past the canvas edges.
+        clamped_x = max(0, min(self.x, self.canvas_width - vw))
+        clamped_y = max(0, min(self.y, self.canvas_height - vh))
+
+        # Single return; the previous version carried an identical,
+        # unreachable second return block (copy-paste dead code).
+        return CameraViewport(
+            x=clamped_x,
+            y=clamped_y,
+            width=vw,
+            height=vh,
+        )
+
+ def set_zoom(self, zoom: float) -> None:
+ """Set the zoom factor.
+
+ Args:
+ zoom: Zoom factor (1.0 = 100%, 2.0 = zoomed out 2x, 0.5 = zoomed in 2x)
+ """
+ self.zoom = max(0.1, min(10.0, zoom))
+
+ def update(self, dt: float) -> None:
+ """Update camera position based on mode.
+
+ Args:
+ dt: Delta time in seconds
+ """
+ self._time += dt
+
+ if self.custom_update:
+ self.custom_update(self, dt)
+ return
+
+ if self.mode == CameraMode.FEED:
+ self._update_feed(dt)
+ elif self.mode == CameraMode.SCROLL:
+ self._update_scroll(dt)
+ elif self.mode == CameraMode.HORIZONTAL:
+ self._update_horizontal(dt)
+ elif self.mode == CameraMode.OMNI:
+ self._update_omni(dt)
+ elif self.mode == CameraMode.FLOATING:
+ self._update_floating(dt)
+ elif self.mode == CameraMode.BOUNCE:
+ self._update_bounce(dt)
+ elif self.mode == CameraMode.RADIAL:
+ self._update_radial(dt)
+
+ # Bounce mode handles its own bounds checking
+ if self.mode != CameraMode.BOUNCE:
+ self._clamp_to_bounds()
+
+ def _clamp_to_bounds(self) -> None:
+ """Clamp camera position to stay within canvas bounds.
+
+ Only clamps if the viewport is smaller than the canvas.
+ If viewport equals canvas (no scrolling needed), allows any position
+ for backwards compatibility with original behavior.
+ """
+ vw = self.viewport_width
+ vh = self.viewport_height
+
+ # Only clamp if there's room to scroll
+ if vw < self.canvas_width:
+ self.x = max(0, min(self.x, self.canvas_width - vw))
+ if vh < self.canvas_height:
+ self.y = max(0, min(self.y, self.canvas_height - vh))
+
+ def _update_feed(self, dt: float) -> None:
+ """Feed mode: rapid scrolling (1 row per frame at speed=1.0)."""
+ self.y += int(self.speed * dt * 60)
+
+ def _update_scroll(self, dt: float) -> None:
+ """Scroll mode: smooth vertical scrolling with float accumulation."""
+ self._y_float += self.speed * dt * 60
+ self.y = int(self._y_float)
+
+ def _update_horizontal(self, dt: float) -> None:
+ self.x += int(self.speed * dt * 60)
+
+ def _update_omni(self, dt: float) -> None:
+ speed = self.speed * dt * 60
+ self.y += int(speed)
+ self.x += int(speed * 0.5)
+
+ def _update_floating(self, dt: float) -> None:
+ base = self.speed * 30
+ self.y = int(math.sin(self._time * 2) * base)
+ self.x = int(math.cos(self._time * 1.5) * base * 0.5)
+
+ def _update_bounce(self, dt: float) -> None:
+ """Bouncing DVD-style camera that bounces off canvas edges."""
+ vw = self.viewport_width
+ vh = self.viewport_height
+
+ # Initialize direction if not set
+ if not hasattr(self, "_bounce_dx"):
+ self._bounce_dx = 1
+ self._bounce_dy = 1
+
+ # Calculate max positions
+ max_x = max(0, self.canvas_width - vw)
+ max_y = max(0, self.canvas_height - vh)
+
+ # Move
+ move_speed = self.speed * dt * 60
+
+ # Bounce off edges - reverse direction when hitting bounds
+ self.x += int(move_speed * self._bounce_dx)
+ self.y += int(move_speed * self._bounce_dy)
+
+ # Bounce horizontally
+ if self.x <= 0:
+ self.x = 0
+ self._bounce_dx = 1
+ elif self.x >= max_x:
+ self.x = max_x
+ self._bounce_dx = -1
+
+ # Bounce vertically
+ if self.y <= 0:
+ self.y = 0
+ self._bounce_dy = 1
+ elif self.y >= max_y:
+ self.y = max_y
+ self._bounce_dy = -1
+
+ def _update_radial(self, dt: float) -> None:
+ """Radial camera mode: polar coordinate scrolling (r, theta).
+
+ The camera rotates around the center of the canvas while optionally
+ moving outward/inward along rays. This enables:
+ - Radar sweep animations
+ - Pendulum view oscillation
+ - Spiral scanning motion
+
+ Uses polar coordinates internally:
+ - _r_float: radial distance from center (accumulates smoothly)
+ - _theta_float: angle in radians (accumulates smoothly)
+ - Updates x, y based on conversion from polar to Cartesian
+ """
+ # Initialize radial state if needed
+ if not hasattr(self, "_r_float"):
+ self._r_float = 0.0
+ self._theta_float = 0.0
+
+ # Update angular position (rotation around center)
+ # Speed controls rotation rate
+ theta_speed = self.speed * dt * 1.0 # radians per second
+ self._theta_float += theta_speed
+
+ # Update radial position (inward/outward from center)
+ # Can be modulated by external sensor
+ if hasattr(self, "_radial_input"):
+ r_input = self._radial_input
+ else:
+ # Default: slow outward drift
+ r_input = 0.0
+
+ r_speed = self.speed * dt * 20.0 # pixels per second
+ self._r_float += r_input + r_speed * 0.01
+
+ # Clamp radial position to canvas bounds
+ max_r = min(self.canvas_width, self.canvas_height) / 2
+ self._r_float = max(0.0, min(self._r_float, max_r))
+
+ # Convert polar to Cartesian, centered at canvas center
+ center_x = self.canvas_width / 2
+ center_y = self.canvas_height / 2
+
+ self.x = int(center_x + self._r_float * math.cos(self._theta_float))
+ self.y = int(center_y + self._r_float * math.sin(self._theta_float))
+
+ # Clamp to canvas bounds
+ self._clamp_to_bounds()
+
+ def set_radial_input(self, value: float) -> None:
+ """Set radial input for sensor-driven radius modulation.
+
+ Args:
+ value: Sensor value (0-1) that modulates radial distance
+ """
+ self._radial_input = value * 10.0 # Scale to reasonable pixel range
+
+ def set_radial_angle(self, angle: float) -> None:
+ """Set radial angle directly (for OSC integration).
+
+ Args:
+ angle: Angle in radians (0 to 2π)
+ """
+ self._theta_float = angle
+
+ def reset(self) -> None:
+ """Reset camera position and state."""
+ self.x = 0
+ self.y = 0
+ self._time = 0.0
+ self.zoom = 1.0
+ # Reset bounce direction state
+ if hasattr(self, "_bounce_dx"):
+ self._bounce_dx = 1
+ self._bounce_dy = 1
+ # Reset radial state
+ if hasattr(self, "_r_float"):
+ self._r_float = 0.0
+ self._theta_float = 0.0
+
+ def set_canvas_size(self, width: int, height: int) -> None:
+ """Set the canvas size and clamp position if needed.
+
+ Args:
+ width: New canvas width
+ height: New canvas height
+ """
+ self.canvas_width = width
+ self.canvas_height = height
+ self._clamp_to_bounds()
+
+    def apply(
+        self, buffer: list[str], viewport_width: int, viewport_height: int | None = None
+    ) -> list[str]:
+        """Apply camera viewport to a text buffer.
+
+        Slices the buffer based on camera position (x, y) and viewport dimensions.
+        Handles ANSI escape codes correctly for colored/styled text.
+
+        Args:
+            buffer: List of strings representing lines of text
+            viewport_width: Width of the visible viewport in characters
+            viewport_height: Height of the visible viewport (overrides camera's viewport_height if provided)
+
+        Returns:
+            Sliced buffer containing only the visible lines and columns
+        """
+        import re
+
+        # Local import, presumably to avoid an import cycle with sideline.effects.
+        from sideline.effects.legacy import vis_offset, vis_trunc
+
+        if not buffer:
+            return buffer
+
+        # Get current viewport bounds (clamped to canvas size)
+        viewport = self.get_viewport(viewport_height)
+
+        # Use provided viewport_height if given, otherwise use camera's viewport
+        vh = viewport_height if viewport_height is not None else viewport.height
+
+        # Vertical slice: extract lines that fit in viewport height
+        start_y = viewport.y
+        end_y = min(viewport.y + vh, len(buffer))
+
+        if start_y >= len(buffer):
+            # Scrolled past end of buffer, return empty viewport
+            return [""] * vh
+
+        vertical_slice = buffer[start_y:end_y]
+
+        # Hoisted out of the per-line loop: previously `import re` ran and
+        # the ANSI pattern was rebuilt on every iteration.
+        ansi_pattern = re.compile(r"\x1b\[[0-9;]*m")
+
+        # Horizontal slice: apply horizontal offset and truncate to width
+        horizontal_slice = []
+        for line in vertical_slice:
+            # Apply horizontal offset (skip first x characters, handling ANSI)
+            offset_line = vis_offset(line, viewport.x)
+            # Truncate to viewport width (handling ANSI)
+            truncated_line = vis_trunc(offset_line, viewport_width)
+
+            # Pad line to full viewport width to prevent ghosting when panning
+            # Skip padding for empty lines to preserve intentional blank lines
+            visible_len = len(ansi_pattern.sub("", truncated_line))
+            if 0 < visible_len < viewport_width:
+                truncated_line += " " * (viewport_width - visible_len)
+
+            horizontal_slice.append(truncated_line)
+
+        # Pad with empty lines if needed to fill viewport height
+        while len(horizontal_slice) < vh:
+            horizontal_slice.append("")
+
+        return horizontal_slice
+
+ @classmethod
+ def feed(cls, speed: float = 1.0) -> "Camera":
+ """Create a feed camera (rapid single-item scrolling, 1 row/frame at speed=1.0)."""
+ return cls(mode=CameraMode.FEED, speed=speed, canvas_height=200)
+
+    @classmethod
+    def scroll(cls, speed: float = 0.5) -> "Camera":
+        """Create a smooth scrolling camera (movie credits style).
+
+        Uses float accumulation for sub-integer speeds.
+        Sets canvas_width=0 so it matches viewport_width for proper text wrapping.
+
+        NOTE(review): with canvas_width=0 the viewport_width property clamps
+        to 1 until set_canvas_size() is called — presumably the display layer
+        always resizes the canvas before rendering; confirm.
+        """
+        return cls(
+            mode=CameraMode.SCROLL, speed=speed, canvas_width=0, canvas_height=200
+        )
+
+ @classmethod
+ def vertical(cls, speed: float = 1.0) -> "Camera":
+ """Deprecated: Use feed() or scroll() instead."""
+ return cls(mode=CameraMode.FEED, speed=speed, canvas_height=200)
+
+ @classmethod
+ def horizontal(cls, speed: float = 1.0) -> "Camera":
+ """Create a horizontal scrolling camera."""
+ return cls(mode=CameraMode.HORIZONTAL, speed=speed, canvas_width=200)
+
+ @classmethod
+ def omni(cls, speed: float = 1.0) -> "Camera":
+ """Create an omnidirectional scrolling camera."""
+ return cls(
+ mode=CameraMode.OMNI, speed=speed, canvas_width=200, canvas_height=200
+ )
+
+ @classmethod
+ def floating(cls, speed: float = 1.0) -> "Camera":
+ """Create a floating/bobbing camera."""
+ return cls(
+ mode=CameraMode.FLOATING, speed=speed, canvas_width=200, canvas_height=200
+ )
+
+ @classmethod
+ def bounce(cls, speed: float = 1.0) -> "Camera":
+ """Create a bouncing DVD-style camera that bounces off canvas edges."""
+ return cls(
+ mode=CameraMode.BOUNCE, speed=speed, canvas_width=200, canvas_height=200
+ )
+
+ @classmethod
+ def radial(cls, speed: float = 1.0) -> "Camera":
+ """Create a radial camera (polar coordinate scanning).
+
+ The camera rotates around the center of the canvas with smooth angular motion.
+ Enables radar sweep, pendulum view, and spiral scanning animations.
+
+ Args:
+ speed: Rotation speed (higher = faster rotation)
+
+ Returns:
+ Camera configured for radial polar coordinate scanning
+ """
+ cam = cls(
+ mode=CameraMode.RADIAL, speed=speed, canvas_width=200, canvas_height=200
+ )
+ # Initialize radial state
+ cam._r_float = 0.0
+ cam._theta_float = 0.0
+ return cam
+
+ @classmethod
+ def custom(cls, update_fn: Callable[["Camera", float], None]) -> "Camera":
+ """Create a camera with custom update function."""
+ return cls(custom_update=update_fn)
diff --git a/sideline/canvas.py b/sideline/canvas.py
new file mode 100644
index 0000000..9341223
--- /dev/null
+++ b/sideline/canvas.py
@@ -0,0 +1,186 @@
+"""
+Canvas - 2D surface for rendering.
+
+The Canvas represents a full rendered surface that can be larger than the display.
+The Camera then defines the visible viewport into this canvas.
+"""
+
+from dataclasses import dataclass
+
+
+@dataclass
+class CanvasRegion:
+ """A rectangular region on the canvas."""
+
+ x: int
+ y: int
+ width: int
+ height: int
+
+ def is_valid(self) -> bool:
+ """Check if region has positive dimensions."""
+ return self.width > 0 and self.height > 0
+
+ def rows(self) -> set[int]:
+ """Return set of row indices in this region."""
+ return set(range(self.y, self.y + self.height))
+
+
+class Canvas:
+ """2D canvas for rendering content.
+
+ The canvas is a 2D grid of cells that can hold text content.
+ It can be larger than the visible viewport (display).
+
+ Attributes:
+ width: Total width in characters
+ height: Total height in characters
+ """
+
+ def __init__(self, width: int = 80, height: int = 24):
+ self.width = width
+ self.height = height
+ self._grid: list[list[str]] = [
+ [" " for _ in range(width)] for _ in range(height)
+ ]
+ self._dirty_regions: list[CanvasRegion] = [] # Track dirty regions
+
+ def clear(self) -> None:
+ """Clear the entire canvas."""
+ self._grid = [[" " for _ in range(self.width)] for _ in range(self.height)]
+ self._dirty_regions = [CanvasRegion(0, 0, self.width, self.height)]
+
+ def mark_dirty(self, x: int, y: int, width: int, height: int) -> None:
+ """Mark a region as dirty (caller declares what they changed)."""
+ self._dirty_regions.append(CanvasRegion(x, y, width, height))
+
+ def get_dirty_regions(self) -> list[CanvasRegion]:
+ """Get all dirty regions and clear the set."""
+ regions = self._dirty_regions
+ self._dirty_regions = []
+ return regions
+
+ def get_dirty_rows(self) -> set[int]:
+ """Get union of all dirty rows."""
+ rows: set[int] = set()
+ for region in self._dirty_regions:
+ rows.update(region.rows())
+ return rows
+
+ def is_dirty(self) -> bool:
+ """Check if any region is dirty."""
+ return len(self._dirty_regions) > 0
+
+ def get_region(self, x: int, y: int, width: int, height: int) -> list[list[str]]:
+ """Get a rectangular region from the canvas.
+
+ Args:
+ x: Left position
+ y: Top position
+ width: Region width
+ height: Region height
+
+ Returns:
+ 2D list of characters (height rows, width columns)
+ """
+ region: list[list[str]] = []
+ for py in range(y, y + height):
+ row: list[str] = []
+ for px in range(x, x + width):
+ if 0 <= py < self.height and 0 <= px < self.width:
+ row.append(self._grid[py][px])
+ else:
+ row.append(" ")
+ region.append(row)
+ return region
+
+ def get_region_flat(self, x: int, y: int, width: int, height: int) -> list[str]:
+ """Get a rectangular region as flat list of lines.
+
+ Args:
+ x: Left position
+ y: Top position
+ width: Region width
+ height: Region height
+
+ Returns:
+ List of strings (one per row)
+ """
+ region = self.get_region(x, y, width, height)
+ return ["".join(row) for row in region]
+
+    def put_region(self, x: int, y: int, content: list[list[str]]) -> None:
+        """Put content into a rectangular region on the canvas.
+
+        Cells falling outside the canvas bounds are silently skipped.
+
+        Args:
+            x: Left position
+            y: Top position
+            content: 2D list of characters to place (rows may be ragged)
+        """
+        height = len(content)
+        # Use the widest row so the dirty region covers ragged content;
+        # the previous version used only the first row's width, under-marking
+        # when later rows were wider.
+        width = max((len(row) for row in content), default=0)
+
+        for py, row in enumerate(content):
+            for px, char in enumerate(row):
+                canvas_x = x + px
+                canvas_y = y + py
+                if 0 <= canvas_y < self.height and 0 <= canvas_x < self.width:
+                    self._grid[canvas_y][canvas_x] = char
+
+        if width > 0 and height > 0:
+            self.mark_dirty(x, y, width, height)
+
+ def put_text(self, x: int, y: int, text: str) -> None:
+ """Put a single line of text at position.
+
+ Args:
+ x: Left position
+ y: Row position
+ text: Text to place
+ """
+ text_len = len(text)
+ for i, char in enumerate(text):
+ canvas_x = x + i
+ if 0 <= canvas_x < self.width and 0 <= y < self.height:
+ self._grid[y][canvas_x] = char
+
+ if text_len > 0:
+ self.mark_dirty(x, y, text_len, 1)
+
+ def fill(self, x: int, y: int, width: int, height: int, char: str = " ") -> None:
+ """Fill a rectangular region with a character.
+
+ Args:
+ x: Left position
+ y: Top position
+ width: Region width
+ height: Region height
+ char: Character to fill with
+ """
+ for py in range(y, y + height):
+ for px in range(x, x + width):
+ if 0 <= py < self.height and 0 <= px < self.width:
+ self._grid[py][px] = char
+
+ if width > 0 and height > 0:
+ self.mark_dirty(x, y, width, height)
+
+ def resize(self, width: int, height: int) -> None:
+ """Resize the canvas.
+
+ Args:
+ width: New width
+ height: New height
+ """
+ if width == self.width and height == self.height:
+ return
+
+ new_grid: list[list[str]] = [[" " for _ in range(width)] for _ in range(height)]
+
+ for py in range(min(self.height, height)):
+ for px in range(min(self.width, width)):
+ new_grid[py][px] = self._grid[py][px]
+
+ self.width = width
+ self.height = height
+ self._grid = new_grid
diff --git a/sideline/data_sources/__init__.py b/sideline/data_sources/__init__.py
new file mode 100644
index 0000000..c9eb81d
--- /dev/null
+++ b/sideline/data_sources/__init__.py
@@ -0,0 +1,32 @@
+"""
+Data source types for Sideline.
+
+This module defines the data structures used by data sources.
+"""
+
+from dataclasses import dataclass
+from typing import Any, Optional
+
+
+@dataclass
+class SourceItem:
+    """A single item from a data source."""
+
+    content: str  # Displayable text payload
+    source: str  # Name of the producing data source
+    timestamp: str  # Creation time (string-typed; format set by the source)
+    # PEP 604 union for consistency with the rest of the codebase
+    # (e.g. camera.py uses `X | None` throughout).
+    metadata: dict[str, Any] | None = None
+
+
+@dataclass
+class ImageItem:
+    """An image item from a data source - wraps a PIL Image."""
+
+    image: Any  # PIL Image
+    source: str  # Name of the producing data source
+    timestamp: str  # Creation time (string-typed; format set by the source)
+    # PEP 604 unions for consistency with the rest of the codebase
+    # (e.g. camera.py uses `X | None` throughout).
+    path: str | None = None  # File path or URL if applicable
+    metadata: dict[str, Any] | None = None
+
+
+__all__ = ["SourceItem", "ImageItem"]
diff --git a/sideline/display/__init__.py b/sideline/display/__init__.py
new file mode 100644
index 0000000..25794d3
--- /dev/null
+++ b/sideline/display/__init__.py
@@ -0,0 +1,296 @@
+"""
+Display backend system with registry pattern.
+
+Allows swapping output backends via the Display protocol.
+Supports auto-discovery of display backends.
+"""
+
+from enum import Enum, auto
+from typing import Protocol
+
+# Optional backend - requires moderngl package
+try:
+ from sideline.display.backends.moderngl import ModernGLDisplay
+
+ _MODERNGL_AVAILABLE = True
+except ImportError:
+ ModernGLDisplay = None
+ _MODERNGL_AVAILABLE = False
+
+from sideline.display.backends.multi import MultiDisplay
+from sideline.display.backends.null import NullDisplay
+from sideline.display.backends.pygame import PygameDisplay
+from sideline.display.backends.replay import ReplayDisplay
+from sideline.display.backends.terminal import TerminalDisplay
+from sideline.display.backends.websocket import WebSocketDisplay
+
+
+class BorderMode(Enum):
+    """Border rendering modes for displays.
+
+    Consumed by render_border(), which also accepts a bool and normalizes
+    it (True -> SIMPLE, False -> OFF).
+    """
+
+    OFF = auto()  # No border
+    SIMPLE = auto()  # Traditional border with FPS/frame time
+    UI = auto()  # Right-side UI panel with interactive controls
+
+
+class Display(Protocol):
+    """Protocol for display backends.
+
+    Required attributes:
+    - width: int
+    - height: int
+
+    Required methods (duck typing - actual signatures may vary):
+    - init(width, height, reuse=False)
+    - show(buffer, border=False)
+    - clear()
+    - cleanup()
+    - get_dimensions() -> (width, height)
+
+    Optional attributes (for UI mode):
+    - ui_panel: UIPanel instance (set by app when border=UI)
+
+    Optional methods:
+    - is_quit_requested() -> bool
+    - clear_quit_request() -> None
+    """
+
+    # Character-cell dimensions of the display surface.
+    width: int
+    height: int
+
+
+class DisplayRegistry:
+    """Registry for display backends with auto-discovery.
+
+    Built-in backends are registered lazily by initialize(); create()
+    triggers initialization automatically.
+    """
+
+    # Mapping of lower-cased backend name -> backend class.
+    _backends: dict[str, type[Display]] = {}
+    _initialized = False
+
+    @classmethod
+    def register(cls, name: str, backend_class: type[Display]) -> None:
+        """Register a backend class under a case-insensitive name."""
+        cls._backends[name.lower()] = backend_class
+
+    @classmethod
+    def get(cls, name: str) -> type[Display] | None:
+        """Look up a backend class by name; None if unknown.
+
+        NOTE(review): does not call initialize(), so built-in backends are
+        invisible here until create()/initialize() has run — confirm this
+        laziness is intended.
+        """
+        return cls._backends.get(name.lower())
+
+    @classmethod
+    def list_backends(cls) -> list[str]:
+        """Return the names of currently registered backends."""
+        return list(cls._backends.keys())
+
+    @classmethod
+    def create(cls, name: str, **kwargs) -> Display | None:
+        """Instantiate a backend by name, or None when not registered."""
+        cls.initialize()
+        backend_class = cls.get(name)
+        if backend_class:
+            return backend_class(**kwargs)
+        return None
+
+    @classmethod
+    def initialize(cls) -> None:
+        """Register the built-in backends (idempotent)."""
+        if cls._initialized:
+            return
+        cls.register("terminal", TerminalDisplay)
+        cls.register("null", NullDisplay)
+        cls.register("replay", ReplayDisplay)
+        cls.register("websocket", WebSocketDisplay)
+        cls.register("pygame", PygameDisplay)
+        if _MODERNGL_AVAILABLE:
+            cls.register("moderngl", ModernGLDisplay)  # type: ignore[arg-type]
+        cls._initialized = True
+
+    @classmethod
+    def create_multi(cls, names: list[str]) -> MultiDisplay | None:
+        """Create a MultiDisplay fanning out to each named backend.
+
+        All-or-nothing: returns None when the list is empty or any single
+        backend fails to resolve.
+        """
+        displays = []
+        for name in names:
+            backend = cls.create(name)
+            if backend:
+                displays.append(backend)
+            else:
+                return None
+        if not displays:
+            return None
+        return MultiDisplay(displays)
+
+
+def get_monitor():
+    """Get the performance monitor.
+
+    Returns None when the performance module is unavailable or raises;
+    callers must handle a None result.
+    """
+    try:
+        from sideline.effects.performance import get_monitor as _get_monitor
+
+        return _get_monitor()
+    except Exception:
+        # Best-effort: monitoring is optional; any failure disables it.
+        return None
+
+
+def _strip_ansi(s: str) -> str:
+    """Strip ANSI escape sequences from string for length calculation.
+
+    Only CSI sequences (ESC '[' ... final letter) are matched; other
+    escape forms (e.g. OSC) would survive — acceptable for width math.
+    """
+    import re
+
+    return re.sub(r"\x1b\[[0-9;]*[a-zA-Z]", "", s)
+
+
+def _render_simple_border(
+    buf: list[str], width: int, height: int, fps: float = 0.0, frame_time: float = 0.0
+) -> list[str]:
+    """Render a traditional border around the buffer.
+
+    Content is cropped/padded to (width-2) x (height-2); FPS is embedded
+    in the top border and frame time in the bottom border when non-zero.
+    Buffers too small to border (width/height < 3) are returned unchanged.
+    """
+    if not buf or width < 3 or height < 3:
+        return buf
+
+    inner_w = width - 2
+    inner_h = height - 2
+
+    cropped = []
+    for i in range(min(inner_h, len(buf))):
+        line = buf[i]
+        visible_len = len(_strip_ansi(line))
+        if visible_len > inner_w:
+            # NOTE(review): slicing the raw string by visible width can cut
+            # through an ANSI escape sequence — confirm overlong lines here
+            # are plain text.
+            cropped.append(line[:inner_w])
+        else:
+            # Pad by *visible* length so colored lines still line up.
+            cropped.append(line + " " * (inner_w - visible_len))
+
+    while len(cropped) < inner_h:
+        cropped.append(" " * inner_w)
+
+    # Top border with right-aligned FPS readout when available.
+    if fps > 0:
+        fps_str = f" FPS:{fps:.0f}"
+        if len(fps_str) < inner_w:
+            right_len = inner_w - len(fps_str)
+            top_border = "┌" + "─" * right_len + fps_str + "┐"
+        else:
+            top_border = "┌" + "─" * inner_w + "┐"
+    else:
+        top_border = "┌" + "─" * inner_w + "┐"
+
+    # Bottom border with right-aligned frame-time readout when available.
+    if frame_time > 0:
+        ft_str = f" {frame_time:.1f}ms"
+        if len(ft_str) < inner_w:
+            right_len = inner_w - len(ft_str)
+            bottom_border = "└" + "─" * right_len + ft_str + "┘"
+        else:
+            bottom_border = "└" + "─" * inner_w + "┘"
+    else:
+        bottom_border = "└" + "─" * inner_w + "┘"
+
+    result = [top_border]
+    for line in cropped:
+        # Defensive re-pad (raw length here, not visible length).
+        if len(line) < inner_w:
+            line = line + " " * (inner_w - len(line))
+        elif len(line) > inner_w:
+            line = line[:inner_w]
+        result.append("│" + line + "│")
+    result.append(bottom_border)
+
+    return result
+
+
+def render_ui_panel(
+    buf: list[str],
+    width: int,
+    height: int,
+    ui_panel,
+    fps: float = 0.0,
+    frame_time: float = 0.0,
+) -> list[str]:
+    """Render buffer with a right-side UI panel.
+
+    Falls back to a plain bordered frame when UIPanel cannot be imported
+    or ui_panel is not a UIPanel instance.
+    """
+    # UIPanel is imported lazily so this module loads without the UI pkg.
+    # NOTE(review): this patch's head imports UI types from
+    # engine.pipeline.ui but here UIPanel comes from sideline.pipeline.ui —
+    # confirm which package owns UIPanel.
+    try:
+        from sideline.pipeline.ui import UIPanel
+    except ImportError:
+        # If UIPanel is not available, fall back to simple border
+        return _render_simple_border(buf, width, height, fps, frame_time)
+
+    if not isinstance(ui_panel, UIPanel):
+        return _render_simple_border(buf, width, height, fps, frame_time)
+
+    # Split the width between the main view and the panel (1-col separator).
+    panel_width = min(ui_panel.config.panel_width, width - 4)
+    main_width = width - panel_width - 1
+
+    panel_lines = ui_panel.render(panel_width, height)
+
+    main_buf = buf[: height - 2]
+    main_result = _render_simple_border(
+        main_buf, main_width + 2, height, fps, frame_time
+    )
+
+    combined = []
+    for i in range(height):
+        if i < len(main_result):
+            main_line = main_result[i]
+            if len(main_line) >= 2:
+                # Strip the bordered frame's left/right edge characters.
+                main_content = (
+                    main_line[1:-1] if main_line[-1] in "│┌┐└┘" else main_line[1:]
+                )
+                main_content = main_content.ljust(main_width)[:main_width]
+            else:
+                main_content = " " * main_width
+        else:
+            main_content = " " * main_width
+
+        panel_idx = i
+        panel_line = (
+            panel_lines[panel_idx][:panel_width].ljust(panel_width)
+            if panel_idx < len(panel_lines)
+            else " " * panel_width
+        )
+
+        # Separator column: junction glyphs on the top/bottom rows.
+        separator = "│" if 0 < i < height - 1 else "┼" if i == 0 else "┴"
+        combined.append(main_content + separator + panel_line)
+
+    return combined
+
+
+def render_border(
+    buf: list[str],
+    width: int,
+    height: int,
+    fps: float = 0.0,
+    frame_time: float = 0.0,
+    border_mode: BorderMode | bool = BorderMode.SIMPLE,
+) -> list[str]:
+    """Render a border or UI panel around the buffer.
+
+    Args:
+        buf: Input buffer
+        width: Display width
+        height: Display height
+        fps: FPS for top border
+        frame_time: Frame time for bottom border
+        border_mode: Border rendering mode; a bool is accepted for
+            backward compatibility (True -> SIMPLE, False -> OFF)
+
+    Returns:
+        Buffer with border/panel applied
+    """
+    # Normalize border_mode to BorderMode enum
+    if isinstance(border_mode, bool):
+        border_mode = BorderMode.SIMPLE if border_mode else BorderMode.OFF
+
+    if border_mode == BorderMode.UI:
+        # UI panel requires a UIPanel instance (injected separately)
+        # For now, this will be called by displays that have a ui_panel attribute
+        # This function signature doesn't include ui_panel, so we'll handle it in render_ui_panel
+        # Fall back to simple border if no panel available
+        return _render_simple_border(buf, width, height, fps, frame_time)
+    elif border_mode == BorderMode.SIMPLE:
+        return _render_simple_border(buf, width, height, fps, frame_time)
+    else:
+        return buf
+
+
+# Public API of the display package; ModernGLDisplay is exported only when
+# the optional moderngl backend imported successfully.
+__all__ = [
+    "Display",
+    "DisplayRegistry",
+    "get_monitor",
+    "render_border",
+    "render_ui_panel",
+    "BorderMode",
+    "TerminalDisplay",
+    "NullDisplay",
+    "ReplayDisplay",
+    "WebSocketDisplay",
+    "MultiDisplay",
+    "PygameDisplay",
+]
+
+if _MODERNGL_AVAILABLE:
+    __all__.append("ModernGLDisplay")
diff --git a/sideline/display/backends/animation_report.py b/sideline/display/backends/animation_report.py
new file mode 100644
index 0000000..35dd259
--- /dev/null
+++ b/sideline/display/backends/animation_report.py
@@ -0,0 +1,656 @@
+"""
+Animation Report Display Backend
+
+Captures frames from pipeline stages and generates an interactive HTML report
+showing before/after states for each transformative stage.
+"""
+
+import time
+from dataclasses import dataclass, field
+from datetime import datetime
+from pathlib import Path
+from typing import Any
+
+from sideline.display.streaming import compute_diff
+
+
+@dataclass
+class CapturedFrame:
+    """A captured frame with metadata."""
+
+    stage: str  # Name of the pipeline stage that produced the frame
+    buffer: list[str]  # Frame content, one string per line
+    timestamp: float  # time.time() at capture
+    frame_number: int  # Global frame counter at capture time
+    diff_from_previous: dict[str, Any] | None = None  # compute_diff summary
+
+
+@dataclass
+class StageCapture:
+    """Captures frames for a single pipeline stage."""
+
+    name: str  # Stage name
+    frames: list[CapturedFrame] = field(default_factory=list)
+    start_time: float = field(default_factory=time.time)
+    end_time: float = 0.0  # Set by finish(); 0.0 while capture is open
+
+    def add_frame(
+        self,
+        buffer: list[str],
+        frame_number: int,
+        previous_buffer: list[str] | None = None,
+    ) -> None:
+        """Add a captured frame.
+
+        When previous_buffer is given, a small diff summary (changed line
+        count and dimensions) is stored alongside the frame.
+        """
+        timestamp = time.time()
+        diff = None
+        if previous_buffer is not None:
+            diff_data = compute_diff(previous_buffer, buffer)
+            diff = {
+                "changed_lines": len(diff_data.changed_lines),
+                "total_lines": len(buffer),
+                "width": diff_data.width,
+                "height": diff_data.height,
+            }
+
+        frame = CapturedFrame(
+            stage=self.name,
+            buffer=list(buffer),  # copy so later mutation can't alter history
+            timestamp=timestamp,
+            frame_number=frame_number,
+            diff_from_previous=diff,
+        )
+        self.frames.append(frame)
+
+    def finish(self) -> None:
+        """Mark capture as finished."""
+        self.end_time = time.time()
+
+
+class AnimationReportDisplay:
+    """
+    Display backend that captures frames for animation report generation.
+
+    Instead of rendering to terminal, this display captures the buffer at each
+    stage and stores it for later HTML report generation.
+    """
+
+    # Display-protocol attributes; overwritten by init().
+    width: int = 80
+    height: int = 24
+
+    def __init__(self, output_dir: str = "./reports"):
+        """
+        Initialize the animation report display.
+
+        Args:
+            output_dir: Directory where reports will be saved (created
+                eagerly if missing)
+        """
+        self.output_dir = Path(output_dir)
+        self.output_dir.mkdir(parents=True, exist_ok=True)
+
+        self._stages: dict[str, StageCapture] = {}  # stage name -> capture
+        self._current_stage: str = ""  # stage new frames are attributed to
+        self._previous_buffer: list[str] | None = None  # for per-frame diffs
+        self._frame_number: int = 0
+        self._total_frames: int = 0
+        self._start_time: float = 0.0  # set by init()
+
+    def init(self, width: int, height: int, reuse: bool = False) -> None:
+        """Initialize display with dimensions.
+
+        Args:
+            width: Terminal width in characters
+            height: Terminal height in rows
+            reuse: Accepted for protocol compatibility; unused here
+        """
+        self.width = width
+        self.height = height
+        self._start_time = time.time()
+
+    def show(self, buffer: list[str], border: bool = False) -> None:
+        """
+        Capture a frame for the current stage.
+
+        Args:
+            buffer: The frame buffer to capture
+            border: Border flag (ignored)
+        """
+        if not self._current_stage:
+            # If no stage is set, use a default name
+            self._current_stage = "final"
+
+        if self._current_stage not in self._stages:
+            self._stages[self._current_stage] = StageCapture(self._current_stage)
+
+        stage = self._stages[self._current_stage]
+        stage.add_frame(buffer, self._frame_number, self._previous_buffer)
+
+        # Keep a copy so the next frame can be diffed against this one.
+        self._previous_buffer = list(buffer)
+        self._frame_number += 1
+        self._total_frames += 1
+
+    def start_stage(self, stage_name: str) -> None:
+        """
+        Start capturing frames for a new stage.
+
+        Finishes the previous stage (if any) and resets the diff baseline.
+
+        Args:
+            stage_name: Name of the stage (e.g., "noise", "fade", "firehose")
+        """
+        if self._current_stage and self._current_stage in self._stages:
+            # Finish previous stage
+            self._stages[self._current_stage].finish()
+
+        self._current_stage = stage_name
+        self._previous_buffer = None  # Reset for new stage
+
+    def clear(self) -> None:
+        """Clear the display (no-op for report display)."""
+        pass
+
+    def cleanup(self) -> None:
+        """Cleanup resources: close out the stage currently being captured."""
+        # Finish current stage
+        if self._current_stage and self._current_stage in self._stages:
+            self._stages[self._current_stage].finish()
+
+    def get_dimensions(self) -> tuple[int, int]:
+        """Get current dimensions as (width, height)."""
+        return (self.width, self.height)
+
+    def get_stages(self) -> dict[str, StageCapture]:
+        """Get all captured stages, keyed by stage name."""
+        return self._stages
+
+    def generate_report(self, title: str = "Animation Report") -> Path:
+        """
+        Generate an HTML report with captured frames and animations.
+
+        The file is written to output_dir under a unix-timestamped name.
+
+        Args:
+            title: Title of the report
+
+        Returns:
+            Path to the generated HTML file
+        """
+        report_path = self.output_dir / f"animation_report_{int(time.time())}.html"
+        html_content = self._build_html(title)
+        report_path.write_text(html_content)
+        return report_path
+
+    def _build_html(self, title: str) -> str:
+        """Build the HTML content for the report.
+
+        NOTE(review): the template below contains only interpolation slots
+        and blank lines — the surrounding HTML markup appears to have been
+        stripped from this file. Restore it before relying on the output.
+        """
+        # Collect all frames across stages
+        all_frames = []
+        for stage_name, stage in self._stages.items():
+            for frame in stage.frames:
+                all_frames.append(frame)
+
+        # Sort frames by timestamp
+        all_frames.sort(key=lambda f: f.timestamp)
+
+        # Build stage sections
+        stages_html = ""
+        for stage_name, stage in self._stages.items():
+            stages_html += self._build_stage_section(stage_name, stage)
+
+        # Build full HTML
+        html = f"""
+
+
+
+
+
+ {title}
+
+
+
+
+
+
+
+
+ {stages_html}
+
+
+
+
+
+
+
+"""
+        return html
+
+    def _build_stage_section(self, stage_name: str, stage: StageCapture) -> str:
+        """Build HTML for a single stage section.
+
+        NOTE(review): the literals below have lost their HTML tags (same
+        stripping as in _build_html); only interpolated values remain.
+        """
+        frames_html = ""
+        for i, frame in enumerate(stage.frames):
+            diff_info = ""
+            if frame.diff_from_previous:
+                changed = frame.diff_from_previous.get("changed_lines", 0)
+                total = frame.diff_from_previous.get("total_lines", 0)
+                diff_info = f'Δ {changed}/{total}'
+
+            frames_html += f"""
+
+
+
{self._escape_html("".join(frame.buffer))}
+
+ """
+
+        return f"""
+
+ """
+
+    def _build_timeline(self, all_frames: list[CapturedFrame]) -> str:
+        """Build timeline HTML.
+
+        NOTE(review): the marker literal is an empty f-string here, so
+        this currently returns "" for any non-empty input and left_percent
+        is unused — the marker markup appears to have been stripped.
+        """
+        if not all_frames:
+            return ""
+
+        markers_html = ""
+        for i, frame in enumerate(all_frames):
+            left_percent = (i / len(all_frames)) * 100
+            markers_html += f''
+
+        return markers_html
+
+    def _build_stage_colors(self) -> str:
+        """Build stage color mapping for JavaScript.
+
+        Returns JS object entries ('"stage": "#hex",' lines) with the
+        trailing comma/newline stripped; the palette cycles when there
+        are more stages than colors.
+        """
+        colors = [
+            "#00d4ff",
+            "#00ff88",
+            "#ff6b6b",
+            "#ffd93d",
+            "#a855f7",
+            "#ec4899",
+            "#14b8a6",
+            "#f97316",
+            "#8b5cf6",
+            "#06b6d4",
+        ]
+        color_map = ""
+        for i, stage_name in enumerate(self._stages.keys()):
+            color = colors[i % len(colors)]
+            color_map += f' "{stage_name}": "{color}",\n'
+        return color_map.rstrip(",\n")
+
+ def _build_timeline_markers(self, all_frames: list[CapturedFrame]) -> str:
+ """Build timeline markers in JavaScript."""
+ if not all_frames:
+ return ""
+
+ markers_js = ""
+ for i, frame in enumerate(all_frames):
+ left_percent = (i / len(all_frames)) * 100
+ stage_color = f"stageColors['{frame.stage}']"
+ markers_js += f"""
+ const marker{i} = document.createElement('div');
+ marker{i}.className = 'timeline-marker stage-{{frame.stage}}';
+ marker{i}.style.left = '{left_percent}%';
+ marker{i}.style.setProperty('--stage-color', {stage_color});
+ marker{i}.onclick = () => {{
+ currentFrame = {i};
+ updateFrameDisplay();
+ }};
+ timeline.appendChild(marker{i});
+ """
+
+ return markers_js
+
+ def _escape_html(self, text: str) -> str:
+ """Escape HTML special characters."""
+ return (
+ text.replace("&", "&")
+ .replace("<", "<")
+ .replace(">", ">")
+ .replace('"', """)
+ .replace("'", "'")
+ )
diff --git a/sideline/display/backends/multi.py b/sideline/display/backends/multi.py
new file mode 100644
index 0000000..fd13be5
--- /dev/null
+++ b/sideline/display/backends/multi.py
@@ -0,0 +1,50 @@
+"""
+Multi display backend - forwards to multiple displays.
+"""
+
+
+class MultiDisplay:
+    """Display that forwards to multiple displays.
+
+    Supports reuse - passes reuse flag to all child displays.
+    Children are driven in list order by every forwarding call.
+    """
+
+    # Display-protocol attributes; overwritten by init().
+    width: int = 80
+    height: int = 24
+
+    def __init__(self, displays: list):
+        self.displays = displays
+        self.width = 80
+        self.height = 24
+
+    def init(self, width: int, height: int, reuse: bool = False) -> None:
+        """Initialize all child displays with dimensions.
+
+        Args:
+            width: Terminal width in characters
+            height: Terminal height in rows
+            reuse: If True, use reuse mode for child displays
+        """
+        self.width = width
+        self.height = height
+        for d in self.displays:
+            d.init(width, height, reuse=reuse)
+
+    def show(self, buffer: list[str], border: bool = False) -> None:
+        """Forward the frame to every child display."""
+        for d in self.displays:
+            d.show(buffer, border=border)
+
+    def clear(self) -> None:
+        """Clear every child display."""
+        for d in self.displays:
+            d.clear()
+
+    def get_dimensions(self) -> tuple[int, int]:
+        """Get dimensions from the first child display that supports it."""
+        for d in self.displays:
+            if hasattr(d, "get_dimensions"):
+                return d.get_dimensions()
+        return (self.width, self.height)
+
+    def cleanup(self) -> None:
+        """Clean up every child display."""
+        for d in self.displays:
+            d.cleanup()
diff --git a/sideline/display/backends/null.py b/sideline/display/backends/null.py
new file mode 100644
index 0000000..9798899
--- /dev/null
+++ b/sideline/display/backends/null.py
@@ -0,0 +1,183 @@
+"""
+Null/headless display backend.
+"""
+
+import json
+import time
+from pathlib import Path
+from typing import Any
+
+
+class NullDisplay:
+    """Headless/null display - discards all output.
+
+    This display does nothing - useful for headless benchmarking
+    or when no display output is needed. Captures last buffer
+    for testing purposes. Supports frame recording for replay
+    and file export/import.
+
+    NOTE(review): despite the name, show() currently prints periodic
+    debug frames to stdout — see the note there.
+    """
+
+    width: int = 80
+    height: int = 24
+    _last_buffer: list[str] | None = None
+
+    def __init__(self):
+        self._last_buffer = None  # most recent buffer passed to show()
+        self._is_recording = False
+        self._recorded_frames: list[dict[str, Any]] = []
+        self._frame_count = 0
+
+    def init(self, width: int, height: int, reuse: bool = False) -> None:
+        """Initialize display with dimensions.
+
+        Args:
+            width: Terminal width in characters
+            height: Terminal height in rows
+            reuse: Ignored for NullDisplay (no resources to reuse)
+        """
+        self.width = width
+        self.height = height
+        self._last_buffer = None
+
+    def show(self, buffer: list[str], border: bool = False) -> None:
+        """Record the buffer (and optionally print debug frames).
+
+        Applies the border exactly like a real display would so recordings
+        match, stores the buffer for inspection, and appends it to the
+        recording when recording is active.
+        """
+        import sys
+
+        from sideline.display import get_monitor, render_border
+
+        fps = 0.0
+        frame_time = 0.0
+        monitor = get_monitor()
+        if monitor:
+            stats = monitor.get_stats()
+            avg_ms = stats.get("pipeline", {}).get("avg_ms", 0) if stats else 0
+            frame_count = stats.get("frame_count", 0) if stats else 0
+            if avg_ms and frame_count > 0:
+                fps = 1000.0 / avg_ms
+                frame_time = avg_ms
+
+        if border:
+            buffer = render_border(buffer, self.width, self.height, fps, frame_time)
+
+        self._last_buffer = buffer
+
+        if self._is_recording:
+            self._recorded_frames.append(
+                {
+                    "frame_number": self._frame_count,
+                    "buffer": buffer,
+                    "width": self.width,
+                    "height": self.height,
+                }
+            )
+
+        # NOTE(review): this debug dump writes frames to stdout, which
+        # contradicts the class's "discards all output" contract — confirm
+        # whether it should be removed or gated behind a flag.
+        if self._frame_count <= 5 or self._frame_count % 10 == 0:
+            sys.stdout.write("\n" + "=" * 80 + "\n")
+            sys.stdout.write(
+                f"Frame {self._frame_count} (buffer height: {len(buffer)})\n"
+            )
+            sys.stdout.write("=" * 80 + "\n")
+            for i, line in enumerate(buffer[:30]):
+                sys.stdout.write(f"{i:2}: {line}\n")
+            if len(buffer) > 30:
+                sys.stdout.write(f"... ({len(buffer) - 30} more lines)\n")
+            sys.stdout.flush()
+
+        if monitor:
+            # NOTE(review): t0 is taken *after* the work above, so this only
+            # times the char count below — effectively ~0ms per frame.
+            t0 = time.perf_counter()
+            chars_in = sum(len(line) for line in buffer)
+            elapsed_ms = (time.perf_counter() - t0) * 1000
+            monitor.record_effect("null_display", elapsed_ms, chars_in, chars_in)
+
+        self._frame_count += 1
+
+    def start_recording(self) -> None:
+        """Begin recording frames (clears any previous recording)."""
+        self._is_recording = True
+        self._recorded_frames = []
+
+    def stop_recording(self) -> None:
+        """Stop recording frames (recorded frames are kept)."""
+        self._is_recording = False
+
+    def get_frames(self) -> list[list[str]]:
+        """Get recorded frames as list of buffers.
+
+        Returns:
+            List of buffers, each buffer is a list of strings (lines)
+        """
+        return [frame["buffer"] for frame in self._recorded_frames]
+
+    def get_recorded_data(self) -> list[dict[str, Any]]:
+        """Get full recorded data including metadata.
+
+        Returns:
+            List of frame dicts with 'frame_number', 'buffer', 'width', 'height'
+        """
+        return self._recorded_frames
+
+    def clear_recording(self) -> None:
+        """Clear recorded frames."""
+        self._recorded_frames = []
+
+    def save_recording(self, filepath: str | Path) -> None:
+        """Save recorded frames to a JSON file.
+
+        Args:
+            filepath: Path to save the recording
+        """
+        path = Path(filepath)
+        data = {
+            "version": 1,  # recording format version
+            "display": "null",
+            "width": self.width,
+            "height": self.height,
+            "frame_count": len(self._recorded_frames),
+            "frames": self._recorded_frames,
+        }
+        path.write_text(json.dumps(data, indent=2))
+
+    def load_recording(self, filepath: str | Path) -> list[dict[str, Any]]:
+        """Load recorded frames from a JSON file.
+
+        Overwrites the in-memory recording and adopts the file's
+        width/height (defaulting to 80x24 when absent).
+
+        Args:
+            filepath: Path to load the recording from
+
+        Returns:
+            List of frame dicts
+        """
+        path = Path(filepath)
+        data = json.loads(path.read_text())
+        self._recorded_frames = data.get("frames", [])
+        self.width = data.get("width", 80)
+        self.height = data.get("height", 24)
+        return self._recorded_frames
+
+    def replay_frames(self) -> list[list[str]]:
+        """Get frames for replay (alias for get_frames()).
+
+        Returns:
+            List of buffers for replay
+        """
+        return self.get_frames()
+
+    def clear(self) -> None:
+        """Clear the display (no-op for a null display)."""
+        pass
+
+    def cleanup(self) -> None:
+        """Cleanup (no resources held)."""
+        pass
+
+    def get_dimensions(self) -> tuple[int, int]:
+        """Get current dimensions.
+
+        Returns:
+            (width, height) in character cells
+        """
+        return (self.width, self.height)
+
+    def is_quit_requested(self) -> bool:
+        """Check if quit was requested (optional protocol method)."""
+        return False
+
+    def clear_quit_request(self) -> None:
+        """Clear quit request (optional protocol method)."""
+        pass
diff --git a/sideline/display/backends/pygame.py b/sideline/display/backends/pygame.py
new file mode 100644
index 0000000..da4a9b8
--- /dev/null
+++ b/sideline/display/backends/pygame.py
@@ -0,0 +1,369 @@
+"""
+Pygame display backend - renders to a native application window.
+"""
+
+import time
+
+from sideline.display.renderer import parse_ansi
+
+
+class PygameDisplay:
+    """Pygame display backend - renders to native window.
+
+    Supports reuse mode - when reuse=True, skips SDL initialization
+    and reuses the existing pygame window from a previous instance.
+    """
+
+    width: int = 80
+    window_width: int = 800
+    window_height: int = 600
+
+    def __init__(
+        self,
+        cell_width: int = 10,
+        cell_height: int = 18,
+        window_width: int = 800,
+        window_height: int = 600,
+        target_fps: float = 30.0,
+    ):
+        """Create the backend; no window is opened until init().
+
+        Args:
+            cell_width: Pixel width of one character cell
+            cell_height: Pixel height of one character cell
+            window_width: Initial window width in pixels
+            window_height: Initial window height in pixels
+            target_fps: Frame-rate cap for show(); <= 0 disables limiting
+        """
+        self.width = 80
+        self.height = 24
+        self.cell_width = cell_width
+        self.cell_height = cell_height
+        self.window_width = window_width
+        self.window_height = window_height
+        self.target_fps = target_fps
+        self._initialized = False
+        self._pygame = None  # the pygame module, once imported in init()
+        self._screen = None
+        self._font = None
+        self._resized = False
+        self._quit_requested = False
+        self._last_frame_time = 0.0
+        # Minimum seconds between rendered frames (0 = unlimited).
+        self._frame_period = 1.0 / target_fps if target_fps > 0 else 0
+        self._glyph_cache = {}  # (text, fg, bg) -> rendered Surface
+
+    def _get_font_path(self) -> str | None:
+        """Get font path for rendering.
+
+        Resolution order:
+        1. MAINLINE_PYGAME_FONT environment variable (if the file exists)
+        2. Platform font directories, scanned for a "Geist" Nerd/Mono font
+        Returns None if nothing matches (caller falls back to SysFont).
+        """
+        import os
+        import sys
+        from pathlib import Path
+
+        env_font = os.environ.get("MAINLINE_PYGAME_FONT")
+        if env_font and os.path.exists(env_font):
+            return env_font
+
+        def search_dir(base_path: str) -> str | None:
+            # Accept a direct font file, or scan a directory recursively.
+            if not os.path.exists(base_path):
+                return None
+            if os.path.isfile(base_path):
+                return base_path
+            for font_file in Path(base_path).rglob("*"):
+                if font_file.suffix.lower() in (".ttf", ".otf", ".ttc"):
+                    name = font_file.stem.lower()
+                    if "geist" in name and ("nerd" in name or "mono" in name):
+                        return str(font_file)
+            return None
+
+        # Per-platform user/system font locations.
+        search_dirs = []
+        if sys.platform == "darwin":
+            search_dirs.append(os.path.expanduser("~/Library/Fonts/"))
+        elif sys.platform == "win32":
+            search_dirs.append(
+                os.path.expanduser("~\\AppData\\Local\\Microsoft\\Windows\\Fonts\\")
+            )
+        else:
+            search_dirs.extend(
+                [
+                    os.path.expanduser("~/.local/share/fonts/"),
+                    os.path.expanduser("~/.fonts/"),
+                    "/usr/share/fonts/",
+                ]
+            )
+
+        for search_dir_path in search_dirs:
+            found = search_dir(search_dir_path)
+            if found:
+                return found
+
+        return None
+
+ def init(self, width: int, height: int, reuse: bool = False) -> None:
+ """Initialize display with dimensions.
+
+ Args:
+ width: Terminal width in characters
+ height: Terminal height in rows
+ reuse: If True, attach to existing pygame window instead of creating new
+ """
+ self.width = width
+ self.height = height
+
+ try:
+ import pygame
+ except ImportError:
+ return
+
+ if reuse and PygameDisplay._pygame_initialized:
+ self._pygame = pygame
+ self._initialized = True
+ return
+
+ pygame.init()
+ pygame.display.set_caption("Mainline")
+
+ self._screen = pygame.display.set_mode(
+ (self.window_width, self.window_height),
+ pygame.RESIZABLE,
+ )
+ self._pygame = pygame
+ PygameDisplay._pygame_initialized = True
+
+ # Calculate character dimensions from actual window size
+ self.width = max(1, self.window_width // self.cell_width)
+ self.height = max(1, self.window_height // self.cell_height)
+
+ font_path = self._get_font_path()
+ if font_path:
+ try:
+ self._font = pygame.font.Font(font_path, self.cell_height - 2)
+ except Exception:
+ self._font = pygame.font.SysFont("monospace", self.cell_height - 2)
+ else:
+ self._font = pygame.font.SysFont("monospace", self.cell_height - 2)
+
+ # Check if font supports box-drawing characters; if not, try to find one
+ self._use_fallback_border = False
+ if self._font:
+ try:
+ # Test rendering some key box-drawing characters
+ test_chars = ["┌", "─", "┐", "│", "└", "┘"]
+ for ch in test_chars:
+ surf = self._font.render(ch, True, (255, 255, 255))
+ # If surface is empty (width=0 or all black), font lacks glyph
+ if surf.get_width() == 0:
+ raise ValueError("Missing glyph")
+ except Exception:
+ # Font doesn't support box-drawing, will use line drawing fallback
+ self._use_fallback_border = True
+
+ self._initialized = True
+
+ def show(self, buffer: list[str], border: bool = False) -> None:
+ if not self._initialized or not self._pygame:
+ return
+
+ t0 = time.perf_counter()
+
+ for event in self._pygame.event.get():
+ if event.type == self._pygame.QUIT:
+ self._quit_requested = True
+ elif event.type == self._pygame.KEYDOWN:
+ if event.key in (self._pygame.K_ESCAPE, self._pygame.K_c):
+ if event.key == self._pygame.K_c and not (
+ event.mod & self._pygame.KMOD_LCTRL
+ or event.mod & self._pygame.KMOD_RCTRL
+ ):
+ continue
+ self._quit_requested = True
+ elif event.type == self._pygame.VIDEORESIZE:
+ self.window_width = event.w
+ self.window_height = event.h
+ self.width = max(1, self.window_width // self.cell_width)
+ self.height = max(1, self.window_height // self.cell_height)
+ self._resized = True
+
+ # FPS limiting - skip frame if we're going too fast
+ if self._frame_period > 0:
+ now = time.perf_counter()
+ elapsed = now - self._last_frame_time
+ if elapsed < self._frame_period:
+ return # Skip this frame
+ self._last_frame_time = now
+
+ # Get metrics for border display
+ fps = 0.0
+ frame_time = 0.0
+ from sideline.display import get_monitor
+
+ monitor = get_monitor()
+ if monitor:
+ stats = monitor.get_stats()
+ avg_ms = stats.get("pipeline", {}).get("avg_ms", 0) if stats else 0
+ frame_count = stats.get("frame_count", 0) if stats else 0
+ if avg_ms and frame_count > 0:
+ fps = 1000.0 / avg_ms
+ frame_time = avg_ms
+
+ self._screen.fill((0, 0, 0))
+
+ # If border requested but font lacks box-drawing glyphs, use graphical fallback
+ if border and self._use_fallback_border:
+ self._draw_fallback_border(fps, frame_time)
+ # Adjust content area to fit inside border
+ content_offset_x = self.cell_width
+ content_offset_y = self.cell_height
+ self.window_width - 2 * self.cell_width
+ self.window_height - 2 * self.cell_height
+ else:
+ # Normal rendering (with or without text border)
+ content_offset_x = 0
+ content_offset_y = 0
+
+ if border:
+ from sideline.display import render_border
+
+ buffer = render_border(buffer, self.width, self.height, fps, frame_time)
+
+ blit_list = []
+
+ for row_idx, line in enumerate(buffer[: self.height]):
+ if row_idx >= self.height:
+ break
+
+ tokens = parse_ansi(line)
+ x_pos = content_offset_x
+
+ for text, fg, bg, _bold in tokens:
+ if not text:
+ continue
+
+ # Use None as key for no background
+ bg_key = bg if bg != (0, 0, 0) else None
+ cache_key = (text, fg, bg_key)
+
+ if cache_key not in self._glyph_cache:
+ # Render and cache
+ if bg_key is not None:
+ self._glyph_cache[cache_key] = self._font.render(
+ text, True, fg, bg_key
+ )
+ else:
+ self._glyph_cache[cache_key] = self._font.render(text, True, fg)
+
+ surface = self._glyph_cache[cache_key]
+ blit_list.append(
+ (surface, (x_pos, content_offset_y + row_idx * self.cell_height))
+ )
+ x_pos += self._font.size(text)[0]
+
+ self._screen.blits(blit_list)
+
+ # Draw fallback border using graphics if needed
+ if border and self._use_fallback_border:
+ self._draw_fallback_border(fps, frame_time)
+
+ self._pygame.display.flip()
+
+ elapsed_ms = (time.perf_counter() - t0) * 1000
+
+ if monitor:
+ chars_in = sum(len(line) for line in buffer)
+ monitor.record_effect("pygame_display", elapsed_ms, chars_in, chars_in)
+
+    def _draw_fallback_border(self, fps: float, frame_time: float) -> None:
+        """Draw border using pygame graphics primitives instead of text.
+
+        Used when the selected font lacks box-drawing glyphs: draws a 1px
+        green rectangle around the window with FPS (top-right) and frame
+        time (bottom-right) rendered as plain ASCII.
+
+        Args:
+            fps: Frames per second to show (empty label when <= 0)
+            frame_time: Frame time in ms to show (omitted when <= 0)
+        """
+        if not self._screen or not self._pygame:
+            return
+
+        # Colors
+        border_color = (0, 255, 0)  # Green (like terminal border)
+        text_color = (255, 255, 255)
+
+        # Calculate dimensions
+        x1 = 0
+        y1 = 0
+        x2 = self.window_width - 1
+        y2 = self.window_height - 1
+
+        # Draw outer rectangle
+        self._pygame.draw.rect(
+            self._screen, border_color, (x1, y1, x2 - x1 + 1, y2 - y1 + 1), 1
+        )
+
+        # Draw top border with FPS
+        if fps > 0:
+            fps_text = f" FPS:{fps:.0f}"
+        else:
+            fps_text = ""
+        # We need to render this text with a fallback font that has basic ASCII
+        # Use system font which should have these characters
+        try:
+            font = self._font  # May not have box chars but should have alphanumeric
+            text_surf = font.render(fps_text, True, text_color, (0, 0, 0))
+            text_rect = text_surf.get_rect()
+            # Position on top border, right-aligned
+            text_x = x2 - text_rect.width - 5
+            text_y = y1 + 2
+            self._screen.blit(text_surf, (text_x, text_y))
+        except Exception:
+            pass
+
+        # Draw bottom border with frame time
+        if frame_time > 0:
+            ft_text = f" {frame_time:.1f}ms"
+            try:
+                ft_surf = self._font.render(ft_text, True, text_color, (0, 0, 0))
+                ft_rect = ft_surf.get_rect()
+                ft_x = x2 - ft_rect.width - 5
+                ft_y = y2 - ft_rect.height - 2
+                self._screen.blit(ft_surf, (ft_x, ft_y))
+            except Exception:
+                pass
+
+    def clear(self) -> None:
+        """Fill the window with black and present it."""
+        if self._screen and self._pygame:
+            self._screen.fill((0, 0, 0))
+            self._pygame.display.flip()
+
+    def get_dimensions(self) -> tuple[int, int]:
+        """Get current terminal dimensions based on window size.
+
+        Returns:
+            (width, height) in character cells
+        """
+        # Query actual window size and recalculate character cells
+        if self._screen and self._pygame:
+            try:
+                w, h = self._screen.get_size()
+                if w != self.window_width or h != self.window_height:
+                    # Window was resized externally; refresh cached sizes.
+                    self.window_width = w
+                    self.window_height = h
+                    self.width = max(1, w // self.cell_width)
+                    self.height = max(1, h // self.cell_height)
+            except Exception:
+                pass
+        return self.width, self.height
+
+    def cleanup(self, quit_pygame: bool = True) -> None:
+        """Cleanup display resources.
+
+        Args:
+            quit_pygame: If True, quit pygame entirely. Set to False when
+                reusing the display to avoid closing shared window.
+        """
+        if quit_pygame and self._pygame:
+            self._pygame.quit()
+            # Clear the shared flag so a later init() creates a new window.
+            PygameDisplay._pygame_initialized = False
+
+    @classmethod
+    def reset_state(cls) -> None:
+        """Reset pygame state - useful for testing."""
+        cls._pygame_initialized = False
+
+    def is_quit_requested(self) -> bool:
+        """Check if user requested quit.
+
+        Returns True after the user closed the window, pressed Escape, or
+        pressed Ctrl+C in the window (see the event loop in show()).
+        The main loop should check this and raise KeyboardInterrupt.
+        """
+        return self._quit_requested
+
+    def clear_quit_request(self) -> bool:
+        """Clear the quit request flag after handling.
+
+        Returns the previous quit request state.
+        NOTE(review): the Display protocol documents this method as
+        returning None — confirm callers ignore the return value.
+        """
+        was_requested = self._quit_requested
+        self._quit_requested = False
+        return was_requested
diff --git a/sideline/display/backends/replay.py b/sideline/display/backends/replay.py
new file mode 100644
index 0000000..4076ffe
--- /dev/null
+++ b/sideline/display/backends/replay.py
@@ -0,0 +1,122 @@
+"""
+Replay display backend - plays back recorded frames.
+"""
+
+from typing import Any
+
+
+class ReplayDisplay:
+ """Replay display - plays back recorded frames.
+
+ This display reads frames from a recording (list of frame data)
+ and yields them sequentially, useful for testing and demo purposes.
+ """
+
+ width: int = 80
+ height: int = 24
+
+ def __init__(self):
+ self._frames: list[dict[str, Any]] = []
+ self._current_frame = 0
+ self._playback_index = 0
+ self._loop = False
+
+ def init(self, width: int, height: int, reuse: bool = False) -> None:
+ """Initialize display with dimensions.
+
+ Args:
+ width: Terminal width in characters
+ height: Terminal height in rows
+ reuse: Ignored for ReplayDisplay
+ """
+ self.width = width
+ self.height = height
+
+ def set_frames(self, frames: list[dict[str, Any]]) -> None:
+ """Set frames to replay.
+
+ Args:
+ frames: List of frame dicts with 'buffer', 'width', 'height'
+ """
+ self._frames = frames
+ self._current_frame = 0
+ self._playback_index = 0
+
+ def set_loop(self, loop: bool) -> None:
+ """Set loop playback mode.
+
+ Args:
+ loop: True to loop, False to stop at end
+ """
+ self._loop = loop
+
+ def show(self, buffer: list[str], border: bool = False) -> None:
+ """Display a frame (ignored in replay mode).
+
+ Args:
+ buffer: Buffer to display (ignored)
+ border: Border flag (ignored)
+ """
+ pass
+
+ def get_next_frame(self) -> list[str] | None:
+ """Get the next frame in the recording.
+
+ Returns:
+ Buffer list of strings, or None if playback is done
+ """
+ if not self._frames:
+ return None
+
+ if self._playback_index >= len(self._frames):
+ if self._loop:
+ self._playback_index = 0
+ else:
+ return None
+
+ frame = self._frames[self._playback_index]
+ self._playback_index += 1
+ return frame.get("buffer")
+
+ def reset(self) -> None:
+ """Reset playback to the beginning."""
+ self._playback_index = 0
+
+ def seek(self, index: int) -> None:
+ """Seek to a specific frame.
+
+ Args:
+ index: Frame index to seek to
+ """
+ if 0 <= index < len(self._frames):
+ self._playback_index = index
+
+ def is_finished(self) -> bool:
+ """Check if playback is finished.
+
+ Returns:
+ True if at end of frames and not looping
+ """
+ return not self._loop and self._playback_index >= len(self._frames)
+
+ def clear(self) -> None:
+ pass
+
+ def cleanup(self) -> None:
+ pass
+
+ def get_dimensions(self) -> tuple[int, int]:
+ """Get current dimensions.
+
+ Returns:
+ (width, height) in character cells
+ """
+ return (self.width, self.height)
+
+ def is_quit_requested(self) -> bool:
+ """Check if quit was requested (optional protocol method)."""
+ return False
+
+ def clear_quit_request(self) -> None:
+ """Clear quit request (optional protocol method)."""
+ pass
diff --git a/sideline/display/backends/terminal.py b/sideline/display/backends/terminal.py
new file mode 100644
index 0000000..ae7de04
--- /dev/null
+++ b/sideline/display/backends/terminal.py
@@ -0,0 +1,161 @@
+"""
+ANSI terminal display backend.
+"""
+
+import os
+
+
class TerminalDisplay:
    """ANSI terminal display backend.

    Renders buffer to stdout using ANSI escape codes.
    Supports reuse - when reuse=True, skips re-initializing terminal state.
    Auto-detects terminal dimensions on init.
    """

    width: int = 80
    height: int = 24
    _initialized: bool = False

    def __init__(self, target_fps: float = 30.0):
        self.target_fps = target_fps
        # Per-frame time budget in seconds; 0 when pacing is disabled.
        self._frame_period = 1.0 / target_fps if target_fps > 0 else 0
        self._last_frame_time = 0.0
        self._cached_dimensions: tuple[int, int] | None = None

    def init(self, width: int, height: int, reuse: bool = False) -> None:
        """Initialize display with dimensions.

        If width/height are not provided (0/None), auto-detects terminal size.
        Otherwise uses provided dimensions or falls back to terminal size
        if the provided dimensions exceed terminal capacity.

        Args:
            width: Desired terminal width (0 = auto-detect)
            height: Desired terminal height (0 = auto-detect)
            reuse: If True, skip terminal re-initialization
        """
        from sideline.terminal import CURSOR_OFF

        try:
            size = os.get_terminal_size()
            term_w, term_h = size.columns, size.lines
        except OSError:
            # Not attached to a terminal (e.g. under tests); trust the
            # caller's dimensions or use a conventional 80x24 default.
            term_w = width if width > 0 else 80
            term_h = height if height > 0 else 24

        if width > 0 and height > 0:
            # Respect the request but never exceed the real terminal.
            self.width = min(width, term_w)
            self.height = min(height, term_h)
        else:
            self.width, self.height = term_w, term_h

        if not reuse or not self._initialized:
            print(CURSOR_OFF, end="", flush=True)
            self._initialized = True

    def get_dimensions(self) -> tuple[int, int]:
        """Get current terminal dimensions.

        Returns cached dimensions to avoid querying terminal every frame,
        which can cause inconsistent results. Dimensions are only refreshed
        when they actually change.

        Returns:
            (width, height) in character cells
        """
        try:
            size = os.get_terminal_size()
            dims = (size.columns, size.lines)
        except OSError:
            dims = (self.width, self.height)

        # None (first call) or a genuine resize both refresh the cache.
        if dims != self._cached_dimensions:
            self._cached_dimensions = dims
            self.width, self.height = dims

        return self._cached_dimensions

    def show(
        self, buffer: list[str], border: bool = False, positioning: str = "mixed"
    ) -> None:
        """Display buffer with optional border and positioning mode.

        Args:
            buffer: List of lines to display
            border: Whether to apply border
            positioning: Positioning mode - "mixed" (default), "absolute", or "relative"
        """
        import sys

        from sideline.display import BorderMode, get_monitor, render_border

        # Frame pacing is the caller's job (e.g. FrameTimer); every buffer
        # handed to us is rendered immediately.

        # Pull FPS / frame-time metrics so the border can show them.
        fps = 0.0
        frame_time = 0.0
        monitor = get_monitor()
        if monitor:
            stats = monitor.get_stats()
            avg_ms = stats.get("pipeline", {}).get("avg_ms", 0) if stats else 0
            frames_seen = stats.get("frame_count", 0) if stats else 0
            if avg_ms and frames_seen > 0:
                fps = 1000.0 / avg_ms
                frame_time = avg_ms

        if border and border != BorderMode.OFF:
            buffer = render_border(buffer, self.width, self.height, fps, frame_time)

        if positioning == "relative":
            # Strip cursor-positioning sequences (\033[row;colH) while
            # leaving color codes intact, then rely on plain newlines.
            import re

            lines = [re.sub(r"\033\[[0-9;]*H", "", line) for line in buffer]
        else:
            # "absolute" and "mixed" both emit the buffer verbatim: any
            # effect needing absolute placement carries its own codes.
            lines = buffer

        # Home the cursor, clear the screen, then write the frame.
        output = "\033[H\033[J" + "\n".join(lines)
        sys.stdout.buffer.write(output.encode())
        sys.stdout.flush()

    def clear(self) -> None:
        """Clear the terminal screen."""
        from sideline.terminal import CLR

        print(CLR, end="", flush=True)

    def cleanup(self) -> None:
        """Restore the cursor before handing the terminal back."""
        from sideline.terminal import CURSOR_ON

        print(CURSOR_ON, end="", flush=True)

    def is_quit_requested(self) -> bool:
        """Check if quit was requested (optional protocol method)."""
        return False

    def clear_quit_request(self) -> None:
        """Clear quit request (optional protocol method)."""
        pass
diff --git a/sideline/display/backends/websocket.py b/sideline/display/backends/websocket.py
new file mode 100644
index 0000000..7316815
--- /dev/null
+++ b/sideline/display/backends/websocket.py
@@ -0,0 +1,464 @@
+"""
+WebSocket display backend - broadcasts frame buffer to connected web clients.
+
+Supports streaming protocols:
+- Full frame (JSON) - default for compatibility
+- Binary streaming - efficient binary protocol
+- Diff streaming - only sends changed lines
+
+TODO: Transform to a true streaming backend with:
+- Proper WebSocket message streaming (currently sends full buffer each frame)
+- Connection pooling and backpressure handling
+- Binary protocol for efficiency (instead of JSON)
+- Client management with proper async handling
+- Mark for deprecation if replaced by a new streaming implementation
+
+Current implementation: Simple broadcast of text frames to all connected clients.
+"""
+
+import asyncio
+import base64
+import json
+import threading
+import time
+from enum import IntFlag
+
+from sideline.display.streaming import (
+ MessageType,
+ compress_frame,
+ compute_diff,
+ encode_binary_message,
+ encode_diff_message,
+)
+
+
class StreamingMode(IntFlag):
    """Streaming modes for WebSocket display.

    Values are combinable bit flags; a client's capability mask is
    tested with ``&`` in show() to pick its wire format.
    """

    JSON = 0x01  # Full JSON frames (default, compatible)
    BINARY = 0x02  # Binary compression
    DIFF = 0x04  # Differential updates
+
+
+try:
+ import websockets
+except ImportError:
+ websockets = None
+
+
def get_monitor():
    """Get the performance monitor.

    Returns None when the performance module cannot be imported or the
    lookup fails for any reason - metrics here are strictly best-effort.
    """
    try:
        from sideline.effects.performance import get_monitor as _get_monitor
    except Exception:
        return None
    try:
        return _get_monitor()
    except Exception:
        return None
+
+
class WebSocketDisplay:
    """WebSocket display backend - broadcasts to HTML Canvas clients.

    Each show() call pushes the frame to every connected client; the
    per-client wire format (JSON / binary / diff) is chosen from the
    capabilities recorded in _client_capabilities.
    """

    width: int = 80
    height: int = 24

    def __init__(
        self,
        host: str = "0.0.0.0",
        port: int = 8765,
        http_port: int = 8766,
        streaming_mode: StreamingMode = StreamingMode.JSON,
    ):
        self.host = host
        self.port = port
        self.http_port = http_port
        self.width = 80
        self.height = 24
        self._clients: set = set()
        self._server_running = False
        self._http_running = False
        self._server_thread: threading.Thread | None = None
        self._http_thread: threading.Thread | None = None
        self._available = True
        self._max_clients = 10
        self._client_connected_callback = None
        self._client_disconnected_callback = None
        self._command_callback = None
        self._controller = None  # Reference to UI panel or pipeline controller
        self._frame_delay = 0.0
        self._httpd = None  # HTTP server instance

        # Streaming configuration
        self._streaming_mode = streaming_mode
        self._last_buffer: list[str] = []
        self._client_capabilities: dict = {}  # Track client capabilities

        try:
            import websockets as _ws

            self._available = _ws is not None
        except ImportError:
            self._available = False

    def is_available(self) -> bool:
        """Check if WebSocket support is available."""
        return self._available

    def init(self, width: int, height: int, reuse: bool = False) -> None:
        """Initialize display with dimensions and start server.

        Args:
            width: Terminal width in characters
            height: Terminal height in rows
            reuse: If True, skip starting servers (assume already running)
        """
        self.width = width
        self.height = height

        if not reuse or not self._server_running:
            self.start_server()
            self.start_http_server()

    def show(self, buffer: list[str], border: bool = False) -> None:
        """Broadcast buffer to all connected clients using streaming protocol."""
        t0 = time.perf_counter()

        # Get metrics for border display
        fps = 0.0
        frame_time = 0.0
        monitor = get_monitor()
        if monitor:
            stats = monitor.get_stats()
            avg_ms = stats.get("pipeline", {}).get("avg_ms", 0) if stats else 0
            frame_count = stats.get("frame_count", 0) if stats else 0
            if avg_ms and frame_count > 0:
                fps = 1000.0 / avg_ms
                frame_time = avg_ms

        # Apply border if requested
        if border:
            from sideline.display import render_border

            buffer = render_border(buffer, self.width, self.height, fps, frame_time)

        if not self._clients:
            self._last_buffer = buffer
            return

        # Send to each client based on their capabilities.
        # NOTE(review): each _send_* call spins up a fresh event loop via
        # asyncio.run() while the server owns its own loop in another
        # thread - confirm against the module-level streaming TODO before
        # relying on this under load.
        disconnected = set()
        for client in list(self._clients):
            try:
                client_id = id(client)
                client_mode = self._client_capabilities.get(
                    client_id, StreamingMode.JSON
                )

                if client_mode & StreamingMode.DIFF:
                    self._send_diff_frame(client, buffer)
                elif client_mode & StreamingMode.BINARY:
                    self._send_binary_frame(client, buffer)
                else:
                    self._send_json_frame(client, buffer)
            except Exception:
                disconnected.add(client)

        for client in disconnected:
            self._clients.discard(client)
            if self._client_disconnected_callback:
                self._client_disconnected_callback(client)

        self._last_buffer = buffer

        elapsed_ms = (time.perf_counter() - t0) * 1000
        if monitor:
            chars_in = sum(len(line) for line in buffer)
            monitor.record_effect("websocket_display", elapsed_ms, chars_in, chars_in)

    def _send_json_frame(self, client, buffer: list[str]) -> None:
        """Send frame as JSON."""
        frame_data = {
            "type": "frame",
            "width": self.width,
            "height": self.height,
            "lines": buffer,
        }
        message = json.dumps(frame_data)
        asyncio.run(client.send(message))

    def _send_binary_frame(self, client, buffer: list[str]) -> None:
        """Send frame as compressed binary (base64-wrapped for transport)."""
        compressed = compress_frame(buffer)
        message = encode_binary_message(
            MessageType.FULL_FRAME, self.width, self.height, compressed
        )
        encoded = base64.b64encode(message).decode("utf-8")
        asyncio.run(client.send(encoded))

    def _send_diff_frame(self, client, buffer: list[str]) -> None:
        """Send frame as diff against the previously broadcast buffer."""
        diff = compute_diff(self._last_buffer, buffer)

        if not diff.changed_lines:
            # Nothing changed; skip the send entirely.
            return

        diff_payload = encode_diff_message(diff)
        message = encode_binary_message(
            MessageType.DIFF_FRAME, self.width, self.height, diff_payload
        )
        encoded = base64.b64encode(message).decode("utf-8")
        asyncio.run(client.send(encoded))

    def set_streaming_mode(self, mode: StreamingMode) -> None:
        """Set the default streaming mode for new clients."""
        self._streaming_mode = mode

    def get_streaming_mode(self) -> StreamingMode:
        """Get the current streaming mode."""
        return self._streaming_mode

    def clear(self) -> None:
        """Broadcast clear command to all clients."""
        if self._clients:
            clear_data = {"type": "clear"}
            message = json.dumps(clear_data)
            for client in list(self._clients):
                try:
                    asyncio.run(client.send(message))
                except Exception:
                    pass

    def cleanup(self) -> None:
        """Stop the servers."""
        self.stop_server()
        self.stop_http_server()

    async def _websocket_handler(self, websocket):
        """Handle WebSocket connections."""
        if len(self._clients) >= self._max_clients:
            await websocket.close()
            return

        self._clients.add(websocket)
        if self._client_connected_callback:
            self._client_connected_callback(websocket)

        try:
            async for message in websocket:
                try:
                    data = json.loads(message)
                    msg_type = data.get("type")

                    if msg_type == "resize":
                        self.width = data.get("width", 80)
                        self.height = data.get("height", 24)
                    elif msg_type == "command" and self._command_callback:
                        # Forward commands to the pipeline controller
                        command = data.get("command", {})
                        self._command_callback(command)
                    elif msg_type == "state_request":
                        # Send current state snapshot
                        state = self._get_state_snapshot()
                        if state:
                            response = {"type": "state", "state": state}
                            await websocket.send(json.dumps(response))
                except json.JSONDecodeError:
                    pass
        except Exception:
            pass
        finally:
            self._clients.discard(websocket)
            if self._client_disconnected_callback:
                self._client_disconnected_callback(websocket)

    async def _run_websocket_server(self):
        """Run the WebSocket server."""
        if not websockets:
            return
        async with websockets.serve(self._websocket_handler, self.host, self.port):
            while self._server_running:
                await asyncio.sleep(0.1)

    async def _run_http_server(self):
        """Run simple HTTP server for the client."""
        import os
        from http.server import HTTPServer, SimpleHTTPRequestHandler

        # Find the project root by locating this package ('sideline') in
        # the path; the web client is served from <project_root>/client.
        # (The original still searched for 'engine', which this file no
        # longer lives under, so that branch could never match.)
        websocket_file = os.path.abspath(__file__)
        parts = websocket_file.split(os.sep)
        if "sideline" in parts:
            pkg_idx = parts.index("sideline")
            project_root = os.sep.join(parts[:pkg_idx])
            client_dir = os.path.join(project_root, "client")
        else:
            # Fallback: go up 4 levels from websocket.py
            # websocket.py: .../sideline/display/backends/websocket.py
            # We need: .../client
            client_dir = os.path.join(
                os.path.dirname(
                    os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
                ),
                "client",
            )

        class Handler(SimpleHTTPRequestHandler):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, directory=client_dir, **kwargs)

            def log_message(self, format, *args):
                # Silence per-request logging.
                pass

        httpd = HTTPServer((self.host, self.http_port), Handler)
        # Store reference for shutdown
        self._httpd = httpd
        # Serve requests continuously
        httpd.serve_forever()

    def _run_async(self, coro):
        """Run coroutine in background."""
        try:
            asyncio.run(coro)
        except Exception as e:
            print(f"WebSocket async error: {e}")

    def start_server(self):
        """Start the WebSocket server in a background thread."""
        if not self._available:
            return
        if self._server_thread is not None:
            return

        self._server_running = True
        self._server_thread = threading.Thread(
            target=self._run_async, args=(self._run_websocket_server(),), daemon=True
        )
        self._server_thread.start()

    def stop_server(self):
        """Stop the WebSocket server."""
        self._server_running = False
        self._server_thread = None

    def start_http_server(self):
        """Start the HTTP server in a background thread."""
        if not self._available:
            return
        if self._http_thread is not None:
            return

        self._http_running = True
        self._http_thread = threading.Thread(
            target=self._run_async, args=(self._run_http_server(),), daemon=True
        )
        self._http_thread.start()

    def stop_http_server(self):
        """Stop the HTTP server."""
        self._http_running = False
        # _httpd is always initialized in __init__, so a plain truthiness
        # check suffices.
        if self._httpd:
            self._httpd.shutdown()
        self._http_thread = None

    def client_count(self) -> int:
        """Return number of connected clients."""
        return len(self._clients)

    def get_ws_port(self) -> int:
        """Return WebSocket port."""
        return self.port

    def get_http_port(self) -> int:
        """Return HTTP port."""
        return self.http_port

    def set_frame_delay(self, delay: float) -> None:
        """Set delay between frames in seconds."""
        self._frame_delay = delay

    def get_frame_delay(self) -> float:
        """Get delay between frames."""
        return self._frame_delay

    def set_client_connected_callback(self, callback) -> None:
        """Set callback for client connections."""
        self._client_connected_callback = callback

    def set_client_disconnected_callback(self, callback) -> None:
        """Set callback for client disconnections."""
        self._client_disconnected_callback = callback

    def set_command_callback(self, callback) -> None:
        """Set callback for incoming command messages from clients."""
        self._command_callback = callback

    def set_controller(self, controller) -> None:
        """Set controller (UI panel or pipeline) for state queries and command execution."""
        self._controller = controller

    def broadcast_state(self, state: dict) -> None:
        """Broadcast state update to all connected clients.

        Args:
            state: Dictionary containing state data to send to clients
        """
        if not self._clients:
            return

        message = json.dumps({"type": "state", "state": state})

        disconnected = set()
        for client in list(self._clients):
            try:
                asyncio.run(client.send(message))
            except Exception:
                disconnected.add(client)

        for client in disconnected:
            self._clients.discard(client)
            if self._client_disconnected_callback:
                self._client_disconnected_callback(client)

    def _get_state_snapshot(self) -> dict | None:
        """Get current state snapshot from controller."""
        if not self._controller:
            return None

        try:
            # Expect controller to have methods we need
            state = {}

            # Get stages info if UIPanel
            if hasattr(self._controller, "stages"):
                state["stages"] = {
                    name: {
                        "enabled": ctrl.enabled,
                        "params": ctrl.params,
                        "selected": ctrl.selected,
                    }
                    for name, ctrl in self._controller.stages.items()
                }

            # Get current preset
            if hasattr(self._controller, "_current_preset"):
                state["preset"] = self._controller._current_preset
            if hasattr(self._controller, "_presets"):
                state["presets"] = self._controller._presets

            # Get selected stage
            if hasattr(self._controller, "selected_stage"):
                state["selected_stage"] = self._controller.selected_stage

            return state
        except Exception:
            return None

    def get_dimensions(self) -> tuple[int, int]:
        """Get current dimensions.

        Returns:
            (width, height) in character cells
        """
        return (self.width, self.height)
diff --git a/sideline/display/renderer.py b/sideline/display/renderer.py
new file mode 100644
index 0000000..81017c0
--- /dev/null
+++ b/sideline/display/renderer.py
@@ -0,0 +1,280 @@
+"""
+Shared display rendering utilities.
+
+Provides common functionality for displays that render text to images
+(Pygame, Sixel, Kitty displays).
+"""
+
+from typing import Any
+
+ANSI_COLORS = {
+ 0: (0, 0, 0),
+ 1: (205, 49, 49),
+ 2: (13, 188, 121),
+ 3: (229, 229, 16),
+ 4: (36, 114, 200),
+ 5: (188, 63, 188),
+ 6: (17, 168, 205),
+ 7: (229, 229, 229),
+ 8: (102, 102, 102),
+ 9: (241, 76, 76),
+ 10: (35, 209, 139),
+ 11: (245, 245, 67),
+ 12: (59, 142, 234),
+ 13: (214, 112, 214),
+ 14: (41, 184, 219),
+ 15: (255, 255, 255),
+}
+
+
def parse_ansi(
    text: str,
) -> list[tuple[str, tuple[int, int, int], tuple[int, int, int], bool]]:
    """Parse ANSI escape sequences into text tokens with colors.

    Supports SGR reset/bold, the renderer's direct 0-15 palette indices,
    standard 30-37/40-47/90-97/100-107 colors, 256-color ("38;5;N" /
    "48;5;N") and 24-bit ("38;2;R;G;B" / "48;2;R;G;B") sequences.

    Args:
        text: Text containing ANSI escape sequences

    Returns:
        List of (text, fg_rgb, bg_rgb, bold) tuples
    """
    # VS Code-style 16-color palette (indices 0-15).
    palette = {
        0: (0, 0, 0),
        1: (205, 49, 49),
        2: (13, 188, 121),
        3: (229, 229, 16),
        4: (36, 114, 200),
        5: (188, 63, 188),
        6: (17, 168, 205),
        7: (229, 229, 229),
        8: (102, 102, 102),
        9: (241, 76, 76),
        10: (35, 209, 139),
        11: (245, 245, 67),
        12: (59, 142, 234),
        13: (214, 112, 214),
        14: (41, 184, 219),
        15: (255, 255, 255),
    }

    def xterm256(idx, fallback):
        """Map an xterm-256 index to RGB (16 base + 6x6x6 cube + grays)."""
        if 0 <= idx < 16:
            return palette.get(idx, fallback)
        if 16 <= idx < 232:
            cube = idx - 16
            # Standard xterm cube levels: 0, 95, 135, 175, 215, 255.
            return tuple(
                0 if v == 0 else 55 + v * 40
                for v in (cube // 36, (cube % 36) // 6, cube % 6)
            )
        if 232 <= idx < 256:
            gray = (idx - 232) * 10 + 8
            return (gray, gray, gray)
        return fallback

    tokens = []
    current_text = ""
    fg = (204, 204, 204)
    bg = (0, 0, 0)
    bold = False
    i = 0

    while i < len(text):
        char = text[i]

        if char == "\x1b" and i + 1 < len(text) and text[i + 1] == "[":
            # Flush the pending run before applying new attributes.
            if current_text:
                tokens.append((current_text, fg, bg, bold))
                current_text = ""

            i += 2
            code = ""
            while i < len(text) and not text[i].isalpha():
                code += text[i]
                i += 1

            if code:
                # Scan parameters by index: extended-color introducers
                # ("38"/"48") consume their following arguments. (The
                # original compared c.startswith("38;5;") AFTER splitting
                # on ";", so that branch was dead code, and its color-cube
                # math did not match the xterm palette.)
                codes = code.split(";")
                j = 0
                while j < len(codes):
                    c = codes[j]
                    if c == "0":
                        fg = (204, 204, 204)
                        bg = (0, 0, 0)
                        bold = False
                    elif c == "1":
                        bold = True
                    elif c == "22":
                        bold = False
                    elif c == "39":
                        fg = (204, 204, 204)
                    elif c == "49":
                        bg = (0, 0, 0)
                    elif (
                        c in ("38", "48")
                        and j + 2 < len(codes)
                        and codes[j + 1] == "5"
                        and codes[j + 2].isdigit()
                    ):
                        # 256-color: ESC[38;5;Nm / ESC[48;5;Nm.
                        rgb = xterm256(int(codes[j + 2]), fg if c == "38" else bg)
                        if c == "38":
                            fg = rgb
                        else:
                            bg = rgb
                        j += 2
                    elif (
                        c in ("38", "48")
                        and j + 4 < len(codes)
                        and codes[j + 1] == "2"
                    ):
                        # 24-bit truecolor: ESC[38;2;R;G;Bm / ESC[48;2;R;G;Bm.
                        try:
                            rgb = tuple(
                                max(0, min(255, int(v)))
                                for v in codes[j + 2 : j + 5]
                            )
                            if c == "38":
                                fg = rgb
                            else:
                                bg = rgb
                        except ValueError:
                            pass
                        j += 4
                    elif c.isdigit():
                        color_idx = int(c)
                        if color_idx in palette:
                            # Renderer convention: bare 0-15 map directly.
                            fg = palette[color_idx]
                        elif 30 <= color_idx <= 37:
                            fg = palette.get(color_idx - 30, fg)
                        elif 40 <= color_idx <= 47:
                            bg = palette.get(color_idx - 40, bg)
                        elif 90 <= color_idx <= 97:
                            fg = palette.get(color_idx - 90 + 8, fg)
                        elif 100 <= color_idx <= 107:
                            bg = palette.get(color_idx - 100 + 8, bg)
                    j += 1
            # Skip the terminating letter (e.g. the 'm' of an SGR).
            i += 1
        else:
            current_text += char
            i += 1

    if current_text:
        tokens.append((current_text, fg, bg, bold))

    return tokens if tokens else [("", fg, bg, bold)]
+
+
def get_default_font_path() -> str | None:
    """Get the path to a default monospace font.

    Scans the platform's font directories for a "Geist Nerd/Mono" face,
    accepting any mono/courier/terminal font encountered first, then
    falls back to `fc-match` on non-Windows systems. Returns None when
    nothing suitable is found.
    """
    import os
    import sys
    from pathlib import Path

    def pick_font(base: str) -> str | None:
        # Accept a direct file path; otherwise walk the directory tree.
        if not os.path.exists(base):
            return None
        if os.path.isfile(base):
            return base
        for candidate in Path(base).rglob("*"):
            if candidate.suffix.lower() not in (".ttf", ".otf", ".ttc"):
                continue
            stem = candidate.stem.lower()
            if "geist" in stem and ("nerd" in stem or "mono" in stem):
                return str(candidate)
            if "mono" in stem or "courier" in stem or "terminal" in stem:
                return str(candidate)
        return None

    if sys.platform == "darwin":
        roots = [
            os.path.expanduser("~/Library/Fonts/"),
            "/System/Library/Fonts/",
        ]
    elif sys.platform == "win32":
        roots = [
            os.path.expanduser("~\\AppData\\Local\\Microsoft\\Windows\\Fonts\\"),
            "C:\\Windows\\Fonts\\",
        ]
    else:
        roots = [
            os.path.expanduser("~/.local/share/fonts/"),
            os.path.expanduser("~/.fonts/"),
            "/usr/share/fonts/",
        ]

    for root in roots:
        match = pick_font(root)
        if match:
            return match

    if sys.platform != "win32":
        try:
            import subprocess

            for pattern in ["monospace", "DejaVuSansMono", "LiberationMono"]:
                result = subprocess.run(
                    ["fc-match", "-f", "%{file}", pattern],
                    capture_output=True,
                    text=True,
                    timeout=5,
                )
                if result.returncode == 0 and result.stdout.strip():
                    resolved = result.stdout.strip()
                    if os.path.exists(resolved):
                        return resolved
        except Exception:
            pass

    return None
+
+
def render_to_pil(
    buffer: list[str],
    width: int,
    height: int,
    cell_width: int = 10,
    cell_height: int = 18,
    font_path: str | None = None,
) -> Any:
    """Render buffer to a PIL Image.

    Args:
        buffer: List of text lines to render
        width: Terminal width in characters
        height: Terminal height in rows
        cell_width: Width of each character cell in pixels
        cell_height: Height of each character cell in pixels
        font_path: Path to TTF/OTF font file (optional)

    Returns:
        PIL Image object (RGBA, width*cell_width x height*cell_height)
    """
    from PIL import Image, ImageDraw, ImageFont

    img = Image.new(
        "RGBA", (width * cell_width, height * cell_height), (0, 0, 0, 255)
    )
    draw = ImageDraw.Draw(img)

    # Fall back to PIL's built-in bitmap font when no usable font path.
    font = None
    if font_path:
        try:
            font = ImageFont.truetype(font_path, cell_height - 2)
        except Exception:
            font = None
    if font is None:
        font = ImageFont.load_default()

    # Draw at most `height` rows; the slice already bounds the loop, so
    # the original's extra `if row_idx >= height: break` was dead code.
    for row_idx, line in enumerate(buffer[:height]):
        x_pos = 0
        y_pos = row_idx * cell_height

        for text, fg, bg, _bold in parse_ansi(line):
            if not text:
                continue

            # Paint a background box only for non-default backgrounds.
            if bg != (0, 0, 0):
                bbox = draw.textbbox((x_pos, y_pos), text, font=font)
                draw.rectangle(bbox, fill=(*bg, 255))

            draw.text((x_pos, y_pos), text, fill=(*fg, 255), font=font)
            x_pos += draw.textlength(text, font=font)

    return img
diff --git a/sideline/display/streaming.py b/sideline/display/streaming.py
new file mode 100644
index 0000000..54d08a6
--- /dev/null
+++ b/sideline/display/streaming.py
@@ -0,0 +1,268 @@
+"""
+Streaming protocol utilities for efficient frame transmission.
+
+Provides:
+- Frame differencing: Only send changed lines
+- Run-length encoding: Compress repeated lines
+- Binary encoding: Compact message format
+"""
+
+import json
+import zlib
+from dataclasses import dataclass
+from enum import IntEnum
+
+
class MessageType(IntEnum):
    """Message types for streaming protocol.

    Serialized as the first byte of every binary message (see
    encode_binary_message), so values must stay stable across versions.
    """

    FULL_FRAME = 1
    DIFF_FRAME = 2
    STATE = 3
    CLEAR = 4
    PING = 5
    PONG = 6
+
+
@dataclass
class FrameDiff:
    """Represents a diff between two frames."""

    width: int  # width in chars of the new frame's first line
    height: int  # total line count of the new frame
    changed_lines: list[tuple[int, str]]  # (line_index, content)
+
+
def compute_diff(old_buffer: list[str], new_buffer: list[str]) -> FrameDiff:
    """Compute differences between old and new buffer.

    A line counts as changed when it differs from the old buffer's line
    at the same index, or when the old buffer has no line there at all.

    Args:
        old_buffer: Previous frame buffer
        new_buffer: Current frame buffer

    Returns:
        FrameDiff with only changed lines
    """
    old_len = len(old_buffer)
    changed = [
        (idx, line)
        for idx, line in enumerate(new_buffer)
        if idx >= old_len or line != old_buffer[idx]
    ]

    return FrameDiff(
        width=len(new_buffer[0]) if new_buffer else 0,
        height=len(new_buffer),
        changed_lines=changed,
    )
+
+
def encode_rle(lines: list[tuple[int, str]]) -> list[tuple[int, str, int]]:
    """Run-length encode consecutive identical lines.

    A run is extended only when the content matches AND the index is
    contiguous with the run. (The original merged identical lines at
    non-adjacent indices - e.g. [(0, "a"), (5, "a")] became (0, "a", 2) -
    which decode_rle would then expand at the wrong line indices.)

    Args:
        lines: List of (index, content) tuples (must be sorted by index)

    Returns:
        List of (start_index, content, run_length) tuples
    """
    if not lines:
        return []

    encoded = []
    start_idx, current_line = lines[0]
    run_len = 1

    for idx, line in lines[1:]:
        if line == current_line and idx == start_idx + run_len:
            # Same content at the next consecutive index: extend the run.
            run_len += 1
        else:
            encoded.append((start_idx, current_line, run_len))
            start_idx = idx
            current_line = line
            run_len = 1

    encoded.append((start_idx, current_line, run_len))
    return encoded
+
+
def decode_rle(encoded: list[tuple[int, str, int]]) -> list[tuple[int, str]]:
    """Decode run-length encoded lines.

    Args:
        encoded: List of (start_index, content, run_length) tuples

    Returns:
        List of (index, content) tuples
    """
    return [
        (start + offset, line)
        for start, line, run in encoded
        for offset in range(run)
    ]
+
+
def compress_frame(buffer: list[str], level: int = 6) -> bytes:
    """Compress a frame buffer using zlib.

    Lines are joined with newlines before compression; decompress_frame
    reverses the transform.

    Args:
        buffer: Frame buffer (list of lines)
        level: Compression level (0-9)

    Returns:
        Compressed bytes
    """
    joined = "\n".join(buffer).encode("utf-8")
    return zlib.compress(joined, level)
+
+
def decompress_frame(data: bytes, height: int) -> list[str]:
    """Decompress a frame buffer.

    The result is truncated or padded with empty lines so it contains
    exactly `height` lines.

    Args:
        data: Compressed bytes
        height: Number of lines in original buffer

    Returns:
        Frame buffer (list of lines)
    """
    lines = zlib.decompress(data).decode("utf-8").split("\n")
    del lines[height:]  # drop any excess lines
    lines.extend([""] * (height - len(lines)))  # pad up to height
    return lines
+
+
def encode_binary_message(
    msg_type: MessageType, width: int, height: int, payload: bytes
) -> bytes:
    """Encode a binary message.

    Message format (network byte order):
    - 1 byte: message type
    - 2 bytes: width (uint16)
    - 2 bytes: height (uint16)
    - 4 bytes: payload length (uint32)
    - N bytes: payload

    Args:
        msg_type: Message type
        width: Frame width
        height: Frame height
        payload: Message payload

    Returns:
        Encoded binary message
    """
    import struct

    return struct.pack("!BHHI", msg_type.value, width, height, len(payload)) + payload
+
+
def decode_binary_message(data: bytes) -> tuple[MessageType, int, int, bytes]:
    """Decode a binary message.

    Args:
        data: Binary message data (as produced by encode_binary_message)

    Returns:
        Tuple of (msg_type, width, height, payload)
    """
    import struct

    type_val, width, height, length = struct.unpack("!BHHI", data[:9])
    return MessageType(type_val), width, height, data[9 : 9 + length]
+
+
def encode_diff_message(diff: FrameDiff, use_rle: bool = True) -> bytes:
    """Encode a diff message for transmission.

    Args:
        diff: Frame diff
        use_rle: Whether to use run-length encoding

    Returns:
        Encoded diff payload (UTF-8 JSON)
    """
    if use_rle:
        rows = [list(entry) for entry in encode_rle(diff.changed_lines)]
    else:
        rows = [list(entry) for entry in diff.changed_lines]

    return json.dumps(rows).encode("utf-8")
+
+
def decode_diff_message(payload: bytes, use_rle: bool = True) -> list[tuple[int, str]]:
    """Decode a diff message.

    Args:
        payload: Encoded diff payload
        use_rle: Whether run-length encoding was used

    Returns:
        List of (line_index, content) tuples
    """
    rows = json.loads(payload.decode("utf-8"))

    if use_rle:
        return decode_rle([(idx, line, run) for idx, line, run in rows])
    return [(idx, line) for idx, line in rows]
+
+
def should_use_diff(
    old_buffer: list[str], new_buffer: list[str], threshold: float = 0.3
) -> bool:
    """Determine if diff or full frame is more efficient.

    Args:
        old_buffer: Previous frame
        new_buffer: Current frame
        threshold: Max changed ratio to use diff (0.0-1.0)

    Returns:
        True if diff is more efficient
    """
    if not old_buffer or not new_buffer:
        # No baseline (or nothing to send): a full frame is required.
        return False

    changed = len(compute_diff(old_buffer, new_buffer).changed_lines)
    total = len(new_buffer)
    ratio = changed / total if total > 0 else 1.0
    return ratio <= threshold
+
+
def apply_diff(old_buffer: list[str], diff: FrameDiff) -> list[str]:
    """Apply a diff to an old buffer to get the new buffer.

    Args:
        old_buffer: Previous frame buffer
        diff: Frame diff to apply

    Returns:
        New frame buffer, exactly diff.height lines long
    """
    result = old_buffer.copy()

    for idx, content in diff.changed_lines:
        if idx < len(result):
            result[idx] = content
        else:
            # Pad the gap with blank lines, then place the new content.
            result.extend([""] * (idx - len(result)))
            result.append(content)

    if len(result) < diff.height:
        result.extend([""] * (diff.height - len(result)))

    return result[: diff.height]
diff --git a/sideline/effects/__init__.py b/sideline/effects/__init__.py
new file mode 100644
index 0000000..f33486c
--- /dev/null
+++ b/sideline/effects/__init__.py
@@ -0,0 +1,27 @@
+from sideline.effects.chain import EffectChain
+from sideline.effects.performance import PerformanceMonitor, get_monitor, set_monitor
+from sideline.effects.registry import EffectRegistry, get_registry, set_registry
+from sideline.effects.types import (
+ EffectConfig,
+ EffectContext,
+ Effect,
+ EffectPlugin, # Backward compatibility alias
+ create_effect_context,
+)
+
+# Note: Legacy effects and controller are Mainline-specific and moved to engine/effects/
+
+__all__ = [
+ "EffectChain",
+ "EffectRegistry",
+ "EffectConfig",
+ "EffectContext",
+ "Effect", # Primary class name
+ "EffectPlugin", # Backward compatibility alias
+ "create_effect_context",
+ "get_registry",
+ "set_registry",
+ "get_monitor",
+ "set_monitor",
+ "PerformanceMonitor",
+]
diff --git a/sideline/effects/chain.py b/sideline/effects/chain.py
new file mode 100644
index 0000000..1d0a34a
--- /dev/null
+++ b/sideline/effects/chain.py
@@ -0,0 +1,87 @@
+import time
+
+from sideline.effects.performance import PerformanceMonitor, get_monitor
+from sideline.effects.registry import EffectRegistry
+from sideline.effects.types import EffectContext, PartialUpdate
+
+
+class EffectChain:
+ def __init__(
+ self, registry: EffectRegistry, monitor: PerformanceMonitor | None = None
+ ):
+ self._registry = registry
+ self._order: list[str] = []
+ self._monitor = monitor
+
+ def _get_monitor(self) -> PerformanceMonitor:
+ if self._monitor is not None:
+ return self._monitor
+ return get_monitor()
+
+ def set_order(self, names: list[str]) -> None:
+ self._order = list(names)
+
+ def get_order(self) -> list[str]:
+ return self._order.copy()
+
+ def add_effect(self, name: str, position: int | None = None) -> bool:
+ if name not in self._registry.list_all():
+ return False
+ if position is None:
+ self._order.append(name)
+ else:
+ self._order.insert(position, name)
+ return True
+
+ def remove_effect(self, name: str) -> bool:
+ if name in self._order:
+ self._order.remove(name)
+ return True
+ return False
+
+ def reorder(self, new_order: list[str]) -> bool:
+ all_plugins = set(self._registry.list_all().keys())
+ if not all(name in all_plugins for name in new_order):
+ return False
+ self._order = list(new_order)
+ return True
+
+ def process(self, buf: list[str], ctx: EffectContext) -> list[str]:
+ monitor = self._get_monitor()
+ frame_number = ctx.frame_number
+ monitor.start_frame(frame_number)
+
+ # Get dirty regions from canvas via context (set by CanvasStage)
+ dirty_rows = ctx.get_state("canvas.dirty_rows")
+
+ # Create PartialUpdate for effects that support it
+ full_buffer = dirty_rows is None or len(dirty_rows) == 0
+ partial = PartialUpdate(
+ rows=None,
+ cols=None,
+ dirty=dirty_rows,
+ full_buffer=full_buffer,
+ )
+
+ frame_start = time.perf_counter()
+ result = list(buf)
+ for name in self._order:
+ plugin = self._registry.get(name)
+ if plugin and plugin.config.enabled:
+ chars_in = sum(len(line) for line in result)
+ effect_start = time.perf_counter()
+ try:
+ # Use process_partial if supported, otherwise fall back to process
+ if getattr(plugin, "supports_partial_updates", False):
+ result = plugin.process_partial(result, ctx, partial)
+ else:
+ result = plugin.process(result, ctx)
+ except Exception:
+ plugin.config.enabled = False
+ elapsed = time.perf_counter() - effect_start
+ chars_out = sum(len(line) for line in result)
+ monitor.record_effect(name, elapsed * 1000, chars_in, chars_out)
+
+ total_elapsed = time.perf_counter() - frame_start
+ monitor.end_frame(frame_number, total_elapsed * 1000)
+ return result
diff --git a/sideline/effects/performance.py b/sideline/effects/performance.py
new file mode 100644
index 0000000..7a26bb9
--- /dev/null
+++ b/sideline/effects/performance.py
@@ -0,0 +1,103 @@
+from collections import deque
+from dataclasses import dataclass
+
+
+@dataclass
+class EffectTiming:
+ name: str
+ duration_ms: float
+ buffer_chars_in: int
+ buffer_chars_out: int
+
+
+@dataclass
+class FrameTiming:
+ frame_number: int
+ total_ms: float
+ effects: list[EffectTiming]
+
+
+class PerformanceMonitor:
+ """Collects and stores performance metrics for effect pipeline."""
+
+ def __init__(self, max_frames: int = 60):
+ self._max_frames = max_frames
+ self._frames: deque[FrameTiming] = deque(maxlen=max_frames)
+ self._current_frame: list[EffectTiming] = []
+
+ def start_frame(self, frame_number: int) -> None:
+ self._current_frame = []
+
+ def record_effect(
+ self, name: str, duration_ms: float, chars_in: int, chars_out: int
+ ) -> None:
+ self._current_frame.append(
+ EffectTiming(
+ name=name,
+ duration_ms=duration_ms,
+ buffer_chars_in=chars_in,
+ buffer_chars_out=chars_out,
+ )
+ )
+
+ def end_frame(self, frame_number: int, total_ms: float) -> None:
+ self._frames.append(
+ FrameTiming(
+ frame_number=frame_number,
+ total_ms=total_ms,
+ effects=self._current_frame,
+ )
+ )
+
+ def get_stats(self) -> dict:
+ if not self._frames:
+ return {"error": "No timing data available"}
+
+ total_times = [f.total_ms for f in self._frames]
+ avg_total = sum(total_times) / len(total_times)
+ min_total = min(total_times)
+ max_total = max(total_times)
+
+ effect_stats: dict[str, dict] = {}
+ for frame in self._frames:
+ for effect in frame.effects:
+ if effect.name not in effect_stats:
+ effect_stats[effect.name] = {"times": [], "total_chars": 0}
+ effect_stats[effect.name]["times"].append(effect.duration_ms)
+ effect_stats[effect.name]["total_chars"] += effect.buffer_chars_out
+
+ for name, stats in effect_stats.items():
+ times = stats["times"]
+ stats["avg_ms"] = sum(times) / len(times)
+ stats["min_ms"] = min(times)
+ stats["max_ms"] = max(times)
+ del stats["times"]
+
+ return {
+ "frame_count": len(self._frames),
+ "pipeline": {
+ "avg_ms": avg_total,
+ "min_ms": min_total,
+ "max_ms": max_total,
+ },
+ "effects": effect_stats,
+ }
+
+ def reset(self) -> None:
+ self._frames.clear()
+ self._current_frame = []
+
+
+_monitor: PerformanceMonitor | None = None
+
+
+def get_monitor() -> PerformanceMonitor:
+ global _monitor
+ if _monitor is None:
+ _monitor = PerformanceMonitor()
+ return _monitor
+
+
+def set_monitor(monitor: PerformanceMonitor) -> None:
+ global _monitor
+ _monitor = monitor
diff --git a/sideline/effects/registry.py b/sideline/effects/registry.py
new file mode 100644
index 0000000..6b33930
--- /dev/null
+++ b/sideline/effects/registry.py
@@ -0,0 +1,59 @@
+from sideline.effects.types import EffectConfig, EffectPlugin
+
+
+class EffectRegistry:
+ def __init__(self):
+ self._plugins: dict[str, EffectPlugin] = {}
+ self._discovered: bool = False
+
+ def register(self, plugin: EffectPlugin) -> None:
+ self._plugins[plugin.name] = plugin
+
+ def get(self, name: str) -> EffectPlugin | None:
+ return self._plugins.get(name)
+
+ def list_all(self) -> dict[str, EffectPlugin]:
+ return self._plugins.copy()
+
+ def list_enabled(self) -> list[EffectPlugin]:
+ return [p for p in self._plugins.values() if p.config.enabled]
+
+ def enable(self, name: str) -> bool:
+ plugin = self._plugins.get(name)
+ if plugin:
+ plugin.config.enabled = True
+ return True
+ return False
+
+ def disable(self, name: str) -> bool:
+ plugin = self._plugins.get(name)
+ if plugin:
+ plugin.config.enabled = False
+ return True
+ return False
+
+ def configure(self, name: str, config: EffectConfig) -> bool:
+ plugin = self._plugins.get(name)
+ if plugin:
+ plugin.configure(config)
+ return True
+ return False
+
+ def is_enabled(self, name: str) -> bool:
+ plugin = self._plugins.get(name)
+ return plugin.config.enabled if plugin else False
+
+
+_registry: EffectRegistry | None = None
+
+
+def get_registry() -> EffectRegistry:
+ global _registry
+ if _registry is None:
+ _registry = EffectRegistry()
+ return _registry
+
+
+def set_registry(registry: EffectRegistry) -> None:
+ global _registry
+ _registry = registry
diff --git a/sideline/effects/types.py b/sideline/effects/types.py
new file mode 100644
index 0000000..6375f40
--- /dev/null
+++ b/sideline/effects/types.py
@@ -0,0 +1,288 @@
+"""
+Visual effects type definitions and base classes.
+
+EffectPlugin Architecture:
+- Uses ABC (Abstract Base Class) for interface enforcement
+- Runtime discovery via directory scanning (effects_plugins/)
+- Configuration via EffectConfig dataclass
+- Context passed through EffectContext dataclass
+
+Plugin System Research (see AGENTS.md for references):
+- VST: Standardized audio interfaces, chaining, presets (FXP/FXB)
+- Python Entry Points: Namespace packages, importlib.metadata discovery
+- Shadertoy: Shader-based with uniforms as context
+
+Current gaps vs industry patterns:
+- No preset save/load system
+- No external plugin distribution via entry points
+- No plugin metadata (version, author, description)
+"""
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from typing import Any
+
+
+@dataclass
+class PartialUpdate:
+ """Represents a partial buffer update for optimized rendering.
+
+ Instead of processing the full buffer every frame, effects that support
+ partial updates can process only changed regions.
+
+ Attributes:
+        rows: (start, end) inclusive row range that changed (None = all rows)
+        cols: (start, end) inclusive column range that changed (None = full width)
+        dirty: Set of dirty row indices; full_buffer=True forces a full-buffer pass
+ """
+
+ rows: tuple[int, int] | None = None # (start, end) inclusive
+ cols: tuple[int, int] | None = None # (start, end) inclusive
+ dirty: set[int] | None = None # Set of dirty row indices
+ full_buffer: bool = True # If True, process entire buffer
+
+
+@dataclass
+class EffectContext:
+ """Context passed to effect plugins during processing.
+
+ Contains terminal dimensions, camera state, frame info, and real-time sensor values.
+ """
+
+ terminal_width: int
+ terminal_height: int
+ scroll_cam: int
+ ticker_height: int
+ camera_x: int = 0
+ mic_excess: float = 0.0
+ grad_offset: float = 0.0
+ frame_number: int = 0
+ has_message: bool = False
+ items: list = field(default_factory=list)
+ _state: dict[str, Any] = field(default_factory=dict, repr=False)
+
+ def compute_entropy(self, effect_name: str, data: Any) -> float:
+ """Compute entropy score for an effect based on its output.
+
+ Args:
+ effect_name: Name of the effect
+ data: Processed buffer or effect-specific data
+
+ Returns:
+ Entropy score 0.0-1.0 representing visual chaos
+ """
+ # Default implementation: use effect name as seed for deterministic randomness
+ # Better implementations can analyze actual buffer content
+ import hashlib
+
+ data_str = str(data)[:100] if data else ""
+ hash_val = hashlib.md5(f"{effect_name}:{data_str}".encode()).hexdigest()
+ # Convert hash to float 0.0-1.0
+ entropy = int(hash_val[:8], 16) / 0xFFFFFFFF
+ return min(max(entropy, 0.0), 1.0)
+
+ def get_sensor_value(self, sensor_name: str) -> float | None:
+ """Get a sensor value from context state.
+
+ Args:
+ sensor_name: Name of the sensor (e.g., "mic", "camera")
+
+ Returns:
+ Sensor value as float, or None if not available.
+ """
+ return self._state.get(f"sensor.{sensor_name}")
+
+ def set_state(self, key: str, value: Any) -> None:
+ """Set a state value in the context."""
+ self._state[key] = value
+
+ def get_state(self, key: str, default: Any = None) -> Any:
+ """Get a state value from the context."""
+ return self._state.get(key, default)
+
+ @property
+ def state(self) -> dict[str, Any]:
+ """Get the state dictionary for direct access by effects."""
+ return self._state
+
+
+@dataclass
+class EffectConfig:
+ enabled: bool = True
+ intensity: float = 1.0
+ entropy: float = 0.0 # Visual chaos metric (0.0 = calm, 1.0 = chaotic)
+ params: dict[str, Any] = field(default_factory=dict)
+
+
+class Effect(ABC):
+ """Abstract base class for visual effects.
+
+ Effects are pipeline stages that transform the rendered buffer.
+ They can apply visual transformations like noise, fade, glitch, etc.
+
+ Subclasses must define:
+ - name: str - unique identifier for the effect
+ - config: EffectConfig - current configuration
+
+ Optional class attribute:
+ - param_bindings: dict - Declarative sensor-to-param bindings
+ Example:
+ param_bindings = {
+ "intensity": {"sensor": "mic", "transform": "linear"},
+ "rate": {"sensor": "mic", "transform": "exponential"},
+ }
+
+ And implement:
+ - process(buf, ctx) -> list[str]
+ - configure(config) -> None
+
+ Effect Behavior with ticker_height=0:
+ - NoiseEffect: Returns buffer unchanged (no ticker to apply noise to)
+ - FadeEffect: Returns buffer unchanged (no ticker to fade)
+ - GlitchEffect: Processes normally (doesn't depend on ticker_height)
+ - FirehoseEffect: Returns buffer unchanged if no items in context
+
+ Effects should handle missing or zero context values gracefully by
+ returning the input buffer unchanged rather than raising errors.
+
+ The param_bindings system enables PureData-style signal routing:
+ - Sensors emit values that effects can bind to
+ - Transform functions map sensor values to param ranges
+ - Effects read bound values from context.state["sensor.{name}"]
+ """
+
+ name: str
+ config: EffectConfig
+ param_bindings: dict[str, dict[str, str | float]] = {}
+ supports_partial_updates: bool = False # Override in subclasses for optimization
+
+ @abstractmethod
+ def process(self, buf: list[str], ctx: EffectContext) -> list[str]:
+ """Process the buffer with this effect applied.
+
+ Args:
+ buf: List of lines to process
+ ctx: Effect context with terminal state
+
+ Returns:
+ Processed buffer (may be same object or new list)
+ """
+ ...
+
+ def process_partial(
+ self, buf: list[str], ctx: EffectContext, partial: PartialUpdate
+ ) -> list[str]:
+ """Process a partial buffer for optimized rendering.
+
+ Override this in subclasses that support partial updates for performance.
+ Default implementation falls back to full buffer processing.
+
+ Args:
+ buf: List of lines to process
+ ctx: Effect context with terminal state
+ partial: PartialUpdate indicating which regions changed
+
+ Returns:
+ Processed buffer (may be same object or new list)
+ """
+ # Default: fall back to full processing
+ return self.process(buf, ctx)
+
+ @abstractmethod
+ def configure(self, config: EffectConfig) -> None:
+ """Configure the effect with new settings.
+
+ Args:
+ config: New configuration to apply
+ """
+ ...
+
+
+# Backward compatibility alias
+EffectPlugin = Effect
+
+
+def create_effect_context(
+ terminal_width: int = 80,
+ terminal_height: int = 24,
+ scroll_cam: int = 0,
+ ticker_height: int = 0,
+ mic_excess: float = 0.0,
+ grad_offset: float = 0.0,
+ frame_number: int = 0,
+ has_message: bool = False,
+ items: list | None = None,
+) -> EffectContext:
+ """Factory function to create EffectContext with sensible defaults."""
+ return EffectContext(
+ terminal_width=terminal_width,
+ terminal_height=terminal_height,
+ scroll_cam=scroll_cam,
+ ticker_height=ticker_height,
+ mic_excess=mic_excess,
+ grad_offset=grad_offset,
+ frame_number=frame_number,
+ has_message=has_message,
+ items=items or [],
+ )
+
+
+@dataclass
+class PipelineConfig:
+ order: list[str] = field(default_factory=list)
+ effects: dict[str, EffectConfig] = field(default_factory=dict)
+
+
+def apply_param_bindings(
+ effect: "EffectPlugin",
+ ctx: EffectContext,
+) -> EffectConfig:
+ """Apply sensor bindings to effect config.
+
+ This resolves param_bindings declarations by reading sensor values
+ from the context and applying transform functions.
+
+ Args:
+ effect: The effect with param_bindings to apply
+ ctx: EffectContext containing sensor values
+
+ Returns:
+ Modified EffectConfig with sensor-driven values applied.
+ """
+ import copy
+
+ if not effect.param_bindings:
+ return effect.config
+
+ config = copy.deepcopy(effect.config)
+
+ for param_name, binding in effect.param_bindings.items():
+ sensor_name: str = binding.get("sensor", "")
+ transform: str = binding.get("transform", "linear")
+
+ if not sensor_name:
+ continue
+
+ sensor_value = ctx.get_sensor_value(sensor_name)
+ if sensor_value is None:
+ continue
+
+ if transform == "linear":
+ applied_value: float = sensor_value
+ elif transform == "exponential":
+ applied_value = sensor_value**2
+ elif transform == "threshold":
+ threshold = float(binding.get("threshold", 0.5))
+ applied_value = 1.0 if sensor_value > threshold else 0.0
+ elif transform == "inverse":
+ applied_value = 1.0 - sensor_value
+ else:
+ applied_value = sensor_value
+
+ config.params[f"{param_name}_sensor"] = applied_value
+
+ if param_name == "intensity":
+ base_intensity = effect.config.intensity
+ config.intensity = base_intensity * (0.5 + applied_value * 0.5)
+
+ return config
diff --git a/sideline/fonts/Corptic.otf b/sideline/fonts/Corptic.otf
new file mode 100644
index 0000000..bc440de
Binary files /dev/null and b/sideline/fonts/Corptic.otf differ
diff --git a/sideline/fonts/__init__.py b/sideline/fonts/__init__.py
new file mode 100644
index 0000000..cc9ca48
--- /dev/null
+++ b/sideline/fonts/__init__.py
@@ -0,0 +1,38 @@
+"""
+Sideline font configuration.
+
+Provides default fonts for block letter rendering.
+"""
+
+import os
+from pathlib import Path
+
+# Directory containing Sideline fonts
+FONTS_DIR = Path(__file__).parent
+
+# Default font for block letter rendering
+DEFAULT_FONT = FONTS_DIR / "Corptic.otf"
+
+# Font size for default rendering
+DEFAULT_FONT_SIZE = 32
+
+
+def get_default_font_path() -> str:
+ """Get path to default font file."""
+ if DEFAULT_FONT.exists():
+ return str(DEFAULT_FONT)
+ raise FileNotFoundError(f"Default font not found: {DEFAULT_FONT}")
+
+
+def get_default_font_size() -> int:
+ """Get default font size."""
+ return DEFAULT_FONT_SIZE
+
+
+__all__ = [
+ "get_default_font_path",
+ "get_default_font_size",
+ "DEFAULT_FONT",
+ "DEFAULT_FONT_SIZE",
+ "FONTS_DIR",
+]
diff --git a/sideline/pipeline/__init__.py b/sideline/pipeline/__init__.py
new file mode 100644
index 0000000..5ee2fb2
--- /dev/null
+++ b/sideline/pipeline/__init__.py
@@ -0,0 +1,94 @@
+"""
+Unified Pipeline Architecture.
+
+This module provides a clean, dependency-managed pipeline system:
+- Stage: Base class for all pipeline components
+- Pipeline: DAG-based execution orchestrator
+- PipelineParams: Runtime configuration for animation
+- PipelinePreset: Pre-configured pipeline configurations
+- StageRegistry: Unified registration for all stage types
+- Plugin system: Support for external stage plugins
+
+The pipeline architecture supports:
+- Sources: Data providers (headlines, poetry, pipeline viz)
+- Effects: Post-processors (noise, fade, glitch, hud)
+- Displays: Output backends (terminal, pygame, websocket)
+- Cameras: Viewport controllers (vertical, horizontal, omni)
+
+Plugin System:
+ Plugins can be registered explicitly or discovered automatically via entry points.
+ Applications can register their own stages using StageRegistry.register() or
+ StageRegistry.register_plugin().
+
+Example:
+ from sideline.pipeline import Pipeline, PipelineConfig, StageRegistry
+
+ # Register application-specific stages
+ StageRegistry.register("source", MyDataSource)
+
+ # Or discover plugins automatically
+ StageRegistry.discover_plugins()
+
+ pipeline = Pipeline(PipelineConfig(source="my_source", display="terminal"))
+ pipeline.add_stage("source", StageRegistry.create("source", "my_source"))
+ pipeline.add_stage("display", StageRegistry.create("display", "terminal"))
+ pipeline.build().initialize()
+
+ result = pipeline.execute(initial_data)
+"""
+
+from sideline.pipeline.controller import (
+ Pipeline,
+ PipelineConfig,
+ PipelineRunner,
+ create_default_pipeline,
+ create_pipeline_from_params,
+)
+from sideline.pipeline.core import (
+ PipelineContext,
+ Stage,
+ StageConfig,
+ StageError,
+ StageResult,
+)
+from sideline.pipeline.params import (
+ DEFAULT_HEADLINE_PARAMS,
+ DEFAULT_PIPELINE_PARAMS,
+ DEFAULT_PYGAME_PARAMS,
+ PipelineParams,
+)
+from sideline.pipeline.registry import (
+ StageRegistry,
+ discover_stages,
+ register_camera,
+ register_display,
+ register_effect,
+ register_source,
+)
+
+__all__ = [
+ # Core
+ "Stage",
+ "StageConfig",
+ "StageError",
+ "StageResult",
+ "PipelineContext",
+ # Controller
+ "Pipeline",
+ "PipelineConfig",
+ "PipelineRunner",
+ "create_default_pipeline",
+ "create_pipeline_from_params",
+ # Params
+ "PipelineParams",
+ "DEFAULT_HEADLINE_PARAMS",
+ "DEFAULT_PIPELINE_PARAMS",
+ "DEFAULT_PYGAME_PARAMS",
+ # Registry
+ "StageRegistry",
+ "discover_stages",
+ "register_source",
+ "register_effect",
+ "register_display",
+ "register_camera",
+]
diff --git a/sideline/pipeline/adapters.py b/sideline/pipeline/adapters.py
new file mode 100644
index 0000000..0f4063e
--- /dev/null
+++ b/sideline/pipeline/adapters.py
@@ -0,0 +1,50 @@
+"""
+Stage adapters - Bridge existing components to the Stage interface.
+
+This module provides adapters that wrap existing components
+(EffectPlugin, Display, DataSource, Camera) as Stage implementations.
+
+DEPRECATED: This file is now a compatibility wrapper.
+Use `sideline.pipeline.adapters` package instead.
+"""
+
+# Re-export from the new package structure for backward compatibility
+from sideline.pipeline.adapters import (
+ # Adapter classes
+ CameraStage,
+ CanvasStage,
+ DataSourceStage,
+ DisplayStage,
+ EffectPluginStage,
+ FontStage,
+ ImageToTextStage,
+ PassthroughStage,
+ SourceItemsToBufferStage,
+ ViewportFilterStage,
+ # Factory functions
+ create_stage_from_camera,
+ create_stage_from_display,
+ create_stage_from_effect,
+ create_stage_from_font,
+ create_stage_from_source,
+)
+
+__all__ = [
+ # Adapter classes
+ "EffectPluginStage",
+ "DisplayStage",
+ "DataSourceStage",
+ "PassthroughStage",
+ "SourceItemsToBufferStage",
+ "CameraStage",
+ "ViewportFilterStage",
+ "FontStage",
+ "ImageToTextStage",
+ "CanvasStage",
+ # Factory functions
+ "create_stage_from_display",
+ "create_stage_from_effect",
+ "create_stage_from_source",
+ "create_stage_from_camera",
+ "create_stage_from_font",
+]
diff --git a/sideline/pipeline/adapters/__init__.py b/sideline/pipeline/adapters/__init__.py
new file mode 100644
index 0000000..dce025c
--- /dev/null
+++ b/sideline/pipeline/adapters/__init__.py
@@ -0,0 +1,55 @@
+"""Stage adapters - Bridge existing components to the Stage interface.
+
+This module provides adapters that wrap existing components
+(EffectPlugin, Display, DataSource, Camera) as Stage implementations.
+"""
+
+from .camera import CameraClockStage, CameraStage
+from .data_source import DataSourceStage, PassthroughStage, SourceItemsToBufferStage
+from .display import DisplayStage
+from .effect_plugin import EffectPluginStage
+from .factory import (
+ create_stage_from_camera,
+ create_stage_from_display,
+ create_stage_from_effect,
+ create_stage_from_font,
+ create_stage_from_source,
+)
+from .message_overlay import MessageOverlayConfig, MessageOverlayStage
+from .positioning import (
+ PositioningMode,
+ PositionStage,
+ create_position_stage,
+)
+from .transform import (
+ CanvasStage,
+ FontStage,
+ ImageToTextStage,
+ ViewportFilterStage,
+)
+
+__all__ = [
+ # Adapter classes
+ "EffectPluginStage",
+ "DisplayStage",
+ "DataSourceStage",
+ "PassthroughStage",
+ "SourceItemsToBufferStage",
+ "CameraStage",
+ "CameraClockStage",
+ "ViewportFilterStage",
+ "FontStage",
+ "ImageToTextStage",
+ "CanvasStage",
+ "MessageOverlayStage",
+ "MessageOverlayConfig",
+ "PositionStage",
+ "PositioningMode",
+ # Factory functions
+ "create_stage_from_display",
+ "create_stage_from_effect",
+ "create_stage_from_source",
+ "create_stage_from_camera",
+ "create_stage_from_font",
+ "create_position_stage",
+]
diff --git a/sideline/pipeline/adapters/camera.py b/sideline/pipeline/adapters/camera.py
new file mode 100644
index 0000000..98cf91a
--- /dev/null
+++ b/sideline/pipeline/adapters/camera.py
@@ -0,0 +1,219 @@
+"""Camera stage adapters: per-frame clock stage and viewport transform stage."""
+
+import time
+from typing import Any
+
+from sideline.pipeline.core import DataType, PipelineContext, Stage
+
+
+class CameraClockStage(Stage):
+ """Per-frame clock stage that updates camera state.
+
+ This stage runs once per frame and updates the camera's internal state
+ (position, time). It makes camera_y/camera_x available to subsequent
+ stages via the pipeline context.
+
+ Unlike other stages, this is a pure clock stage and doesn't process
+ data - it just updates camera state and passes data through unchanged.
+ """
+
+ def __init__(self, camera, name: str = "camera-clock"):
+ self._camera = camera
+ self.name = name
+ self.category = "camera"
+ self.optional = False
+ self._last_frame_time: float | None = None
+
+ @property
+ def stage_type(self) -> str:
+ return "camera"
+
+ @property
+ def capabilities(self) -> set[str]:
+ # Provides camera state info only
+ # NOTE: Do NOT provide "source" as it conflicts with viewport_filter's "source.filtered"
+ return {"camera.state"}
+
+ @property
+ def dependencies(self) -> set[str]:
+ # Clock stage - no dependencies (updates every frame regardless of data flow)
+ return set()
+
+ @property
+ def inlet_types(self) -> set:
+ # Accept any data type - this is a pass-through stage
+ return {DataType.ANY}
+
+ @property
+ def outlet_types(self) -> set:
+ # Pass through whatever was received
+ return {DataType.ANY}
+
+ def process(self, data: Any, ctx: PipelineContext) -> Any:
+ """Update camera state and pass data through.
+
+ This stage updates the camera's internal state (position, time) and
+ makes the updated camera_y/camera_x available to subsequent stages
+ via the pipeline context.
+
+ The data is passed through unchanged - this stage only updates
+ camera state, it doesn't transform the data.
+ """
+ if data is None:
+ return data
+
+ # Update camera speed from params if explicitly set (for dynamic modulation)
+ # Only update if camera_speed in params differs from the default (1.0)
+ # This preserves camera speed set during construction
+ if (
+ ctx.params
+ and hasattr(ctx.params, "camera_speed")
+ and ctx.params.camera_speed != 1.0
+ ):
+ self._camera.set_speed(ctx.params.camera_speed)
+
+ current_time = time.perf_counter()
+ dt = 0.0
+ if self._last_frame_time is not None:
+ dt = current_time - self._last_frame_time
+ self._camera.update(dt)
+ self._last_frame_time = current_time
+
+ # Update context with current camera position
+ ctx.set_state("camera_y", self._camera.y)
+ ctx.set_state("camera_x", self._camera.x)
+
+ # Pass data through unchanged
+ return data
+
+
+class CameraStage(Stage):
+ """Adapter wrapping Camera as a Stage.
+
+ This stage applies camera viewport transformation to the rendered buffer.
+ Camera state updates are handled by CameraClockStage.
+ """
+
+ def __init__(self, camera, name: str = "vertical"):
+ self._camera = camera
+ self.name = name
+ self.category = "camera"
+ self.optional = True
+ self._last_frame_time: float | None = None
+
+ def save_state(self) -> dict[str, Any]:
+ """Save camera state for restoration after pipeline rebuild.
+
+ Returns:
+ Dictionary containing camera state that can be restored
+ """
+ state = {
+ "x": self._camera.x,
+ "y": self._camera.y,
+ "mode": self._camera.mode.value
+ if hasattr(self._camera.mode, "value")
+ else self._camera.mode,
+ "speed": self._camera.speed,
+ "zoom": self._camera.zoom,
+ "canvas_width": self._camera.canvas_width,
+ "canvas_height": self._camera.canvas_height,
+ "_x_float": getattr(self._camera, "_x_float", 0.0),
+ "_y_float": getattr(self._camera, "_y_float", 0.0),
+ "_time": getattr(self._camera, "_time", 0.0),
+ }
+ # Save radial camera state if present
+ if hasattr(self._camera, "_r_float"):
+ state["_r_float"] = self._camera._r_float
+ if hasattr(self._camera, "_theta_float"):
+ state["_theta_float"] = self._camera._theta_float
+ if hasattr(self._camera, "_radial_input"):
+ state["_radial_input"] = self._camera._radial_input
+ return state
+
+ def restore_state(self, state: dict[str, Any]) -> None:
+ """Restore camera state from saved state.
+
+ Args:
+ state: Dictionary containing camera state from save_state()
+ """
+ from sideline.camera import CameraMode
+
+ self._camera.x = state.get("x", 0)
+ self._camera.y = state.get("y", 0)
+
+ # Restore mode - handle both enum value and direct enum
+ mode_value = state.get("mode", 0)
+ if isinstance(mode_value, int):
+ self._camera.mode = CameraMode(mode_value)
+ else:
+ self._camera.mode = mode_value
+
+ self._camera.speed = state.get("speed", 1.0)
+ self._camera.zoom = state.get("zoom", 1.0)
+ self._camera.canvas_width = state.get("canvas_width", 200)
+ self._camera.canvas_height = state.get("canvas_height", 200)
+
+ # Restore internal state
+ if hasattr(self._camera, "_x_float"):
+ self._camera._x_float = state.get("_x_float", 0.0)
+ if hasattr(self._camera, "_y_float"):
+ self._camera._y_float = state.get("_y_float", 0.0)
+ if hasattr(self._camera, "_time"):
+ self._camera._time = state.get("_time", 0.0)
+
+ # Restore radial camera state if present
+ if hasattr(self._camera, "_r_float"):
+ self._camera._r_float = state.get("_r_float", 0.0)
+ if hasattr(self._camera, "_theta_float"):
+ self._camera._theta_float = state.get("_theta_float", 0.0)
+ if hasattr(self._camera, "_radial_input"):
+ self._camera._radial_input = state.get("_radial_input", 0.0)
+
+ @property
+ def stage_type(self) -> str:
+ return "camera"
+
+ @property
+ def capabilities(self) -> set[str]:
+ return {"camera"}
+
+ @property
+ def dependencies(self) -> set[str]:
+ return {"render.output", "camera.state"}
+
+ @property
+ def inlet_types(self) -> set:
+ return {DataType.TEXT_BUFFER}
+
+ @property
+ def outlet_types(self) -> set:
+ return {DataType.TEXT_BUFFER}
+
+ def process(self, data: Any, ctx: PipelineContext) -> Any:
+        """Apply the camera viewport transformation to the rendered text buffer."""
+ if data is None:
+ return data
+
+ # Camera state is updated by CameraClockStage
+ # We only apply the viewport transformation here
+
+ if hasattr(self._camera, "apply"):
+ viewport_width = ctx.params.viewport_width if ctx.params else 80
+ viewport_height = ctx.params.viewport_height if ctx.params else 24
+
+ # Use filtered camera position if available (from ViewportFilterStage)
+ # This handles the case where the buffer has been filtered and starts at row 0
+ filtered_camera_y = ctx.get("camera_y", self._camera.y)
+
+ # Temporarily adjust camera position for filtering
+ original_y = self._camera.y
+ self._camera.y = filtered_camera_y
+
+ try:
+ result = self._camera.apply(data, viewport_width, viewport_height)
+ finally:
+ # Restore original camera position
+ self._camera.y = original_y
+
+ return result
+ return data
diff --git a/sideline/pipeline/adapters/data_source.py b/sideline/pipeline/adapters/data_source.py
new file mode 100644
index 0000000..6608eae
--- /dev/null
+++ b/sideline/pipeline/adapters/data_source.py
@@ -0,0 +1,143 @@
+"""
+Stage adapters - Bridge existing components to the Stage interface.
+
+This module provides adapters that wrap existing components
+(DataSource) as Stage implementations.
+"""
+
+from typing import Any
+
+from sideline.data_sources import SourceItem
+from sideline.pipeline.core import DataType, PipelineContext, Stage
+
+
class DataSourceStage(Stage):
    """Adapter that exposes a DataSource object as a pipeline Stage.

    Sits at the head of the pipeline: it has no dependencies, consumes
    nothing, and emits the items fetched from the wrapped source.
    """

    def __init__(self, data_source, name: str = "headlines"):
        self._source = data_source
        self.name = name
        self.category = "source"
        self.optional = False

    @property
    def capabilities(self) -> set[str]:
        # Advertise a per-source capability, e.g. "source.headlines".
        return {f"source.{self.name}"}

    @property
    def dependencies(self) -> set[str]:
        # A source depends on nothing upstream.
        return set()

    @property
    def inlet_types(self) -> set:
        # Sources don't take input.
        return {DataType.NONE}

    @property
    def outlet_types(self) -> set:
        return {DataType.SOURCE_ITEMS}

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Fetch items from the wrapped source; pass data through otherwise."""
        if not hasattr(self._source, "get_items"):
            return data
        return self._source.get_items()
+
+
class PassthroughStage(Stage):
    """A no-op stage that forwards its input untouched.

    Useful for sources that already emit display-ready data (for example,
    pipeline introspection that outputs text directly).
    """

    def __init__(self, name: str = "passthrough"):
        self.name = name
        self.category = "render"
        self.optional = True

    @property
    def stage_type(self) -> str:
        return "render"

    @property
    def capabilities(self) -> set[str]:
        # Satisfies render.output without transforming anything.
        return {"render.output"}

    @property
    def dependencies(self) -> set[str]:
        return {"source"}

    @property
    def inlet_types(self) -> set:
        return {DataType.SOURCE_ITEMS}

    @property
    def outlet_types(self) -> set:
        # Identity stage: output type matches input type.
        return {DataType.SOURCE_ITEMS}

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Return the input unchanged."""
        return data
+
+
class SourceItemsToBufferStage(Stage):
    """Convert SourceItem objects to text buffer.

    Takes a list of SourceItem objects and extracts their content,
    splitting on newlines to create a proper text buffer for display.
    """

    def __init__(self, name: str = "items-to-buffer"):
        self.name = name
        self.category = "render"
        self.optional = True

    @property
    def stage_type(self) -> str:
        return "render"

    @property
    def capabilities(self) -> set[str]:
        return {"render.output"}

    @property
    def dependencies(self) -> set[str]:
        return {"source"}

    @property
    def inlet_types(self) -> set:
        return {DataType.SOURCE_ITEMS}

    @property
    def outlet_types(self) -> set:
        return {DataType.TEXT_BUFFER}

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Convert a SourceItem (or list of items) into a list of text lines.

        Returns an empty buffer for None input. Strings pass through
        unchanged; anything with a ``content`` attribute is split on
        newlines; other values are stringified.
        """
        if data is None:
            return []

        if isinstance(data, list):
            # Fast path: already a buffer of strings. Check *every* element
            # (previously only data[0] was checked, so a mixed list of
            # strings and SourceItems leaked unconverted items through).
            if data and all(isinstance(x, str) for x in data):
                return data

            result: list[str] = []
            for item in data:
                if isinstance(item, str):
                    result.append(item)
                elif isinstance(item, SourceItem) or hasattr(item, "content"):
                    # Split content by newline to get individual lines
                    result.extend(str(item.content).split("\n"))
                else:
                    result.append(str(item))
            return result

        # Single item
        if isinstance(data, SourceItem):
            return data.content.split("\n")

        return [str(data)]
diff --git a/sideline/pipeline/adapters/display.py b/sideline/pipeline/adapters/display.py
new file mode 100644
index 0000000..d797264
--- /dev/null
+++ b/sideline/pipeline/adapters/display.py
@@ -0,0 +1,108 @@
+"""Adapter wrapping Display as a Stage."""
+
+from typing import Any
+
+from sideline.pipeline.core import PipelineContext, Stage
+
+
class DisplayStage(Stage):
    """Adapter wrapping Display as a Stage.

    Terminal (sink) stage: consumes a rendered text buffer and writes it
    to the wrapped display backend; produces no downstream output.
    """

    def __init__(self, display, name: str = "terminal", positioning: str = "mixed"):
        self._display = display
        self.name = name
        self.category = "display"
        self.optional = False
        # Initialization is tracked so pipeline rebuilds can reuse the display.
        self._initialized = False
        self._init_width = 80
        self._init_height = 24
        self._positioning = positioning

    def save_state(self) -> dict[str, Any]:
        """Save display state for restoration after pipeline rebuild.

        Returns:
            Dictionary containing display state that can be restored
        """
        return {
            "initialized": self._initialized,
            "init_width": self._init_width,
            "init_height": self._init_height,
            "width": getattr(self._display, "width", 80),
            "height": getattr(self._display, "height", 24),
        }

    def restore_state(self, state: dict[str, Any]) -> None:
        """Restore display state from saved state.

        Args:
            state: Dictionary containing display state from save_state()
        """
        self._initialized = state.get("initialized", False)
        self._init_width = state.get("init_width", 80)
        self._init_height = state.get("init_height", 24)

        # Restore display dimensions if the display supports it
        if hasattr(self._display, "width"):
            self._display.width = state.get("width", 80)
        if hasattr(self._display, "height"):
            self._display.height = state.get("height", 24)

    @property
    def capabilities(self) -> set[str]:
        return {"display.output"}

    @property
    def dependencies(self) -> set[str]:
        # Display needs rendered content and camera transformation
        return {"render.output", "camera"}

    @property
    def inlet_types(self) -> set:
        from sideline.pipeline.core import DataType

        return {DataType.TEXT_BUFFER}  # Display consumes rendered text

    @property
    def outlet_types(self) -> set:
        from sideline.pipeline.core import DataType

        return {DataType.NONE}  # Display is a terminal stage (no output)

    def init(self, ctx: PipelineContext) -> bool:
        """Initialize the display to the viewport size (80x24 fallback)."""
        w = ctx.params.viewport_width if ctx.params else 80
        h = ctx.params.viewport_height if ctx.params else 24

        # Try to reuse display if already initialized
        reuse = self._initialized
        result = self._display.init(w, h, reuse=reuse)

        # Update initialization state
        if result is not False:
            self._initialized = True
            self._init_width = w
            self._init_height = h

        return result is not False

    @staticmethod
    def _show_accepts_positioning(show) -> bool:
        """Return True if show() can accept a ``positioning`` keyword.

        Uses inspect.signature instead of ``show.__code__.co_varnames``:
        the latter raises AttributeError for C-implemented callables and
        partials, and co_varnames includes *local variables*, not just
        parameters. A **kwargs-accepting show() also counts as supporting
        the keyword.
        """
        import inspect

        try:
            params = inspect.signature(show).parameters
        except (TypeError, ValueError):
            # No introspectable signature (e.g. some builtins); be conservative.
            return False
        if "positioning" in params:
            return True
        return any(p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values())

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Output data to display; data is passed through for chaining."""
        if data is not None:
            # Check if positioning mode is specified in context params
            positioning = self._positioning
            if ctx and ctx.params and hasattr(ctx.params, "positioning"):
                positioning = ctx.params.positioning

            # Pass positioning to display if supported
            if hasattr(self._display, "show") and self._show_accepts_positioning(
                self._display.show
            ):
                self._display.show(data, positioning=positioning)
            else:
                # Fallback for displays that don't support positioning parameter
                self._display.show(data)
        return data

    def cleanup(self) -> None:
        """Release the wrapped display's resources."""
        self._display.cleanup()
diff --git a/sideline/pipeline/adapters/effect_plugin.py b/sideline/pipeline/adapters/effect_plugin.py
new file mode 100644
index 0000000..a8db2b9
--- /dev/null
+++ b/sideline/pipeline/adapters/effect_plugin.py
@@ -0,0 +1,124 @@
+"""Adapter wrapping EffectPlugin as a Stage."""
+
+from typing import Any
+
+from sideline.pipeline.core import PipelineContext, Stage
+
+
class EffectPluginStage(Stage):
    """Adapter wrapping EffectPlugin as a Stage.

    Supports capability-based dependencies through the dependencies parameter.
    """

    def __init__(
        self,
        effect_plugin,
        name: str = "effect",
        dependencies: set[str] | None = None,
    ):
        # The wrapped plugin: must provide process(data, effect_ctx); may also
        # provide is_overlay, param_bindings and configure (used below).
        self._effect = effect_plugin
        self.name = name
        self.category = "effect"
        self.optional = False
        self._dependencies = dependencies or set()

    @property
    def stage_type(self) -> str:
        """Return stage_type based on effect name.

        Overlay effects have stage_type "overlay".
        """
        if self.is_overlay:
            return "overlay"
        return self.category

    @property
    def render_order(self) -> int:
        """Return render_order based on effect type.

        Overlay effects have high render_order to appear on top.
        """
        if self.is_overlay:
            return 100  # High order for overlays
        return 0

    @property
    def is_overlay(self) -> bool:
        """Return True for overlay effects.

        Overlay effects compose on top of the buffer
        rather than transforming it for the next stage.
        """
        # Check if the effect has an is_overlay attribute that is explicitly True
        # (not just any truthy value from a mock object)
        if hasattr(self._effect, "is_overlay"):
            effect_overlay = self._effect.is_overlay
            # Only return True if it's explicitly set to True
            if effect_overlay is True:
                return True
        # The "hud" effect is treated as an overlay even without the attribute.
        return self.name == "hud"

    @property
    def capabilities(self) -> set[str]:
        # Advertise a per-effect capability, e.g. "effect.glow".
        return {f"effect.{self.name}"}

    @property
    def dependencies(self) -> set[str]:
        # Capability-based dependencies supplied at construction time.
        return self._dependencies

    @property
    def inlet_types(self) -> set:
        from sideline.pipeline.core import DataType

        return {DataType.TEXT_BUFFER}

    @property
    def outlet_types(self) -> set:
        from sideline.pipeline.core import DataType

        return {DataType.TEXT_BUFFER}

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Process data through the effect."""
        if data is None:
            return None
        from sideline.effects.types import EffectContext, apply_param_bindings

        w = ctx.params.viewport_width if ctx.params else 80
        h = ctx.params.viewport_height if ctx.params else 24
        frame = ctx.params.frame_number if ctx.params else 0

        # Per-frame snapshot handed to the effect. grad_offset advances by
        # 0.01 per frame and wraps in [0, 1) for gradient animation.
        effect_ctx = EffectContext(
            terminal_width=w,
            terminal_height=h,
            scroll_cam=0,
            ticker_height=h,
            camera_x=0,
            mic_excess=0.0,
            grad_offset=(frame * 0.01) % 1.0,
            frame_number=frame,
            has_message=False,
            items=ctx.get("items", []),
        )

        # Copy sensor state from PipelineContext to EffectContext
        for key, value in ctx.state.items():
            if key.startswith("sensor."):
                effect_ctx.set_state(key, value)

        # Copy metrics from PipelineContext to EffectContext
        if "metrics" in ctx.state:
            effect_ctx.set_state("metrics", ctx.state["metrics"])

        # Copy pipeline_order from PipelineContext services to EffectContext state
        pipeline_order = ctx.get("pipeline_order")
        if pipeline_order:
            effect_ctx.set_state("pipeline_order", pipeline_order)

        # Apply sensor param bindings if effect has them.
        # NOTE: configure() mutates the shared effect object every frame.
        if hasattr(self._effect, "param_bindings") and self._effect.param_bindings:
            bound_config = apply_param_bindings(self._effect, effect_ctx)
            self._effect.configure(bound_config)

        return self._effect.process(data, effect_ctx)
diff --git a/sideline/pipeline/adapters/factory.py b/sideline/pipeline/adapters/factory.py
new file mode 100644
index 0000000..797424f
--- /dev/null
+++ b/sideline/pipeline/adapters/factory.py
@@ -0,0 +1,38 @@
+"""Factory functions for creating stage instances."""
+
+from sideline.pipeline.adapters.camera import CameraStage
+from sideline.pipeline.adapters.data_source import DataSourceStage
+from sideline.pipeline.adapters.display import DisplayStage
+from sideline.pipeline.adapters.effect_plugin import EffectPluginStage
+from sideline.pipeline.adapters.transform import FontStage
+
+
def create_stage_from_display(display, name: str = "terminal") -> DisplayStage:
    """Build a DisplayStage adapter around a display backend."""
    stage = DisplayStage(display, name=name)
    return stage
+
+
def create_stage_from_effect(effect_plugin, name: str) -> EffectPluginStage:
    """Build an EffectPluginStage adapter around an effect plugin."""
    stage = EffectPluginStage(effect_plugin, name=name)
    return stage
+
+
def create_stage_from_source(data_source, name: str = "headlines") -> DataSourceStage:
    """Build a DataSourceStage adapter around a data source."""
    stage = DataSourceStage(data_source, name=name)
    return stage
+
+
def create_stage_from_camera(camera, name: str = "vertical") -> CameraStage:
    """Build a CameraStage adapter around a camera instance."""
    stage = CameraStage(camera, name=name)
    return stage
+
+
def create_stage_from_font(
    font_path: str | None = None,
    font_size: int | None = None,
    font_ref: str | None = "default",
    name: str = "font",
) -> FontStage:
    """Create a FontStage with specified font configuration.

    FontStage does not currently consume font_path/font_size/font_ref;
    the parameters are retained for interface compatibility.
    """
    _ = (font_path, font_size, font_ref)  # accepted but currently unused
    return FontStage(name=name)
diff --git a/sideline/pipeline/adapters/frame_capture.py b/sideline/pipeline/adapters/frame_capture.py
new file mode 100644
index 0000000..21f8ba0
--- /dev/null
+++ b/sideline/pipeline/adapters/frame_capture.py
@@ -0,0 +1,165 @@
+"""
+Frame Capture Stage Adapter
+
+Wraps pipeline stages to capture frames for animation report generation.
+"""
+
+from typing import Any
+
+from sideline.display.backends.animation_report import AnimationReportDisplay
+from sideline.pipeline.core import PipelineContext, Stage
+
+
class FrameCaptureStage(Stage):
    """Transparent wrapper that records a stage's input and output frames.

    Delegates all Stage behavior to the wrapped stage while sending any
    text-buffer data flowing through it to an AnimationReportDisplay, so a
    report can show how each stage transforms the data.
    """

    def __init__(
        self,
        wrapped_stage: Stage,
        display: AnimationReportDisplay,
        name: str | None = None,
    ):
        """Wrap ``wrapped_stage`` and route captured frames to ``display``.

        Args:
            wrapped_stage: The stage to wrap and capture frames from.
            display: The animation report display receiving the frames.
            name: Optional override; defaults to "capture_<wrapped name>".
        """
        self._wrapped_stage = wrapped_stage
        self._display = display
        self.name = name or f"capture_{wrapped_stage.name}"
        self.category = wrapped_stage.category
        self.optional = wrapped_stage.optional

        # Whether an input/output frame has been captured at least once.
        self._captured_input = False
        self._captured_output = False

    @staticmethod
    def _is_text_frame(value: Any) -> bool:
        # Only lists of plain strings are capturable as frames.
        return isinstance(value, list) and all(isinstance(ln, str) for ln in value)

    @property
    def stage_type(self) -> str:
        return self._wrapped_stage.stage_type

    @property
    def capabilities(self) -> set[str]:
        return self._wrapped_stage.capabilities

    @property
    def dependencies(self) -> set[str]:
        return self._wrapped_stage.dependencies

    @property
    def inlet_types(self) -> set:
        return self._wrapped_stage.inlet_types

    @property
    def outlet_types(self) -> set:
        return self._wrapped_stage.outlet_types

    def init(self, ctx: PipelineContext) -> bool:
        """Delegate initialization to the wrapped stage."""
        return self._wrapped_stage.init(ctx)

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Capture the input frame, run the wrapped stage, capture the output.

        Args:
            data: Input data (typically a text buffer).
            ctx: Pipeline context.

        Returns:
            Whatever the wrapped stage returns.
        """
        if self._is_text_frame(data):
            self._display.start_stage(f"{self._wrapped_stage.name}_input")
            self._display.show(data)
            self._captured_input = True

        result = self._wrapped_stage.process(data, ctx)

        if self._is_text_frame(result):
            self._display.start_stage(f"{self._wrapped_stage.name}_output")
            self._display.show(result)
            self._captured_output = True

        return result

    def cleanup(self) -> None:
        """Delegate cleanup to the wrapped stage."""
        self._wrapped_stage.cleanup()
+
+
class FrameCaptureController:
    """Coordinates frame capture across a pipeline.

    Wraps individual stages (or whole stage maps) in FrameCaptureStage so
    an animation report can be produced from the captured frames.
    """

    def __init__(self, display: AnimationReportDisplay):
        """Store the report display that will receive captured frames."""
        self._display = display
        self._captured_stages: list[FrameCaptureStage] = []

    def wrap_stage(self, stage: Stage, name: str | None = None) -> FrameCaptureStage:
        """Wrap one stage with frame capture and track the wrapper."""
        wrapper = FrameCaptureStage(stage, self._display, name)
        self._captured_stages.append(wrapper)
        return wrapper

    def wrap_stages(self, stages: dict[str, Stage]) -> dict[str, Stage]:
        """Wrap every stage in the mapping, keyed by the same names."""
        return {name: self.wrap_stage(stage, name) for name, stage in stages.items()}

    def get_captured_stages(self) -> list[FrameCaptureStage]:
        """Return the (live) list of capture wrappers created so far."""
        return self._captured_stages

    def generate_report(self, title: str = "Pipeline Animation Report") -> str:
        """Generate the animation report and return its HTML file path."""
        return str(self._display.generate_report(title))
diff --git a/sideline/pipeline/adapters/message_overlay.py b/sideline/pipeline/adapters/message_overlay.py
new file mode 100644
index 0000000..ac1abd3
--- /dev/null
+++ b/sideline/pipeline/adapters/message_overlay.py
@@ -0,0 +1,185 @@
+"""
+Message overlay stage - Renders ntfy messages as an overlay on the buffer.
+
+This stage provides message overlay capability for displaying ntfy.sh messages
+as a centered panel with pink/magenta gradient, matching upstream/main aesthetics.
+"""
+
+import re
+import time
+from dataclasses import dataclass
+from datetime import datetime
+
+from engine import config
+from engine.effects.legacy import vis_trunc
+from sideline.pipeline.core import DataType, PipelineContext, Stage
+from sideline.render.blocks import big_wrap
+from sideline.render.gradient import msg_gradient
+
+
@dataclass
class MessageOverlayConfig:
    """Configuration for MessageOverlayStage."""

    enabled: bool = True  # Master switch; when False the stage is inert
    display_secs: int = 30  # How long to display messages
    topic_url: str | None = None  # Ntfy topic URL (None = use config default)
+
+
class MessageOverlayStage(Stage):
    """Stage that renders ntfy message overlay on the buffer.

    Provides:
    - message.overlay capability (optional)
    - Renders centered panel with pink/magenta gradient
    - Shows title, body, timestamp, and remaining time
    """

    name = "message_overlay"
    category = "overlay"

    def __init__(
        self, config: MessageOverlayConfig | None = None, name: str = "message_overlay"
    ):
        """Create the overlay stage.

        Args:
            config: Overlay configuration; defaults to MessageOverlayConfig().
            name: Stage name. Fix: previously accepted but never assigned,
                so every instance silently kept the class-level default.
        """
        self.config = config or MessageOverlayConfig()
        self.name = name
        self._ntfy_poller = None
        self._msg_cache = (None, None)  # (cache_key, rendered_rows)

    @property
    def capabilities(self) -> set[str]:
        """Provides message overlay capability (none when disabled)."""
        return {"message.overlay"} if self.config.enabled else set()

    @property
    def dependencies(self) -> set[str]:
        """Needs rendered buffer and camera transformation to overlay onto."""
        return {"render.output", "camera"}

    @property
    def inlet_types(self) -> set:
        return {DataType.TEXT_BUFFER}

    @property
    def outlet_types(self) -> set:
        return {DataType.TEXT_BUFFER}

    def init(self, ctx: PipelineContext) -> bool:
        """Initialize ntfy poller if topic URL is configured."""
        if not self.config.enabled:
            return True

        # Fall back to the application-wide default topic when none is set.
        topic_url = self.config.topic_url or config.NTFY_TOPIC
        if topic_url:
            from sideline.ntfy import NtfyPoller

            self._ntfy_poller = NtfyPoller(
                topic_url=topic_url,
                reconnect_delay=getattr(config, "NTFY_RECONNECT_DELAY", 5),
                display_secs=self.config.display_secs,
            )
            self._ntfy_poller.start()
            # Publish the poller so other stages / UI can inspect it.
            ctx.set("ntfy_poller", self._ntfy_poller)

        return True

    def process(self, data: list[str], ctx: PipelineContext) -> list[str]:
        """Render message overlay on the buffer (no-op when idle/disabled)."""
        if not self.config.enabled or not data:
            return data

        # Get active message from poller
        msg = None
        if self._ntfy_poller:
            msg = self._ntfy_poller.get_active_message()

        if msg is None:
            return data

        # NOTE(review): other stages read ctx.params.viewport_*; this reads
        # ctx.terminal_width/height and silently falls back to 80x24 when
        # absent — confirm PipelineContext actually exposes these attributes.
        w = ctx.terminal_width if hasattr(ctx, "terminal_width") else 80
        h = ctx.terminal_height if hasattr(ctx, "terminal_height") else 24

        overlay, self._msg_cache = self._render_message_overlay(
            msg, w, h, self._msg_cache
        )

        # Composite overlay onto buffer
        result = list(data)
        for line in overlay:
            # Overlay uses ANSI cursor positioning, just append
            result.append(line)

        return result

    def _render_message_overlay(
        self,
        msg: tuple[str, str, float] | None,
        w: int,
        h: int,
        msg_cache: tuple,
    ) -> tuple[list[str], tuple]:
        """Render ntfy message overlay.

        Args:
            msg: (title, body, timestamp) or None
            w: terminal width
            h: terminal height
            msg_cache: (cache_key, rendered_rows) for caching

        Returns:
            (list of ANSI strings, updated cache)
        """
        overlay = []
        if msg is None:
            return overlay, msg_cache

        m_title, m_body, m_ts = msg
        display_text = m_body or m_title or "(empty)"
        display_text = re.sub(r"\s+", " ", display_text.upper())

        # Re-render the big-block text only when the text or width changed.
        cache_key = (display_text, w)
        if msg_cache[0] != cache_key:
            msg_rows = big_wrap(display_text, w - 4)
            msg_cache = (cache_key, msg_rows)
        else:
            msg_rows = msg_cache[1]

        # Gradient phase cycles in [0, 1) over time.
        msg_rows = msg_gradient(msg_rows, (time.monotonic() * config.GRAD_SPEED) % 1.0)

        elapsed_s = int(time.monotonic() - m_ts)
        remaining = max(0, self.config.display_secs - elapsed_s)
        ts_str = datetime.now().strftime("%H:%M:%S")
        # Panel = message rows + meta line + rule line, vertically centered.
        panel_h = len(msg_rows) + 2
        panel_top = max(0, (h - panel_h) // 2)

        row_idx = 0
        for mr in msg_rows:
            ln = vis_trunc(mr, w)
            overlay.append(f"\033[{panel_top + row_idx + 1};1H {ln}\033[0m\033[K")
            row_idx += 1

        # Meta line: optional title plus "ntfy · HH:MM:SS · Ns" countdown.
        meta_parts = []
        if m_title and m_title != m_body:
            meta_parts.append(m_title)
        meta_parts.append(f"ntfy \u00b7 {ts_str} \u00b7 {remaining}s")
        meta = (
            " " + " \u00b7 ".join(meta_parts)
            if len(meta_parts) > 1
            else " " + meta_parts[0]
        )
        overlay.append(
            f"\033[{panel_top + row_idx + 1};1H\033[38;5;245m{meta}\033[0m\033[K"
        )
        row_idx += 1

        bar = "\u2500" * (w - 4)
        overlay.append(
            f"\033[{panel_top + row_idx + 1};1H \033[2;38;5;37m{bar}\033[0m\033[K"
        )

        return overlay, msg_cache

    def cleanup(self) -> None:
        """Stop the ntfy poller so its background work doesn't leak.

        Fix: the poller started in init() was previously never stopped.
        stop() is assumed to exist — guarded in case the API differs.
        """
        if self._ntfy_poller is not None:
            stop = getattr(self._ntfy_poller, "stop", None)
            if callable(stop):
                stop()
            self._ntfy_poller = None
diff --git a/sideline/pipeline/adapters/positioning.py b/sideline/pipeline/adapters/positioning.py
new file mode 100644
index 0000000..c54040c
--- /dev/null
+++ b/sideline/pipeline/adapters/positioning.py
@@ -0,0 +1,185 @@
+"""PositionStage - Configurable positioning mode for terminal rendering.
+
+This module provides positioning stages that allow choosing between
+different ANSI positioning approaches:
+- ABSOLUTE: Use cursor positioning codes (\\033[row;colH) for all lines
+- RELATIVE: Use newlines for all lines
+- MIXED: Base content uses newlines, effects use cursor positioning (default)
+"""
+
+from enum import Enum
+from typing import Any
+
+from sideline.pipeline.core import DataType, PipelineContext, Stage
+
+
class PositioningMode(Enum):
    """Positioning mode for terminal rendering.

    Values are the lowercase strings accepted by create_position_stage().
    """

    ABSOLUTE = "absolute"  # All lines have cursor positioning codes
    RELATIVE = "relative"  # Lines use newlines (no cursor codes)
    MIXED = "mixed"  # Mixed: newlines for base, cursor codes for overlays (default)
+
+
class PositionStage(Stage):
    """Applies positioning mode to buffer before display.

    - ABSOLUTE: each line gets a \\033[row;colH prefix (precise control)
    - RELATIVE: lines flow naturally; the display joins them with newlines
    - MIXED: buffer passes through untouched (effects add their own codes)
    """

    def __init__(
        self, mode: PositioningMode = PositioningMode.RELATIVE, name: str = "position"
    ):
        self.mode = mode
        self.name = name
        self.category = "position"
        # Cached string form of the mode (kept in sync by restore_state).
        self._mode_str = mode.value

    def save_state(self) -> dict[str, Any]:
        """Save positioning mode for restoration."""
        return {"mode": self.mode.value}

    def restore_state(self, state: dict[str, Any]) -> None:
        """Restore positioning mode from saved state."""
        mode_value = state.get("mode", "relative")
        self.mode = PositioningMode(mode_value)
        # Fix: keep the cached string form consistent with the restored
        # mode (it was previously left stale after a restore).
        self._mode_str = self.mode.value

    @property
    def capabilities(self) -> set[str]:
        return {"position.output"}

    @property
    def dependencies(self) -> set[str]:
        # Runs after render; effects may add their own positioning codes later.
        return {"render.output"}

    @property
    def inlet_types(self) -> set:
        return {DataType.TEXT_BUFFER}

    @property
    def outlet_types(self) -> set:
        return {DataType.TEXT_BUFFER}

    def init(self, ctx: PipelineContext) -> bool:
        """No initialization required."""
        return True

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Apply the configured positioning mode to the buffer.

        Args:
            data: List of strings (buffer lines); non-list input passes through.
            ctx: Pipeline context (supplies viewport height for ABSOLUTE mode).

        Returns:
            Buffer with applied positioning mode.
        """
        if data is None or not isinstance(data, list):
            return data

        if self.mode == PositioningMode.ABSOLUTE:
            return self._to_absolute(data, ctx)
        if self.mode == PositioningMode.RELATIVE:
            return self._to_relative(data, ctx)
        return data  # MIXED: no transformation

    def _to_absolute(self, data: list[str], ctx: PipelineContext) -> list[str]:
        """Prefix each line with a cursor-positioning code (\\033[row;1H).

        Lines beyond the viewport height are dropped; lines that already
        contain a cursor code are left untouched.

        NOTE(review): the detection is a crude heuristic — any line with an
        escape sequence plus a literal 'H' anywhere also matches. Confirm
        acceptable before tightening.
        """
        result = []
        viewport_height = ctx.params.viewport_height if ctx.params else 24

        for i, line in enumerate(data):
            if i >= viewport_height:
                break  # Don't exceed viewport

            if "\033[" in line and "H" in line:
                # Already has cursor positioning - leave as-is
                result.append(line)
            else:
                # ANSI rows are 1-indexed
                result.append(f"\033[{i + 1};1H{line}")

        return result

    def _to_relative(self, data: list[str], ctx: PipelineContext) -> list[str]:
        """Return a shallow copy of the buffer for newline-based rendering.

        Effects (e.g. HUD) embed their own cursor codes, so nothing can be
        stripped safely here; the terminal display joins base lines with
        newlines. The previous implementation branched on a cursor-code
        heuristic but both branches appended the line unchanged, so this
        is equivalent to a plain copy.
        """
        return list(data)

    def cleanup(self) -> None:
        """Nothing to release."""
        pass
+
+
+# Convenience function to create positioning stage
def create_position_stage(
    mode: str = "relative", name: str = "position"
) -> PositionStage:
    """Build a PositionStage from a mode string.

    Args:
        mode: Positioning mode ("absolute", "relative", or "mixed");
            unrecognized strings fall back to "relative".
        name: Name for the stage.

    Returns:
        PositionStage instance
    """
    try:
        resolved = PositioningMode(mode)
    except ValueError:
        # Unknown mode strings fall back to the safe default.
        resolved = PositioningMode.RELATIVE
    return PositionStage(mode=resolved, name=name)
diff --git a/sideline/pipeline/adapters/transform.py b/sideline/pipeline/adapters/transform.py
new file mode 100644
index 0000000..291f7c1
--- /dev/null
+++ b/sideline/pipeline/adapters/transform.py
@@ -0,0 +1,293 @@
+"""Adapters for transform stages (viewport, font, image, canvas)."""
+
+from typing import Any
+
+import sideline.render
+from sideline.data_sources import SourceItem
+from sideline.pipeline.core import DataType, PipelineContext, Stage
+
+
def estimate_simple_height(text: str, width: int) -> int:
    """Estimate height in terminal rows using simple word wrap.

    Uses conservative estimation suitable for headlines.
    Each wrapped line is approximately 6 terminal rows (big block rendering).

    Args:
        text: Headline text to wrap.
        width: Terminal width in columns.

    Returns:
        Estimated height in terminal rows (always a multiple of 6,
        minimum 6 — even empty text occupies one block line).
    """
    rows_per_line = 6  # big-block glyphs are ~6 terminal rows tall
    words = text.split()
    if not words:
        return rows_per_line

    usable = width - 4  # reserve 4 columns for margins
    line_count = 1
    used = 0
    for word in words:
        candidate = used + len(word) + 1  # +1 for the separating space
        if candidate > usable:
            # Word doesn't fit: wrap and start the next line with it.
            line_count += 1
            used = len(word)
        else:
            used = candidate

    return line_count * rows_per_line
+
+
class ViewportFilterStage(Stage):
    """Filter items to viewport height based on rendered height.

    Keeps only the source items whose estimated rendered height
    intersects the visible viewport, then rebases "camera_y" in the
    context so the downstream camera stage can slice the (shorter)
    filtered buffer correctly.
    """

    def __init__(self, name: str = "viewport-filter"):
        self.name = name
        self.category = "render"
        self.optional = True
        # Estimated height (rows) per item; rebuilt on every process() call.
        self._layout: list[int] = []

    @property
    def stage_type(self) -> str:
        return "render"

    @property
    def capabilities(self) -> set[str]:
        return {"source.filtered"}

    @property
    def dependencies(self) -> set[str]:
        # Always requires camera.state for viewport filtering
        # CameraUpdateStage provides this (auto-injected if missing)
        return {"source", "camera.state"}

    @property
    def inlet_types(self) -> set:
        return {DataType.SOURCE_ITEMS}

    @property
    def outlet_types(self) -> set:
        return {DataType.SOURCE_ITEMS}

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Filter items to viewport height based on rendered height.

        Args:
            data: List of SourceItem (or stringifiable) objects; non-list
                input is passed through untouched.
            ctx: Pipeline context providing viewport size and camera state.

        Returns:
            The sub-list of *data* that intersects the visible range.
            Side effect: rewrites "camera_y" (and re-sets "camera_x") in
            ctx state, rebased to the filtered buffer's origin.
        """
        if data is None:
            return data

        if not isinstance(data, list):
            return data

        if not data:
            return []

        # Get viewport parameters from context
        viewport_height = ctx.params.viewport_height if ctx.params else 24
        viewport_width = ctx.params.viewport_width if ctx.params else 80
        camera_y = ctx.get("camera_y", 0)

        # Estimate height for each item and cache layout
        self._layout = []
        cumulative_heights = []
        current_height = 0

        for item in data:
            title = item.content if isinstance(item, SourceItem) else str(item)
            # Use simple height estimation (not PIL-based)
            estimated_height = estimate_simple_height(title, viewport_width)
            self._layout.append(estimated_height)
            current_height += estimated_height
            cumulative_heights.append(current_height)

        # Find visible range based on camera_y and viewport_height
        # camera_y is the scroll offset (how many rows are scrolled up)
        start_y = camera_y
        end_y = camera_y + viewport_height

        # Find start index (first item that intersects with visible range)
        # NOTE(review): if camera_y is at or past the total content height,
        # this loop never breaks, start_idx stays 0 and the whole list is
        # returned — confirm that fall-through is intended.
        start_idx = 0
        start_item_y = 0  # Y position where the first visible item starts
        for i, total_h in enumerate(cumulative_heights):
            if total_h > start_y:
                start_idx = i
                # Calculate the Y position of the start of this item
                if i > 0:
                    start_item_y = cumulative_heights[i - 1]
                break

        # Find end index (first item that extends beyond visible range)
        end_idx = len(data)
        for i, total_h in enumerate(cumulative_heights):
            if total_h >= end_y:
                end_idx = i + 1
                break

        # Adjust camera_y for the filtered buffer
        # The filtered buffer starts at row 0, but the camera position
        # needs to be relative to where the first visible item starts
        filtered_camera_y = camera_y - start_item_y

        # Update context with the filtered camera position
        # This ensures CameraStage can correctly slice the filtered buffer
        ctx.set_state("camera_y", filtered_camera_y)
        ctx.set_state("camera_x", ctx.get("camera_x", 0))  # Keep camera_x unchanged

        # Return visible items
        return data[start_idx:end_idx]
+
+
class FontStage(Stage):
    """Render items using font.

    Consumes the filtered SOURCE_ITEMS produced by the viewport filter
    and emits TEXT_BUFFER lines via sideline.render.make_block.
    """

    def __init__(self, name: str = "font"):
        self.name = name
        self.category = "render"
        self.optional = False

    @property
    def stage_type(self) -> str:
        return "render"

    @property
    def capabilities(self) -> set[str]:
        return {"render.output"}

    @property
    def stage_dependencies(self) -> set[str]:
        # Must connect to viewport_filter stage to get filtered source
        return {"viewport_filter"}

    @property
    def dependencies(self) -> set[str]:
        # Depend on source.filtered (provided by viewport_filter)
        # This ensures we get the filtered/processed source, not raw source
        return {"source.filtered"}

    @property
    def inlet_types(self) -> set:
        return {DataType.SOURCE_ITEMS}

    @property
    def outlet_types(self) -> set:
        return {DataType.TEXT_BUFFER}

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Render items to text buffer using font.

        Args:
            data: List of SourceItem (or arbitrary) objects to render.
            ctx: Pipeline context supplying viewport parameters.

        Returns:
            List of rendered text lines (TEXT_BUFFER).
        """
        if data is None:
            return []

        if not isinstance(data, list):
            return [str(data)]

        import os

        if os.environ.get("DEBUG_CAMERA"):
            print(f"FontStage: input items={len(data)}")

        viewport_width = ctx.params.viewport_width if ctx.params else 80

        result = []
        for item in data:
            if isinstance(item, SourceItem):
                title = item.content
                src = item.source
                ts = item.timestamp
                # BUG FIX: this module imports sideline.render, not
                # engine.render — the original `engine.render.make_block`
                # raised NameError at runtime.
                content_lines, _, _ = sideline.render.make_block(
                    title, src, ts, viewport_width
                )
                result.extend(content_lines)
            elif hasattr(item, "content"):
                # Item quacks like a source item but isn't one: render the
                # content with empty source/timestamp fields.
                title = str(item.content)
                content_lines, _, _ = sideline.render.make_block(
                    title, "", "", viewport_width
                )
                result.extend(content_lines)
            else:
                result.append(str(item))
        return result
+
+
class ImageToTextStage(Stage):
    """Convert image items to text."""

    def __init__(self, name: str = "image-to-text"):
        self.name = name
        self.category = "render"
        self.optional = True

    @property
    def stage_type(self) -> str:
        return "render"

    @property
    def capabilities(self) -> set[str]:
        return {"render.output"}

    @property
    def dependencies(self) -> set[str]:
        return {"source"}

    @property
    def inlet_types(self) -> set:
        return {DataType.SOURCE_ITEMS}

    @property
    def outlet_types(self) -> set:
        return {DataType.TEXT_BUFFER}

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Turn each incoming item into one or more text lines."""
        if data is None:
            return []
        if not isinstance(data, list):
            return [str(data)]

        lines: list = []
        for entry in data:
            if hasattr(entry, "image_path") or hasattr(entry, "image_data"):
                # Placeholder: a real implementation would rasterize the
                # image into ASCII art here.
                lines.append(f"[Image: {getattr(entry, 'image_path', 'data')}]")
            elif isinstance(entry, SourceItem):
                lines.extend(entry.content.split("\n"))
            else:
                lines.append(str(entry))
        return lines
+
+
class CanvasStage(Stage):
    """Render items to canvas."""

    def __init__(self, name: str = "canvas"):
        self.name = name
        self.category = "render"
        self.optional = False

    @property
    def stage_type(self) -> str:
        return "render"

    @property
    def capabilities(self) -> set[str]:
        return {"render.output"}

    @property
    def dependencies(self) -> set[str]:
        return {"source"}

    @property
    def inlet_types(self) -> set:
        return {DataType.SOURCE_ITEMS}

    @property
    def outlet_types(self) -> set:
        return {DataType.TEXT_BUFFER}

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Flatten each item into canvas text lines."""
        if data is None:
            return []
        if not isinstance(data, list):
            return [str(data)]

        # Minimal canvas rendering: SourceItems contribute their content
        # line-by-line; anything else is stringified whole.
        lines: list = []
        for entry in data:
            if isinstance(entry, SourceItem):
                lines.extend(entry.content.split("\n"))
            else:
                lines.append(str(entry))
        return lines
diff --git a/sideline/pipeline/controller.py b/sideline/pipeline/controller.py
new file mode 100644
index 0000000..301c1ab
--- /dev/null
+++ b/sideline/pipeline/controller.py
@@ -0,0 +1,1056 @@
+"""
+Pipeline controller - DAG-based pipeline execution.
+
+The Pipeline class orchestrates stages in dependency order, handling
+the complete render cycle from source to display.
+"""
+
+import time
+from dataclasses import dataclass, field
+from typing import Any
+
+from sideline.pipeline.core import PipelineContext, Stage, StageError, StageResult
+from sideline.pipeline.params import PipelineParams
+from sideline.pipeline.registry import StageRegistry
+
+
@dataclass
class PipelineConfig:
    """Configuration for a pipeline instance."""

    source: str = "headlines"  # data-source name
    display: str = "terminal"  # display backend name
    camera: str = "vertical"  # camera/scroll behavior name
    effects: list[str] = field(default_factory=list)  # effect stage names to attach
    enable_metrics: bool = True  # collect per-stage / per-frame timing metrics
+
+
@dataclass
class StageMetrics:
    """Metrics for a single stage execution."""

    name: str  # stage name (overlays are prefixed "[overlay]")
    duration_ms: float  # wall-clock processing time in milliseconds
    chars_in: int = 0  # len(str(...)) of the stage's input data
    chars_out: int = 0  # len(str(...)) of the stage's output data
+
+
@dataclass
class FrameMetrics:
    """Metrics for a single frame through the pipeline."""

    frame_number: int  # monotonically increasing frame counter
    total_ms: float  # end-to-end frame time in milliseconds
    stages: list[StageMetrics] = field(default_factory=list)  # per-stage timings
+
+
+class Pipeline:
+ """Main pipeline orchestrator.
+
+ Manages the execution of all stages in dependency order,
+ handling initialization, processing, and cleanup.
+
+ Supports dynamic mutation during runtime via the mutation API.
+ """
+
+ def __init__(
+ self,
+ config: PipelineConfig | None = None,
+ context: PipelineContext | None = None,
+ ):
+ self.config = config or PipelineConfig()
+ self.context = context or PipelineContext()
+ self._stages: dict[str, Stage] = {}
+ self._execution_order: list[str] = []
+ self._initialized = False
+ self._capability_map: dict[str, list[str]] = {}
+
+ self._metrics_enabled = self.config.enable_metrics
+ self._frame_metrics: list[FrameMetrics] = []
+ self._max_metrics_frames = 60
+
+ # Minimum capabilities required for pipeline to function
+ # NOTE: Research later - allow presets to override these defaults
+ self._minimum_capabilities: set[str] = {
+ "source",
+ "render.output",
+ "display.output",
+ "camera.state", # Always required for viewport filtering
+ }
+ self._current_frame_number = 0
+
+ def add_stage(self, name: str, stage: Stage, initialize: bool = True) -> "Pipeline":
+ """Add a stage to the pipeline.
+
+ Args:
+ name: Unique name for the stage
+ stage: Stage instance to add
+ initialize: If True, initialize the stage immediately
+
+ Returns:
+ Self for method chaining
+ """
+ self._stages[name] = stage
+ if self._initialized and initialize:
+ stage.init(self.context)
+ return self
+
+ def remove_stage(self, name: str, cleanup: bool = True) -> Stage | None:
+ """Remove a stage from the pipeline.
+
+ Args:
+ name: Name of the stage to remove
+ cleanup: If True, call cleanup() on the removed stage
+
+ Returns:
+ The removed stage, or None if not found
+ """
+ stage = self._stages.pop(name, None)
+ if stage and cleanup:
+ try:
+ stage.cleanup()
+ except Exception:
+ pass
+
+ # Rebuild execution order and capability map if stage was removed
+ if stage and self._initialized:
+ self._rebuild()
+
+ return stage
+
+ def remove_stage_safe(self, name: str, cleanup: bool = True) -> Stage | None:
+ """Remove a stage and rebuild execution order safely.
+
+ This is an alias for remove_stage() that explicitly rebuilds
+ the execution order after removal.
+
+ Args:
+ name: Name of the stage to remove
+ cleanup: If True, call cleanup() on the removed stage
+
+ Returns:
+ The removed stage, or None if not found
+ """
+ return self.remove_stage(name, cleanup)
+
+ def cleanup_stage(self, name: str) -> None:
+ """Clean up a specific stage without removing it.
+
+ This is useful for stages that need to release resources
+ (like display connections) without being removed from the pipeline.
+
+ Args:
+ name: Name of the stage to clean up
+ """
+ stage = self._stages.get(name)
+ if stage:
+ try:
+ stage.cleanup()
+ except Exception:
+ pass
+
+ def can_hot_swap(self, name: str) -> bool:
+ """Check if a stage can be safely hot-swapped.
+
+ A stage can be hot-swapped if:
+ 1. It exists in the pipeline
+ 2. It's not required for basic pipeline function
+ 3. It doesn't have strict dependencies that can't be re-resolved
+
+ Args:
+ name: Name of the stage to check
+
+ Returns:
+ True if the stage can be hot-swapped, False otherwise
+ """
+ # Check if stage exists
+ if name not in self._stages:
+ return False
+
+ # Check if stage is a minimum capability provider
+ stage = self._stages[name]
+ stage_caps = stage.capabilities if hasattr(stage, "capabilities") else set()
+ minimum_caps = self._minimum_capabilities
+
+ # If stage provides a minimum capability, it's more critical
+ # but still potentially swappable if another stage provides the same capability
+ for cap in stage_caps:
+ if cap in minimum_caps:
+ # Check if another stage provides this capability
+ providers = self._capability_map.get(cap, [])
+ # This stage is the sole provider - might be critical
+ # but still allow hot-swap if pipeline is not initialized
+ if len(providers) <= 1 and self._initialized:
+ return False
+
+ return True
+
+ def replace_stage(
+ self, name: str, new_stage: Stage, preserve_state: bool = True
+ ) -> Stage | None:
+ """Replace a stage in the pipeline with a new one.
+
+ Args:
+ name: Name of the stage to replace
+ new_stage: New stage instance
+ preserve_state: If True, copy relevant state from old stage
+
+ Returns:
+ The old stage, or None if not found
+ """
+ old_stage = self._stages.get(name)
+ if not old_stage:
+ return None
+
+ if preserve_state:
+ self._copy_stage_state(old_stage, new_stage)
+
+ old_stage.cleanup()
+ self._stages[name] = new_stage
+ new_stage.init(self.context)
+
+ if self._initialized:
+ self._rebuild()
+
+ return old_stage
+
+ def swap_stages(self, name1: str, name2: str) -> bool:
+ """Swap two stages in the pipeline.
+
+ Args:
+ name1: First stage name
+ name2: Second stage name
+
+ Returns:
+ True if successful, False if either stage not found
+ """
+ stage1 = self._stages.get(name1)
+ stage2 = self._stages.get(name2)
+
+ if not stage1 or not stage2:
+ return False
+
+ self._stages[name1] = stage2
+ self._stages[name2] = stage1
+
+ if self._initialized:
+ self._rebuild()
+
+ return True
+
+ def move_stage(
+ self, name: str, after: str | None = None, before: str | None = None
+ ) -> bool:
+ """Move a stage's position in execution order.
+
+ Args:
+ name: Stage to move
+ after: Place this stage after this stage name
+ before: Place this stage before this stage name
+
+ Returns:
+ True if successful, False if stage not found
+ """
+ if name not in self._stages:
+ return False
+
+ if not self._initialized:
+ return False
+
+ current_order = list(self._execution_order)
+ if name not in current_order:
+ return False
+
+ current_order.remove(name)
+
+ if after and after in current_order:
+ idx = current_order.index(after) + 1
+ current_order.insert(idx, name)
+ elif before and before in current_order:
+ idx = current_order.index(before)
+ current_order.insert(idx, name)
+ else:
+ current_order.append(name)
+
+ self._execution_order = current_order
+ return True
+
+ def _copy_stage_state(self, old_stage: Stage, new_stage: Stage) -> None:
+ """Copy relevant state from old stage to new stage during replacement.
+
+ Args:
+ old_stage: The old stage being replaced
+ new_stage: The new stage
+ """
+ if hasattr(old_stage, "_enabled"):
+ new_stage._enabled = old_stage._enabled
+
+ # Preserve camera state
+ if hasattr(old_stage, "save_state") and hasattr(new_stage, "restore_state"):
+ try:
+ state = old_stage.save_state()
+ new_stage.restore_state(state)
+ except Exception:
+ # If state preservation fails, continue without it
+ pass
+
+ def _rebuild(self) -> None:
+ """Rebuild execution order after mutation or auto-injection."""
+ was_initialized = self._initialized
+ self._initialized = False
+
+ self._capability_map = self._build_capability_map()
+ self._execution_order = self._resolve_dependencies()
+
+ # Note: We intentionally DO NOT validate dependencies here.
+ # Mutation operations (remove/swap/move) might leave the pipeline
+ # temporarily invalid (e.g., removing a stage that others depend on).
+ # Validation is performed explicitly in build() or can be checked
+ # manually via validate_minimum_capabilities().
+ # try:
+ # self._validate_dependencies()
+ # self._validate_types()
+ # except StageError:
+ # pass
+
+ # Restore initialized state
+ self._initialized = was_initialized
+
+ def get_stage(self, name: str) -> Stage | None:
+ """Get a stage by name."""
+ return self._stages.get(name)
+
+ def enable_stage(self, name: str) -> bool:
+ """Enable a stage in the pipeline.
+
+ Args:
+ name: Stage name to enable
+
+ Returns:
+ True if successful, False if stage not found
+ """
+ stage = self._stages.get(name)
+ if stage:
+ stage.set_enabled(True)
+ return True
+ return False
+
+ def disable_stage(self, name: str) -> bool:
+ """Disable a stage in the pipeline.
+
+ Args:
+ name: Stage name to disable
+
+ Returns:
+ True if successful, False if stage not found
+ """
+ stage = self._stages.get(name)
+ if stage:
+ stage.set_enabled(False)
+ return True
+ return False
+
+ def get_stage_info(self, name: str) -> dict | None:
+ """Get detailed information about a stage.
+
+ Args:
+ name: Stage name
+
+ Returns:
+ Dictionary with stage information, or None if not found
+ """
+ stage = self._stages.get(name)
+ if not stage:
+ return None
+
+ return {
+ "name": name,
+ "category": stage.category,
+ "stage_type": stage.stage_type,
+ "enabled": stage.is_enabled(),
+ "optional": stage.optional,
+ "capabilities": list(stage.capabilities),
+ "dependencies": list(stage.dependencies),
+ "inlet_types": [dt.name for dt in stage.inlet_types],
+ "outlet_types": [dt.name for dt in stage.outlet_types],
+ "render_order": stage.render_order,
+ "is_overlay": stage.is_overlay,
+ }
+
+ def get_pipeline_info(self) -> dict:
+ """Get comprehensive information about the pipeline.
+
+ Returns:
+ Dictionary with pipeline state
+ """
+ return {
+ "stages": {name: self.get_stage_info(name) for name in self._stages},
+ "execution_order": self._execution_order.copy(),
+ "initialized": self._initialized,
+ "stage_count": len(self._stages),
+ }
+
+ @property
+ def minimum_capabilities(self) -> set[str]:
+ """Get minimum capabilities required for pipeline to function."""
+ return self._minimum_capabilities
+
+ @minimum_capabilities.setter
+ def minimum_capabilities(self, value: set[str]):
+ """Set minimum required capabilities.
+
+ NOTE: Research later - allow presets to override these defaults
+ """
+ self._minimum_capabilities = value
+
+ def validate_minimum_capabilities(self) -> tuple[bool, list[str]]:
+ """Validate that all minimum capabilities are provided.
+
+ Returns:
+ Tuple of (is_valid, missing_capabilities)
+ """
+ missing = []
+ for cap in self._minimum_capabilities:
+ if not self._find_stage_with_capability(cap):
+ missing.append(cap)
+ return len(missing) == 0, missing
+
    def ensure_minimum_capabilities(self) -> list[str]:
        """Automatically inject MVP stages if minimum capabilities are missing.

        Auto-injection is always on, but defaults are trivial to override.

        Returns:
            List of stages that were injected
        """
        # Imported lazily to avoid a hard dependency at module import time.
        from sideline.camera import Camera
        from sideline.data_sources.sources import EmptyDataSource
        from sideline.display import DisplayRegistry
        from sideline.pipeline.adapters import (
            CameraClockStage,
            CameraStage,
            DataSourceStage,
            DisplayStage,
            SourceItemsToBufferStage,
        )

        injected = []

        # Check for source capability
        if (
            not self._find_stage_with_capability("source")
            and "source" not in self._stages
        ):
            # Trivial placeholder source so the pipeline can run end-to-end.
            empty_source = EmptyDataSource(width=80, height=24)
            self.add_stage("source", DataSourceStage(empty_source, name="empty"))
            injected.append("source")

        # Check for camera.state capability (must be BEFORE render to accept SOURCE_ITEMS)
        camera = None
        if not self._find_stage_with_capability("camera.state"):
            # Inject static camera (trivial, no movement)
            camera = Camera.scroll(speed=0.0)
            camera.set_canvas_size(200, 200)
            if "camera_update" not in self._stages:
                self.add_stage(
                    "camera_update", CameraClockStage(camera, name="camera-clock")
                )
            # NOTE(review): this append sits OUTSIDE the inner "not in
            # self._stages" guard, so "camera_update" is reported as injected
            # even when the stage already existed — confirm that's intended.
            injected.append("camera_update")

        # Check for render capability
        if (
            not self._find_stage_with_capability("render.output")
            and "render" not in self._stages
        ):
            self.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer"))
            injected.append("render")

        # Check for camera stage (must be AFTER render to accept TEXT_BUFFER)
        if camera and "camera" not in self._stages:
            self.add_stage("camera", CameraStage(camera, name="static"))
            injected.append("camera")

        # Check for display capability
        if (
            not self._find_stage_with_capability("display.output")
            and "display" not in self._stages
        ):
            display_name = self.config.display or "terminal"
            display = DisplayRegistry.create(display_name)
            # If the registry returns a falsy display, "display.output" stays
            # missing and build() will raise during re-validation.
            if display:
                self.add_stage("display", DisplayStage(display, name=display_name))
                injected.append("display")

        # Rebuild pipeline if stages were injected
        if injected:
            self._rebuild()

        return injected
+
    def build(self, auto_inject: bool = True) -> "Pipeline":
        """Build execution order based on dependencies.

        Args:
            auto_inject: If True, automatically inject MVP stages for missing capabilities

        Returns:
            Self, with execution order resolved and validated.

        Raises:
            StageError: If minimum capabilities cannot be satisfied even
                after auto-injection, or if dependency/type validation fails.
        """
        self._capability_map = self._build_capability_map()
        self._execution_order = self._resolve_dependencies()

        # Validate minimum capabilities and auto-inject if needed
        if auto_inject:
            is_valid, missing = self.validate_minimum_capabilities()
            if not is_valid:
                injected = self.ensure_minimum_capabilities()
                if injected:
                    print(
                        f"  \033[38;5;226mAuto-injected stages for missing capabilities: {injected}\033[0m"
                    )
                    # Rebuild after auto-injection
                    self._capability_map = self._build_capability_map()
                    self._execution_order = self._resolve_dependencies()

                # Re-validate after injection attempt (whether anything was injected or not)
                # If injection didn't run (injected empty), we still need to check if we're valid
                # If injection ran but failed to fix (injected empty), we need to check
                is_valid, missing = self.validate_minimum_capabilities()
                if not is_valid:
                    raise StageError(
                        "build",
                        f"Auto-injection failed to provide minimum capabilities: {missing}",
                    )

        self._validate_dependencies()
        self._validate_types()
        self._initialized = True
        return self
+
+ def _build_capability_map(self) -> dict[str, list[str]]:
+ """Build a map of capabilities to stage names.
+
+ Returns:
+ Dict mapping capability -> list of stage names that provide it
+ """
+ capability_map: dict[str, list[str]] = {}
+ for name, stage in self._stages.items():
+ for cap in stage.capabilities:
+ if cap not in capability_map:
+ capability_map[cap] = []
+ capability_map[cap].append(name)
+ return capability_map
+
+ def _find_stage_with_capability(self, capability: str) -> str | None:
+ """Find a stage that provides the given capability.
+
+ Supports wildcard matching:
+ - "source" matches "source.headlines" (prefix match)
+ - "source.*" matches "source.headlines"
+ - "source.headlines" matches exactly
+
+ Args:
+ capability: The capability to find
+
+ Returns:
+ Stage name that provides the capability, or None if not found
+ """
+ # Exact match
+ if capability in self._capability_map:
+ return self._capability_map[capability][0]
+
+ # Prefix match (e.g., "source" -> "source.headlines")
+ for cap, stages in self._capability_map.items():
+ if cap.startswith(capability + "."):
+ return stages[0]
+
+ # Wildcard match (e.g., "source.*" -> "source.headlines")
+ if ".*" in capability:
+ prefix = capability[:-2] # Remove ".*"
+ for cap in self._capability_map:
+ if cap.startswith(prefix + "."):
+ return self._capability_map[cap][0]
+
+ return None
+
    def _resolve_dependencies(self) -> list[str]:
        """Resolve stage execution order using topological sort with capability matching.

        Depth-first traversal: each stage's capability dependencies and
        direct stage dependencies are visited before the stage itself.

        Returns:
            Stage names in a valid execution (topological) order.

        Raises:
            StageError: On a circular dependency, or when a direct stage
                dependency names a stage not present in the pipeline.
        """
        ordered = []
        visited = set()
        temp_mark = set()  # stages on the current DFS path (cycle detection)

        def visit(name: str) -> None:
            if name in temp_mark:
                raise StageError(name, "Circular dependency detected")
            if name in visited:
                return

            temp_mark.add(name)
            stage = self._stages.get(name)
            if stage:
                # Handle capability-based dependencies
                for dep in stage.dependencies:
                    # Find a stage that provides this capability.
                    # Unresolvable capabilities are skipped here; they are
                    # reported later by _validate_dependencies().
                    dep_stage_name = self._find_stage_with_capability(dep)
                    if dep_stage_name:
                        visit(dep_stage_name)

                # Handle direct stage dependencies
                for stage_dep in stage.stage_dependencies:
                    if stage_dep in self._stages:
                        visit(stage_dep)
                    else:
                        # Stage dependency not found - this is an error
                        raise StageError(
                            name,
                            f"Missing stage dependency: '{stage_dep}' not found in pipeline",
                        )

            temp_mark.remove(name)
            visited.add(name)
            ordered.append(name)

        for name in self._stages:
            if name not in visited:
                visit(name)

        return ordered
+
+ def _validate_dependencies(self) -> None:
+ """Validate that all dependencies can be satisfied.
+
+ Raises StageError if any dependency cannot be resolved.
+ """
+ missing: list[tuple[str, str]] = [] # (stage_name, capability)
+
+ for name, stage in self._stages.items():
+ for dep in stage.dependencies:
+ if not self._find_stage_with_capability(dep):
+ missing.append((name, dep))
+
+ if missing:
+ msgs = [f" - {stage} needs {cap}" for stage, cap in missing]
+ raise StageError(
+ "validation",
+ "Missing capabilities:\n" + "\n".join(msgs),
+ )
+
    def _validate_types(self) -> None:
        """Validate inlet/outlet types between connected stages.

        PureData-style type validation. Each stage declares its inlet_types
        (what it accepts) and outlet_types (what it produces). This method
        validates that connected stages have compatible types.

        Note: only CONSECUTIVE stages in the resolved execution order are
        compared (stage i against stage i-1), not the full dependency graph.

        Raises StageError if type mismatch is detected.
        """
        from sideline.pipeline.core import DataType

        errors: list[str] = []

        for i, name in enumerate(self._execution_order):
            stage = self._stages.get(name)
            if not stage:
                continue

            inlet_types = stage.inlet_types

            # Check against previous stage's outlet types
            if i > 0:
                prev_name = self._execution_order[i - 1]
                prev_stage = self._stages.get(prev_name)
                if prev_stage:
                    prev_outlets = prev_stage.outlet_types

                    # Check if any outlet type is accepted by this inlet
                    # (ANY on either side is a wildcard; otherwise require
                    # a non-empty set intersection).
                    compatible = (
                        DataType.ANY in inlet_types
                        or DataType.ANY in prev_outlets
                        or bool(prev_outlets & inlet_types)
                    )

                    if not compatible:
                        errors.append(
                            f"  - {name} (inlet: {inlet_types}) "
                            f"← {prev_name} (outlet: {prev_outlets})"
                        )

            # Check display/sink stages (should accept TEXT_BUFFER)
            if (
                stage.category == "display"
                and DataType.TEXT_BUFFER not in inlet_types
                and DataType.ANY not in inlet_types
            ):
                errors.append(f"  - {name} is display but doesn't accept TEXT_BUFFER")

        if errors:
            raise StageError(
                "type_validation",
                "Type mismatch in pipeline connections:\n" + "\n".join(errors),
            )
+
+ def initialize(self) -> bool:
+ """Initialize all stages in execution order."""
+ for name in self._execution_order:
+ stage = self._stages.get(name)
+ if stage and not stage.init(self.context) and not stage.optional:
+ return False
+ return True
+
    def execute(self, data: Any | None = None) -> StageResult:
        """Execute the pipeline with the given input data.

        Pipeline execution:
        1. Execute all non-overlay stages in dependency order
        2. Apply overlay stages on top (sorted by render_order)
        3. Run the display stage last so overlay output is visible

        Args:
            data: Initial input fed to the first stage (may be None).

        Returns:
            StageResult carrying success flag and final data; on failure
            it also carries the error message and the failing stage name.
        """
        import os
        import sys

        # Opt-in stderr tracing of data flowing between stages.
        debug = os.environ.get("MAINLINE_DEBUG_DATAFLOW") == "1"

        if debug:
            print(
                f"[PIPELINE.execute] Starting with data type: {type(data).__name__ if data else 'None'}",
                file=sys.stderr,
                flush=True,
            )

        # Lazily build on first execute; build() raises on hard failure.
        if not self._initialized:
            self.build()

        if not self._initialized:
            return StageResult(
                success=False,
                data=None,
                error="Pipeline not initialized",
            )

        current_data = data
        frame_start = time.perf_counter() if self._metrics_enabled else 0
        stage_timings: list[StageMetrics] = []

        # Separate overlay stages and display stage from regular stages
        overlay_stages: list[tuple[int, Stage]] = []
        display_stage: Stage | None = None
        regular_stages: list[str] = []

        for name in self._execution_order:
            stage = self._stages.get(name)
            if not stage or not stage.is_enabled():
                continue

            # Check if this is the display stage - execute last
            if stage.category == "display":
                display_stage = stage
                continue

            # Safely check is_overlay - handle MagicMock and other non-bool returns
            try:
                is_overlay = bool(getattr(stage, "is_overlay", False))
            except Exception:
                is_overlay = False

            if is_overlay:
                # Safely get render_order
                try:
                    render_order = int(getattr(stage, "render_order", 0))
                except Exception:
                    render_order = 0
                overlay_stages.append((render_order, stage))
            else:
                regular_stages.append(name)

        # Execute regular stages in dependency order (excluding display)
        for name in regular_stages:
            stage = self._stages.get(name)
            if not stage or not stage.is_enabled():
                continue

            stage_start = time.perf_counter() if self._metrics_enabled else 0

            try:
                if debug:
                    data_info = type(current_data).__name__
                    if isinstance(current_data, list):
                        data_info += f"[{len(current_data)}]"
                    print(
                        f"[STAGE.{name}] Starting with: {data_info}",
                        file=sys.stderr,
                        flush=True,
                    )

                current_data = stage.process(current_data, self.context)

                if debug:
                    data_info = type(current_data).__name__
                    if isinstance(current_data, list):
                        data_info += f"[{len(current_data)}]"
                    print(
                        f"[STAGE.{name}] Completed, output: {data_info}",
                        file=sys.stderr,
                        flush=True,
                    )
            except Exception as e:
                if debug:
                    print(f"[STAGE.{name}] ERROR: {e}", file=sys.stderr, flush=True)
                # Optional stages may fail silently; required stages abort.
                if not stage.optional:
                    return StageResult(
                        success=False,
                        data=current_data,
                        error=str(e),
                        stage_name=name,
                    )
                continue

            if self._metrics_enabled:
                stage_duration = (time.perf_counter() - stage_start) * 1000
                chars_in = len(str(data)) if data else 0
                chars_out = len(str(current_data)) if current_data else 0
                stage_timings.append(
                    StageMetrics(
                        name=name,
                        duration_ms=stage_duration,
                        chars_in=chars_in,
                        chars_out=chars_out,
                    )
                )

        # Apply overlay stages (sorted by render_order)
        overlay_stages.sort(key=lambda x: x[0])
        for render_order, stage in overlay_stages:
            stage_start = time.perf_counter() if self._metrics_enabled else 0
            stage_name = f"[overlay]{stage.name}"

            try:
                # Overlays receive current_data but don't pass their output to next stage
                # Instead, their output is composited on top
                overlay_output = stage.process(current_data, self.context)
                # For now, we just let the overlay output pass through
                # In a more sophisticated implementation, we'd composite it
                if overlay_output is not None:
                    current_data = overlay_output
            except Exception as e:
                if not stage.optional:
                    return StageResult(
                        success=False,
                        data=current_data,
                        error=str(e),
                        stage_name=stage_name,
                    )

            if self._metrics_enabled:
                stage_duration = (time.perf_counter() - stage_start) * 1000
                chars_in = len(str(data)) if data else 0
                chars_out = len(str(current_data)) if current_data else 0
                stage_timings.append(
                    StageMetrics(
                        name=stage_name,
                        duration_ms=stage_duration,
                        chars_in=chars_in,
                        chars_out=chars_out,
                    )
                )

        # Execute display stage LAST (after overlay stages)
        # This ensures overlay effects like HUD are visible in the final output
        if display_stage:
            stage_start = time.perf_counter() if self._metrics_enabled else 0

            try:
                current_data = display_stage.process(current_data, self.context)
            except Exception as e:
                if not display_stage.optional:
                    return StageResult(
                        success=False,
                        data=current_data,
                        error=str(e),
                        stage_name=display_stage.name,
                    )

            if self._metrics_enabled:
                stage_duration = (time.perf_counter() - stage_start) * 1000
                chars_in = len(str(data)) if data else 0
                chars_out = len(str(current_data)) if current_data else 0
                stage_timings.append(
                    StageMetrics(
                        name=display_stage.name,
                        duration_ms=stage_duration,
                        chars_in=chars_in,
                        chars_out=chars_out,
                    )
                )

        if self._metrics_enabled:
            total_duration = (time.perf_counter() - frame_start) * 1000
            self._frame_metrics.append(
                FrameMetrics(
                    frame_number=self._current_frame_number,
                    total_ms=total_duration,
                    stages=stage_timings,
                )
            )

            # Store metrics in context for other stages (like HUD)
            # This makes metrics a first-class pipeline citizen
            if self.context:
                self.context.state["metrics"] = self.get_metrics_summary()

            # Bounded history: drop the oldest frame once the cap is hit.
            if len(self._frame_metrics) > self._max_metrics_frames:
                self._frame_metrics.pop(0)
            self._current_frame_number += 1

        return StageResult(success=True, data=current_data)
+
def cleanup(self) -> None:
    """Tear down every stage in reverse execution order, then reset state.

    Reverse order lets downstream consumers release resources before
    their producers. Individual cleanup failures are deliberately
    swallowed so one broken stage cannot block shutdown.
    """
    for stage_name in reversed(self._execution_order):
        registered = self._stages.get(stage_name)
        if not registered:
            continue
        try:
            registered.cleanup()
        except Exception:
            # Best-effort shutdown: ignore per-stage cleanup errors.
            pass
    self._stages.clear()
    self._initialized = False
+
@property
def stages(self) -> dict[str, Stage]:
    """Shallow copy of the registered stages keyed by name (safe to mutate)."""
    return dict(self._stages)
+
@property
def execution_order(self) -> list[str]:
    """Copy of the stage execution order (callers may mutate freely)."""
    return list(self._execution_order)
+
def get_stage_names(self) -> list[str]:
    """Return the names of all registered stages, in insertion order."""
    return [*self._stages]
+
def get_overlay_stages(self) -> list[Stage]:
    """Return overlay stages ordered by ascending ``render_order``."""
    return sorted(
        (candidate for candidate in self._stages.values() if candidate.is_overlay),
        key=lambda candidate: candidate.render_order,
    )
+
def get_stage_type(self, name: str) -> str:
    """Return a stage's ``stage_type``, or "" when *name* is unknown."""
    found = self._stages.get(name)
    if not found:
        return ""
    return found.stage_type
+
def get_render_order(self, name: str) -> int:
    """Return a stage's ``render_order``, or 0 when *name* is unknown."""
    found = self._stages.get(name)
    if not found:
        return 0
    return found.render_order
+
def get_metrics_summary(self) -> dict:
    """Summarize collected frame metrics.

    Returns:
        Dict with ``frame_count``, pipeline-wide avg/min/max frame times
        (ms), and per-stage timing plus total character throughput — or
        ``{"error": ...}`` when no frames have been recorded yet.
    """
    if not self._frame_metrics:
        return {"error": "No metrics collected"}

    frame_times = [frame.total_ms for frame in self._frame_metrics]

    # Collect raw duration samples and character totals per stage name.
    samples: dict[str, list[float]] = {}
    char_totals: dict[str, int] = {}
    for frame in self._frame_metrics:
        for stage in frame.stages:
            samples.setdefault(stage.name, []).append(stage.duration_ms)
            char_totals[stage.name] = char_totals.get(stage.name, 0) + stage.chars_out

    stage_stats = {
        stage_name: {
            "total_chars": char_totals[stage_name],
            "avg_ms": sum(times) / len(times),
            "min_ms": min(times),
            "max_ms": max(times),
        }
        for stage_name, times in samples.items()
    }

    return {
        "frame_count": len(self._frame_metrics),
        "pipeline": {
            "avg_ms": sum(frame_times) / len(frame_times),
            "min_ms": min(frame_times),
            "max_ms": max(frame_times),
        },
        "stages": stage_stats,
    }
+
def reset_metrics(self) -> None:
    """Drop every recorded frame metric and restart frame numbering at zero."""
    del self._frame_metrics[:]
    self._current_frame_number = 0
+
def get_frame_times(self) -> list[float]:
    """Return per-frame total durations in ms (oldest first) for charting."""
    return [frame.total_ms for frame in self._frame_metrics]
+
+
class PipelineRunner:
    """High-level pipeline runner with animation support.

    Wraps a Pipeline and drives it frame by frame, advancing the shared
    PipelineParams on every step so animation-driven stages see fresh
    values.
    """

    def __init__(
        self,
        pipeline: Pipeline,
        params: PipelineParams | None = None,
    ):
        self.pipeline = pipeline
        self.params = params or PipelineParams()
        self._running = False

    def start(self) -> bool:
        """Mark the runner active and initialize the underlying pipeline."""
        self._running = True
        return self.pipeline.initialize()

    def step(self, input_data: Any | None = None) -> Any:
        """Advance one frame: bump the counter, publish params, execute.

        Returns:
            The pipeline output on success, or None when execution failed.
        """
        self.params.frame_number += 1
        self.pipeline.context.params = self.params
        outcome = self.pipeline.execute(input_data)
        if not outcome.success:
            return None
        return outcome.data

    def stop(self) -> None:
        """Deactivate the runner and release pipeline resources."""
        self._running = False
        self.pipeline.cleanup()

    @property
    def is_running(self) -> bool:
        """True between start() and stop()."""
        return self._running
+
+
def create_pipeline_from_params(params: PipelineParams) -> Pipeline:
    """Build a Pipeline whose config mirrors the given PipelineParams.

    Only source/display/camera/effect selections are carried over;
    runtime fields (frame_number, pulse, ...) are applied per step.
    """
    # Named pipeline_config to avoid shadowing the module-level `config` import.
    pipeline_config = PipelineConfig(
        source=params.source,
        display=params.display,
        camera=params.camera_mode,
        effects=params.effect_order,
    )
    return Pipeline(config=pipeline_config)
+
+
def create_default_pipeline() -> Pipeline:
    """Assemble the standard headlines -> buffer -> display pipeline.

    The display stage is looked up in StageRegistry and silently skipped
    when no "terminal" backend has been registered.
    """
    from sideline.data_sources.sources import HeadlinesDataSource
    from sideline.pipeline.adapters import (
        DataSourceStage,
        SourceItemsToBufferStage,
    )

    pipeline = Pipeline()

    # Source: raw headline items, wrapped in the generic Stage adapter.
    pipeline.add_stage(
        "source", DataSourceStage(HeadlinesDataSource(), name="headlines")
    )

    # Render: turn the source items into a displayable text buffer.
    pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer"))

    # Display: optional — only added when the terminal backend exists.
    terminal_stage = StageRegistry.create("display", "terminal")
    if terminal_stage:
        pipeline.add_stage("display", terminal_stage)

    return pipeline.build()
diff --git a/sideline/pipeline/core.py b/sideline/pipeline/core.py
new file mode 100644
index 0000000..52442f3
--- /dev/null
+++ b/sideline/pipeline/core.py
@@ -0,0 +1,317 @@
+"""
+Pipeline core - Unified Stage abstraction and PipelineContext.
+
+This module provides the foundation for a clean, dependency-managed pipeline:
+- Stage: Base class for all pipeline components (sources, effects, displays, cameras)
+- PipelineContext: Dependency injection context for runtime data exchange
+- Capability system: Explicit capability declarations with duck-typing support
+- DataType: PureData-style inlet/outlet typing for validation
+"""
+
+from abc import ABC, abstractmethod
+from collections.abc import Callable
+from dataclasses import dataclass, field
+from enum import Enum, auto
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+ from sideline.pipeline.params import PipelineParams
+
+
class DataType(Enum):
    """PureData-style data types used to validate inlet/outlet connections.

    Each member names one concrete data format that can flow between
    pipeline stages, enabling connection validation before execution:

    * SOURCE_ITEMS: list[SourceItem] produced by a DataSource
    * ITEM_TUPLES:  list of (title, source, timestamp) tuples
    * TEXT_BUFFER:  list[str] rendered ANSI buffer ready for display
    * RAW_TEXT:     plain str
    * PIL_IMAGE:    a PIL Image object
    * ANY:          wildcard, compatible with every type
    * NONE:         no data at all (terminator)
    """

    SOURCE_ITEMS = auto()
    ITEM_TUPLES = auto()
    TEXT_BUFFER = auto()
    RAW_TEXT = auto()
    PIL_IMAGE = auto()
    ANY = auto()
    NONE = auto()
+
+
@dataclass
class StageConfig:
    """Declarative configuration for one pipeline stage.

    Attributes:
        name: Stage instance name.
        category: Stage category ("source", "effect", "display", ...).
        enabled: Whether the stage participates in execution.
        optional: Whether the pipeline tolerates this stage failing.
        params: Arbitrary stage-specific keyword parameters.
    """

    name: str
    category: str
    enabled: bool = True
    optional: bool = False
    params: dict[str, Any] = field(default_factory=dict)
+
+
class Stage(ABC):
    """Abstract base class for all pipeline stages.

    A Stage is a single component in the rendering pipeline. Stages can be:
    - Sources: Data providers (headlines, poetry, pipeline viz)
    - Effects: Post-processors (noise, fade, glitch, hud)
    - Displays: Output backends (terminal, pygame, websocket)
    - Cameras: Viewport controllers (vertical, horizontal, omni)
    - Overlays: UI elements that compose on top (HUD)

    Stages declare:
    - capabilities: What they provide to other stages
    - dependencies: What they need from other stages
    - stage_type: Category of stage (source, effect, overlay, display)
    - render_order: Execution order within category
    - is_overlay: If True, output is composited on top, not passed downstream

    Duck-typing is supported: any class with the required methods can act as a Stage.
    """

    # Subclasses must define `name` and `category`; `optional` may keep its default.
    name: str
    category: str  # "source", "effect", "overlay", "display", "camera"
    optional: bool = False  # If True, pipeline continues even if stage fails

    @property
    def stage_type(self) -> str:
        """Category of stage for ordering.

        Valid values: "source", "effect", "overlay", "display", "camera"
        Defaults to category for backwards compatibility.
        """
        return self.category

    @property
    def render_order(self) -> int:
        """Execution order within stage_type group.

        Higher values execute later. Useful for ordering overlays
        or effects that need specific execution order.
        """
        return 0

    @property
    def is_overlay(self) -> bool:
        """If True, this stage's output is composited on top of the buffer.

        Overlay stages don't pass their output to the next stage.
        Instead, their output is layered on top of the final buffer.
        Use this for HUD, status displays, and similar UI elements.
        """
        return False

    @property
    def inlet_types(self) -> "set[DataType]":
        """Return set of data types this stage accepts.

        PureData-style inlet typing. If the connected upstream stage's
        outlet_type is not in this set, the pipeline will raise an error.

        Examples:
            - Source stages: {DataType.NONE} (no input needed)
            - Transform stages: {DataType.ITEM_TUPLES, DataType.TEXT_BUFFER}
            - Display stages: {DataType.TEXT_BUFFER}
        """
        return {DataType.ANY}

    @property
    def outlet_types(self) -> "set[DataType]":
        """Return set of data types this stage produces.

        PureData-style outlet typing. Downstream stages must accept
        this type in their inlet_types.

        Examples:
            - Source stages: {DataType.SOURCE_ITEMS}
            - Transform stages: {DataType.TEXT_BUFFER}
            - Display stages: {DataType.NONE} (consumes data)
        """
        return {DataType.ANY}

    @property
    def capabilities(self) -> set[str]:
        """Return set of capabilities this stage provides.

        Examples:
            - "source.headlines"
            - "effect.noise"
            - "display.output"
            - "camera"
        """
        return {f"{self.category}.{self.name}"}

    @property
    def dependencies(self) -> set[str]:
        """Return set of capability names this stage needs.

        Examples:
            - {"display.output"}
            - {"source.headlines"}
            - {"camera"}
        """
        return set()

    @property
    def stage_dependencies(self) -> set[str]:
        """Return set of stage names this stage must connect to directly.

        This allows explicit stage-to-stage dependencies, useful for enforcing
        pipeline structure when capability matching alone is insufficient.

        Examples:
            - {"viewport_filter"} # Must connect to viewport_filter stage
            - {"camera_update"} # Must connect to camera_update stage

        NOTE: These are stage names (as added to pipeline), not capabilities.
        """
        return set()

    def init(self, ctx: "PipelineContext") -> bool:
        """Initialize stage with pipeline context.

        Args:
            ctx: PipelineContext for accessing services

        Returns:
            True if initialization succeeded, False otherwise
        """
        return True

    @abstractmethod
    def process(self, data: Any, ctx: "PipelineContext") -> Any:
        """Process input data and return output.

        Args:
            data: Input data from previous stage (or initial data for first stage)
            ctx: PipelineContext for accessing services and state

        Returns:
            Processed data for next stage
        """
        ...

    def cleanup(self) -> None:  # noqa: B027
        """Clean up resources when pipeline shuts down."""
        pass

    def get_config(self) -> "StageConfig":
        """Return current configuration of this stage.

        FIX: now reports the live enabled/disabled state toggled via
        set_enabled(); previously `enabled` always defaulted to True
        even after set_enabled(False).
        """
        return StageConfig(
            name=self.name,
            category=self.category,
            enabled=self.is_enabled(),
            optional=self.optional,
        )

    def set_enabled(self, enabled: bool) -> None:
        """Enable or disable this stage."""
        self._enabled = enabled  # type: ignore[attr-defined]

    def is_enabled(self) -> bool:
        """Check if stage is enabled (defaults to True when never toggled)."""
        return getattr(self, "_enabled", True)
+
+
+@dataclass
+class StageResult:
+ """Result of stage processing, including success/failure info."""
+
+ success: bool
+ data: Any
+ error: str | None = None
+ stage_name: str = ""
+
+
+class PipelineContext:
+ """Dependency injection context passed through the pipeline.
+
+ Provides:
+ - services: Named services (display, config, event_bus, etc.)
+ - state: Runtime state shared between stages
+ - params: PipelineParams for animation-driven config
+
+ Services can be injected at construction time or lazily resolved.
+ """
+
+ def __init__(
+ self,
+ services: dict[str, Any] | None = None,
+ initial_state: dict[str, Any] | None = None,
+ ):
+ self.services: dict[str, Any] = services or {}
+ self.state: dict[str, Any] = initial_state or {}
+ self._params: PipelineParams | None = None
+
+ # Lazy resolvers for services (can be added by applications)
+ self._lazy_resolvers: dict[str, Callable[[], Any]] = {}
+
+ def register_service(self, name: str, resolver: Callable[[], Any]) -> None:
+ """Register a lazy service resolver.
+
+ Args:
+ name: Service name (e.g., 'config', 'event_bus')
+ resolver: Function that returns the service instance
+ """
+ self._lazy_resolvers[name] = resolver
+
+ def get(self, key: str, default: Any = None) -> Any:
+ """Get a service or state value by key.
+
+ First checks services, then state, then lazy resolution.
+ """
+ if key in self.services:
+ return self.services[key]
+ if key in self.state:
+ return self.state[key]
+ if key in self._lazy_resolvers:
+ try:
+ return self._lazy_resolvers[key]()
+ except Exception:
+ return default
+ return default
+
+ def set(self, key: str, value: Any) -> None:
+ """Set a service or state value."""
+ self.services[key] = value
+
+ def set_state(self, key: str, value: Any) -> None:
+ """Set a runtime state value."""
+ self.state[key] = value
+
+ def get_state(self, key: str, default: Any = None) -> Any:
+ """Get a runtime state value."""
+ return self.state.get(key, default)
+
+ @property
+ def params(self) -> "PipelineParams | None":
+ """Get current pipeline params (for animation)."""
+ return self._params
+
+ @params.setter
+ def params(self, value: "PipelineParams") -> None:
+ """Set pipeline params (from animation controller)."""
+ self._params = value
+
+ def has_capability(self, capability: str) -> bool:
+ """Check if a capability is available."""
+ return capability in self.services or capability in self._lazy_resolvers
+
+
class StageError(Exception):
    """Raised when a stage fails to process.

    Carries the failing stage's name and whether that stage was optional,
    so the pipeline can decide whether to continue.
    """

    def __init__(self, stage_name: str, message: str, is_optional: bool = False):
        super().__init__(f"Stage '{stage_name}' failed: {message}")
        self.stage_name = stage_name
        self.message = message
        self.is_optional = is_optional
+
+
def create_stage_error(
    stage_name: str, error: Exception, is_optional: bool = False
) -> StageError:
    """Wrap an arbitrary exception in a StageError attributed to *stage_name*."""
    return StageError(stage_name, str(error), is_optional)
diff --git a/sideline/pipeline/params.py b/sideline/pipeline/params.py
new file mode 100644
index 0000000..d901cf2
--- /dev/null
+++ b/sideline/pipeline/params.py
@@ -0,0 +1,152 @@
+"""
+Pipeline parameters - Runtime configuration layer for animation control.
+
+PipelineParams is the target for AnimationController - animation events
+modify these params, which the pipeline then applies to its stages.
+"""
+
+from dataclasses import dataclass, field
+from typing import Any
+
+try:
+ from sideline.display import BorderMode
+except ImportError:
+ BorderMode = object # Fallback for type checking
+
+
@dataclass
class PipelineParams:
    """Runtime configuration for the pipeline.

    This is the canonical config object that AnimationController modifies.
    Stages read from these params to adjust their behavior.
    """

    # Source config
    source: str = "headlines"
    source_refresh_interval: float = 60.0

    # Display config
    display: str = "terminal"
    border: bool | BorderMode = False
    positioning: str = "mixed"  # Positioning mode: "absolute", "relative", "mixed"

    # Camera config
    camera_mode: str = "vertical"
    camera_speed: float = 1.0  # Default speed
    camera_x: int = 0  # For horizontal scrolling

    # Effect config
    effect_order: list[str] = field(
        default_factory=lambda: ["noise", "fade", "glitch", "firehose"]
    )
    effect_enabled: dict[str, bool] = field(default_factory=dict)
    effect_intensity: dict[str, float] = field(default_factory=dict)

    # Animation-driven state (set by AnimationController)
    pulse: float = 0.0
    current_effect: str | None = None
    path_progress: float = 0.0

    # Viewport
    viewport_width: int = 80
    viewport_height: int = 24

    # Firehose
    firehose_enabled: bool = False

    # Runtime state
    frame_number: int = 0
    fps: float = 60.0

    def get_effect_config(self, name: str) -> tuple[bool, float]:
        """Get (enabled, intensity) for an effect; defaults to (True, 1.0)."""
        enabled = self.effect_enabled.get(name, True)
        intensity = self.effect_intensity.get(name, 1.0)
        return enabled, intensity

    def set_effect_config(self, name: str, enabled: bool, intensity: float) -> None:
        """Set effect configuration."""
        self.effect_enabled[name] = enabled
        self.effect_intensity[name] = intensity

    def is_effect_enabled(self, name: str) -> bool:
        """Check if an effect is enabled (unconfigured effects default to on)."""
        return self.effect_enabled.get(name, True)

    def get_effect_intensity(self, name: str) -> float:
        """Get effect intensity (0.0 to 1.0)."""
        return self.effect_intensity.get(name, 1.0)

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for serialization.

        NOTE: deliberately a subset — transient runtime fields
        (frame_number, fps, camera_x, path_progress,
        source_refresh_interval) and ``border`` (possibly a
        non-serializable BorderMode) are omitted.
        """
        return {
            "source": self.source,
            "display": self.display,
            "positioning": self.positioning,
            "camera_mode": self.camera_mode,
            "camera_speed": self.camera_speed,
            "effect_order": self.effect_order,
            "effect_enabled": self.effect_enabled.copy(),
            "effect_intensity": self.effect_intensity.copy(),
            "pulse": self.pulse,
            "current_effect": self.current_effect,
            "viewport_width": self.viewport_width,
            "viewport_height": self.viewport_height,
            "firehose_enabled": self.firehose_enabled,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "PipelineParams":
        """Create from dictionary, silently ignoring unknown keys."""
        params = cls()
        for key, value in data.items():
            if hasattr(params, key):
                setattr(params, key, value)
        return params

    def copy(self) -> "PipelineParams":
        """Return an independent copy of this params object.

        FIX: copies every dataclass field — the previous hand-written
        version silently dropped ``border``, ``positioning`` and
        ``source_refresh_interval``. List and dict fields are
        shallow-copied so the copy can be mutated without affecting the
        original.
        """
        duplicate = PipelineParams()
        for field_name in self.__dataclass_fields__:
            value = getattr(self, field_name)
            if isinstance(value, (list, dict)):
                value = value.copy()
            setattr(duplicate, field_name, value)
        return duplicate
+
+
# Default params for different modes
# NOTE: these are shared module-level instances — call .copy() before
# mutating one, or every consumer of the preset will see the change.

# Standard interactive mode: headlines scrolling in the terminal.
DEFAULT_HEADLINE_PARAMS = PipelineParams(
    source="headlines",
    display="terminal",
    camera_mode="vertical",
    effect_order=["noise", "fade", "glitch", "firehose"],
)

# Same content and effects, rendered through the pygame backend.
DEFAULT_PYGAME_PARAMS = PipelineParams(
    source="headlines",
    display="pygame",
    camera_mode="vertical",
    effect_order=["noise", "fade", "glitch", "firehose"],
)

# Pipeline-visualization mode; effects disabled so the viz stays legible.
DEFAULT_PIPELINE_PARAMS = PipelineParams(
    source="pipeline",
    display="pygame",
    camera_mode="trace",
    effect_order=[],  # No effects for pipeline viz
)
diff --git a/sideline/pipeline/registry.py b/sideline/pipeline/registry.py
new file mode 100644
index 0000000..47f12ce
--- /dev/null
+++ b/sideline/pipeline/registry.py
@@ -0,0 +1,242 @@
+"""
+Stage registry - Unified registration for all pipeline stages.
+
+Provides a single registry for sources, effects, displays, and cameras.
+Supports plugin discovery via entry points and explicit registration.
+"""
+
+from __future__ import annotations
+
+import importlib
+import importlib.metadata
+import logging
+from typing import TYPE_CHECKING, Any, TypeVar
+
+from sideline.pipeline.core import Stage
+
+if TYPE_CHECKING:
+ from sideline.pipeline.core import Stage
+
+T = TypeVar("T")
+logger = logging.getLogger(__name__)
+
+
class StageRegistry:
    """Unified registry for all pipeline stage types.

    Supports both explicit registration and automatic discovery via entry points.
    Plugins can be registered manually or discovered automatically.

    NOTE: all state is held in class attributes, so this registry behaves
    as a process-wide singleton shared by every importer of the module.
    """

    # category name -> {registry key -> stage class or zero-arg factory}
    _categories: dict[str, dict[str, type[Any]]] = {}
    # Set once discover_stages() has registered framework-level stages.
    _discovered: bool = False
    # Pre-built Stage instances registered by name via register_instance().
    _instances: dict[str, Stage] = {}
    # Guards entry-point discovery so it runs at most once per process.
    _plugins_discovered: bool = False
    _plugin_modules: set[str] = set()  # Track loaded plugin modules

    @classmethod
    def register(cls, category: str, stage_class: type[Any]) -> None:
        """Register a stage class in a category.

        Args:
            category: Category name (source, effect, display, camera)
            stage_class: Stage subclass to register

        The registry key is the class's ``__name__``; the fallback handles
        being handed an instance rather than a class.
        """
        if category not in cls._categories:
            cls._categories[category] = {}

        key = getattr(stage_class, "__name__", stage_class.__class__.__name__)
        cls._categories[category][key] = stage_class

    @classmethod
    def get(cls, category: str, name: str) -> type[Any] | None:
        """Get a stage class by category and name (None when unknown)."""
        return cls._categories.get(category, {}).get(name)

    @classmethod
    def list(cls, category: str) -> list[str]:
        """List all stage names in a category."""
        return list(cls._categories.get(category, {}).keys())

    @classmethod
    def list_categories(cls) -> list[str]:
        """List all registered categories."""
        return list(cls._categories.keys())

    @classmethod
    def create(cls, category: str, name: str, **kwargs) -> Stage | None:
        """Create a stage instance by category and name.

        Returns None when the (category, name) pair is not registered.
        """
        stage_class = cls.get(category, name)
        if stage_class:
            return stage_class(**kwargs)
        return None

    @classmethod
    def create_instance(cls, stage: Stage | type[Stage], **kwargs) -> Stage:
        """Create an instance from a stage class or return as-is.

        Raises:
            TypeError: when given neither a Stage instance nor a Stage subclass.
        """
        if isinstance(stage, Stage):
            return stage
        if isinstance(stage, type) and issubclass(stage, Stage):
            return stage(**kwargs)
        raise TypeError(f"Expected Stage class or instance, got {type(stage)}")

    @classmethod
    def register_instance(cls, name: str, stage: Stage) -> None:
        """Register a pre-built stage instance by name."""
        cls._instances[name] = stage

    @classmethod
    def get_instance(cls, name: str) -> Stage | None:
        """Get a registered stage instance by name."""
        return cls._instances.get(name)

    @classmethod
    def register_plugin_module(cls, plugin_module: str) -> None:
        """Register stages from an external plugin module.

        The module should define a register_stages(registry) function.

        Args:
            plugin_module: Full module path (e.g., 'engine.plugins')

        Import failures and missing hooks are logged, not raised, so an
        absent optional plugin cannot break application startup.
        """
        if plugin_module in cls._plugin_modules:
            logger.debug(f"Plugin module {plugin_module} already loaded")
            return

        try:
            module = importlib.import_module(plugin_module)
            if hasattr(module, "register_stages"):
                module.register_stages(cls)
                cls._plugin_modules.add(plugin_module)
                logger.info(f"Registered stages from {plugin_module}")
            else:
                logger.warning(
                    f"Plugin module {plugin_module} has no register_stages function"
                )
        except ImportError as e:
            logger.warning(f"Failed to import plugin module {plugin_module}: {e}")

    # Backward compatibility alias
    # (binds the same bound classmethod object under the old public name)
    register_plugin = register_plugin_module

    @classmethod
    def discover_plugins(cls) -> None:
        """Auto-discover and register plugins via entry points.

        Looks for 'sideline.stages' entry points in installed packages.
        Each entry point should point to a register_stages(registry) function.
        Runs at most once per process; subsequent calls are no-ops.
        """
        if cls._plugins_discovered:
            return

        try:
            # Discover entry points for sideline.stages
            # Python 3.12+ changed the entry_points() API
            try:
                entry_points = importlib.metadata.entry_points()
                if hasattr(entry_points, "get"):
                    # Python < 3.12
                    stages_eps = entry_points.get("sideline.stages", [])
                else:
                    # Python 3.12+
                    stages_eps = entry_points.select(group="sideline.stages")
            except Exception:
                # Fallback: try both approaches
                try:
                    entry_points = importlib.metadata.entry_points()
                    stages_eps = entry_points.get("sideline.stages", [])
                except Exception:
                    stages_eps = []

            for ep in stages_eps:
                try:
                    register_func = ep.load()
                    if callable(register_func):
                        register_func(cls)
                        logger.info(f"Discovered and registered plugin: {ep.name}")
                except Exception as e:
                    # One broken plugin must not block the remaining ones.
                    logger.warning(f"Failed to load entry point {ep.name}: {e}")

            cls._plugins_discovered = True
        except Exception as e:
            logger.warning(f"Failed to discover plugins: {e}")

    @classmethod
    def get_discovered_modules(cls) -> set[str]:
        """Get set of plugin modules that have been loaded."""
        return cls._plugin_modules.copy()
+
+
def discover_stages() -> None:
    """Idempotently register all framework-level stage implementations.

    Only framework stages (display backends) are registered here;
    application-specific stages arrive via entry-point plugins.
    """
    if StageRegistry._discovered:
        return

    _register_display_stages()        # framework display backends
    StageRegistry.discover_plugins()  # entry-point plugins

    StageRegistry._discovered = True
+
+
def _register_display_stages() -> None:
    """Expose every DisplayRegistry backend as a "display" stage factory."""
    try:
        from sideline.display import DisplayRegistry
    except ImportError:
        # Display subsystem unavailable (e.g. headless install) — skip quietly.
        return

    DisplayRegistry.initialize()

    display_category = StageRegistry._categories.setdefault("display", {})
    for backend_name in DisplayRegistry.list_backends():
        display_category[backend_name] = _DisplayStageFactory(backend_name)
+
+
+class _DisplayStageFactory:
+ """Factory that creates DisplayStage instances for a specific backend."""
+
+ def __init__(self, backend_name: str):
+ self._backend_name = backend_name
+
+ def __call__(self):
+ from sideline.display import DisplayRegistry
+ from sideline.pipeline.adapters import DisplayStage
+
+ display = DisplayRegistry.create(self._backend_name)
+ if display is None:
+ raise RuntimeError(
+ f"Failed to create display backend: {self._backend_name}"
+ )
+ return DisplayStage(display, name=self._backend_name)
+
+ @property
+ def __name__(self) -> str:
+ return self._backend_name.capitalize() + "Stage"
+
+
+# Convenience functions
def register_source(stage_class: type[Stage]) -> None:
    """Shorthand for ``StageRegistry.register("source", stage_class)``."""
    StageRegistry.register("source", stage_class)


def register_effect(stage_class: type[Stage]) -> None:
    """Shorthand for ``StageRegistry.register("effect", stage_class)``."""
    StageRegistry.register("effect", stage_class)


def register_display(stage_class: type[Stage]) -> None:
    """Shorthand for ``StageRegistry.register("display", stage_class)``."""
    StageRegistry.register("display", stage_class)


def register_camera(stage_class: type[Stage]) -> None:
    """Shorthand for ``StageRegistry.register("camera", stage_class)``."""
    StageRegistry.register("camera", stage_class)
diff --git a/sideline/pipeline/stages/framebuffer.py b/sideline/pipeline/stages/framebuffer.py
new file mode 100644
index 0000000..76e5956
--- /dev/null
+++ b/sideline/pipeline/stages/framebuffer.py
@@ -0,0 +1,174 @@
+"""
+Frame buffer stage - stores previous frames for temporal effects.
+
+Provides (per-instance, using instance name):
+- framebuffer.{name}.history: list of previous buffers (most recent first)
+- framebuffer.{name}.intensity_history: list of corresponding intensity maps
+- framebuffer.{name}.current_intensity: intensity map for current frame
+
+Capability: "framebuffer.history.{name}"
+"""
+
+import threading
+from dataclasses import dataclass
+from typing import Any
+
+from sideline.display import _strip_ansi
+from sideline.pipeline.core import DataType, PipelineContext, Stage
+
+
@dataclass
class FrameBufferConfig:
    """Configuration for FrameBufferStage.

    Attributes:
        history_depth: How many previous frames are retained.
        name: Unique instance name, used in capability and context keys.
    """

    history_depth: int = 2
    name: str = "default"
+
+
class FrameBufferStage(Stage):
    """Stores frame history and computes intensity maps.

    Supports multiple instances with unique capabilities and context keys.

    NOTE(review): the class attribute ``name`` stays "framebuffer" for every
    instance; only capabilities and context keys use the per-instance
    ``config.name`` — confirm whether pipeline metrics also need unique
    per-instance stage names.
    """

    name = "framebuffer"
    category = "effect"  # It's an effect that enriches context with frame history

    def __init__(
        self,
        config: FrameBufferConfig | None = None,
        history_depth: int = 2,
        name: str = "default",
    ):
        # `history_depth` and `name` are only used when no explicit config
        # object is supplied; otherwise the config wins.
        self.config = config or FrameBufferConfig(
            history_depth=history_depth, name=name
        )
        # Guards the read-modify-write of the shared history lists in process().
        self._lock = threading.Lock()

    @property
    def capabilities(self) -> set[str]:
        # Unique per instance so several framebuffers can coexist.
        return {f"framebuffer.history.{self.config.name}"}

    @property
    def dependencies(self) -> set[str]:
        # Depends on rendered output (since we want to capture final buffer)
        return {"render.output"}

    @property
    def inlet_types(self) -> set:
        # Accepts only rendered text buffers (list[str]).
        return {DataType.TEXT_BUFFER}

    @property
    def outlet_types(self) -> set:
        return {DataType.TEXT_BUFFER}  # Pass through unchanged

    def init(self, ctx: PipelineContext) -> bool:
        """Initialize framebuffer state in context.

        Creates empty history lists under this instance's key prefix.
        """
        prefix = f"framebuffer.{self.config.name}"
        ctx.set(f"{prefix}.history", [])
        ctx.set(f"{prefix}.intensity_history", [])
        return True

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Store frame in history and compute intensity.

        Args:
            data: Current text buffer (list[str])
            ctx: Pipeline context

        Returns:
            Same buffer (pass-through)
        """
        # Non-buffer data (anything that isn't a list) passes through untouched.
        if not isinstance(data, list):
            return data

        prefix = f"framebuffer.{self.config.name}"

        # Compute intensity map for current buffer (per-row, length = buffer rows)
        intensity_map = self._compute_buffer_intensity(data, len(data))

        # Store in context
        ctx.set(f"{prefix}.current_intensity", intensity_map)

        with self._lock:
            # Get existing histories
            history = ctx.get(f"{prefix}.history", [])
            intensity_hist = ctx.get(f"{prefix}.intensity_history", [])

            # Prepend current frame to history
            # (index 0 is always the most recent frame)
            history.insert(0, data.copy())
            intensity_hist.insert(0, intensity_map)

            # Trim to configured depth
            max_depth = self.config.history_depth
            ctx.set(f"{prefix}.history", history[:max_depth])
            ctx.set(f"{prefix}.intensity_history", intensity_hist[:max_depth])

        return data

    def _compute_buffer_intensity(
        self, buf: list[str], max_rows: int = 24
    ) -> list[float]:
        """Compute average intensity per row in buffer.

        Uses ANSI color if available; falls back to character density.

        Args:
            buf: Text buffer (list of strings)
            max_rows: Maximum number of rows to process

        Returns:
            List of intensity values (0.0-1.0) per row, padded with 0.0
            up to *max_rows* entries.
        """
        intensities = []
        # Limit to viewport height
        lines = buf[:max_rows]

        for line in lines:
            # Strip ANSI codes for length calc

            plain = _strip_ansi(line)
            if not plain:
                intensities.append(0.0)
                continue

            # Simple heuristic: ratio of non-space characters
            # More sophisticated version could parse ANSI RGB brightness
            filled = sum(1 for c in plain if c not in (" ", "\t"))
            total = len(plain)
            intensity = filled / total if total > 0 else 0.0
            intensities.append(max(0.0, min(1.0, intensity)))

        # Pad to max_rows if needed
        while len(intensities) < max_rows:
            intensities.append(0.0)

        return intensities

    def get_frame(
        self, index: int = 0, ctx: PipelineContext | None = None
    ) -> list[str] | None:
        """Get frame from history by index (0 = current, 1 = previous, etc)."""
        if ctx is None:
            return None
        prefix = f"framebuffer.{self.config.name}"
        history = ctx.get(f"{prefix}.history", [])
        if 0 <= index < len(history):
            return history[index]
        return None

    def get_intensity(
        self, index: int = 0, ctx: PipelineContext | None = None
    ) -> list[float] | None:
        """Get intensity map from history by index (0 = current)."""
        if ctx is None:
            return None
        prefix = f"framebuffer.{self.config.name}"
        intensity_hist = ctx.get(f"{prefix}.intensity_history", [])
        if 0 <= index < len(intensity_hist):
            return intensity_hist[index]
        return None

    def cleanup(self) -> None:
        """Cleanup resources (nothing to release; history lives in context)."""
        pass
diff --git a/sideline/plugins/__init__.py b/sideline/plugins/__init__.py
new file mode 100644
index 0000000..2e01575
--- /dev/null
+++ b/sideline/plugins/__init__.py
@@ -0,0 +1,26 @@
+"""
+Sideline Plugin System.
+
+This module provides the plugin framework for Sideline, allowing applications
+to extend the pipeline with custom stages, effects, and sources.
+
+Features:
+- Plugin base classes with metadata
+- Security permission system
+- Compatibility management
+- Entry point discovery
+"""
+
+from sideline.plugins.base import StagePlugin, Plugin, PluginMetadata
+from sideline.plugins.security import SecurityCapability, SecurityManager
+from sideline.plugins.compatibility import VersionConstraint, CompatibilityManager
+
+__all__ = [
+ "StagePlugin",
+ "Plugin", # Backward compatibility alias
+ "PluginMetadata",
+ "SecurityCapability",
+ "SecurityManager",
+ "VersionConstraint",
+ "CompatibilityManager",
+]
diff --git a/sideline/plugins/base.py b/sideline/plugins/base.py
new file mode 100644
index 0000000..c2d8ac6
--- /dev/null
+++ b/sideline/plugins/base.py
@@ -0,0 +1,78 @@
+"""
+Base classes for Sideline plugins.
+
+Provides Plugin base class and PluginMetadata for plugin registration.
+"""
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from typing import ClassVar, Set
+
+
@dataclass
class PluginMetadata:
    """Plugin metadata with security and compatibility information."""

    name: str
    version: str
    author: str
    description: str
    sideline_version: str  # Compatible Sideline version (semver constraint)
    permissions: Set[str] = field(default_factory=set)  # Required security permissions
    capabilities: Set[str] = field(default_factory=set)  # Provided capabilities

    def validate(self) -> None:
        """Validate metadata fields, raising ValueError on the first empty one.

        Note: ``description`` is not required to be non-empty.
        """
        required = (
            ("name", self.name),
            ("version", self.version),
            ("author", self.author),
            ("sideline_version", self.sideline_version),
        )
        for field_name, value in required:
            if not value:
                raise ValueError(f"Plugin {field_name} cannot be empty")
+
+
class StagePlugin(ABC):
    """Base class for Sideline stage plugins (distributable pipeline components).

    A StagePlugin is a distributable unit bundling one or more pipeline
    stages together with the metadata needed for security checks,
    compatibility checks, and versioning.

    Subclasses must implement:
    - validate_security(granted_permissions) -> bool
    """

    # Each concrete plugin class populates this with its descriptive metadata.
    metadata: ClassVar[PluginMetadata]

    @abstractmethod
    def validate_security(self, granted_permissions: Set[str]) -> bool:
        """Check if plugin has required permissions.

        Args:
            granted_permissions: Set of granted security permissions

        Returns:
            True if plugin has all required permissions
        """
        ...

    @classmethod
    def get_metadata(cls) -> PluginMetadata:
        """Return the plugin's metadata block."""
        return cls.metadata

    @classmethod
    def get_required_permissions(cls) -> Set[str]:
        """Return the security permissions this plugin needs to run."""
        return cls.metadata.permissions

    @classmethod
    def get_provided_capabilities(cls) -> Set[str]:
        """Return the capabilities this plugin provides."""
        return cls.metadata.capabilities


# Backward compatibility alias
Plugin = StagePlugin
diff --git a/sideline/plugins/compatibility.py b/sideline/plugins/compatibility.py
new file mode 100644
index 0000000..9cff5c1
--- /dev/null
+++ b/sideline/plugins/compatibility.py
@@ -0,0 +1,259 @@
+"""
+Compatibility management for Sideline plugins.
+
+Provides semantic version constraint checking and validation.
+"""
+
+import re
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+
+@dataclass
+class Version:
+ """Semantic version representation."""
+
+ major: int
+ minor: int
+ patch: int
+ pre_release: Optional[str] = None
+ build_metadata: Optional[str] = None
+
+ @classmethod
+ def parse(cls, version_str: str) -> "Version":
+ """Parse version string into Version object.
+
+ Supports formats like:
+ - 1.2.3
+ - 1.2.3-alpha
+ - 1.2.3-beta.1
+ - 1.2.3+build.123
+ """
+ # Remove build metadata if present
+ if "+" in version_str:
+ version_str, build_metadata = version_str.split("+", 1)
+ else:
+ build_metadata = None
+
+ # Parse pre-release if present
+ pre_release = None
+ if "-" in version_str:
+ version_str, pre_release = version_str.split("-", 1)
+
+ # Parse major.minor.patch
+ parts = version_str.split(".")
+ if len(parts) != 3:
+ raise ValueError(f"Invalid version format: {version_str}")
+
+ try:
+ major = int(parts[0])
+ minor = int(parts[1])
+ patch = int(parts[2])
+ except ValueError:
+ raise ValueError(f"Invalid version numbers: {version_str}")
+
+ return cls(major, minor, patch, pre_release, build_metadata)
+
+ def __str__(self) -> str:
+ result = f"{self.major}.{self.minor}.{self.patch}"
+ if self.pre_release:
+ result += f"-{self.pre_release}"
+ if self.build_metadata:
+ result += f"+{self.build_metadata}"
+ return result
+
+ def __lt__(self, other: "Version") -> bool:
+ if not isinstance(other, Version):
+ return NotImplemented
+
+ # Compare major.minor.patch
+ if (self.major, self.minor, self.patch) < (
+ other.major,
+ other.minor,
+ other.patch,
+ ):
+ return True
+ if (self.major, self.minor, self.patch) > (
+ other.major,
+ other.minor,
+ other.patch,
+ ):
+ return False
+
+ # Pre-release versions have lower precedence
+ if self.pre_release and not other.pre_release:
+ return True
+ if not self.pre_release and other.pre_release:
+ return False
+ if self.pre_release and other.pre_release:
+ return self.pre_release < other.pre_release
+
+ return False
+
+ def __le__(self, other: "Version") -> bool:
+ return self < other or self == other
+
+ def __gt__(self, other: "Version") -> bool:
+ return not (self <= other)
+
+ def __ge__(self, other: "Version") -> bool:
+ return not (self < other)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, Version):
+ return NotImplemented
+ return (
+ self.major == other.major
+ and self.minor == other.minor
+ and self.patch == other.patch
+ and self.pre_release == other.pre_release
+ and self.build_metadata == other.build_metadata
+ )
+
+ def __ne__(self, other: object) -> bool:
+ return not self.__eq__(other)
+
+
+class VersionConstraint:
+ """Semantic version constraint parser and evaluator."""
+
+ def __init__(self, constraint: str):
+ """Parse version constraint string.
+
+ Supports formats:
+ - "1.2.3" - exact version
+ - ">=1.2.3" - minimum version
+ - "<2.0.0" - maximum version
+ - ">=1.0.0,<2.0.0" - version range
+ - "~1.2.3" - pessimistic constraint (>=1.2.3,<1.3.0)
+ - "^1.2.3" - caret constraint (>=1.2.3,<2.0.0)
+ """
+ self.constraint_str = constraint
+ self.min_version: Optional[Version] = None
+ self.max_version: Optional[Version] = None
+ self.exact_version: Optional[Version] = None
+
+ self._parse(constraint)
+
+ def _parse(self, constraint: str) -> None:
+ """Parse constraint string."""
+ # Handle comma-separated constraints
+ if "," in constraint:
+ parts = [p.strip() for p in constraint.split(",")]
+ for part in parts:
+ self._parse_single(part)
+ else:
+ self._parse_single(constraint)
+
+ def _parse_single(self, constraint: str) -> None:
+ """Parse a single constraint."""
+ constraint = constraint.strip()
+
+ # Exact version
+ if not any(op in constraint for op in [">=", "<=", ">", "<", "~", "^"]):
+ self.exact_version = Version.parse(constraint)
+ return
+
+ # Operator-based constraints
+ if ">=" in constraint:
+ op, version_str = constraint.split(">=", 1)
+ self.min_version = Version.parse(version_str.strip())
+ elif "<=" in constraint:
+ op, version_str = constraint.split("<=", 1)
+ self.max_version = Version.parse(version_str.strip())
+ elif ">" in constraint:
+ op, version_str = constraint.split(">", 1)
+ # Strict greater than - increment patch version
+ v = Version.parse(version_str.strip())
+ self.min_version = Version(v.major, v.minor, v.patch + 1)
+ elif "<" in constraint:
+ op, version_str = constraint.split("<", 1)
+ # Strict less than - decrement patch version (simplified)
+ v = Version.parse(version_str.strip())
+ self.max_version = Version(v.major, v.minor, v.patch - 1)
+ elif "~" in constraint:
+ # Pessimistic constraint: ~1.2.3 means >=1.2.3,<1.3.0
+ version_str = constraint[1:] # Remove ~
+ v = Version.parse(version_str.strip())
+ self.min_version = v
+ self.max_version = Version(v.major, v.minor + 1, 0)
+ elif "^" in constraint:
+ # Caret constraint: ^1.2.3 means >=1.2.3,<2.0.0
+ version_str = constraint[1:] # Remove ^
+ v = Version.parse(version_str.strip())
+ self.min_version = v
+ self.max_version = Version(v.major + 1, 0, 0)
+
+ def is_compatible(self, version: Version | str) -> bool:
+ """Check if a version satisfies this constraint."""
+ if isinstance(version, str):
+ version = Version.parse(version)
+
+ # Exact version match
+ if self.exact_version is not None:
+ return version == self.exact_version
+
+ # Check minimum version
+ if self.min_version is not None:
+ if version < self.min_version:
+ return False
+
+ # Check maximum version
+ if self.max_version is not None:
+ if version >= self.max_version:
+ return False
+
+ return True
+
+ def __str__(self) -> str:
+ return self.constraint_str
+
+
class CompatibilityManager:
    """Manages plugin compatibility with Sideline."""

    @classmethod
    def get_sideline_version(cls) -> Version:
        """Get the current Sideline version.

        Raises:
            ValueError: If ``sideline.__version__`` is not a valid version string.
        """
        # Import here to avoid circular imports
        import sideline

        return Version.parse(sideline.__version__)

    @classmethod
    def validate_compatibility(cls, plugin_version_constraint: str) -> bool:
        """Validate plugin is compatible with current Sideline version.

        Args:
            plugin_version_constraint: Version constraint string from plugin metadata

        Returns:
            True if compatible, False otherwise
        """
        try:
            sideline_version = cls.get_sideline_version()
            constraint = VersionConstraint(plugin_version_constraint)
            return constraint.is_compatible(sideline_version)
        except Exception as e:
            # Unparsable versions/constraints are treated as incompatible
            # rather than crashing plugin discovery.
            import logging

            logger = logging.getLogger(__name__)
            logger.warning(f"Failed to validate compatibility: {e}")
            return False

    @classmethod
    def get_compatibility_error(cls, plugin_version_constraint: str) -> Optional[str]:
        """Get compatibility error message if incompatible.

        Returns:
            Error message string or None if compatible
        """
        if cls.validate_compatibility(plugin_version_constraint):
            return None

        # validate_compatibility may have returned False because the Sideline
        # version itself could not be parsed; an error reporter must not raise.
        try:
            current = str(cls.get_sideline_version())
        except Exception:
            current = "unknown"
        return (
            f"Plugin requires Sideline {plugin_version_constraint}, "
            f"but current version is {current}"
        )
diff --git a/sideline/plugins/security.py b/sideline/plugins/security.py
new file mode 100644
index 0000000..f08bf90
--- /dev/null
+++ b/sideline/plugins/security.py
@@ -0,0 +1,92 @@
+"""
+Security system for Sideline plugins.
+
+Provides permission-based security model for plugin execution.
+"""
+
+from enum import Enum, auto
+from typing import Set
+
+
class SecurityCapability(Enum):
    """Security capability/permission definitions."""

    READ = auto()  # Read access to buffer/data
    WRITE = auto()  # Write access to buffer
    NETWORK = auto()  # Network access
    FILESYSTEM = auto()  # File system access
    SYSTEM = auto()  # System information access

    def __str__(self) -> str:
        # Canonical string form, e.g. "security.read".
        return f"security.{self.name.lower()}"

    @classmethod
    def from_string(cls, permission: str) -> "SecurityCapability":
        """Parse security capability from string.

        Accepts both the dotted form produced by __str__ ("security.read")
        and the bare name ("read"), case-insensitively.

        Raises:
            ValueError: If the name matches no capability.
        """
        permission = permission.removeprefix("security.")
        try:
            return cls[permission.upper()]
        except KeyError as exc:
            raise ValueError(f"Unknown security capability: {permission}") from exc
+
+
class SecurityManager:
    """Manages security permissions for plugin execution.

    Permissions are stored internally in their canonical string form;
    SecurityCapability values are converted via str().
    """

    def __init__(self):
        self._granted_permissions: Set[str] = set()

    @staticmethod
    def _canonical(permission: SecurityCapability | str) -> str:
        """Normalize a capability or string to its string form."""
        if isinstance(permission, SecurityCapability):
            return str(permission)
        return permission

    def grant(self, permission: SecurityCapability | str) -> None:
        """Grant a security permission."""
        self._granted_permissions.add(self._canonical(permission))

    def revoke(self, permission: SecurityCapability | str) -> None:
        """Revoke a security permission."""
        self._granted_permissions.discard(self._canonical(permission))

    def has(self, permission: SecurityCapability | str) -> bool:
        """Check if a permission is granted."""
        return self._canonical(permission) in self._granted_permissions

    def has_all(self, permissions: Set[str]) -> bool:
        """Check if all permissions are granted."""
        for permission in permissions:
            if not self.has(permission):
                return False
        return True

    def get_granted(self) -> Set[str]:
        """Get a copy of all granted permissions."""
        return set(self._granted_permissions)

    def reset(self) -> None:
        """Reset all permissions."""
        self._granted_permissions.clear()
+
+
# Global security manager instance
_global_security = SecurityManager()


def get_global_security() -> SecurityManager:
    """Return the process-wide SecurityManager singleton."""
    return _global_security


def grant(permission: SecurityCapability | str) -> None:
    """Grant a permission on the global security manager."""
    get_global_security().grant(permission)


def revoke(permission: SecurityCapability | str) -> None:
    """Revoke a permission on the global security manager."""
    get_global_security().revoke(permission)


def has(permission: SecurityCapability | str) -> bool:
    """Check whether the global security manager has granted a permission."""
    return get_global_security().has(permission)
diff --git a/sideline/preset_packs/__init__.py b/sideline/preset_packs/__init__.py
new file mode 100644
index 0000000..35b2250
--- /dev/null
+++ b/sideline/preset_packs/__init__.py
@@ -0,0 +1,17 @@
+"""
+Preset pack system for Sideline.
+
+Allows bundling plugins, presets, and configurations into distributable packs
+with ASCII art encoding for fun and version control friendly storage.
+"""
+
+from sideline.preset_packs.pack_format import PresetPack, PresetPackMetadata
+from sideline.preset_packs.manager import PresetPackManager
+from sideline.preset_packs.encoder import PresetPackEncoder
+
+__all__ = [
+ "PresetPack",
+ "PresetPackMetadata",
+ "PresetPackManager",
+ "PresetPackEncoder",
+]
diff --git a/sideline/preset_packs/encoder.py b/sideline/preset_packs/encoder.py
new file mode 100644
index 0000000..c024c58
--- /dev/null
+++ b/sideline/preset_packs/encoder.py
@@ -0,0 +1,211 @@
+"""
+Preset pack encoder with ASCII art compression.
+
+Compresses plugin code and encodes it as ASCII art for fun and version control.
+"""
+
+import base64
+import zlib
+import textwrap
+from typing import Tuple
+
+
class PresetPackEncoder:
    """Encodes and decodes preset packs with ASCII art compression.

    Payloads are zlib-compressed, base64-encoded, and framed with
    box-drawing characters. The framed base64 text is fully recoverable;
    the ``DATA_CHARS`` visual mapping (``_data_to_visual``) is lossy and
    intended for display only.
    """

    # ASCII art frame characters
    FRAME_TOP_LEFT = "┌"
    FRAME_TOP_RIGHT = "┐"
    FRAME_BOTTOM_LEFT = "└"
    FRAME_BOTTOM_RIGHT = "┘"
    FRAME_HORIZONTAL = "─"
    FRAME_VERTICAL = "│"

    # Data block characters (for visual representation)
    DATA_CHARS = " ░▒▓█"

    @classmethod
    def encode_plugin_code(cls, code: str, name: str = "plugin") -> str:
        """Encode plugin code as ASCII art.

        Args:
            code: Python source code to encode
            name: Plugin name for metadata

        Returns:
            ASCII art encoded plugin code
        """
        compressed = zlib.compress(code.encode("utf-8"))
        b64 = base64.b64encode(compressed).decode("ascii")
        return cls._wrap_in_ascii_art(b64, name)

    @classmethod
    def decode_plugin_code(cls, ascii_art: str) -> str:
        """Decode ASCII art to plugin code.

        Args:
            ascii_art: ASCII art encoded plugin code

        Returns:
            Decoded Python source code
        """
        b64 = cls._extract_from_ascii_art(ascii_art)
        compressed = base64.b64decode(b64)
        return zlib.decompress(compressed).decode("utf-8")

    @classmethod
    def _wrap_in_ascii_art(cls, data: str, name: str) -> str:
        """Wrap data in an ASCII art frame with a centered name header."""
        max_line_length = 60
        lines = textwrap.wrap(data, max_line_length)

        longest_line = max(len(line) for line in lines) if lines else 0
        # The frame must be wide enough for both the payload lines and the
        # " name " header (previously a long name produced negative padding).
        frame_width = max(longest_line, len(name)) + 4  # 2 padding + 2 borders

        result = []

        # Top border
        result.append(
            cls.FRAME_TOP_LEFT
            + cls.FRAME_HORIZONTAL * (frame_width - 2)
            + cls.FRAME_TOP_RIGHT
        )

        # Plugin name header, centered
        name_line = f" {name} "
        name_padding = frame_width - 2 - len(name_line)
        left_pad = name_padding // 2
        right_pad = name_padding - left_pad
        result.append(
            cls.FRAME_VERTICAL
            + " " * left_pad
            + name_line
            + " " * right_pad
            + cls.FRAME_VERTICAL
        )

        # Separator line
        result.append(
            cls.FRAME_VERTICAL
            + cls.FRAME_HORIZONTAL * (frame_width - 2)
            + cls.FRAME_VERTICAL
        )

        # Data lines, left-aligned and padded to the frame width
        for line in lines:
            padding = frame_width - 2 - len(line)
            result.append(
                cls.FRAME_VERTICAL + line + " " * padding + cls.FRAME_VERTICAL
            )

        # Bottom border
        result.append(
            cls.FRAME_BOTTOM_LEFT
            + cls.FRAME_HORIZONTAL * (frame_width - 2)
            + cls.FRAME_BOTTOM_RIGHT
        )

        return "\n".join(result)

    @classmethod
    def _extract_from_ascii_art(cls, ascii_art: str) -> str:
        """Extract base64 data from an ASCII art frame."""
        lines = ascii_art.strip().split("\n")

        # Skip top border, name header, separator (first 3 lines) and the
        # bottom border (last line); everything in between is payload.
        data_lines = lines[3:-1]

        extracted = []
        for line in data_lines:
            if len(line) > 2:
                # Strip the vertical borders and the right-side padding
                # (base64 contains no spaces, so rstrip is safe).
                extracted.append(line[1:-1].rstrip())

        return "".join(extracted)

    @classmethod
    def encode_toml(cls, toml_data: str, name: str = "pack") -> str:
        """Encode TOML data as ASCII art.

        The payload is framed as recoverable base64 (exactly like
        encode_plugin_code) so that decode_toml can restore it. The lossy
        DATA_CHARS visual mapping is NOT used here — it previously made the
        encode_toml/decode_toml round trip impossible.

        Args:
            toml_data: TOML configuration data
            name: Pack name

        Returns:
            ASCII art encoded TOML
        """
        compressed = zlib.compress(toml_data.encode("utf-8"))
        b64 = base64.b64encode(compressed).decode("ascii")
        return cls._wrap_in_ascii_art(b64, name)

    @classmethod
    def decode_toml(cls, ascii_art: str) -> str:
        """Decode ASCII art to TOML data.

        Args:
            ascii_art: ASCII art encoded TOML

        Returns:
            Decoded TOML data
        """
        b64 = cls._extract_from_ascii_art(ascii_art)
        compressed = base64.b64decode(b64)
        return zlib.decompress(compressed).decode("utf-8")

    @classmethod
    def _data_to_visual(cls, data: str) -> str:
        """Convert base64 data to a purely decorative visual representation.

        This mapping (ord % len(DATA_CHARS)) is lossy: it is for display
        only and can never be decoded back.
        """
        visual = ""
        for i, char in enumerate(data):
            visual += cls.DATA_CHARS[ord(char) % len(cls.DATA_CHARS)]
            # Wrap the pattern every 60 characters for visual appeal.
            if (i + 1) % 60 == 0:
                visual += "\n"

        return visual

    @classmethod
    def get_visual_representation(cls, data: str) -> str:
        """Get a decorative visual representation of data for display."""
        compressed = zlib.compress(data.encode("utf-8"))
        b64 = base64.b64encode(compressed).decode("ascii")
        return cls._data_to_visual(b64)
diff --git a/sideline/preset_packs/manager.py b/sideline/preset_packs/manager.py
new file mode 100644
index 0000000..01771b2
--- /dev/null
+++ b/sideline/preset_packs/manager.py
@@ -0,0 +1,194 @@
+"""
+Preset pack manager for loading, validating, and managing preset packs.
+"""
+
+import logging
+import os
+from typing import Dict, List, Optional
+
+import tomli
+
+from sideline.preset_packs.pack_format import PresetPack, PresetPackMetadata
+from sideline.preset_packs.encoder import PresetPackEncoder
+from sideline.plugins.compatibility import CompatibilityManager
+
+logger = logging.getLogger(__name__)
+
+
class PresetPackManager:
    """Manages preset pack loading and validation."""

    def __init__(self, pack_dir: Optional[str] = None):
        """Initialize preset pack manager.

        Args:
            pack_dir: Directory to search for preset packs
        """
        self.pack_dir = pack_dir or os.path.expanduser("~/.config/sideline/packs")
        # Loaded packs, keyed by pack name.
        self._packs: Dict[str, PresetPack] = {}

    def load_pack(self, pack_path: str) -> Optional[PresetPack]:
        """Load a preset pack from a file.

        Args:
            pack_path: Path to the preset pack file (.tpack or .toml)

        Returns:
            Loaded PresetPack or None if failed
        """
        try:
            with open(pack_path, "rb") as f:
                if pack_path.endswith(".toml"):
                    data = tomli.load(f)
                    pack = PresetPack.from_dict(data)
                elif pack_path.endswith(".tpack"):
                    # ASCII-art encoded pack; decode back to TOML first.
                    content = f.read().decode("utf-8")
                    pack = self._load_ascii_pack(content)
                else:
                    logger.error(f"Unknown file format: {pack_path}")
                    return None

            # Reject packs whose declared Sideline constraint does not match
            # the running Sideline version.
            if not CompatibilityManager.validate_compatibility(
                pack.metadata.sideline_version
            ):
                error = CompatibilityManager.get_compatibility_error(
                    pack.metadata.sideline_version
                )
                logger.warning(f"Pack {pack.metadata.name} incompatible: {error}")
                return None

            self._packs[pack.metadata.name] = pack
            logger.info(
                f"Loaded preset pack: {pack.metadata.name} v{pack.metadata.version}"
            )
            return pack

        except Exception as e:
            logger.error(f"Failed to load preset pack {pack_path}: {e}")
            return None

    def _load_ascii_pack(self, content: str) -> PresetPack:
        """Load pack from ASCII art encoded content."""
        # tomli is imported at module level; no local re-import needed.
        toml_data = PresetPackEncoder.decode_toml(content)
        data = tomli.loads(toml_data)
        return PresetPack.from_dict(data)

    def load_directory(self, directory: Optional[str] = None) -> List[PresetPack]:
        """Load all preset packs from a directory.

        Args:
            directory: Directory to search (defaults to pack_dir)

        Returns:
            List of loaded PresetPack objects
        """
        directory = directory or self.pack_dir

        if not os.path.exists(directory):
            logger.warning(f"Preset pack directory does not exist: {directory}")
            return []

        loaded = []
        for filename in os.listdir(directory):
            if filename.endswith((".toml", ".tpack")):
                pack = self.load_pack(os.path.join(directory, filename))
                if pack:
                    loaded.append(pack)

        return loaded

    def save_pack(
        self, pack: PresetPack, output_path: str, format: str = "toml"
    ) -> bool:
        """Save a preset pack to a file.

        Args:
            pack: PresetPack to save
            output_path: Path to save the pack
            format: Output format ("toml" or "tpack")

        Returns:
            True if successful, False otherwise
        """
        try:
            if format == "toml":
                import tomli_w

                # tomli_w.dump writes bytes; the file must be opened in
                # binary mode (text mode made dump() raise a TypeError).
                with open(output_path, "wb") as f:
                    tomli_w.dump(pack.to_dict(), f)
            elif format == "tpack":
                # Encode as ASCII art
                toml_data = self._pack_to_toml(pack)
                ascii_art = PresetPackEncoder.encode_toml(toml_data, pack.metadata.name)

                with open(output_path, "w") as f:
                    f.write(ascii_art)
            else:
                logger.error(f"Unknown format: {format}")
                return False

            logger.info(f"Saved preset pack: {output_path}")
            return True

        except Exception as e:
            logger.error(f"Failed to save preset pack: {e}")
            return False

    def _pack_to_toml(self, pack: PresetPack) -> str:
        """Convert PresetPack to TOML string."""
        import tomli_w

        return tomli_w.dumps(pack.to_dict())

    def get_pack(self, name: str) -> Optional[PresetPack]:
        """Get a loaded preset pack by name."""
        return self._packs.get(name)

    def list_packs(self) -> List[str]:
        """List all loaded preset pack names."""
        return list(self._packs.keys())

    def register_pack_plugins(self, pack: PresetPack):
        """Register all plugins from a preset pack.

        Args:
            pack: PresetPack containing plugins
        """
        from sideline.pipeline import StageRegistry

        for plugin_entry in pack.plugins:
            try:
                code = PresetPackEncoder.decode_plugin_code(plugin_entry.encoded_code)

                # SECURITY: exec() runs arbitrary pack-supplied code with full
                # interpreter privileges. Only load packs from trusted sources;
                # consider sandboxing or signature verification before enabling
                # untrusted packs.
                local_ns = {}
                exec(code, local_ns)

                # Find the plugin class: first class in the executed namespace
                # that carries a `metadata` attribute.
                plugin_class = None
                for obj in local_ns.values():
                    if isinstance(obj, type) and hasattr(obj, "metadata"):
                        plugin_class = obj
                        break

                if plugin_class:
                    StageRegistry.register(plugin_entry.category, plugin_class)
                    logger.info(f"Registered plugin: {plugin_entry.name}")
                else:
                    logger.warning(f"No plugin class found in {plugin_entry.name}")

            except Exception as e:
                logger.error(f"Failed to register plugin {plugin_entry.name}: {e}")
diff --git a/sideline/preset_packs/pack_format.py b/sideline/preset_packs/pack_format.py
new file mode 100644
index 0000000..a02548c
--- /dev/null
+++ b/sideline/preset_packs/pack_format.py
@@ -0,0 +1,127 @@
+"""
+Preset pack format definition.
+
+Defines the structure of preset packs and their TOML-based configuration.
+"""
+
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional
+
+
@dataclass
class PresetPackMetadata:
    """Metadata for a preset pack."""

    name: str
    version: str
    author: str
    description: str
    sideline_version: str  # Compatible Sideline version
    created: Optional[str] = None  # ISO 8601 timestamp
    tags: List[str] = field(default_factory=list)

    def to_dict(self) -> Dict:
        """Convert to dictionary for TOML serialization.

        The optional ``created`` timestamp is omitted when unset: TOML has
        no null value, so serializing ``None`` would make writers fail.
        """
        data = {
            "name": self.name,
            "version": self.version,
            "author": self.author,
            "description": self.description,
            "sideline_version": self.sideline_version,
            "tags": self.tags,
        }
        if self.created is not None:
            data["created"] = self.created
        return data

    @classmethod
    def from_dict(cls, data: Dict) -> "PresetPackMetadata":
        """Create from dictionary; ``created`` and ``tags`` are optional."""
        return cls(
            name=data["name"],
            version=data["version"],
            author=data["author"],
            description=data["description"],
            sideline_version=data["sideline_version"],
            created=data.get("created"),
            tags=data.get("tags", []),
        )
+
+
@dataclass
class PluginEntry:
    """Entry for a plugin in the preset pack.

    Note: the dataclass field is ``encoded_code`` but the serialized key
    is ``code``.
    """

    name: str
    category: str  # source, effect, display, camera
    encoded_code: str  # ASCII art encoded plugin code
    permissions: List[str] = field(default_factory=list)
    capabilities: List[str] = field(default_factory=list)

    def to_dict(self) -> Dict:
        """Serialize this entry to a TOML-compatible dictionary."""
        result: Dict = {}
        result["name"] = self.name
        result["category"] = self.category
        result["code"] = self.encoded_code
        result["permissions"] = self.permissions
        result["capabilities"] = self.capabilities
        return result

    @classmethod
    def from_dict(cls, data: Dict) -> "PluginEntry":
        """Build a PluginEntry from its serialized form."""
        return cls(
            data["name"],
            data["category"],
            data["code"],
            data.get("permissions", []),
            data.get("capabilities", []),
        )
+
+
@dataclass
class PresetEntry:
    """Entry for a preset in the preset pack."""

    name: str
    config: Dict  # Preset configuration (TOML-compatible)

    def to_dict(self) -> Dict:
        """Serialize this preset entry for TOML output."""
        return {"name": self.name, "config": self.config}

    @classmethod
    def from_dict(cls, data: Dict) -> "PresetEntry":
        """Build a PresetEntry from its serialized form."""
        return cls(data["name"], data["config"])
+
+
@dataclass
class PresetPack:
    """Complete preset pack with metadata, plugins, and presets."""

    metadata: PresetPackMetadata
    plugins: List[PluginEntry] = field(default_factory=list)
    presets: List[PresetEntry] = field(default_factory=list)

    def to_dict(self) -> Dict:
        """Serialize the pack (metadata, plugins, presets) for TOML output."""
        payload: Dict = {"pack": self.metadata.to_dict()}
        payload["plugins"] = [plugin.to_dict() for plugin in self.plugins]
        payload["presets"] = [preset.to_dict() for preset in self.presets]
        return payload

    @classmethod
    def from_dict(cls, data: Dict) -> "PresetPack":
        """Deserialize a pack; missing plugin/preset lists default to empty."""
        return cls(
            PresetPackMetadata.from_dict(data["pack"]),
            [PluginEntry.from_dict(entry) for entry in data.get("plugins", [])],
            [PresetEntry.from_dict(entry) for entry in data.get("presets", [])],
        )
diff --git a/sideline/render/__init__.py b/sideline/render/__init__.py
new file mode 100644
index 0000000..e2130c0
--- /dev/null
+++ b/sideline/render/__init__.py
@@ -0,0 +1,37 @@
+"""Modern block rendering system - OTF font to terminal half-block conversion.
+
+This module provides the core rendering capabilities for big block letters
+and styled text output using PIL fonts and ANSI terminal rendering.
+
+Exports:
+ - make_block: Render a headline into a content block with color
+ - big_wrap: Word-wrap text and render with OTF font
+ - render_line: Render a line of text as terminal rows using half-blocks
+ - font_for_lang: Get appropriate font for a language
+ - clear_font_cache: Reset cached font objects
+ - lr_gradient: Color block characters with left-to-right gradient
+ - lr_gradient_opposite: Complementary gradient coloring
+"""
+
+from sideline.render.blocks import (
+ big_wrap,
+ clear_font_cache,
+ font_for_lang,
+ list_font_faces,
+ load_font_face,
+ make_block,
+ render_line,
+)
+from sideline.render.gradient import lr_gradient, lr_gradient_opposite
+
+__all__ = [
+ "big_wrap",
+ "clear_font_cache",
+ "font_for_lang",
+ "list_font_faces",
+ "load_font_face",
+ "lr_gradient",
+ "lr_gradient_opposite",
+ "make_block",
+ "render_line",
+]
diff --git a/sideline/render/blocks.py b/sideline/render/blocks.py
new file mode 100644
index 0000000..00f6184
--- /dev/null
+++ b/sideline/render/blocks.py
@@ -0,0 +1,245 @@
+"""Block rendering core - Font loading, text rasterization, word-wrap, and headline assembly.
+
+Provides PIL font-based rendering to terminal half-block characters.
+"""
+
+import random
+import re
+from pathlib import Path
+from typing import Optional, Tuple
+
+from PIL import Image, ImageDraw, ImageFont
+
+from sideline.fonts import get_default_font_path, get_default_font_size
+
+
+# ─── FONT LOADING ─────────────────────────────────────────
+_FONT_OBJ = None
+_FONT_OBJ_KEY = None
+_FONT_CACHE = {}
+
+
+def font():
+    """Lazy-load the default Sideline font."""
+    global _FONT_OBJ, _FONT_OBJ_KEY
+
+    try:
+        font_path = get_default_font_path()
+        font_size = get_default_font_size()
+    except FileNotFoundError:
+        # Fallback to system default if Sideline font not found
+        return ImageFont.load_default()  # NOTE: bypasses the cache — reloaded on every call
+
+    key = (font_path, font_size)  # cache key: reload only when path or size changes
+    if _FONT_OBJ is None or key != _FONT_OBJ_KEY:
+        try:
+            _FONT_OBJ = ImageFont.truetype(font_path, font_size)
+            _FONT_OBJ_KEY = key
+        except Exception:
+            # If loading fails, fall back to system default
+            _FONT_OBJ = ImageFont.load_default()  # key is recorded so we don't retry each call
+            _FONT_OBJ_KEY = key
+    return _FONT_OBJ
+
+
+def clear_font_cache():
+ """Reset cached font objects."""
+ global _FONT_OBJ, _FONT_OBJ_KEY
+ _FONT_OBJ = None
+ _FONT_OBJ_KEY = None
+
+
+def load_font_face(font_path, font_index=0, size=None):
+ """Load a specific face from a font file or collection."""
+ if size is None:
+ size = get_default_font_size()
+ return ImageFont.truetype(font_path, size, index=font_index)
+
+
+def list_font_faces(font_path, max_faces=64):
+ """Return discoverable face indexes + display names from a font file."""
+ faces = []
+ for idx in range(max_faces):
+ try:
+ fnt = load_font_face(font_path, idx)
+ except Exception:
+ if idx == 0:
+ raise
+ break
+ family, style = fnt.getname()
+ display = f"{family} {style}".strip()
+ if not display:
+ display = f"{Path(font_path).stem} [{idx}]"
+ faces.append({"index": idx, "name": display})
+ return faces
+
+
+def font_for_lang(lang: Optional[str] = None):
+    """Get appropriate font for a language.
+
+    Currently uses the default Sideline font for all languages.
+    Language-specific fonts can be added via the font cache system.
+    """
+    if lang is None:
+        return font()
+    if lang not in _FONT_CACHE:
+        # Try to load language-specific font, fall back to default
+        try:
+            # Could add language-specific font logic here
+            _FONT_CACHE[lang] = font()
+        except Exception:
+            _FONT_CACHE[lang] = font()  # identical to the try-branch today; except is redundant until per-language loading exists
+    return _FONT_CACHE[lang]
+
+
+# ─── RASTERIZATION ────────────────────────────────────────
+def render_line(text, fnt=None):
+ """Render a line of text as terminal rows using OTF font + half-blocks."""
+ if fnt is None:
+ fnt = font()
+ bbox = fnt.getbbox(text)
+ if not bbox or bbox[2] <= bbox[0]:
+ return [""]
+ pad = 4
+ img_w = bbox[2] - bbox[0] + pad * 2
+ img_h = bbox[3] - bbox[1] + pad * 2
+ img = Image.new("L", (img_w, img_h), 0)
+ draw = ImageDraw.Draw(img)
+ draw.text((-bbox[0] + pad, -bbox[1] + pad), text, fill=255, font=fnt)
+
+ # Rendering parameters (can be made configurable)
+ render_h = 6 # Terminal rows per rendered line
+ ssaa = 2 # Supersampling anti-aliasing factor
+
+ pix_h = render_h * 2
+ hi_h = pix_h * ssaa
+ scale = hi_h / max(img_h, 1)
+ new_w_hi = max(1, int(img_w * scale))
+ img = img.resize((new_w_hi, hi_h), Image.Resampling.LANCZOS)
+ new_w = max(1, int(new_w_hi / ssaa))
+ img = img.resize((new_w, pix_h), Image.Resampling.LANCZOS)
+ data = img.tobytes()
+ thr = 80
+ rows = []
+ for y in range(0, pix_h, 2):
+ row = []
+ for x in range(new_w):
+ top = data[y * new_w + x] > thr
+ bot = data[(y + 1) * new_w + x] > thr if y + 1 < pix_h else False
+ if top and bot:
+ row.append("█")
+ elif top:
+ row.append("▀")
+ elif bot:
+ row.append("▄")
+ else:
+ row.append(" ")
+ rows.append("".join(row))
+ return rows
+
+
+def big_wrap(text: str, width: int, fnt=None) -> list[str]:
+    """Wrap text and render to big block characters."""
+    if fnt is None:
+        fnt = font()
+    text = re.sub(r"\s+", " ", text.upper())
+    words = text.split()
+    lines = []
+    cur = ""
+
+    # Get font size for height calculation
+    try:
+        font_size = fnt.size if hasattr(fnt, "size") else get_default_font_size()  # NOTE(review): unused below — dead code, remove or use
+    except Exception:
+        font_size = get_default_font_size()
+
+    render_h = 6  # Terminal rows per rendered line
+
+    for word in words:
+        test = f"{cur} {word}".strip() if cur else word
+        bbox = fnt.getbbox(test)
+        if bbox:
+            img_h = bbox[3] - bbox[1] + 8
+            pix_h = render_h * 2
+            scale = pix_h / max(img_h, 1)
+            term_w = int((bbox[2] - bbox[0] + 8) * scale)  # projected terminal width after render_line's downscale
+        else:
+            term_w = 0
+        max_term_w = width - 4 - 4  # leave 4+4 columns of margin
+        if term_w > max_term_w and cur:
+            lines.append(cur)
+            cur = word
+        else:
+            cur = test
+    if cur:
+        lines.append(cur)
+    out = []
+    for i, ln in enumerate(lines):
+        out.extend(render_line(ln, fnt))
+        if i < len(lines) - 1:
+            out.append("")  # blank spacer row between wrapped lines
+    return out
+
+
+# ─── HEADLINE BLOCK ASSEMBLY ─────────────────────────────
+def make_block(title: str, src: str, ts: str, w: int) -> Tuple[list[str], str, int]:
+ """Render a headline into a content block with color.
+
+ Args:
+ title: Headline text to render
+ src: Source identifier (for metadata)
+ ts: Timestamp string (for metadata)
+ w: Width constraint in terminal characters
+
+ Returns:
+ tuple: (content_lines, color_code, meta_row_index)
+ - content_lines: List of rendered text lines
+ - color_code: ANSI color code for display
+ - meta_row_index: Row index of metadata line
+ """
+ # Use default font for all languages (simplified from original)
+ lang_font = font()
+
+ # Simple uppercase conversion (can be made language-aware later)
+ title_up = re.sub(r"\s+", " ", title.upper())
+
+ # Standardize quotes and dashes
+ for old, new in [
+ ("\u2019", "'"),
+ ("\u2018", "'"),
+ ("\u201c", '"'),
+ ("\u201d", '"'),
+ ("\u2013", "-"),
+ ("\u2014", "-"),
+ ]:
+ title_up = title_up.replace(old, new)
+
+ big_rows = big_wrap(title_up, w - 4, lang_font)
+
+ # Matrix-style color selection
+ hc = random.choice(
+ [
+ "\033[38;5;46m", # matrix green
+ "\033[38;5;34m", # dark green
+ "\033[38;5;82m", # lime
+ "\033[38;5;48m", # sea green
+ "\033[38;5;37m", # teal
+ "\033[38;5;44m", # cyan
+ "\033[38;5;87m", # sky
+ "\033[38;5;117m", # ice blue
+ "\033[38;5;250m", # cool white
+ "\033[38;5;156m", # pale green
+ "\033[38;5;120m", # mint
+ "\033[38;5;80m", # dark cyan
+ "\033[38;5;108m", # grey-green
+ "\033[38;5;115m", # sage
+ "\033[1;38;5;46m", # bold green
+ "\033[1;38;5;250m", # bold white
+ ]
+ )
+
+ content = [" " + r for r in big_rows]
+ content.append("")
+ meta = f"\u2591 {src} \u00b7 {ts}"
+ content.append(" " * max(2, w - len(meta) - 2) + meta)
+ return content, hc, len(content) - 1 # (rows, color, meta_row_index)
diff --git a/sideline/render/gradient.py b/sideline/render/gradient.py
new file mode 100644
index 0000000..1c73176
--- /dev/null
+++ b/sideline/render/gradient.py
@@ -0,0 +1,136 @@
+"""Gradient coloring for rendered block characters.
+
+Provides left-to-right and complementary gradient effects for terminal display.
+"""
+
+from sideline.terminal import RST
+
+# Left → right: white-hot leading edge fades to near-black
+GRAD_COLS = [
+ "\033[1;38;5;231m", # white
+ "\033[1;38;5;195m", # pale cyan-white
+ "\033[38;5;123m", # bright cyan
+ "\033[38;5;118m", # bright lime
+ "\033[38;5;82m", # lime
+ "\033[38;5;46m", # bright green
+ "\033[38;5;40m", # green
+ "\033[38;5;34m", # medium green
+ "\033[38;5;28m", # dark green
+ "\033[38;5;22m", # deep green
+ "\033[2;38;5;22m", # dim deep green
+ "\033[2;38;5;235m", # near black
+]
+
+# Complementary sweep for queue messages (opposite hue family from ticker greens)
+MSG_GRAD_COLS = [
+ "\033[1;38;5;231m", # white
+ "\033[1;38;5;225m", # pale pink-white
+ "\033[38;5;219m", # bright pink
+ "\033[38;5;213m", # hot pink
+ "\033[38;5;207m", # magenta
+ "\033[38;5;201m", # bright magenta
+ "\033[38;5;165m", # orchid-red
+ "\033[38;5;161m", # ruby-magenta
+ "\033[38;5;125m", # dark magenta
+ "\033[38;5;89m", # deep maroon-magenta
+ "\033[2;38;5;89m", # dim deep maroon-magenta
+ "\033[2;38;5;235m", # near black
+]
+
+
+def lr_gradient(rows, offset=0.0, grad_cols=None):
+    """Color each non-space block character with a shifting left-to-right gradient.
+
+    Args:
+        rows: List of text lines with block characters
+        offset: Gradient offset (0.0-1.0) for animation
+        grad_cols: List of ANSI color codes (default: GRAD_COLS)
+
+    Returns:
+        List of lines with gradient coloring applied
+    """
+    cols = grad_cols or GRAD_COLS  # falsy grad_cols (None/empty) falls back to the default palette
+    n = len(cols)
+    max_x = max((len(r.rstrip()) for r in rows if r.strip()), default=1)  # widest visible row defines the gradient span
+    out = []
+    for row in rows:
+        if not row.strip():
+            out.append(row)  # blank rows pass through uncolored
+            continue
+        buf = []
+        for x, ch in enumerate(row):
+            if ch == " ":
+                buf.append(" ")
+            else:
+                shifted = (x / max(max_x - 1, 1) + offset) % 1.0  # fractional position plus offset, wrapped to [0, 1)
+                idx = min(round(shifted * (n - 1)), n - 1)  # clamp to last palette entry
+                buf.append(f"{cols[idx]}{ch}{RST}")  # reset after every char so gaps stay unstyled
+        out.append("".join(buf))
+    return out
+
+
+def lr_gradient_opposite(rows, offset=0.0):
+ """Complementary (opposite wheel) gradient used for queue message panels.
+
+ Args:
+ rows: List of text lines with block characters
+ offset: Gradient offset (0.0-1.0) for animation
+
+ Returns:
+ List of lines with complementary gradient coloring applied
+ """
+ return lr_gradient(rows, offset, MSG_GRAD_COLS)
+
+
+def msg_gradient(rows, offset):
+ """Apply message (ntfy) gradient using theme complementary colors.
+
+ Returns colored rows using ACTIVE_THEME.message_gradient if available,
+ falling back to default magenta if no theme is set.
+
+ Args:
+ rows: List of text strings to colorize
+ offset: Gradient offset (0.0-1.0) for animation
+
+ Returns:
+ List of rows with ANSI color codes applied
+ """
+ from engine import config
+
+ # Check if theme is set and use it
+ if config.ACTIVE_THEME:
+ cols = _color_codes_to_ansi(config.ACTIVE_THEME.message_gradient)
+ else:
+ # Fallback to default magenta gradient
+ cols = MSG_GRAD_COLS
+
+ return lr_gradient(rows, offset, cols)
+
+
+def _color_codes_to_ansi(color_codes):
+ """Convert a list of 256-color codes to ANSI escape code strings.
+
+ Pattern: first 2 are bold, middle 8 are normal, last 2 are dim.
+
+ Args:
+ color_codes: List of 12 integers (256-color palette codes)
+
+ Returns:
+ List of ANSI escape code strings
+ """
+ if not color_codes or len(color_codes) != 12:
+ # Fallback to default green if invalid
+ return GRAD_COLS
+
+ result = []
+ for i, code in enumerate(color_codes):
+ if i < 2:
+ # Bold for first 2 (bright leading edge)
+ result.append(f"\033[1;38;5;{code}m")
+ elif i < 10:
+ # Normal for middle 8
+ result.append(f"\033[38;5;{code}m")
+ else:
+ # Dim for last 2 (dark trailing edge)
+ result.append(f"\033[2;38;5;{code}m")
+ return result
diff --git a/sideline/sensors/__init__.py b/sideline/sensors/__init__.py
new file mode 100644
index 0000000..4ba8123
--- /dev/null
+++ b/sideline/sensors/__init__.py
@@ -0,0 +1,203 @@
+"""
+Sensor framework - PureData-style real-time input system.
+
+Sensors are data sources that emit values over time, similar to how
+PureData objects emit signals. Effects can bind to sensors to modulate
+their parameters dynamically.
+
+Architecture:
+- Sensor: Base class for all sensors (mic, camera, ntfy, OSC, etc.)
+- SensorRegistry: Global registry for sensor discovery
+- SensorStage: Pipeline stage wrapper for sensors
+- Effect param_bindings: Declarative sensor-to-param routing
+
+Example:
+ class GlitchEffect(EffectPlugin):
+ param_bindings = {
+ "intensity": {"sensor": "mic", "transform": "linear"},
+ }
+
+This binds the mic sensor to the glitch intensity parameter.
+"""
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+ from sideline.pipeline.core import PipelineContext
+
+
+@dataclass
+class SensorValue:
+ """A sensor reading with metadata."""
+
+ sensor_name: str
+ value: float
+ timestamp: float
+ unit: str = ""
+
+
+class Sensor(ABC):
+ """Abstract base class for sensors.
+
+ Sensors are real-time data sources that emit values. They can be:
+ - Physical: mic, camera, joystick, MIDI, OSC
+ - Virtual: ntfy, timer, random, noise
+
+ Each sensor has a name and emits SensorValue objects.
+ """
+
+ name: str
+ unit: str = ""
+
+ @property
+ def available(self) -> bool:
+ """Whether the sensor is currently available."""
+ return True
+
+ @abstractmethod
+ def read(self) -> SensorValue | None:
+ """Read current sensor value.
+
+ Returns:
+ SensorValue if available, None if sensor is not ready.
+ """
+ ...
+
+ @abstractmethod
+ def start(self) -> bool:
+ """Start the sensor.
+
+ Returns:
+ True if started successfully.
+ """
+ ...
+
+ @abstractmethod
+ def stop(self) -> None:
+ """Stop the sensor and release resources."""
+ ...
+
+
+class SensorRegistry:
+    """Global registry for sensors.
+
+    Provides:
+    - Registration of sensor instances
+    - Lookup by name
+    - Global start/stop
+    """
+
+    _sensors: dict[str, Sensor] = {}  # class-level state: one registry shared process-wide
+    _started: bool = False
+
+    @classmethod
+    def register(cls, sensor: Sensor) -> None:
+        """Register a sensor instance."""
+        cls._sensors[sensor.name] = sensor  # re-registering a name replaces the previous sensor
+
+    @classmethod
+    def get(cls, name: str) -> Sensor | None:
+        """Get a sensor by name."""
+        return cls._sensors.get(name)
+
+    @classmethod
+    def list_sensors(cls) -> list[str]:
+        """List all registered sensor names."""
+        return list(cls._sensors.keys())
+
+    @classmethod
+    def start_all(cls) -> bool:
+        """Start all sensors.
+
+        Returns:
+            True if all sensors started successfully.
+        """
+        if cls._started:
+            return True  # already fully started — no-op
+
+        all_started = True
+        for sensor in cls._sensors.values():
+            if sensor.available and not sensor.start():  # unavailable sensors are skipped, not counted as failures
+                all_started = False
+
+        cls._started = all_started  # stays False on partial failure so start_all() can be retried
+        return all_started
+
+    @classmethod
+    def stop_all(cls) -> None:
+        """Stop all sensors."""
+        for sensor in cls._sensors.values():
+            sensor.stop()
+        cls._started = False
+
+    @classmethod
+    def read_all(cls) -> dict[str, float]:
+        """Read all sensor values.
+
+        Returns:
+            Dict mapping sensor name to current value.
+        """
+        result = {}
+        for name, sensor in cls._sensors.items():
+            value = sensor.read()
+            if value:  # drop sensors that returned None (not ready)
+                result[name] = value.value
+        return result
+
+
+class SensorStage:
+ """Pipeline stage wrapper for sensors.
+
+ Provides sensor data to the pipeline context.
+ Sensors don't transform data - they inject sensor values into context.
+ """
+
+ def __init__(self, sensor: Sensor, name: str | None = None):
+ self._sensor = sensor
+ self.name = name or sensor.name
+ self.category = "sensor"
+ self.optional = True
+
+ @property
+ def stage_type(self) -> str:
+ return "sensor"
+
+ @property
+ def inlet_types(self) -> set:
+ from sideline.pipeline.core import DataType
+
+ return {DataType.ANY}
+
+ @property
+ def outlet_types(self) -> set:
+ from sideline.pipeline.core import DataType
+
+ return {DataType.ANY}
+
+ @property
+ def capabilities(self) -> set[str]:
+ return {f"sensor.{self.name}"}
+
+ @property
+ def dependencies(self) -> set[str]:
+ return set()
+
+ def init(self, ctx: "PipelineContext") -> bool:
+ return self._sensor.start()
+
+ def process(self, data: Any, ctx: "PipelineContext") -> Any:
+ value = self._sensor.read()
+ if value:
+ ctx.set_state(f"sensor.{self.name}", value.value)
+ ctx.set_state(f"sensor.{self.name}.full", value)
+ return data
+
+ def cleanup(self) -> None:
+ self._sensor.stop()
+
+
+def create_sensor_stage(sensor: Sensor, name: str | None = None) -> SensorStage:
+ """Create a pipeline stage from a sensor."""
+ return SensorStage(sensor, name)
diff --git a/sideline/sensors/mic.py b/sideline/sensors/mic.py
new file mode 100644
index 0000000..89533b2
--- /dev/null
+++ b/sideline/sensors/mic.py
@@ -0,0 +1,145 @@
+"""
+Mic sensor - audio input as a pipeline sensor.
+
+Self-contained implementation that handles audio input directly,
+with graceful degradation if sounddevice is unavailable.
+"""
+
+import atexit
+import time
+from collections.abc import Callable
+from dataclasses import dataclass
+from datetime import datetime
+from typing import Any
+
+try:
+ import numpy as np
+ import sounddevice as sd
+
+ _HAS_AUDIO = True
+except Exception:
+ np = None # type: ignore
+ sd = None # type: ignore
+ _HAS_AUDIO = False
+
+
+from sideline.events import MicLevelEvent
+from sideline.sensors import Sensor, SensorRegistry, SensorValue
+
+
+@dataclass
+class AudioConfig:
+ """Configuration for audio input."""
+
+ threshold_db: float = 50.0
+ sample_rate: float = 44100.0
+ block_size: int = 1024
+
+
+class MicSensor(Sensor):
+    """Microphone sensor for pipeline integration.
+
+    Self-contained implementation with graceful degradation.
+    No external dependencies required - works with or without sounddevice.
+    """
+
+    def __init__(self, threshold_db: float = 50.0, name: str = "mic"):
+        self.name = name
+        self.unit = "dB"
+        self._config = AudioConfig(threshold_db=threshold_db)
+        self._db: float = -99.0  # last measured level; -99.0 sentinel = silence / no data yet
+        self._stream: Any = None
+        self._subscribers: list[Callable[[MicLevelEvent], None]] = []
+
+    @property
+    def available(self) -> bool:
+        """Check if audio input is available."""
+        return _HAS_AUDIO and self._stream is not None  # stream is only non-None after a successful start()
+
+    def start(self) -> bool:
+        """Start the microphone stream."""
+        if not _HAS_AUDIO or sd is None:
+            return False
+
+        try:
+            self._stream = sd.InputStream(
+                samplerate=self._config.sample_rate,
+                blocksize=self._config.block_size,
+                channels=1,
+                callback=self._audio_callback,
+            )
+            self._stream.start()
+            atexit.register(self.stop)  # stop() is idempotent, so repeat registrations across restarts are harmless
+            return True
+        except Exception:
+            return False
+
+    def stop(self) -> None:
+        """Stop the microphone stream."""
+        if self._stream:
+            try:
+                self._stream.stop()
+                self._stream.close()
+            except Exception:
+                pass  # best-effort teardown
+        self._stream = None
+
+    def _audio_callback(self, indata, frames, time_info, status) -> None:
+        """Process audio data from sounddevice."""
+        if not _HAS_AUDIO or np is None:
+            return
+
+        rms = np.sqrt(np.mean(indata**2))
+        if rms > 0:
+            db = 20 * np.log10(rms)  # dBFS: <= 0 for normalized float input (rms <= 1.0)
+        else:
+            db = -99.0
+
+        self._db = db
+
+        excess = max(0.0, db - self._config.threshold_db)  # NOTE(review): dBFS is <= 0, so with default threshold_db=50.0 this is always 0.0 — confirm intended scale
+        event = MicLevelEvent(
+            db_level=db, excess_above_threshold=excess, timestamp=datetime.now()
+        )
+        self._emit(event)
+
+    def _emit(self, event: MicLevelEvent) -> None:
+        """Emit event to all subscribers."""
+        for callback in self._subscribers:
+            try:
+                callback(event)
+            except Exception:
+                pass  # one bad subscriber must not break the audio callback
+
+    def subscribe(self, callback: Callable[[MicLevelEvent], None]) -> None:
+        """Subscribe to mic level events."""
+        if callback not in self._subscribers:
+            self._subscribers.append(callback)
+
+    def unsubscribe(self, callback: Callable[[MicLevelEvent], None]) -> None:
+        """Unsubscribe from mic level events."""
+        if callback in self._subscribers:
+            self._subscribers.remove(callback)
+
+    def read(self) -> SensorValue | None:
+        """Read current mic level as sensor value."""
+        if not self.available:
+            return None
+
+        excess = max(0.0, self._db - self._config.threshold_db)  # same scale caveat as in _audio_callback
+        return SensorValue(
+            sensor_name=self.name,
+            value=excess,
+            timestamp=time.time(),
+            unit=self.unit,
+        )
+
+
+def register_mic_sensor() -> None:
+ """Register the mic sensor with the global registry."""
+ sensor = MicSensor()
+ SensorRegistry.register(sensor)
+
+
+# Auto-register when imported
+register_mic_sensor()
diff --git a/sideline/sensors/oscillator.py b/sideline/sensors/oscillator.py
new file mode 100644
index 0000000..0901f7e
--- /dev/null
+++ b/sideline/sensors/oscillator.py
@@ -0,0 +1,161 @@
+"""
+Oscillator sensor - Modular synth-style oscillator as a pipeline sensor.
+
+Provides various waveforms that can be:
+1. Self-driving (phase accumulates over time)
+2. Sensor-driven (phase modulated by external sensor)
+
+Built-in waveforms:
+- sine: Pure sine wave
+- square: Square wave (0 to 1)
+- sawtooth: Rising sawtooth (0 to 1, wraps)
+- triangle: Triangle wave (0 to 1 to 0)
+- noise: Random values (0 to 1)
+
+Example usage:
+ osc = OscillatorSensor(waveform="sine", frequency=0.5)
+ # Or driven by mic sensor:
+ osc = OscillatorSensor(waveform="sine", frequency=1.0, input_sensor="mic")
+"""
+
+import math
+import random
+import time
+from enum import Enum
+
+from sideline.sensors import Sensor, SensorRegistry, SensorValue
+
+
+class Waveform(Enum):
+ """Built-in oscillator waveforms."""
+
+ SINE = "sine"
+ SQUARE = "square"
+ SAWTOOTH = "sawtooth"
+ TRIANGLE = "triangle"
+ NOISE = "noise"
+
+
+class OscillatorSensor(Sensor):
+    """Oscillator sensor that generates periodic or random values.
+
+    Can run in two modes:
+    - Self-driving: phase advances with elapsed time x frequency
+    - Sensor-driven: phase modulated by external sensor value
+    """
+
+    WAVEFORMS = {
+        "sine": lambda p: (math.sin(2 * math.pi * p) + 1) / 2,
+        "square": lambda p: 1.0 if (p % 1.0) < 0.5 else 0.0,
+        "sawtooth": lambda p: p % 1.0,
+        "triangle": lambda p: 1.0 - abs(2 * (p % 1.0) - 1),  # 0 -> 1 -> 0 per period (old form ranged -1..1 and clamped half the cycle to 0)
+        "noise": lambda _: random.random(),
+    }
+
+    def __init__(
+        self,
+        name: str = "osc",
+        waveform: str = "sine",
+        frequency: float = 1.0,
+        input_sensor: str | None = None,
+        input_scale: float = 1.0,
+    ):
+        """Initialize oscillator sensor.
+
+        Args:
+            name: Sensor name
+            waveform: Waveform type (sine, square, sawtooth, triangle, noise)
+            frequency: Frequency in Hz (self-driving mode)
+            input_sensor: Optional sensor name to drive phase
+            input_scale: Scale factor for input sensor
+        """
+        self.name = name
+        self.unit = ""
+        self._waveform = waveform
+        self._frequency = frequency
+        self._input_sensor = input_sensor
+        self._input_scale = input_scale
+        self._phase = 0.0
+        self._start_time = time.time()
+
+    @property
+    def available(self) -> bool:
+        return True
+
+    @property
+    def waveform(self) -> str:
+        return self._waveform
+
+    @waveform.setter
+    def waveform(self, value: str) -> None:
+        if value not in self.WAVEFORMS:
+            raise ValueError(f"Unknown waveform: {value}")
+        self._waveform = value
+
+    @property
+    def frequency(self) -> float:
+        return self._frequency
+
+    @frequency.setter
+    def frequency(self, value: float) -> None:
+        self._frequency = max(0.0, value)
+
+    def start(self) -> bool:
+        self._phase = 0.0
+        self._start_time = time.time()
+        return True
+
+    def stop(self) -> None:
+        pass
+
+    def _get_input_value(self) -> float:
+        """Get value from input sensor if configured."""
+        if self._input_sensor:
+            from sideline.sensors import SensorRegistry  # redundant (module already imports it) but harmless
+
+            sensor = SensorRegistry.get(self._input_sensor)
+            if sensor:
+                reading = sensor.read()
+                if reading:
+                    return reading.value * self._input_scale
+        return 0.0
+
+    def read(self) -> SensorValue | None:
+        current_time = time.time()
+        elapsed = current_time - self._start_time
+
+        if self._input_sensor:
+            input_val = self._get_input_value()
+            phase = (self._frequency * elapsed) + input_val
+        else:
+            phase = self._frequency * elapsed
+
+        self._phase = phase  # absolute phase from elapsed time (old `+=` added total elapsed on every read, growing quadratically)
+
+        waveform_fn = self.WAVEFORMS.get(self._waveform)
+        if waveform_fn is None:
+            return None
+
+        value = waveform_fn(self._phase)
+        value = max(0.0, min(1.0, value))
+
+        return SensorValue(
+            sensor_name=self.name,
+            value=value,
+            timestamp=current_time,
+            unit=self.unit,
+        )
+
+    def set_waveform(self, waveform: str) -> None:
+        """Change waveform at runtime."""
+        self.waveform = waveform
+
+    def set_frequency(self, frequency: float) -> None:
+        """Change frequency at runtime."""
+        self.frequency = frequency
+
+
+def register_oscillator_sensor(name: str = "osc", **kwargs) -> None:
+ """Register an oscillator sensor with the global registry."""
+ sensor = OscillatorSensor(name=name, **kwargs)
+ SensorRegistry.register(sensor)
diff --git a/sideline/sensors/pipeline_metrics.py b/sideline/sensors/pipeline_metrics.py
new file mode 100644
index 0000000..0992cc8
--- /dev/null
+++ b/sideline/sensors/pipeline_metrics.py
@@ -0,0 +1,114 @@
+"""
+Pipeline metrics sensor - Exposes pipeline performance data as sensor values.
+
+This sensor reads metrics from a Pipeline instance and provides them
+as sensor values that can drive effect parameters.
+
+Example:
+ sensor = PipelineMetricsSensor(pipeline)
+ sensor.read() # Returns SensorValue with total_ms, fps, etc.
+"""
+
+from typing import TYPE_CHECKING
+
+from sideline.sensors import Sensor, SensorValue
+
+if TYPE_CHECKING:
+ from sideline.pipeline.controller import Pipeline
+
+
+class PipelineMetricsSensor(Sensor):
+    """Sensor that reads metrics from a Pipeline instance.
+
+    Provides real-time performance data:
+    - total_ms: Total frame time in milliseconds
+    - fps: Calculated frames per second
+    - stage_timings: Dict of stage name -> duration_ms
+
+    Can be bound to effect parameters for reactive visuals.
+    """
+
+    def __init__(self, pipeline: "Pipeline | None" = None, name: str = "pipeline"):
+        self._pipeline = pipeline
+        self.name = name
+        self.unit = "ms"
+        self._last_values: dict[str, float] = {
+            "total_ms": 0.0,
+            "fps": 0.0,
+            "avg_ms": 0.0,
+            "min_ms": 0.0,
+            "max_ms": 0.0,
+        }
+
+    @property
+    def available(self) -> bool:
+        return self._pipeline is not None
+
+    def set_pipeline(self, pipeline: "Pipeline") -> None:
+        """Set or update the pipeline to read metrics from."""
+        self._pipeline = pipeline
+
+    def read(self) -> SensorValue | None:
+        """Read current metrics from the pipeline."""
+        if not self._pipeline:
+            return None
+
+        try:
+            metrics = self._pipeline.get_metrics_summary()
+        except Exception:
+            return None
+
+        if not metrics or "error" in metrics:  # a summary carrying an "error" key has no usable data
+            return None
+
+        self._last_values["total_ms"] = metrics.get("total_ms", 0.0)
+        self._last_values["fps"] = metrics.get("fps", 0.0)
+        self._last_values["avg_ms"] = metrics.get("avg_ms", 0.0)
+        self._last_values["min_ms"] = metrics.get("min_ms", 0.0)
+        self._last_values["max_ms"] = metrics.get("max_ms", 0.0)
+
+        # Provide total_ms as primary value (for LFO-style effects)
+        return SensorValue(
+            sensor_name=self.name,
+            value=self._last_values["total_ms"],
+            timestamp=0.0,  # NOTE(review): hard-coded 0.0 — consumers get no real timestamp
+            unit=self.unit,
+        )
+
+    def get_stage_timing(self, stage_name: str) -> float:
+        """Get timing for a specific stage."""
+        if not self._pipeline:
+            return 0.0
+        try:
+            metrics = self._pipeline.get_metrics_summary()
+            stages = metrics.get("stages", {})
+            return stages.get(stage_name, {}).get("avg_ms", 0.0)
+        except Exception:
+            return 0.0
+
+    def get_all_timings(self) -> dict[str, dict]:  # values are per-stage metric dicts (get_stage_timing reads their "avg_ms")
+        """Get all stage timings as a dict."""
+        if not self._pipeline:
+            return {}
+        try:
+            metrics = self._pipeline.get_metrics_summary()
+            return metrics.get("stages", {})
+        except Exception:
+            return {}
+
+    def get_frame_history(self) -> list[float]:
+        """Get historical frame times for sparklines."""
+        if not self._pipeline:
+            return []
+        try:
+            return self._pipeline.get_frame_times()
+        except Exception:
+            return []
+
+    def start(self) -> bool:
+        """Start the sensor (no-op for read-only metrics)."""
+        return True
+
+    def stop(self) -> None:
+        """Stop the sensor (no-op for read-only metrics)."""
+        pass
diff --git a/sideline/terminal.py b/sideline/terminal.py
new file mode 100644
index 0000000..b486693
--- /dev/null
+++ b/sideline/terminal.py
@@ -0,0 +1,108 @@
+"""
+ANSI escape codes and terminal control constants.
+
+Provides standard ANSI escape sequences for terminal manipulation.
+This module belongs in Sideline as it's a framework-level concern.
+"""
+
+# ─── ANSI RESET ──────────────────────────────────────────
+RST = "\033[0m"
+
+# ─── TEXT STYLES ─────────────────────────────────────────
+BOLD = "\033[1m"
+DIM = "\033[2m"
+UNDERLINE = "\033[4m"
+REVERSE = "\033[7m"
+
+# ─── MATRIX GREENS (Sideline default theme) ─────────────
+G_HI = "\033[38;5;46m" # Bright green
+G_MID = "\033[38;5;34m" # Medium green
+G_LO = "\033[38;5;22m" # Dark green
+G_DIM = "\033[2;38;5;34m" # Dim green
+
+# ─── COOL TONES ──────────────────────────────────────────
+W_COOL = "\033[38;5;250m" # Cool white
+W_DIM = "\033[2;38;5;245m" # Dim white
+W_GHOST = "\033[2;38;5;238m" # Ghost white
+C_DIM = "\033[2;38;5;37m" # Dim cyan
+
+# ─── TERMINAL CONTROL ────────────────────────────────────
+CLR = "\033[2J\033[H" # Clear screen and home cursor
+CURSOR_OFF = "\033[?25l" # Hide cursor
+CURSOR_ON = "\033[?25h" # Show cursor
+
+
+# ─── CURSOR POSITIONING ──────────────────────────────────
+def cursor_pos(row: int, col: int) -> str:
+    """Move cursor to position (row, col)."""
+    return f"\033[{row};{col}H"  # ANSI CUP — coordinates are 1-based, origin top-left
+
+
+# ─── COLOR UTILITIES ─────────────────────────────────────
+def fg_color(code: int) -> str:
+    """Set foreground color (0-255)."""
+    return f"\033[38;5;{code}m"  # SGR 38;5 — 256-color palette foreground
+
+
+def bg_color(code: int) -> str:
+    """Set background color (0-255)."""
+    return f"\033[48;5;{code}m"  # SGR 48;5 — 256-color palette background
+
+
+# ─── COMMON COLORS ───────────────────────────────────────
+BLACK = "\033[30m"
+RED = "\033[31m"
+GREEN = "\033[32m"
+YELLOW = "\033[33m"
+BLUE = "\033[34m"
+MAGENTA = "\033[35m"
+CYAN = "\033[36m"
+WHITE = "\033[37m"
+
+# ─── BRIGHT COLORS ───────────────────────────────────────
+BRIGHT_BLACK = "\033[90m"
+BRIGHT_RED = "\033[91m"
+BRIGHT_GREEN = "\033[92m"
+BRIGHT_YELLOW = "\033[93m"
+BRIGHT_BLUE = "\033[94m"
+BRIGHT_MAGENTA = "\033[95m"
+BRIGHT_CYAN = "\033[96m"
+BRIGHT_WHITE = "\033[97m"
+
+__all__ = [
+ "RST",
+ "BOLD",
+ "DIM",
+ "UNDERLINE",
+ "REVERSE",
+ "G_HI",
+ "G_MID",
+ "G_LO",
+ "G_DIM",
+ "W_COOL",
+ "W_DIM",
+ "W_GHOST",
+ "C_DIM",
+ "CLR",
+ "CURSOR_OFF",
+ "CURSOR_ON",
+ "cursor_pos",
+ "fg_color",
+ "bg_color",
+ "BLACK",
+ "RED",
+ "GREEN",
+ "YELLOW",
+ "BLUE",
+ "MAGENTA",
+ "CYAN",
+ "WHITE",
+ "BRIGHT_BLACK",
+ "BRIGHT_RED",
+ "BRIGHT_GREEN",
+ "BRIGHT_YELLOW",
+ "BRIGHT_BLUE",
+ "BRIGHT_MAGENTA",
+ "BRIGHT_CYAN",
+ "BRIGHT_WHITE",
+]
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index c8b86c1..1e34287 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -29,6 +29,14 @@ class TestStageRegistry:
def test_discover_stages_registers_sources(self):
"""discover_stages registers source stages."""
+ # Register Mainline plugins first
+ try:
+ from engine.plugins import register_stages
+
+ register_stages(StageRegistry)
+ except ImportError:
+ pass
+
discover_stages()
sources = StageRegistry.list("source")
@@ -48,6 +56,14 @@ class TestStageRegistry:
def test_create_source_stage(self):
"""StageRegistry.create creates source stages."""
+ # Register Mainline plugins first
+ try:
+ from engine.plugins import register_stages
+
+ register_stages(StageRegistry)
+ except ImportError:
+ pass
+
discover_stages()
source = StageRegistry.create("source", "HeadlinesDataSource")